author     Chanho Park <chanho61.park@samsung.com>  2014-08-19 19:35:08 +0900
committer  Chanho Park <chanho61.park@samsung.com>  2014-08-19 19:35:08 +0900
commit     f153198bebe97a3d7576aefcec4841bc04b45ddc (patch)
tree       fdc7119511266a613744d336ee58a2564f008d28
parent     bed873b4f7b3c21d12bf296eb3a3dbea1bc1dda0 (diff)
download   python-f153198bebe97a3d7576aefcec4841bc04b45ddc.tar.gz
           python-f153198bebe97a3d7576aefcec4841bc04b45ddc.tar.bz2
           python-f153198bebe97a3d7576aefcec4841bc04b45ddc.zip

Imported Upstream version 2.7.8 (upstream/2.7.8)
-rw-r--r--.hg_archival.txt4
-rw-r--r--.hgtags154
-rw-r--r--[-rwxr-xr-x]Demo/comparisons/patterns0
-rwxr-xr-x[-rw-r--r--]Demo/curses/ncurses.py0
-rwxr-xr-x[-rw-r--r--]Demo/curses/rain.py0
-rwxr-xr-x[-rw-r--r--]Demo/curses/tclock.py0
-rw-r--r--[-rwxr-xr-x]Demo/md5test/foo0
-rw-r--r--Demo/newmetaclasses/Eiffel.py2
-rw-r--r--[-rwxr-xr-x]Demo/scripts/newslist.doc0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/about.xpm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/bold.xbm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/capital.xbm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/centerj.xbm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/combobox.xbm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/combobox.xpm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/combobox.xpm.10
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/drivea.xbm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/drivea.xpm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/exit.xpm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/filebox.xbm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/filebox.xpm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/italic.xbm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/justify.xbm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/leftj.xbm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/netw.xbm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/netw.xpm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/optmenu.xpm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/rightj.xbm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/select.xpm0
-rw-r--r--[-rwxr-xr-x]Demo/tix/bitmaps/underline.xbm0
-rwxr-xr-x[-rw-r--r--]Demo/tkinter/guido/canvasevents.py0
-rwxr-xr-x[-rw-r--r--]Demo/tkinter/guido/newmenubardemo.py0
-rwxr-xr-x[-rw-r--r--]Demo/tkinter/guido/sortvisu.py0
-rw-r--r--Demo/turtle/demohelp.txt44
-rwxr-xr-x[-rw-r--r--]Demo/turtle/tdemo_I_dontlike_tiltdemo.py0
-rwxr-xr-x[-rw-r--r--]Demo/turtle/tdemo_bytedesign.py0
-rwxr-xr-x[-rw-r--r--]Demo/turtle/tdemo_clock.py36
-rwxr-xr-x[-rw-r--r--]Demo/turtle/tdemo_fractalcurves.py0
-rwxr-xr-x[-rw-r--r--]Demo/turtle/tdemo_lindenmayer_indian.py0
-rwxr-xr-x[-rw-r--r--]Demo/turtle/tdemo_minimal_hanoi.py10
-rwxr-xr-x[-rw-r--r--]Demo/turtle/tdemo_paint.py12
-rwxr-xr-x[-rw-r--r--]Demo/turtle/tdemo_peace.py16
-rwxr-xr-x[-rw-r--r--]Demo/turtle/tdemo_penrose.py0
-rwxr-xr-x[-rw-r--r--]Demo/turtle/tdemo_planet_and_moon.py9
-rwxr-xr-x[-rw-r--r--]Demo/turtle/tdemo_tree.py6
-rwxr-xr-x[-rw-r--r--]Demo/turtle/tdemo_yinyang.py0
-rwxr-xr-x[-rw-r--r--]Demo/turtle/turtleDemo.py26
-rwxr-xr-x[-rw-r--r--]Demo/turtle/turtledemo_two_canvases.py0
-rw-r--r--Doc/ACKS.txt230
-rw-r--r--Doc/Makefile13
-rw-r--r--Doc/README.txt48
-rw-r--r--Doc/about.rst19
-rw-r--r--Doc/bugs.rst30
-rw-r--r--Doc/c-api/allocation.rst6
-rw-r--r--Doc/c-api/buffer.rst8
-rw-r--r--Doc/c-api/codec.rst8
-rw-r--r--Doc/c-api/dict.rst7
-rw-r--r--Doc/c-api/exceptions.rst36
-rw-r--r--Doc/c-api/file.rst3
-rw-r--r--Doc/c-api/gcsupport.rst28
-rw-r--r--Doc/c-api/index.rst3
-rw-r--r--Doc/c-api/init.rst56
-rw-r--r--Doc/c-api/intro.rst6
-rw-r--r--Doc/c-api/iter.rst11
-rw-r--r--Doc/c-api/memory.rst6
-rw-r--r--Doc/c-api/objbuffer.rst2
-rw-r--r--Doc/c-api/object.rst20
-rw-r--r--Doc/c-api/sequence.rst8
-rw-r--r--Doc/c-api/set.rst2
-rw-r--r--Doc/c-api/string.rst2
-rw-r--r--Doc/c-api/structures.rst4
-rw-r--r--Doc/c-api/typeobj.rst293
-rw-r--r--Doc/c-api/unicode.rst8
-rw-r--r--Doc/c-api/veryhigh.rst2
-rw-r--r--Doc/copyright.rst2
-rw-r--r--Doc/data/refcounts.dat2
-rw-r--r--Doc/distutils/apiref.rst92
-rw-r--r--Doc/distutils/configfile.rst2
-rw-r--r--Doc/distutils/examples.rst3
-rw-r--r--Doc/distutils/index.rst13
-rw-r--r--Doc/distutils/packageindex.rst129
-rw-r--r--Doc/distutils/setupscript.rst33
-rw-r--r--Doc/distutils/sourcedist.rst3
-rw-r--r--Doc/distutils/uploading.rst74
-rw-r--r--Doc/extending/building.rst5
-rw-r--r--Doc/extending/embedding.rst93
-rw-r--r--Doc/extending/extending.rst10
-rw-r--r--Doc/extending/index.rst24
-rw-r--r--Doc/extending/newtypes.rst111
-rw-r--r--Doc/faq/design.rst36
-rw-r--r--Doc/faq/extending.rst4
-rw-r--r--Doc/faq/general.rst22
-rw-r--r--Doc/faq/gui.rst34
-rw-r--r--Doc/faq/index.rst5
-rw-r--r--Doc/faq/library.rst122
-rw-r--r--Doc/faq/programming.rst270
-rw-r--r--Doc/faq/windows.rst338
-rw-r--r--Doc/glossary.rst165
-rw-r--r--Doc/howto/advocacy.rst356
-rw-r--r--Doc/howto/argparse.rst764
-rw-r--r--Doc/howto/cporting.rst96
-rw-r--r--Doc/howto/curses.rst32
-rw-r--r--Doc/howto/descriptor.rst23
-rw-r--r--Doc/howto/functional.rst11
-rw-r--r--Doc/howto/index.rst2
-rw-r--r--Doc/howto/logging-cookbook.rst487
-rw-r--r--Doc/howto/logging.rst142
-rwxr-xr-xDoc/howto/logging_flow.pngbin0 -> 49648 bytes
-rw-r--r--Doc/howto/pyporting.rst646
-rw-r--r--Doc/howto/regex.rst81
-rw-r--r--Doc/howto/sockets.rst30
-rw-r--r--Doc/howto/sorting.rst6
-rw-r--r--Doc/howto/unicode.rst38
-rw-r--r--Doc/howto/urllib2.rst56
-rw-r--r--Doc/howto/webservers.rst2
-rw-r--r--Doc/includes/email-unpack.py2
-rw-r--r--Doc/includes/sqlite3/complete_statement.py2
-rw-r--r--Doc/includes/sqlite3/execute_1.py11
-rw-r--r--Doc/includes/sqlite3/execute_2.py12
-rw-r--r--Doc/includes/sqlite3/executemany_2.py4
-rw-r--r--Doc/includes/sqlite3/rowclass.py8
-rw-r--r--Doc/includes/sqlite3/text_factory.py11
-rw-r--r--Doc/install/index.rst30
-rw-r--r--Doc/library/2to3.rst13
-rw-r--r--Doc/library/__future__.rst2
-rw-r--r--Doc/library/_winreg.rst4
-rw-r--r--Doc/library/abc.rst18
-rw-r--r--Doc/library/aifc.rst4
-rw-r--r--Doc/library/al.rst4
-rw-r--r--Doc/library/anydbm.rst12
-rw-r--r--Doc/library/archiving.rst1
-rw-r--r--Doc/library/argparse.rst553
-rw-r--r--Doc/library/array.rst6
-rw-r--r--Doc/library/ast.rst10
-rw-r--r--Doc/library/asyncore.rst13
-rw-r--r--Doc/library/atexit.rst17
-rw-r--r--Doc/library/audioop.rst20
-rw-r--r--Doc/library/base64.rst2
-rw-r--r--Doc/library/basehttpserver.rst6
-rw-r--r--Doc/library/bastion.rst2
-rw-r--r--Doc/library/bdb.rst12
-rw-r--r--Doc/library/binascii.rst2
-rw-r--r--Doc/library/bisect.rst6
-rw-r--r--Doc/library/bsddb.rst8
-rw-r--r--Doc/library/bz2.rst8
-rw-r--r--Doc/library/calendar.rst9
-rw-r--r--Doc/library/carbon.rst7
-rw-r--r--Doc/library/cd.rst2
-rw-r--r--Doc/library/cgi.rst39
-rw-r--r--Doc/library/cgihttpserver.rst4
-rw-r--r--Doc/library/chunk.rst5
-rw-r--r--Doc/library/code.rst10
-rw-r--r--Doc/library/codecs.rst233
-rw-r--r--Doc/library/collections.rst129
-rw-r--r--Doc/library/colorsys.rst8
-rw-r--r--Doc/library/commands.rst2
-rw-r--r--Doc/library/compileall.rst2
-rw-r--r--Doc/library/compiler.rst6
-rw-r--r--Doc/library/configparser.rst18
-rw-r--r--Doc/library/cookie.rst14
-rw-r--r--Doc/library/cookielib.rst22
-rw-r--r--Doc/library/copy_reg.rst35
-rw-r--r--Doc/library/csv.rst118
-rw-r--r--Doc/library/ctypes.rst46
-rw-r--r--Doc/library/curses.rst46
-rw-r--r--Doc/library/datetime.rst395
-rw-r--r--Doc/library/dbhash.rst2
-rw-r--r--Doc/library/dbm.rst12
-rw-r--r--Doc/library/decimal.rst68
-rw-r--r--Doc/library/difflib.rst16
-rw-r--r--Doc/library/dircache.rst2
-rw-r--r--Doc/library/dl.rst2
-rw-r--r--Doc/library/doctest.rst108
-rw-r--r--Doc/library/docxmlrpcserver.rst4
-rw-r--r--Doc/library/dumbdbm.rst12
-rw-r--r--Doc/library/dummy_thread.rst4
-rw-r--r--Doc/library/email.charset.rst6
-rw-r--r--Doc/library/email.encoders.rst8
-rw-r--r--Doc/library/email.errors.rst33
-rw-r--r--Doc/library/email.generator.rst20
-rw-r--r--Doc/library/email.header.rst6
-rw-r--r--Doc/library/email.iterators.rst15
-rw-r--r--Doc/library/email.message.rst17
-rw-r--r--Doc/library/email.mime.rst42
-rw-r--r--Doc/library/email.parser.rst50
-rw-r--r--Doc/library/email.rst193
-rw-r--r--Doc/library/email.util.rst28
-rw-r--r--Doc/library/exceptions.rst45
-rw-r--r--Doc/library/fcntl.rst51
-rw-r--r--Doc/library/filecmp.rst40
-rw-r--r--Doc/library/fileinput.rst15
-rw-r--r--Doc/library/fl.rst6
-rw-r--r--Doc/library/fm.rst2
-rw-r--r--Doc/library/fnmatch.rst5
-rw-r--r--Doc/library/formatter.rst4
-rw-r--r--Doc/library/fpectl.rst6
-rw-r--r--Doc/library/fpformat.rst2
-rw-r--r--Doc/library/fractions.rst1
-rw-r--r--Doc/library/ftplib.rst38
-rw-r--r--Doc/library/functions.rst337
-rw-r--r--Doc/library/future_builtins.rst5
-rw-r--r--Doc/library/gc.rst8
-rw-r--r--Doc/library/gdbm.rst9
-rw-r--r--Doc/library/getopt.rst3
-rw-r--r--Doc/library/gl.rst6
-rw-r--r--Doc/library/glob.rst18
-rw-r--r--Doc/library/gzip.rst29
-rw-r--r--Doc/library/hashlib.rst47
-rw-r--r--Doc/library/heapq.rst2
-rw-r--r--Doc/library/hmac.rst35
-rw-r--r--Doc/library/htmllib.rst6
-rw-r--r--Doc/library/httplib.rst23
-rw-r--r--Doc/library/idle.rst74
-rw-r--r--Doc/library/imageop.rst2
-rw-r--r--Doc/library/imaplib.rst5
-rw-r--r--Doc/library/imgfile.rst2
-rw-r--r--Doc/library/imghdr.rst2
-rw-r--r--Doc/library/imp.rst13
-rw-r--r--Doc/library/imputil.rst2
-rw-r--r--Doc/library/index.rst3
-rw-r--r--Doc/library/io.rst69
-rw-r--r--Doc/library/itertools.rst29
-rw-r--r--Doc/library/jpeg.rst2
-rw-r--r--Doc/library/json.rst219
-rw-r--r--Doc/library/locale.rst18
-rw-r--r--Doc/library/logging.config.rst46
-rw-r--r--Doc/library/logging.handlers.rst49
-rw-r--r--Doc/library/logging.rst168
-rw-r--r--Doc/library/mac.rst5
-rw-r--r--Doc/library/macosa.rst3
-rw-r--r--Doc/library/macostools.rst2
-rw-r--r--Doc/library/mailbox.rst46
-rw-r--r--Doc/library/mailcap.rst4
-rw-r--r--Doc/library/markup.rst6
-rw-r--r--Doc/library/marshal.rst5
-rw-r--r--Doc/library/math.rst12
-rw-r--r--Doc/library/mhlib.rst2
-rw-r--r--Doc/library/mimetypes.rst3
-rw-r--r--Doc/library/mimewriter.rst2
-rw-r--r--Doc/library/mmap.rst29
-rw-r--r--Doc/library/msilib.rst5
-rw-r--r--Doc/library/multiprocessing.rst154
-rw-r--r--Doc/library/mutex.rst2
-rw-r--r--Doc/library/netrc.rst8
-rw-r--r--Doc/library/new.rst2
-rw-r--r--Doc/library/nntplib.rst16
-rw-r--r--Doc/library/numbers.rst8
-rw-r--r--Doc/library/operator.rst49
-rw-r--r--Doc/library/optparse.rst21
-rw-r--r--Doc/library/os.path.rst56
-rw-r--r--Doc/library/os.rst117
-rw-r--r--Doc/library/ossaudiodev.rst13
-rw-r--r--Doc/library/othergui.rst3
-rw-r--r--Doc/library/parser.rst4
-rw-r--r--Doc/library/pickle.rst64
-rw-r--r--Doc/library/pickletools.rst2
-rw-r--r--Doc/library/pipes.rst45
-rw-r--r--Doc/library/platform.rst4
-rw-r--r--Doc/library/plistlib.rst4
-rw-r--r--Doc/library/poplib.rst4
-rw-r--r--Doc/library/posix.rst9
-rw-r--r--Doc/library/posixfile.rst2
-rw-r--r--Doc/library/pprint.rst13
-rw-r--r--Doc/library/profile.rst735
-rw-r--r--Doc/library/pyclbr.rst4
-rw-r--r--Doc/library/pyexpat.rst20
-rw-r--r--Doc/library/queue.rst14
-rw-r--r--Doc/library/random.rst12
-rw-r--r--Doc/library/re.rst228
-rw-r--r--Doc/library/repr.rst13
-rw-r--r--Doc/library/resource.rst21
-rw-r--r--Doc/library/rexec.rst4
-rw-r--r--Doc/library/rfc822.rst2
-rw-r--r--Doc/library/robotparser.rst10
-rw-r--r--Doc/library/runpy.rst6
-rw-r--r--Doc/library/scrolledtext.rst4
-rw-r--r--Doc/library/select.rst16
-rw-r--r--Doc/library/sgmllib.rst2
-rw-r--r--Doc/library/shelve.rst16
-rw-r--r--Doc/library/shlex.rst110
-rw-r--r--Doc/library/shutil.rst28
-rw-r--r--Doc/library/simplehttpserver.rst4
-rw-r--r--Doc/library/simplexmlrpcserver.rst38
-rw-r--r--Doc/library/site.rst12
-rw-r--r--Doc/library/smtplib.rst66
-rw-r--r--Doc/library/socket.rst58
-rw-r--r--Doc/library/socketserver.rst22
-rw-r--r--Doc/library/sqlite3.rst538
-rw-r--r--Doc/library/ssl.rst90
-rw-r--r--Doc/library/stat.rst8
-rw-r--r--Doc/library/statvfs.rst2
-rw-r--r--Doc/library/stdtypes.rst250
-rw-r--r--Doc/library/string.rst70
-rw-r--r--Doc/library/stringprep.rst1
-rw-r--r--Doc/library/struct.rst4
-rw-r--r--Doc/library/subprocess.rst242
-rw-r--r--Doc/library/sunaudio.rst4
-rw-r--r--Doc/library/sys.rst19
-rw-r--r--Doc/library/sysconfig.rst2
-rw-r--r--Doc/library/syslog.rst6
-rw-r--r--Doc/library/tarfile.rst13
-rw-r--r--Doc/library/telnetlib.rst4
-rw-r--r--Doc/library/tempfile.rst5
-rw-r--r--Doc/library/test.rst10
-rw-r--r--Doc/library/textwrap.rst27
-rw-r--r--Doc/library/thread.rst4
-rw-r--r--Doc/library/threading.rst83
-rw-r--r--Doc/library/time.rst36
-rw-r--r--Doc/library/timeit.rst286
-rw-r--r--Doc/library/tix.rst6
-rw-r--r--Doc/library/tkinter.rst32
-rw-r--r--Doc/library/tokenize.rst28
-rw-r--r--Doc/library/trace.rst10
-rw-r--r--Doc/library/traceback.rst2
-rw-r--r--Doc/library/ttk.rst32
-rw-r--r--Doc/library/turtle.rst10
-rw-r--r--Doc/library/unicodedata.rst2
-rw-r--r--Doc/library/unittest.rst118
-rw-r--r--Doc/library/urllib.rst39
-rw-r--r--Doc/library/urllib2.rst39
-rw-r--r--Doc/library/urlparse.rst18
-rw-r--r--Doc/library/user.rst2
-rw-r--r--Doc/library/userdict.rst12
-rw-r--r--Doc/library/warnings.rst7
-rw-r--r--Doc/library/weakref.rst10
-rw-r--r--Doc/library/webbrowser.rst15
-rw-r--r--Doc/library/whichdb.rst4
-rw-r--r--Doc/library/winsound.rst4
-rw-r--r--Doc/library/wsgiref.rst10
-rw-r--r--Doc/library/xdrlib.rst3
-rw-r--r--Doc/library/xml.dom.minidom.rst28
-rw-r--r--Doc/library/xml.dom.pulldom.rst7
-rw-r--r--Doc/library/xml.dom.rst11
-rw-r--r--Doc/library/xml.etree.elementtree.rst356
-rw-r--r--Doc/library/xml.rst136
-rw-r--r--Doc/library/xml.sax.handler.rst11
-rw-r--r--Doc/library/xml.sax.reader.rst32
-rw-r--r--Doc/library/xml.sax.rst43
-rw-r--r--Doc/library/xml.sax.utils.rst17
-rw-r--r--Doc/library/xmlrpclib.rst17
-rw-r--r--Doc/library/zipfile.rst47
-rw-r--r--Doc/library/zipimport.rst12
-rw-r--r--Doc/library/zlib.rst32
-rw-r--r--Doc/license.rst62
-rw-r--r--Doc/make.bat2
-rw-r--r--Doc/reference/compound_stmts.rst66
-rw-r--r--Doc/reference/datamodel.rst113
-rw-r--r--Doc/reference/expressions.rst81
-rw-r--r--Doc/reference/index.rst3
-rw-r--r--Doc/reference/lexical_analysis.rst7
-rw-r--r--Doc/reference/simple_stmts.rst48
-rwxr-xr-xDoc/tools/dailybuild.py94
-rw-r--r--Doc/tools/sphinx-build.py8
-rw-r--r--Doc/tools/sphinxext/download.html6
-rw-r--r--Doc/tools/sphinxext/indexsidebar.html38
-rw-r--r--Doc/tools/sphinxext/layout.html21
-rw-r--r--Doc/tools/sphinxext/pyspecific.py66
-rw-r--r--Doc/tools/sphinxext/static/basic.css19
-rw-r--r--Doc/tools/sphinxext/static/sidebar.js186
-rw-r--r--Doc/tools/sphinxext/static/version_switch.js67
-rw-r--r--Doc/tools/sphinxext/susp-ignored.csv106
-rw-r--r--Doc/tools/sphinxext/suspicious.py11
-rw-r--r--Doc/tutorial/classes.rst86
-rw-r--r--Doc/tutorial/controlflow.rst86
-rw-r--r--Doc/tutorial/datastructures.rst108
-rw-r--r--Doc/tutorial/errors.rst18
-rw-r--r--Doc/tutorial/index.rst3
-rw-r--r--Doc/tutorial/inputoutput.rst110
-rw-r--r--Doc/tutorial/interpreter.rst6
-rw-r--r--Doc/tutorial/introduction.rst3
-rw-r--r--Doc/tutorial/modules.rst74
-rw-r--r--Doc/tutorial/stdlib.rst8
-rw-r--r--Doc/tutorial/stdlib2.rst74
-rw-r--r--Doc/tutorial/whatnow.rst5
-rw-r--r--Doc/using/cmdline.rst24
-rw-r--r--Doc/using/mac.rst28
-rw-r--r--Doc/using/unix.rst4
-rw-r--r--Doc/using/windows.rst2
-rw-r--r--Doc/whatsnew/2.2.rst6
-rw-r--r--Doc/whatsnew/2.3.rst15
-rw-r--r--Doc/whatsnew/2.4.rst7
-rw-r--r--Doc/whatsnew/2.5.rst16
-rw-r--r--Doc/whatsnew/2.6.rst13
-rw-r--r--Doc/whatsnew/2.7.rst174
-rw-r--r--Include/abstract.h2
-rw-r--r--Include/cStringIO.h4
-rw-r--r--Include/datetime.h2
-rw-r--r--Include/import.h4
-rw-r--r--Include/intobject.h1
-rw-r--r--Include/listobject.h2
-rw-r--r--Include/longobject.h1
-rw-r--r--Include/node.h3
-rw-r--r--Include/object.h33
-rw-r--r--Include/osdefs.h8
-rw-r--r--Include/patchlevel.h6
-rw-r--r--Include/pyfpe.h8
-rw-r--r--Include/pyport.h55
-rw-r--r--Include/pystate.h3
-rw-r--r--Include/pythonrun.h2
-rw-r--r--Include/sysmodule.h3
-rw-r--r--Include/timefuncs.h3
-rw-r--r--Include/weakrefobject.h15
-rw-r--r--LICENSE31
-rw-r--r--Lib/BaseHTTPServer.py6
-rw-r--r--Lib/CGIHTTPServer.py61
-rw-r--r--Lib/Cookie.py9
-rw-r--r--Lib/HTMLParser.py21
-rw-r--r--Lib/Queue.py8
-rw-r--r--Lib/SimpleHTTPServer.py28
-rw-r--r--Lib/SimpleXMLRPCServer.py3
-rw-r--r--Lib/SocketServer.py80
-rw-r--r--Lib/StringIO.py2
-rw-r--r--Lib/_LWPCookieJar.py4
-rw-r--r--Lib/_MozillaCookieJar.py2
-rw-r--r--Lib/__future__.py2
-rw-r--r--Lib/_abcoll.py93
-rw-r--r--Lib/_osx_support.py502
-rw-r--r--Lib/_pyio.py99
-rw-r--r--Lib/_strptime.py17
-rw-r--r--Lib/_weakrefset.py70
-rw-r--r--Lib/aifc.py125
-rw-r--r--Lib/argparse.py56
-rw-r--r--Lib/asyncore.py20
-rw-r--r--Lib/bdb.py15
-rw-r--r--Lib/bsddb/__init__.py6
-rw-r--r--Lib/bsddb/dbobj.py9
-rw-r--r--Lib/bsddb/dbshelve.py33
-rw-r--r--Lib/bsddb/dbtables.py4
-rw-r--r--Lib/bsddb/test/test_all.py9
-rw-r--r--Lib/bsddb/test/test_basics.py50
-rw-r--r--Lib/bsddb/test/test_compare.py394
-rw-r--r--Lib/bsddb/test/test_db.py145
-rw-r--r--Lib/bsddb/test/test_dbenv.py70
-rw-r--r--Lib/bsddb/test/test_dbshelve.py7
-rw-r--r--Lib/bsddb/test/test_dbtables.py2
-rw-r--r--Lib/bsddb/test/test_distributed_transactions.py21
-rw-r--r--Lib/bsddb/test/test_early_close.py21
-rw-r--r--Lib/bsddb/test/test_lock.py7
-rw-r--r--Lib/bsddb/test/test_misc.py11
-rw-r--r--Lib/bsddb/test/test_queue.py5
-rw-r--r--Lib/bsddb/test/test_recno.py26
-rw-r--r--Lib/bsddb/test/test_replication.py86
-rw-r--r--Lib/bsddb/test/test_sequence.py8
-rw-r--r--Lib/bsddb/test/test_thread.py4
-rw-r--r--Lib/calendar.py9
-rwxr-xr-xLib/cgi.py10
-rw-r--r--Lib/cgitb.py11
-rw-r--r--Lib/cmd.py1
-rw-r--r--Lib/codecs.py13
-rw-r--r--Lib/collections.py200
-rw-r--r--Lib/compiler/consts.py2
-rw-r--r--Lib/compiler/pyassem.py2
-rw-r--r--Lib/compiler/pycodegen.py4
-rw-r--r--Lib/compiler/symbols.py4
-rw-r--r--Lib/cookielib.py2
-rw-r--r--Lib/csv.py13
-rw-r--r--Lib/ctypes/test/__init__.py12
-rw-r--r--Lib/ctypes/test/runtests.py2
-rw-r--r--Lib/ctypes/test/test_arrays.py32
-rw-r--r--Lib/ctypes/test/test_as_parameter.py8
-rw-r--r--Lib/ctypes/test/test_bitfields.py37
-rw-r--r--Lib/ctypes/test/test_buffers.py64
-rw-r--r--Lib/ctypes/test/test_byteswap.py55
-rw-r--r--Lib/ctypes/test/test_callbacks.py47
-rw-r--r--Lib/ctypes/test/test_cast.py21
-rw-r--r--Lib/ctypes/test/test_cfuncs.py11
-rw-r--r--Lib/ctypes/test/test_checkretval.py15
-rw-r--r--Lib/ctypes/test/test_errcheck.py19
-rw-r--r--Lib/ctypes/test/test_find.py69
-rw-r--r--Lib/ctypes/test/test_frombuffer.py2
-rw-r--r--Lib/ctypes/test/test_funcptr.py2
-rw-r--r--Lib/ctypes/test/test_functions.py72
-rw-r--r--Lib/ctypes/test/test_integers.py5
-rw-r--r--Lib/ctypes/test/test_keeprefs.py3
-rw-r--r--Lib/ctypes/test/test_loading.py138
-rw-r--r--Lib/ctypes/test/test_macholib.py21
-rw-r--r--Lib/ctypes/test/test_memfunctions.py44
-rw-r--r--Lib/ctypes/test/test_numbers.py57
-rw-r--r--Lib/ctypes/test/test_objects.py11
-rw-r--r--Lib/ctypes/test/test_parameters.py23
-rw-r--r--Lib/ctypes/test/test_pep3118.py15
-rw-r--r--Lib/ctypes/test/test_pointers.py8
-rw-r--r--Lib/ctypes/test/test_prototypes.py102
-rw-r--r--Lib/ctypes/test/test_python_api.py34
-rw-r--r--Lib/ctypes/test/test_random_things.py27
-rw-r--r--Lib/ctypes/test/test_refcounts.py20
-rw-r--r--Lib/ctypes/test/test_returnfuncptrs.py30
-rw-r--r--Lib/ctypes/test/test_slicing.py77
-rw-r--r--Lib/ctypes/test/test_strings.py142
-rw-r--r--Lib/ctypes/test/test_structures.py63
-rw-r--r--Lib/ctypes/test/test_unicode.py255
-rw-r--r--Lib/ctypes/test/test_values.py100
-rw-r--r--Lib/ctypes/test/test_win32.py129
-rw-r--r--Lib/ctypes/test/test_wintypes.py41
-rw-r--r--Lib/ctypes/util.py31
-rw-r--r--Lib/curses/__init__.py2
-rw-r--r--Lib/decimal.py10
-rw-r--r--Lib/difflib.py12
-rw-r--r--Lib/distutils/__init__.py2
-rw-r--r--Lib/distutils/ccompiler.py2
-rw-r--r--Lib/distutils/command/bdist_rpm.py13
-rw-r--r--Lib/distutils/command/build_ext.py8
-rw-r--r--Lib/distutils/command/build_py.py3
-rw-r--r--Lib/distutils/command/check.py3
-rw-r--r--Lib/distutils/command/install.py4
-rw-r--r--Lib/distutils/command/sdist.py2
-rw-r--r--Lib/distutils/command/upload.py11
-rw-r--r--Lib/distutils/config.py9
-rw-r--r--Lib/distutils/core.py7
-rw-r--r--Lib/distutils/cygwinccompiler.py28
-rw-r--r--Lib/distutils/dir_util.py8
-rw-r--r--Lib/distutils/spawn.py63
-rw-r--r--Lib/distutils/sysconfig.py177
-rw-r--r--Lib/distutils/tests/support.py2
-rw-r--r--Lib/distutils/tests/test_archive_util.py2
-rw-r--r--Lib/distutils/tests/test_bdist_dumb.py25
-rw-r--r--Lib/distutils/tests/test_bdist_msi.py10
-rw-r--r--Lib/distutils/tests/test_bdist_rpm.py55
-rw-r--r--Lib/distutils/tests/test_bdist_wininst.py2
-rw-r--r--Lib/distutils/tests/test_build_clib.py11
-rw-r--r--Lib/distutils/tests/test_build_ext.py44
-rw-r--r--Lib/distutils/tests/test_build_py.py31
-rw-r--r--Lib/distutils/tests/test_build_scripts.py8
-rw-r--r--Lib/distutils/tests/test_ccompiler.py7
-rw-r--r--Lib/distutils/tests/test_check.py6
-rw-r--r--Lib/distutils/tests/test_clean.py2
-rw-r--r--Lib/distutils/tests/test_cmd.py25
-rw-r--r--Lib/distutils/tests/test_config.py6
-rw-r--r--Lib/distutils/tests/test_config_cmd.py5
-rw-r--r--Lib/distutils/tests/test_dir_util.py18
-rw-r--r--Lib/distutils/tests/test_install.py45
-rw-r--r--Lib/distutils/tests/test_install_lib.py4
-rw-r--r--Lib/distutils/tests/test_install_scripts.py10
-rw-r--r--Lib/distutils/tests/test_msvc9compiler.py8
-rw-r--r--Lib/distutils/tests/test_register.py41
-rw-r--r--Lib/distutils/tests/test_sdist.py33
-rw-r--r--Lib/distutils/tests/test_sysconfig.py29
-rw-r--r--Lib/distutils/tests/test_unixccompiler.py40
-rw-r--r--Lib/distutils/tests/test_upload.py25
-rw-r--r--Lib/distutils/tests/test_util.py12
-rw-r--r--Lib/distutils/unixccompiler.py70
-rw-r--r--Lib/distutils/util.py119
-rw-r--r--Lib/doctest.py56
-rw-r--r--Lib/dumbdbm.py62
-rw-r--r--Lib/email/_parseaddr.py8
-rw-r--r--Lib/email/base64mime.py2
-rw-r--r--Lib/email/charset.py2
-rw-r--r--Lib/email/feedparser.py4
-rw-r--r--Lib/email/generator.py15
-rw-r--r--Lib/email/test/data/msg_02.txt1
-rw-r--r--Lib/email/test/test_email.py196
-rw-r--r--Lib/email/test/test_email_renamed.py187
-rw-r--r--Lib/email/utils.py4
-rwxr-xr-x[-rw-r--r--]Lib/encodings/rot_13.py0
-rw-r--r--Lib/filecmp.py2
-rw-r--r--Lib/fileinput.py16
-rw-r--r--Lib/ftplib.py42
-rw-r--r--Lib/genericpath.py2
-rw-r--r--Lib/glob.py25
-rw-r--r--Lib/gzip.py13
-rw-r--r--Lib/hashlib.py71
-rw-r--r--Lib/heapq.py97
-rw-r--r--Lib/hmac.py3
-rw-r--r--Lib/httplib.py81
-rw-r--r--Lib/idlelib/AutoComplete.py14
-rw-r--r--Lib/idlelib/AutoCompleteWindow.py3
-rw-r--r--Lib/idlelib/AutoExpand.py21
-rw-r--r--Lib/idlelib/Bindings.py30
-rw-r--r--Lib/idlelib/CallTipWindow.py65
-rw-r--r--Lib/idlelib/CallTips.py204
-rw-r--r--Lib/idlelib/ClassBrowser.py20
-rw-r--r--Lib/idlelib/ColorDelegator.py30
-rw-r--r--Lib/idlelib/Debugger.py6
-rw-r--r--Lib/idlelib/Delegator.py14
-rw-r--r--Lib/idlelib/EditorWindow.py146
-rw-r--r--Lib/idlelib/FormatParagraph.py142
-rw-r--r--Lib/idlelib/GrepDialog.py90
-rw-r--r--Lib/idlelib/HyperParser.py178
-rw-r--r--Lib/idlelib/IOBinding.py85
-rw-r--r--Lib/idlelib/Icons/idle.icobin0 -> 19790 bytes
-rw-r--r--Lib/idlelib/Icons/idle_16.gifbin0 -> 1034 bytes
-rw-r--r--Lib/idlelib/Icons/idle_16.pngbin0 -> 1264 bytes
-rw-r--r--Lib/idlelib/Icons/idle_32.gifbin0 -> 1435 bytes
-rw-r--r--Lib/idlelib/Icons/idle_32.pngbin0 -> 2542 bytes
-rw-r--r--Lib/idlelib/Icons/idle_48.gifbin0 -> 1388 bytes
-rw-r--r--Lib/idlelib/Icons/idle_48.pngbin0 -> 4710 bytes
-rw-r--r--Lib/idlelib/Icons/python.gifbin125 -> 585 bytes
-rw-r--r--Lib/idlelib/IdleHistory.py90
-rw-r--r--Lib/idlelib/MultiCall.py20
-rw-r--r--Lib/idlelib/MultiStatusBar.py35
-rw-r--r--Lib/idlelib/NEWS.txt42
-rw-r--r--Lib/idlelib/ObjectBrowser.py11
-rw-r--r--Lib/idlelib/OutputWindow.py6
-rw-r--r--Lib/idlelib/ParenMatch.py14
-rw-r--r--Lib/idlelib/PathBrowser.py23
-rw-r--r--Lib/idlelib/Percolator.py50
-rwxr-xr-x[-rw-r--r--]Lib/idlelib/PyShell.py316
-rw-r--r--Lib/idlelib/ReplaceDialog.py60
-rw-r--r--Lib/idlelib/RstripExtension.py20
-rw-r--r--Lib/idlelib/ScriptBinding.py27
-rw-r--r--Lib/idlelib/ScrolledList.py19
-rw-r--r--Lib/idlelib/SearchDialog.py27
-rw-r--r--Lib/idlelib/SearchDialogBase.py92
-rw-r--r--Lib/idlelib/SearchEngine.py111
-rw-r--r--Lib/idlelib/StackViewer.py41
-rw-r--r--Lib/idlelib/ToolTip.py22
-rw-r--r--Lib/idlelib/TreeWidget.py31
-rw-r--r--Lib/idlelib/UndoDelegator.py21
-rw-r--r--Lib/idlelib/WidgetRedirector.py15
-rw-r--r--Lib/idlelib/ZoomHeight.py2
-rw-r--r--Lib/idlelib/aboutDialog.py27
-rw-r--r--Lib/idlelib/config-extensions.def2
-rw-r--r--Lib/idlelib/config-keys.def28
-rw-r--r--Lib/idlelib/config-main.def2
-rw-r--r--Lib/idlelib/configDialog.py46
-rw-r--r--Lib/idlelib/configHandler.py65
-rw-r--r--Lib/idlelib/configHelpSourceEdit.py27
-rw-r--r--Lib/idlelib/configSectionNameDialog.py115
-rw-r--r--Lib/idlelib/dynOptionMenuWidget.py26
-rw-r--r--Lib/idlelib/extend.txt4
-rw-r--r--Lib/idlelib/help.txt29
-rw-r--r--Lib/idlelib/idle_test/README.txt115
-rw-r--r--Lib/idlelib/idle_test/__init__.py9
-rw-r--r--Lib/idlelib/idle_test/htest.py368
-rw-r--r--Lib/idlelib/idle_test/mock_idle.py52
-rw-r--r--Lib/idlelib/idle_test/mock_tk.py298
-rw-r--r--Lib/idlelib/idle_test/test_autocomplete.py143
-rw-r--r--Lib/idlelib/idle_test/test_autoexpand.py141
-rw-r--r--Lib/idlelib/idle_test/test_calltips.py180
-rw-r--r--Lib/idlelib/idle_test/test_config_name.py75
-rw-r--r--Lib/idlelib/idle_test/test_delegator.py37
-rw-r--r--Lib/idlelib/idle_test/test_formatparagraph.py377
-rw-r--r--Lib/idlelib/idle_test/test_grep.py82
-rw-r--r--Lib/idlelib/idle_test/test_hyperparser.py191
-rw-r--r--Lib/idlelib/idle_test/test_idlehistory.py167
-rw-r--r--Lib/idlelib/idle_test/test_parenmatch.py121
-rw-r--r--Lib/idlelib/idle_test/test_pathbrowser.py12
-rw-r--r--Lib/idlelib/idle_test/test_rstrip.py49
-rw-r--r--Lib/idlelib/idle_test/test_searchengine.py329
-rw-r--r--Lib/idlelib/idle_test/test_text.py228
-rw-r--r--Lib/idlelib/idle_test/test_textview.py98
-rw-r--r--Lib/idlelib/idle_test/test_warning.py73
-rw-r--r--Lib/idlelib/idlever.py2
-rw-r--r--Lib/idlelib/keybindingDialog.py30
-rw-r--r--Lib/idlelib/macosxSupport.py136
-rw-r--r--Lib/idlelib/rpc.py4
-rw-r--r--Lib/idlelib/run.py85
-rw-r--r--Lib/idlelib/tabbedpages.py10
-rw-r--r--Lib/idlelib/textView.py35
-rw-r--r--Lib/imaplib.py19
-rw-r--r--Lib/imghdr.py20
-rw-r--r--Lib/inspect.py7
-rw-r--r--Lib/io.py18
-rw-r--r--Lib/json/__init__.py50
-rw-r--r--Lib/json/decoder.py54
-rw-r--r--Lib/json/encoder.py18
-rw-r--r--Lib/json/tests/test_decode.py23
-rw-r--r--Lib/json/tests/test_dump.py9
-rw-r--r--Lib/json/tests/test_fail.py24
-rw-r--r--Lib/json/tests/test_float.py15
-rw-r--r--Lib/json/tests/test_pass1.py20
-rw-r--r--Lib/json/tests/test_scanstring.py56
-rw-r--r--Lib/json/tests/test_tool.py69
-rw-r--r--Lib/json/tool.py17
-rwxr-xr-xLib/keyword.py2
-rw-r--r--Lib/lib-tk/Tix.py88
-rw-r--r--Lib/lib-tk/Tkinter.py204
-rw-r--r--Lib/lib-tk/test/runtktests.py46
-rw-r--r--Lib/lib-tk/test/test_tkinter/test_geometry_managers.py889
-rw-r--r--Lib/lib-tk/test/test_tkinter/test_images.py339
-rw-r--r--Lib/lib-tk/test/test_tkinter/test_text.py11
-rw-r--r--Lib/lib-tk/test/test_tkinter/test_variables.py166
-rw-r--r--Lib/lib-tk/test/test_tkinter/test_widgets.py1172
-rw-r--r--Lib/lib-tk/test/test_ttk/support.py55
-rw-r--r--Lib/lib-tk/test/test_ttk/test_extensions.py32
-rw-r--r--Lib/lib-tk/test/test_ttk/test_functions.py91
-rw-r--r--Lib/lib-tk/test/test_ttk/test_style.py9
-rw-r--r--Lib/lib-tk/test/test_ttk/test_widgets.py662
-rw-r--r--Lib/lib-tk/test/widget_tests.py547
-rw-r--r--Lib/lib-tk/tkFont.py3
-rw-r--r--Lib/lib-tk/tkSimpleDialog.py2
-rw-r--r--Lib/lib-tk/ttk.py209
-rw-r--r--Lib/lib-tk/turtle.py30
-rw-r--r--Lib/lib2to3/Grammar.txt7
-rw-r--r--Lib/lib2to3/fixer_util.py16
-rw-r--r--Lib/lib2to3/fixes/fix_import.py2
-rw-r--r--Lib/lib2to3/fixes/fix_itertools.py4
-rw-r--r--Lib/lib2to3/fixes/fix_metaclass.py2
-rw-r--r--Lib/lib2to3/fixes/fix_unicode.py31
-rw-r--r--Lib/lib2to3/pgen2/driver.py17
-rw-r--r--Lib/lib2to3/pgen2/grammar.py5
-rwxr-xr-xLib/lib2to3/pgen2/token.py13
-rw-r--r--Lib/lib2to3/pgen2/tokenize.py15
-rw-r--r--Lib/lib2to3/refactor.py2
-rwxr-xr-x[-rw-r--r--]Lib/lib2to3/tests/data/different_encoding.py0
-rwxr-xr-xLib/lib2to3/tests/data/false_encoding.py2
-rw-r--r--Lib/lib2to3/tests/test_fixers.py61
-rw-r--r--Lib/lib2to3/tests/test_main.py6
-rw-r--r--Lib/lib2to3/tests/test_parser.py19
-rw-r--r--Lib/lib2to3/tests/test_pytree.py16
-rw-r--r--Lib/lib2to3/tests/test_refactor.py10
-rw-r--r--Lib/locale.py188
-rw-r--r--Lib/logging/__init__.py74
-rw-r--r--Lib/logging/config.py177
-rw-r--r--Lib/logging/handlers.py196
-rw-r--r--Lib/macurl2path.py20
-rw-r--r--Lib/mailbox.py96
-rw-r--r--Lib/mailcap.py4
-rw-r--r--Lib/mimetypes.py50
-rw-r--r--Lib/modulefinder.py2
-rw-r--r--Lib/msilib/__init__.py2
-rw-r--r--Lib/multiprocessing/connection.py27
-rw-r--r--Lib/multiprocessing/dummy/__init__.py3
-rw-r--r--Lib/multiprocessing/forking.py25
-rw-r--r--Lib/multiprocessing/managers.py1
-rw-r--r--Lib/multiprocessing/pool.py51
-rw-r--r--Lib/multiprocessing/process.py4
-rw-r--r--Lib/multiprocessing/synchronize.py2
-rw-r--r--Lib/multiprocessing/util.py59
-rw-r--r--Lib/netrc.py29
-rw-r--r--Lib/nntplib.py11
-rw-r--r--Lib/ntpath.py172
-rw-r--r--Lib/numbers.py2
-rw-r--r--Lib/optparse.py9
-rw-r--r--Lib/os.py14
-rwxr-xr-xLib/pdb.py2
-rw-r--r--Lib/pickle.py4
-rw-r--r--Lib/pickletools.py4
-rwxr-xr-xLib/plat-generic/regen2
-rw-r--r--Lib/plat-mac/EasyDialogs.py9
-rwxr-xr-xLib/platform.py62
-rw-r--r--Lib/plistlib.py4
-rw-r--r--Lib/poplib.py2
-rw-r--r--Lib/posixpath.py129
-rw-r--r--Lib/pprint.py28
-rw-r--r--Lib/pstats.py10
-rw-r--r--Lib/py_compile.py2
-rw-r--r--Lib/pyclbr.py2
-rwxr-xr-xLib/pydoc.py78
-rw-r--r--Lib/pydoc_data/topics.py44
-rw-r--r--Lib/random.py66
-rw-r--r--Lib/re.py17
-rw-r--r--Lib/rfc822.py2
-rw-r--r--Lib/rlcompleter.py38
-rw-r--r--Lib/robotparser.py15
-rw-r--r--Lib/runpy.py2
-rw-r--r--Lib/shutil.py31
-rw-r--r--Lib/site.py17
-rwxr-xr-xLib/smtplib.py40
-rw-r--r--Lib/socket.py4
-rw-r--r--Lib/sqlite3/dbapi2.py6
-rw-r--r--Lib/sqlite3/dump.py9
-rw-r--r--Lib/sqlite3/test/dump.py23
-rw-r--r--Lib/sqlite3/test/factory.py81
-rw-r--r--Lib/sqlite3/test/hooks.py21
-rw-r--r--Lib/sqlite3/test/regression.py39
-rw-r--r--Lib/sqlite3/test/types.py2
-rw-r--r--Lib/sqlite3/test/userfunctions.py60
-rw-r--r--Lib/sre_compile.py12
-rw-r--r--Lib/sre_constants.py8
-rw-r--r--Lib/sre_parse.py43
-rw-r--r--Lib/ssl.py51
-rw-r--r--Lib/string.py8
-rw-r--r--Lib/subprocess.py173
-rw-r--r--Lib/sunau.py42
-rwxr-xr-xLib/symbol.py2
-rw-r--r--Lib/symtable.py5
-rw-r--r--Lib/sysconfig.py233
-rw-r--r--Lib/tarfile.py54
-rw-r--r--Lib/telnetlib.py133
-rw-r--r--Lib/tempfile.py55
-rw-r--r--Lib/test/audiodata/pluck-pcm16.aiffbin0 -> 13506 bytes
-rw-r--r--Lib/test/audiodata/pluck-pcm16.aubin0 -> 13252 bytes
-rw-r--r--Lib/test/audiodata/pluck-pcm16.wavbin0 -> 13370 bytes
-rw-r--r--Lib/test/audiodata/pluck-pcm24.aiffbin0 -> 20120 bytes
-rw-r--r--Lib/test/audiodata/pluck-pcm24.wavbin0 -> 19984 bytes
-rw-r--r--Lib/test/audiodata/pluck-pcm32.aiffbin0 -> 26734 bytes
-rw-r--r--Lib/test/audiodata/pluck-pcm32.aubin0 -> 26480 bytes
-rw-r--r--Lib/test/audiodata/pluck-pcm32.wavbin0 -> 26598 bytes
-rw-r--r--Lib/test/audiodata/pluck-pcm8.aiffbin0 -> 6892 bytes
-rw-r--r--Lib/test/audiodata/pluck-pcm8.aubin0 -> 6638 bytes
-rw-r--r--Lib/test/audiodata/pluck-pcm8.wavbin0 -> 6756 bytes
-rw-r--r--Lib/test/audiodata/pluck-ulaw.aifcbin0 -> 6910 bytes
-rw-r--r--Lib/test/audiodata/pluck-ulaw.aubin0 -> 6638 bytes
-rw-r--r--Lib/test/audiotests.py283
-rw-r--r--Lib/test/bad_coding3.py2
-rw-r--r--Lib/test/crashers/buffer_mutate.py30
-rw-r--r--Lib/test/crashers/decref_before_assignment.py44
-rwxr-xr-x[-rw-r--r--]Lib/test/crashers/recursive_call.py0
-rwxr-xr-x[-rw-r--r--]Lib/test/curses_tests.py0
-rw-r--r--Lib/test/imghdrdata/python.bmpbin0 -> 1162 bytes
-rw-r--r--Lib/test/imghdrdata/python.gifbin0 -> 610 bytes
-rw-r--r--Lib/test/imghdrdata/python.jpgbin0 -> 543 bytes
-rw-r--r--Lib/test/imghdrdata/python.pbm3
-rw-r--r--Lib/test/imghdrdata/python.pgmbin0 -> 269 bytes
-rw-r--r--Lib/test/imghdrdata/python.pngbin0 -> 1020 bytes
-rw-r--r--Lib/test/imghdrdata/python.ppmbin0 -> 781 bytes
-rw-r--r--Lib/test/imghdrdata/python.rasbin0 -> 1056 bytes
-rw-r--r--Lib/test/imghdrdata/python.sgibin0 -> 1967 bytes
-rw-r--r--Lib/test/imghdrdata/python.tiffbin0 -> 1326 bytes
-rw-r--r--Lib/test/imghdrdata/python.xbm6
-rw-r--r--Lib/test/inspect_fodder.py2
-rw-r--r--Lib/test/keycert.pem59
-rw-r--r--Lib/test/leakers/test_ctypes.py1
-rw-r--r--Lib/test/mp_fork_bomb.py16
-rw-r--r--Lib/test/nullbytecert.pem90
-rw-r--r--Lib/test/pickletester.py83
-rw-r--r--Lib/test/pydoc_mod.py10
-rwxr-xr-xLib/test/regrtest.py27
-rw-r--r--Lib/test/sample_doctest_no_docstrings.py12
-rw-r--r--Lib/test/sample_doctest_no_doctests.py15
-rw-r--r--Lib/test/script_helper.py8
-rw-r--r--Lib/test/sha256.pem223
-rw-r--r--Lib/test/ssl_cert.pem14
-rw-r--r--Lib/test/ssl_key.pem9
-rw-r--r--Lib/test/string_tests.py69
-rw-r--r--Lib/test/subprocessdata/sigchild_ignore.py11
-rw-r--r--Lib/test/symlink_support.py100
-rw-r--r--Lib/test/test_StringIO.py47
-rw-r--r--Lib/test/test___future__.py1
-rw-r--r--Lib/test/test__osx_support.py281
-rw-r--r--Lib/test/test_aepack.py8
-rw-r--r--Lib/test/test_aifc.py388
-rw-r--r--[-rwxr-xr-x]Lib/test/test_al.py1
-rw-r--r--Lib/test/test_anydbm.py1
-rw-r--r--Lib/test/test_argparse.py200
-rw-r--r--[-rwxr-xr-x]Lib/test/test_array.py101
-rw-r--r--Lib/test/test_ast.py8
-rw-r--r--Lib/test/test_asyncore.py30
-rw-r--r--Lib/test/test_audioop.py405
-rw-r--r--Lib/test/test_base64.py26
-rw-r--r--Lib/test/test_bigmem.py8
-rw-r--r--[-rwxr-xr-x]Lib/test/test_binhex.py1
-rw-r--r--Lib/test/test_bisect.py53
-rw-r--r--[-rwxr-xr-x]Lib/test/test_bsddb.py24
-rw-r--r--Lib/test/test_buffer.py14
-rw-r--r--Lib/test/test_builtin.py115
-rw-r--r--Lib/test/test_bytes.py21
-rw-r--r--Lib/test/test_bz2.py78
-rw-r--r--Lib/test/test_calendar.py26
-rw-r--r--Lib/test/test_capi.py60
-rw-r--r--[-rwxr-xr-x]Lib/test/test_cd.py1
-rw-r--r--Lib/test/test_cfgparser.py12
-rw-r--r--Lib/test/test_cgi.py40
-rw-r--r--[-rwxr-xr-x]Lib/test/test_cl.py1
-rw-r--r--Lib/test/test_class.py7
-rw-r--r--Lib/test/test_cmath.py2
-rw-r--r--Lib/test/test_cmd.py9
-rw-r--r--Lib/test/test_cmd_line.py38
-rw-r--r--Lib/test/test_cmd_line_script.py18
-rw-r--r--Lib/test/test_code.py5
-rw-r--r--Lib/test/test_codeccallbacks.py31
-rw-r--r--Lib/test/test_codecencodings_cn.py1
-rw-r--r--Lib/test/test_codecencodings_hk.py1
-rw-r--r--Lib/test/test_codecencodings_iso2022.py3
-rw-r--r--Lib/test/test_codecencodings_jp.py1
-rw-r--r--Lib/test/test_codecencodings_kr.py1
-rw-r--r--Lib/test/test_codecencodings_tw.py1
-rw-r--r--Lib/test/test_codecmaps_cn.py1
-rw-r--r--Lib/test/test_codecmaps_hk.py2
-rw-r--r--Lib/test/test_codecmaps_jp.py1
-rw-r--r--Lib/test/test_codecmaps_kr.py1
-rw-r--r--Lib/test/test_codecmaps_tw.py1
-rw-r--r--Lib/test/test_codecs.py580
-rw-r--r--Lib/test/test_codeop.py2
-rw-r--r--Lib/test/test_collections.py235
-rw-r--r--Lib/test/test_compile.py71
-rw-r--r--Lib/test/test_compileall.py3
-rw-r--r--Lib/test/test_cookie.py15
-rw-r--r--Lib/test/test_cookielib.py154
-rw-r--r--Lib/test/test_cpickle.py152
-rw-r--r--Lib/test/test_csv.py255
-rw-r--r--Lib/test/test_curses.py22
-rw-r--r--Lib/test/test_datetime.py164
-rw-r--r--Lib/test/test_decimal.py34
-rw-r--r--Lib/test/test_deque.py16
-rw-r--r--Lib/test/test_descr.py399
-rw-r--r--Lib/test/test_descrtut.py2
-rw-r--r--Lib/test/test_dict.py32
-rw-r--r--Lib/test/test_dictcomps.py117
-rw-r--r--Lib/test/test_dictviews.py20
-rw-r--r--Lib/test/test_difflib.py9
-rw-r--r--Lib/test/test_dis.py2
-rw-r--r--[-rwxr-xr-x]Lib/test/test_dl.py1
-rw-r--r--Lib/test/test_doctest.py56
-rw-r--r--Lib/test/test_docxmlrpc.py2
-rw-r--r--Lib/test/test_dumbdbm.py7
-rw-r--r--Lib/test/test_email.py2
-rw-r--r--Lib/test/test_enumerate.py3
-rw-r--r--Lib/test/test_eof.py1
-rw-r--r--Lib/test/test_epoll.py3
-rw-r--r--[-rwxr-xr-x]Lib/test/test_errno.py1
-rw-r--r--Lib/test/test_exceptions.py18
-rw-r--r--Lib/test/test_fcntl.py36
-rw-r--r--Lib/test/test_file.py11
-rw-r--r--Lib/test/test_file2k.py175
-rw-r--r--Lib/test/test_file_eintr.py239
-rw-r--r--Lib/test/test_fileinput.py43
-rw-r--r--Lib/test/test_fileio.py113
-rw-r--r--Lib/test/test_float.py2
-rw-r--r--Lib/test/test_format.py37
-rw-r--r--Lib/test/test_fractions.py20
-rw-r--r--Lib/test/test_ftplib.py70
-rw-r--r--Lib/test/test_functools.py27
-rw-r--r--Lib/test/test_gc.py69
-rw-r--r--Lib/test/test_gdb.py105
-rw-r--r--Lib/test/test_generators.py3
-rw-r--r--Lib/test/test_genericpath.py29
-rw-r--r--Lib/test/test_genexps.py3
-rw-r--r--Lib/test/test_getargs2.py19
-rw-r--r--[-rwxr-xr-x]Lib/test/test_gl.py1
-rw-r--r--Lib/test/test_glob.py117
-rw-r--r--Lib/test/test_grammar.py6
-rw-r--r--Lib/test/test_grp.py6
-rw-r--r--Lib/test/test_gzip.py16
-rw-r--r--Lib/test/test_hashlib.py132
-rw-r--r--Lib/test/test_heapq.py35
-rw-r--r--Lib/test/test_hmac.py114
-rw-r--r--Lib/test/test_htmlparser.py24
-rw-r--r--Lib/test/test_httplib.py125
-rw-r--r--Lib/test/test_httpservers.py121
-rw-r--r--Lib/test/test_idle.py20
-rw-r--r--[-rwxr-xr-x]Lib/test/test_imageop.py2
-rw-r--r--Lib/test/test_imaplib.py16
-rw-r--r--[-rwxr-xr-x]Lib/test/test_imgfile.py2
-rw-r--r--Lib/test/test_imghdr.py120
-rw-r--r--Lib/test/test_imp.py12
-rw-r--r--Lib/test/test_import.py173
-rw-r--r--Lib/test/test_index.py7
-rw-r--r--Lib/test/test_inspect.py25
-rw-r--r--Lib/test/test_int.py63
-rw-r--r--Lib/test/test_io.py232
-rw-r--r--Lib/test/test_iter.py19
-rw-r--r--Lib/test/test_itertools.py32
-rw-r--r--Lib/test/test_kqueue.py41
-rw-r--r--Lib/test/test_locale.py87
-rw-r--r--Lib/test/test_logging.py137
-rw-r--r--Lib/test/test_long.py10
-rw-r--r--Lib/test/test_macos.py11
-rw-r--r--Lib/test/test_macostools.py52
-rw-r--r--Lib/test/test_macurl2path.py31
-rw-r--r--Lib/test/test_mailbox.py306
-rw-r--r--Lib/test/test_marshal.py52
-rw-r--r--Lib/test/test_math.py67
-rw-r--r--Lib/test/test_memoryio.py72
-rw-r--r--Lib/test/test_memoryview.py10
-rw-r--r--Lib/test/test_mimetypes.py85
-rw-r--r--Lib/test/test_minidom.py71
-rw-r--r--Lib/test/test_mmap.py164
-rw-r--r--Lib/test/test_multibytecodec.py90
-rw-r--r--Lib/test/test_multibytecodec_support.py6
-rw-r--r--Lib/test/test_multiprocessing.py355
-rw-r--r--Lib/test/test_mutex.py2
-rw-r--r--Lib/test/test_netrc.py26
-rw-r--r--Lib/test/test_nis.py6
-rw-r--r--Lib/test/test_nntplib.py73
-rw-r--r--Lib/test/test_normalization.py2
-rw-r--r--Lib/test/test_ntpath.py152
-rw-r--r--Lib/test/test_old_mailbox.py16
-rw-r--r--Lib/test/test_openpty.py2
-rw-r--r--Lib/test/test_optparse.py43
-rw-r--r--Lib/test/test_os.py304
-rw-r--r--Lib/test/test_parser.py71
-rw-r--r--Lib/test/test_pdb.py61
-rw-r--r--Lib/test/test_peepholer.py13
-rw-r--r--Lib/test/test_pep263.py23
-rw-r--r--Lib/test/test_pickle.py20
-rw-r--r--Lib/test/test_platform.py13
-rw-r--r--Lib/test/test_plistlib.py12
-rw-r--r--Lib/test/test_poll.py60
-rw-r--r--Lib/test/test_popen.py1
-rw-r--r--Lib/test/test_popen2.py1
-rw-r--r--Lib/test/test_poplib.py34
-rw-r--r--Lib/test/test_posix.py537
-rw-r--r--Lib/test/test_posixpath.py103
-rw-r--r--Lib/test/test_pprint.py104
-rw-r--r--Lib/test/test_property.py4
-rw-r--r--Lib/test/test_pty.py2
-rw-r--r--Lib/test/test_pwd.py30
-rw-r--r--Lib/test/test_py3kwarn.py5
-rw-r--r--Lib/test/test_pyclbr.py5
-rw-r--r--Lib/test/test_pydoc.py252
-rw-r--r--Lib/test/test_pyexpat.py66
-rw-r--r--Lib/test/test_queue.py13
-rw-r--r--Lib/test/test_random.py70
-rw-r--r--Lib/test/test_re.py196
-rw-r--r--Lib/test/test_readline.py4
-rw-r--r--Lib/test/test_repr.py17
-rw-r--r--Lib/test/test_resource.py121
-rw-r--r--Lib/test/test_robotparser.py19
-rw-r--r--Lib/test/test_runpy.py37
-rw-r--r--Lib/test/test_sax.py186
-rw-r--r--Lib/test/test_select.py9
-rw-r--r--Lib/test/test_set.py36
-rw-r--r--Lib/test/test_sets.py2
-rw-r--r--Lib/test/test_shutil.py165
-rw-r--r--Lib/test/test_signal.py16
-rw-r--r--Lib/test/test_site.py1
-rw-r--r--Lib/test/test_smtplib.py6
-rw-r--r--Lib/test/test_smtpnet.py2
-rw-r--r--Lib/test/test_socket.py247
-rw-r--r--Lib/test/test_socketserver.py126
-rw-r--r--Lib/test/test_spwd.py62
-rw-r--r--Lib/test/test_ssl.py110
-rw-r--r--Lib/test/test_stat.py175
-rw-r--r--Lib/test/test_str.py39
-rw-r--r--Lib/test/test_strop.py6
-rw-r--r--Lib/test/test_strptime.py45
-rw-r--r--Lib/test/test_strtod.py32
-rw-r--r--Lib/test/test_struct.py46
-rw-r--r--Lib/test/test_structmembers.py8
-rw-r--r--Lib/test/test_subprocess.py232
-rw-r--r--Lib/test/test_sunau.py100
-rw-r--r--Lib/test/test_sundry.py2
-rw-r--r--Lib/test/test_support.py347
-rw-r--r--Lib/test/test_sys.py315
-rw-r--r--Lib/test/test_sys_settrace.py13
-rw-r--r--Lib/test/test_sysconfig.py9
-rw-r--r--Lib/test/test_tarfile.py178
-rw-r--r--Lib/test/test_tcl.py510
-rw-r--r--Lib/test/test_telnetlib.py101
-rw-r--r--Lib/test/test_tempfile.py186
-rw-r--r--Lib/test/test_textwrap.py62
-rw-r--r--Lib/test/test_thread.py73
-rw-r--r--Lib/test/test_threading.py174
-rw-r--r--Lib/test/test_time.py2
-rw-r--r--Lib/test/test_timeout.py3
-rw-r--r--Lib/test/test_tk.py16
-rw-r--r--Lib/test/test_tokenize.py71
-rw-r--r--Lib/test/test_tools.py368
-rw-r--r--Lib/test/test_traceback.py15
-rw-r--r--Lib/test/test_ttk_guionly.py16
-rw-r--r--Lib/test/test_ucn.py25
-rw-r--r--Lib/test/test_unicode.py77
-rw-r--r--Lib/test/test_urllib.py47
-rw-r--r--Lib/test/test_urllib2.py67
-rw-r--r--Lib/test/test_urllib2_localnet.py15
-rw-r--r--Lib/test/test_urllib2net.py44
-rw-r--r--Lib/test/test_urllibnet.py33
-rw-r--r--Lib/test/test_urlparse.py61
-rw-r--r--[-rwxr-xr-x]Lib/test/test_userstring.py1
-rw-r--r--Lib/test/test_uu.py4
-rw-r--r--Lib/test/test_uuid.py102
-rw-r--r--Lib/test/test_wait4.py8
-rw-r--r--Lib/test/test_warnings.py5
-rw-r--r--Lib/test/test_wave.py152
-rw-r--r--Lib/test/test_weakref.py391
-rw-r--r--Lib/test/test_weakset.py163
-rw-r--r--Lib/test/test_whichdb.py1
-rw-r--r--Lib/test/test_winreg.py66
-rw-r--r--Lib/test/test_winsound.py22
-rw-r--r--Lib/test/test_with.py2
-rw-r--r--Lib/test/test_wsgiref.py118
-rw-r--r--Lib/test/test_xml_etree.py47
-rw-r--r--Lib/test/test_xmlrpc.py12
-rw-r--r--Lib/test/test_xrange.py75
-rw-r--r--Lib/test/test_zipfile.py256
-rw-r--r--Lib/test/test_zipimport_support.py26
-rw-r--r--Lib/test/test_zlib.py216
-rw-r--r--Lib/test/testtar.tarbin281088 -> 281600 bytes
-rw-r--r--Lib/textwrap.py12
-rw-r--r--Lib/threading.py408
-rwxr-xr-x[-rw-r--r--]Lib/timeit.py0
-rw-r--r--[-rwxr-xr-x]Lib/token.py4
-rw-r--r--Lib/tokenize.py42
-rwxr-xr-x[-rw-r--r--]Lib/trace.py0
-rw-r--r--Lib/traceback.py11
-rw-r--r--Lib/unittest/__init__.py2
-rw-r--r--Lib/unittest/case.py46
-rw-r--r--Lib/unittest/loader.py8
-rw-r--r--Lib/unittest/main.py5
-rw-r--r--Lib/unittest/runner.py2
-rw-r--r--Lib/unittest/signals.py16
-rw-r--r--Lib/unittest/test/test_assertions.py4
-rw-r--r--Lib/unittest/test/test_break.py32
-rw-r--r--Lib/unittest/test/test_case.py18
-rw-r--r--Lib/unittest/test/test_discovery.py38
-rw-r--r--Lib/unittest/test/test_loader.py19
-rw-r--r--Lib/unittest/test/test_result.py4
-rw-r--r--Lib/unittest/test/test_runner.py13
-rw-r--r--Lib/unittest/test/test_skipping.py30
-rw-r--r--Lib/urllib.py62
-rw-r--r--Lib/urllib2.py11
-rw-r--r--Lib/urlparse.py76
-rw-r--r--Lib/uuid.py45
-rw-r--r--Lib/wave.py53
-rw-r--r--Lib/weakref.py120
-rwxr-xr-x[-rw-r--r--]Lib/webbrowser.py31
-rw-r--r--Lib/wsgiref/handlers.py12
-rw-r--r--Lib/wsgiref/simple_server.py1
-rw-r--r--Lib/wsgiref/validate.py4
-rw-r--r--Lib/xml/dom/minidom.py8
-rw-r--r--Lib/xml/etree/ElementInclude.py15
-rw-r--r--Lib/xml/etree/ElementTree.py25
-rw-r--r--Lib/xml/sax/_exceptions.py6
-rw-r--r--Lib/xml/sax/expatreader.py5
-rw-r--r--Lib/xml/sax/saxutils.py114
-rw-r--r--Lib/xml/sax/xmlreader.py2
-rw-r--r--Lib/xmlrpclib.py17
-rw-r--r--Lib/zipfile.py594
-rw-r--r--Mac/BuildScript/README.txt134
-rwxr-xr-xMac/BuildScript/build-installer.py650
-rw-r--r--Mac/BuildScript/issue19373_tk_8_5_15_source.patch13
-rw-r--r--Mac/BuildScript/ncurses-5.5.patch36
-rw-r--r--Mac/BuildScript/resources/ReadMe.txt80
-rw-r--r--Mac/BuildScript/resources/Welcome.rtf10
-rwxr-xr-xMac/BuildScript/scripts/postflight.documentation11
-rwxr-xr-xMac/BuildScript/scripts/postflight.framework14
-rw-r--r--Mac/IDLE/Info.plist.in4
-rw-r--r--Mac/Makefile.in29
-rw-r--r--[-rwxr-xr-x]Mac/Modules/carbonevt/_CarbonEvtmodule.c3
-rw-r--r--[-rwxr-xr-x]Mac/Modules/cg/CFMLateImport.c0
-rw-r--r--[-rwxr-xr-x]Mac/Modules/cg/CFMLateImport.h0
-rw-r--r--[-rwxr-xr-x]Mac/Modules/cg/CGStubLib.exp0
-rw-r--r--[-rwxr-xr-x]Mac/Modules/cg/CGStubLib.readme0
-rw-r--r--[-rwxr-xr-x]Mac/Modules/cg/_CGmodule.c0
-rw-r--r--Mac/Modules/fm/_Fmmodule.c8
-rw-r--r--Mac/Modules/list/_Listmodule.c3
-rw-r--r--Mac/Modules/qd/_Qdmodule.c12
-rw-r--r--Mac/Modules/qdoffs/_Qdoffsmodule.c10
-rw-r--r--[-rwxr-xr-x]Mac/PythonLauncher/FileSettings.h5
-rw-r--r--[-rwxr-xr-x]Mac/PythonLauncher/FileSettings.m46
-rw-r--r--Mac/PythonLauncher/Info.plist.in2
-rw-r--r--Mac/PythonLauncher/MyAppDelegate.m6
-rw-r--r--[-rwxr-xr-x]Mac/PythonLauncher/MyDocument.h0
-rw-r--r--[-rwxr-xr-x]Mac/PythonLauncher/MyDocument.m22
-rw-r--r--Mac/PythonLauncher/PreferencesWindowController.m23
-rw-r--r--Mac/PythonLauncher/doscript.h2
-rw-r--r--Mac/PythonLauncher/doscript.m78
-rw-r--r--[-rwxr-xr-x]Mac/PythonLauncher/main.m4
-rw-r--r--Mac/README262
-rw-r--r--Mac/Resources/app/Info.plist.in8
-rw-r--r--Mac/Resources/framework/Info.plist.in4
-rwxr-xr-x[-rw-r--r--]Mac/Tools/fixapplepython23.py0
-rwxr-xr-x[-rw-r--r--]Mac/scripts/buildpkg.py0
-rw-r--r--[-rwxr-xr-x]Mac/scripts/mkestrres-macerrors.h0
-rwxr-xr-x[-rw-r--r--]Mac/scripts/zappycfiles.py0
-rw-r--r--Makefile.pre.in177
-rw-r--r--Misc/ACKS628
-rw-r--r--Misc/NEWS2467
-rw-r--r--Misc/README.OpenBSD2
-rw-r--r--Misc/RPM/python-2.7.spec2
-rw-r--r--Misc/python-config.in3
-rw-r--r--Misc/python.man2
-rw-r--r--Modules/Setup.dist6
-rw-r--r--Modules/_bisectmodule.c25
-rw-r--r--Modules/_bsddb.c1113
-rw-r--r--Modules/_collectionsmodule.c152
-rw-r--r--Modules/_csv.c52
-rw-r--r--Modules/_ctypes/_ctypes.c108
-rw-r--r--Modules/_ctypes/callproc.c17
-rw-r--r--Modules/_ctypes/cfield.c87
-rw-r--r--Modules/_ctypes/ctypes.h4
-rw-r--r--Modules/_ctypes/libffi.diff67
-rw-r--r--Modules/_ctypes/libffi/.gitignore21
-rw-r--r--Modules/_ctypes/libffi/.travis.yml8
-rw-r--r--Modules/_ctypes/libffi/ChangeLog1364
-rw-r--r--Modules/_ctypes/libffi/ChangeLog.libffi37
-rw-r--r--Modules/_ctypes/libffi/LICENSE6
-rw-r--r--Modules/_ctypes/libffi/Makefile.am117
-rw-r--r--Modules/_ctypes/libffi/Makefile.in939
-rw-r--r--Modules/_ctypes/libffi/README173
-rw-r--r--Modules/_ctypes/libffi/aclocal.m41205
-rwxr-xr-xModules/_ctypes/libffi/build-ios.sh67
-rwxr-xr-xModules/_ctypes/libffi/compile21
-rwxr-xr-xModules/_ctypes/libffi/config.guess297
-rwxr-xr-xModules/_ctypes/libffi/config.sub264
-rwxr-xr-xModules/_ctypes/libffi/configure5729
-rw-r--r--Modules/_ctypes/libffi/configure.ac349
-rwxr-xr-xModules/_ctypes/libffi/depcomp116
-rw-r--r--Modules/_ctypes/libffi/doc/libffi.info79
-rw-r--r--Modules/_ctypes/libffi/doc/libffi.texi42
-rw-r--r--Modules/_ctypes/libffi/doc/stamp-vti8
-rw-r--r--Modules/_ctypes/libffi/doc/version.texi8
-rw-r--r--Modules/_ctypes/libffi/fficonfig.h.in21
-rw-r--r--Modules/_ctypes/libffi/fficonfig.py.in1
-rwxr-xr-xModules/_ctypes/libffi/generate-ios-source-and-headers.py160
-rwxr-xr-xModules/_ctypes/libffi/generate-osx-source-and-headers.py153
-rw-r--r--Modules/_ctypes/libffi/include/Makefile.in97
-rw-r--r--Modules/_ctypes/libffi/include/ffi.h.in125
-rw-r--r--Modules/_ctypes/libffi/include/ffi_common.h18
-rwxr-xr-xModules/_ctypes/libffi/install-sh531
-rw-r--r--Modules/_ctypes/libffi/libffi.xcodeproj/project.pbxproj579
-rwxr-xr-xModules/_ctypes/libffi/libtool-ldflags106
-rw-r--r--Modules/_ctypes/libffi/libtool-version2
-rw-r--r--[-rwxr-xr-x]Modules/_ctypes/libffi/ltmain.sh4017
-rw-r--r--Modules/_ctypes/libffi/m4/asmcfi.m413
-rw-r--r--Modules/_ctypes/libffi/m4/ax_append_flag.m469
-rw-r--r--Modules/_ctypes/libffi/m4/ax_cc_maxopt.m4181
-rw-r--r--Modules/_ctypes/libffi/m4/ax_cflags_warn_all.m4122
-rw-r--r--Modules/_ctypes/libffi/m4/ax_check_compile_flag.m472
-rw-r--r--Modules/_ctypes/libffi/m4/ax_compiler_vendor.m484
-rw-r--r--Modules/_ctypes/libffi/m4/ax_configure_args.m470
-rw-r--r--Modules/_ctypes/libffi/m4/ax_enable_builddir.m4300
-rw-r--r--Modules/_ctypes/libffi/m4/ax_gcc_archflag.m4225
-rw-r--r--Modules/_ctypes/libffi/m4/ax_gcc_x86_cpuid.m479
-rw-r--r--Modules/_ctypes/libffi/m4/libtool.m42254
-rw-r--r--Modules/_ctypes/libffi/m4/ltoptions.m432
-rw-r--r--Modules/_ctypes/libffi/m4/ltversion.m412
-rw-r--r--Modules/_ctypes/libffi/m4/lt~obsolete.m412
-rw-r--r--Modules/_ctypes/libffi/man/Makefile.am4
-rw-r--r--Modules/_ctypes/libffi/man/Makefile.in103
-rw-r--r--Modules/_ctypes/libffi/man/ffi.310
-rw-r--r--Modules/_ctypes/libffi/man/ffi_prep_cif.310
-rw-r--r--Modules/_ctypes/libffi/man/ffi_prep_cif_var.373
-rw-r--r--[-rwxr-xr-x]Modules/_ctypes/libffi/mdate-sh0
-rwxr-xr-xModules/_ctypes/libffi/missing104
-rwxr-xr-x[-rw-r--r--]Modules/_ctypes/libffi/msvcc.sh30
-rw-r--r--Modules/_ctypes/libffi/src/aarch64/ffi.c1076
-rw-r--r--Modules/_ctypes/libffi/src/aarch64/ffitarget.h59
-rw-r--r--Modules/_ctypes/libffi/src/aarch64/sysv.S307
-rw-r--r--Modules/_ctypes/libffi/src/alpha/ffi.c6
-rw-r--r--Modules/_ctypes/libffi/src/alpha/ffitarget.h7
-rw-r--r--Modules/_ctypes/libffi/src/alpha/osf.S57
-rw-r--r--Modules/_ctypes/libffi/src/arm/ffi.c507
-rw-r--r--Modules/_ctypes/libffi/src/arm/ffitarget.h28
-rwxr-xr-xModules/_ctypes/libffi/src/arm/gentramp.sh118
-rw-r--r--Modules/_ctypes/libffi/src/arm/sysv.S226
-rw-r--r--Modules/_ctypes/libffi/src/arm/trampoline.S4450
-rw-r--r--Modules/_ctypes/libffi/src/avr32/ffi.c6
-rw-r--r--Modules/_ctypes/libffi/src/avr32/ffitarget.h11
-rw-r--r--Modules/_ctypes/libffi/src/bfin/ffi.c195
-rw-r--r--Modules/_ctypes/libffi/src/bfin/ffitarget.h43
-rw-r--r--Modules/_ctypes/libffi/src/bfin/sysv.S177
-rw-r--r--Modules/_ctypes/libffi/src/closures.c58
-rw-r--r--Modules/_ctypes/libffi/src/cris/ffi.c15
-rw-r--r--Modules/_ctypes/libffi/src/cris/ffitarget.h11
-rw-r--r--Modules/_ctypes/libffi/src/dlmalloc.c70
-rw-r--r--Modules/_ctypes/libffi/src/frv/ffitarget.h15
-rw-r--r--Modules/_ctypes/libffi/src/ia64/ffi.c16
-rw-r--r--Modules/_ctypes/libffi/src/ia64/ffitarget.h11
-rw-r--r--Modules/_ctypes/libffi/src/java_raw_api.c2
-rw-r--r--Modules/_ctypes/libffi/src/m32r/ffitarget.h11
-rw-r--r--Modules/_ctypes/libffi/src/m68k/ffi.c106
-rw-r--r--Modules/_ctypes/libffi/src/m68k/ffitarget.h11
-rw-r--r--Modules/_ctypes/libffi/src/m68k/sysv.S146
-rw-r--r--Modules/_ctypes/libffi/src/metag/ffi.c330
-rw-r--r--Modules/_ctypes/libffi/src/metag/ffitarget.h53
-rw-r--r--Modules/_ctypes/libffi/src/metag/sysv.S311
-rw-r--r--Modules/_ctypes/libffi/src/microblaze/ffi.c321
-rw-r--r--Modules/_ctypes/libffi/src/microblaze/ffitarget.h53
-rw-r--r--Modules/_ctypes/libffi/src/microblaze/sysv.S302
-rw-r--r--Modules/_ctypes/libffi/src/mips/ffi.c26
-rw-r--r--Modules/_ctypes/libffi/src/mips/ffitarget.h36
-rw-r--r--Modules/_ctypes/libffi/src/mips/n32.S1
-rw-r--r--Modules/_ctypes/libffi/src/moxie/eabi.S137
-rw-r--r--Modules/_ctypes/libffi/src/moxie/ffi.c82
-rw-r--r--Modules/_ctypes/libffi/src/moxie/ffitarget.h12
-rw-r--r--Modules/_ctypes/libffi/src/pa/ffi.c11
-rw-r--r--Modules/_ctypes/libffi/src/pa/ffitarget.h18
-rw-r--r--Modules/_ctypes/libffi/src/powerpc/aix.S18
-rw-r--r--Modules/_ctypes/libffi/src/powerpc/aix_closure.S6
-rw-r--r--Modules/_ctypes/libffi/src/powerpc/asm.h2
-rw-r--r--Modules/_ctypes/libffi/src/powerpc/darwin.S292
-rw-r--r--Modules/_ctypes/libffi/src/powerpc/darwin_closure.S459
-rw-r--r--Modules/_ctypes/libffi/src/powerpc/ffi.c601
-rw-r--r--Modules/_ctypes/libffi/src/powerpc/ffi_darwin.c787
-rw-r--r--Modules/_ctypes/libffi/src/powerpc/ffitarget.h43
-rw-r--r--Modules/_ctypes/libffi/src/powerpc/linux64.S21
-rw-r--r--Modules/_ctypes/libffi/src/powerpc/linux64_closure.S20
-rw-r--r--Modules/_ctypes/libffi/src/powerpc/ppc_closure.S19
-rw-r--r--Modules/_ctypes/libffi/src/powerpc/sysv.S6
-rw-r--r--Modules/_ctypes/libffi/src/prep_cif.c90
-rw-r--r--Modules/_ctypes/libffi/src/s390/ffi.c3
-rw-r--r--Modules/_ctypes/libffi/src/s390/ffitarget.h13
-rw-r--r--Modules/_ctypes/libffi/src/sh/ffi.c5
-rw-r--r--Modules/_ctypes/libffi/src/sh/ffitarget.h11
-rw-r--r--Modules/_ctypes/libffi/src/sh64/ffi.c5
-rw-r--r--Modules/_ctypes/libffi/src/sh64/ffitarget.h11
-rw-r--r--Modules/_ctypes/libffi/src/sparc/ffi.c78
-rw-r--r--Modules/_ctypes/libffi/src/sparc/ffitarget.h15
-rw-r--r--Modules/_ctypes/libffi/src/sparc/v8.S35
-rw-r--r--Modules/_ctypes/libffi/src/sparc/v9.S2
-rw-r--r--Modules/_ctypes/libffi/src/tile/ffi.c355
-rw-r--r--Modules/_ctypes/libffi/src/tile/ffitarget.h65
-rw-r--r--Modules/_ctypes/libffi/src/tile/tile.S360
-rw-r--r--Modules/_ctypes/libffi/src/x86/ffi.c262
-rw-r--r--Modules/_ctypes/libffi/src/x86/ffi64.c74
-rw-r--r--Modules/_ctypes/libffi/src/x86/ffitarget.h50
-rw-r--r--Modules/_ctypes/libffi/src/x86/sysv.S81
-rw-r--r--Modules/_ctypes/libffi/src/x86/unix64.S14
-rw-r--r--Modules/_ctypes/libffi/src/x86/win32.S268
-rw-r--r--Modules/_ctypes/libffi/src/x86/win64.S22
-rw-r--r--Modules/_ctypes/libffi/src/xtensa/ffi.c298
-rw-r--r--Modules/_ctypes/libffi/src/xtensa/ffitarget.h53
-rw-r--r--Modules/_ctypes/libffi/src/xtensa/sysv.S253
-rw-r--r--Modules/_ctypes/libffi/stamp-h.in1
-rw-r--r--Modules/_ctypes/libffi/testsuite/Makefile.am140
-rw-r--r--Modules/_ctypes/libffi/testsuite/Makefile.in229
-rw-r--r--Modules/_ctypes/libffi/testsuite/lib/libffi.exp (renamed from Modules/_ctypes/libffi/testsuite/lib/libffi-dg.exp)77
-rw-r--r--Modules/_ctypes/libffi/testsuite/lib/target-libpath.exp22
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/call.exp25
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/closure_stdcall.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/closure_thiscall.c72
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_12byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_16byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_18byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_19byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_1_1byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_20byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_20byte1.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_24byte.c10
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_2byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_3_1byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_3byte1.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_3byte2.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_4_1byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_4byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_5_1_byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_5byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_64byte.c10
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_6_1_byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_6byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_7_1_byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_7byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_8byte.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_9byte1.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_9byte2.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_double.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_float.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_longdouble.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split.c10
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split2.c10
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_pointer.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_sint16.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_sint32.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_sint64.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_uint16.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_uint32.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_uint64.c8
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_dbls_struct.c4
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_double_va.c16
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_longdouble.c6
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_longdouble_va.c17
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_pointer.c4
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_pointer_stack.c22
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_struct_va1.c114
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_uchar_va.c44
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_uint_va.c45
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_ulong_va.c45
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_ulonglong.c10
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/cls_ushort_va.c44
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/err_bad_abi.c5
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/err_bad_typedef.c7
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/fastthis1_win32.c50
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/fastthis2_win32.c50
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/fastthis3_win32.c56
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/ffitest.h81
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/float_va.c107
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/huge_struct.c69
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/many2.c57
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/many2_win32.c63
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/negint.c1
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct.c12
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct1.c16
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct10.c12
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct11.c121
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct2.c10
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct3.c10
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct4.c10
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct5.c10
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct6.c12
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct7.c10
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct8.c12
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct9.c12
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/return_dbl.c1
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/return_sc.c2
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/return_uc.c2
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/stret_large.c14
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/stret_large2.c14
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/stret_medium.c10
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/stret_medium2.c10
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/strlen2_win32.c44
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/struct1.c12
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/struct1_win32.c67
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/struct2.c10
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/struct2_win32.c67
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/struct3.c9
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/struct4.c13
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/struct5.c13
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/struct6.c14
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/struct7.c14
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/struct8.c13
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/struct9.c13
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/testclosure.c4
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/uninitialized.c61
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/va_1.c196
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/va_struct1.c121
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/va_struct2.c123
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.call/va_struct3.c125
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.special/ffitestcxx.h41
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.special/special.exp16
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.special/unwindtest.cc1
-rw-r--r--Modules/_ctypes/libffi/testsuite/libffi.special/unwindtest_ffi_call.cc1
-rw-r--r--Modules/_ctypes/libffi/texinfo.tex4941
-rw-r--r--Modules/_ctypes/libffi_osx/x86/darwin64.S6
-rw-r--r--Modules/_ctypes/libffi_osx/x86/x86-darwin.S4
-rw-r--r--Modules/_ctypes/libffi_osx/x86/x86-ffi64.c118
-rw-r--r--Modules/_ctypes/libffi_osx/x86/x86-ffi_darwin.c4
-rw-r--r--Modules/_ctypes/stgdict.c9
-rw-r--r--Modules/_curses_panel.c13
-rw-r--r--Modules/_cursesmodule.c4
-rw-r--r--Modules/_elementtree.c15
-rw-r--r--Modules/_functoolsmodule.c6
-rw-r--r--Modules/_hashopenssl.c239
-rw-r--r--Modules/_heapqmodule.c66
-rw-r--r--Modules/_io/_iomodule.c41
-rw-r--r--Modules/_io/_iomodule.h7
-rw-r--r--Modules/_io/bufferedio.c57
-rw-r--r--Modules/_io/bytesio.c32
-rw-r--r--Modules/_io/fileio.c99
-rw-r--r--Modules/_io/iobase.c34
-rw-r--r--Modules/_io/stringio.c19
-rw-r--r--Modules/_io/textio.c138
-rw-r--r--Modules/_json.c75
-rw-r--r--Modules/_math.c23
-rw-r--r--Modules/_math.h8
-rw-r--r--Modules/_multiprocessing/multiprocessing.c2
-rw-r--r--Modules/_multiprocessing/semaphore.c7
-rw-r--r--Modules/_multiprocessing/socket_connection.c117
-rw-r--r--Modules/_multiprocessing/win32_functions.c1
-rw-r--r--Modules/_randommodule.c19
-rw-r--r--Modules/_sqlite/connection.c94
-rw-r--r--Modules/_sqlite/cursor.c29
-rw-r--r--Modules/_sqlite/row.c26
-rw-r--r--Modules/_sqlite/statement.c23
-rw-r--r--Modules/_sqlite/util.c66
-rw-r--r--Modules/_sqlite/util.h4
-rw-r--r--Modules/_sre.c274
-rw-r--r--Modules/_ssl.c118
-rw-r--r--Modules/_struct.c83
-rw-r--r--Modules/_testcapimodule.c98
-rw-r--r--Modules/_tkinter.c394
-rw-r--r--Modules/arraymodule.c15
-rw-r--r--Modules/audioop.c320
-rw-r--r--Modules/binascii.c55
-rw-r--r--Modules/bsddb.h42
-rw-r--r--Modules/bz2module.c336
-rw-r--r--Modules/cPickle.c283
-rw-r--r--Modules/cStringIO.c137
-rw-r--r--Modules/cdmodule.c18
-rw-r--r--Modules/cmathmodule.c7
-rw-r--r--Modules/dbmmodule.c8
-rw-r--r--Modules/errnomodule.c3
-rw-r--r--Modules/expat/COPYING1
-rw-r--r--Modules/expat/amigaconfig.h64
-rw-r--r--Modules/expat/ascii.h7
-rw-r--r--Modules/expat/expat.h29
-rw-r--r--Modules/expat/expat_external.h4
-rw-r--r--Modules/expat/internal.h2
-rw-r--r--Modules/expat/watcomconfig.h47
-rw-r--r--Modules/expat/xmlparse.c423
-rw-r--r--Modules/expat/xmlrole.c12
-rw-r--r--Modules/expat/xmltok.c24
-rw-r--r--Modules/expat/xmltok_impl.c6
-rw-r--r--Modules/expat/xmltok_ns.c9
-rw-r--r--Modules/fcntlmodule.c26
-rw-r--r--Modules/flmodule.c12
-rw-r--r--Modules/gcmodule.c60
-rw-r--r--Modules/getaddrinfo.c2
-rw-r--r--Modules/getnameinfo.c2
-rw-r--r--Modules/getpath.c23
-rw-r--r--Modules/grpmodule.c26
-rw-r--r--Modules/itertoolsmodule.c66
-rw-r--r--Modules/main.c3
-rw-r--r--Modules/mathmodule.c34
-rw-r--r--Modules/md5module.c34
-rw-r--r--Modules/mmapmodule.c33
-rw-r--r--Modules/operator.c142
-rw-r--r--Modules/ossaudiodev.c11
-rw-r--r--Modules/parsermodule.c85
-rw-r--r--Modules/posixmodule.c419
-rw-r--r--Modules/posixmodule.h25
-rw-r--r--Modules/pwdmodule.c22
-rw-r--r--Modules/pyexpat.c63
-rw-r--r--Modules/readline.c86
-rw-r--r--Modules/resource.c33
-rw-r--r--Modules/selectmodule.c99
-rw-r--r--Modules/shamodule.c36
-rw-r--r--Modules/signalmodule.c32
-rw-r--r--Modules/socketmodule.c140
-rw-r--r--Modules/sre.h18
-rw-r--r--Modules/stropmodule.c28
-rw-r--r--Modules/svmodule.c3
-rw-r--r--Modules/symtablemodule.c5
-rw-r--r--Modules/threadmodule.c7
-rw-r--r--Modules/timemodule.c14
-rw-r--r--Modules/unicodedata.c6
-rw-r--r--Modules/zipimport.c42
-rw-r--r--Modules/zlib/deflate.c4
-rw-r--r--Modules/zlib/zlib.h2
-rw-r--r--Modules/zlibmodule.c91
-rw-r--r--Objects/abstract.c14
-rw-r--r--Objects/bufferobject.c15
-rw-r--r--Objects/bytearrayobject.c24
-rw-r--r--Objects/classobject.c30
-rw-r--r--Objects/complexobject.c4
-rw-r--r--Objects/descrobject.c57
-rw-r--r--Objects/dictobject.c182
-rw-r--r--Objects/exceptions.c15
-rw-r--r--Objects/fileobject.c119
-rw-r--r--Objects/floatobject.c27
-rw-r--r--Objects/frameobject.c6
-rw-r--r--Objects/genobject.c5
-rw-r--r--Objects/intobject.c43
-rw-r--r--Objects/listsort.txt116
-rw-r--r--Objects/longobject.c58
-rw-r--r--Objects/moduleobject.c6
-rw-r--r--Objects/object.c58
-rw-r--r--Objects/obmalloc.c61
-rw-r--r--Objects/rangeobject.c37
-rw-r--r--Objects/setobject.c9
-rw-r--r--Objects/sliceobject.c3
-rw-r--r--Objects/stringlib/formatter.h50
-rw-r--r--Objects/stringlib/string_format.h15
-rw-r--r--Objects/stringlib/transmogrify.h38
-rw-r--r--Objects/stringobject.c123
-rw-r--r--Objects/tupleobject.c10
-rw-r--r--Objects/typeobject.c309
-rw-r--r--Objects/unicodeobject.c441
-rw-r--r--Objects/weakrefobject.c39
-rw-r--r--PC/VC6/bz2.dsp8
-rw-r--r--PC/VC6/pythoncore.dsp4
-rw-r--r--PC/VC6/readme.txt6
-rw-r--r--PC/VS7.1/pythoncore.vcproj3
-rw-r--r--PC/VS8.0/bz2.vcproj4
-rw-r--r--PC/VS8.0/pyproject.vsprops4
-rw-r--r--PC/VS8.0/pythoncore.vcproj4
-rw-r--r--PC/_subprocess.c8
-rw-r--r--PC/_winreg.c30
-rw-r--r--[-rwxr-xr-x]PC/msvcrtmodule.c0
-rw-r--r--PC/pyconfig.h2
-rw-r--r--PC/python_nt.rc2
-rw-r--r--PCbuild/build_ssl.py44
-rw-r--r--PCbuild/bz2.vcproj4
-rw-r--r--PCbuild/pginstrument.vsprops2
-rw-r--r--PCbuild/pyproject.vsprops4
-rw-r--r--PCbuild/readme.txt66
-rw-r--r--PCbuild/rt.bat4
-rwxr-xr-xParser/asdl_c.py18
-rw-r--r--Parser/myreadline.c42
-rw-r--r--Parser/node.c26
-rw-r--r--Parser/parsetok.c3
-rw-r--r--Parser/tokenizer.c41
-rw-r--r--Python/Python-ast.c14
-rw-r--r--Python/ast.c47
-rw-r--r--Python/bltinmodule.c49
-rw-r--r--Python/ceval.c16
-rw-r--r--Python/codecs.c68
-rw-r--r--Python/compile.c39
-rw-r--r--Python/dtoa.c65
-rw-r--r--Python/future.c11
-rw-r--r--Python/getargs.c3
-rw-r--r--Python/getcopyright.c2
-rw-r--r--Python/getopt.c12
-rw-r--r--Python/import.c304
-rw-r--r--Python/marshal.c118
-rw-r--r--Python/peephole.c61
-rw-r--r--Python/pyarena.c7
-rw-r--r--Python/pystate.c16
-rw-r--r--Python/pythonrun.c74
-rw-r--r--Python/random.c12
-rw-r--r--Python/symtable.c10
-rw-r--r--Python/sysmodule.c10
-rw-r--r--Python/thread.c2
-rw-r--r--Python/thread_pthread.h10
-rw-r--r--README21
-rw-r--r--Tools/buildbot/build-amd64.bat1
-rw-r--r--Tools/buildbot/external-amd64.bat18
-rw-r--r--Tools/buildbot/external-common.bat31
-rw-r--r--Tools/buildbot/external.bat20
-rw-r--r--Tools/buildbot/test-amd64.bat2
-rw-r--r--Tools/buildbot/test.bat2
-rw-r--r--Tools/freeze/checkextensions_win32.py4
-rw-r--r--Tools/freeze/makefreeze.py2
-rwxr-xr-x[-rw-r--r--]Tools/gdb/libpython.py91
-rwxr-xr-x[-rw-r--r--]Tools/i18n/makelocalealias.py6
-rwxr-xr-xTools/i18n/msgfmt.py8
-rwxr-xr-xTools/i18n/pygettext.py13
-rw-r--r--Tools/msi/msi.py32
-rw-r--r--Tools/msi/msilib.py4
-rw-r--r--Tools/msi/uuids.py5
-rw-r--r--Tools/pybench/CommandLine.py2
-rw-r--r--Tools/pybench/README2
-rwxr-xr-x[-rw-r--r--]Tools/pybench/Setup.py0
-rwxr-xr-x[-rw-r--r--]Tools/pybench/clockres.py0
-rwxr-xr-x[-rw-r--r--]Tools/pybench/systimes.py2
-rw-r--r--Tools/pynche/DetailsViewer.py2
-rwxr-xr-xTools/scripts/byext.py2
-rwxr-xr-xTools/scripts/findnocoding.py6
-rwxr-xr-xTools/scripts/fixnotice.py2
-rwxr-xr-xTools/scripts/gprof2html.py2
-rwxr-xr-xTools/scripts/h2py.py6
-rwxr-xr-xTools/scripts/ifdef.py4
-rwxr-xr-xTools/scripts/patchcheck.py14
-rwxr-xr-xTools/scripts/pathfix.py2
-rwxr-xr-xTools/scripts/pindent.py198
-rwxr-xr-xTools/scripts/serve.py2
-rwxr-xr-x[-rw-r--r--]Tools/scripts/svneol.py0
-rwxr-xr-x[-rw-r--r--]Tools/ssl/get-remote-certificate.py0
-rwxr-xr-x[-rw-r--r--]Tools/unicode/comparecodecs.py0
-rwxr-xr-xconfig.guess1558
-rwxr-xr-xconfig.sub1791
-rwxr-xr-xconfigure1850
-rw-r--r--configure.ac (renamed from configure.in)422
-rw-r--r--pyconfig.h.in27
-rw-r--r--setup.py364
1607 files changed, 92626 insertions, 29250 deletions
diff --git a/.hg_archival.txt b/.hg_archival.txt
deleted file mode 100644
index a465d0f..0000000
--- a/.hg_archival.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-repo: 3cd033e6b530f6a78b3b14fedbd2074896dbf6de
-node: 70274d53c1ddc60c5f9a2b8a422a49884021447c
-branch: 2.7
-tag: v2.7.3
diff --git a/.hgtags b/.hgtags
deleted file mode 100644
index d28b7b0..0000000
--- a/.hgtags
+++ /dev/null
@@ -1,154 +0,0 @@
-64cc5439e10a6fdf984effaf0141e94fa4cc1004 v0.9.8
-78a7ed6953025e7ecdde9585099b01a6ae40b76a v0.9.9
-b15b8cc9b8d10e0352a0b8b7e8d51fa309db6df3 v1.0.1
-0326b5d61445ee3a8d3de28119f9652cb72d2e3f v1.0.2
-832615ec07646e310c85316b8ba6bc9b17ad3547 v1.1
-9895475d18c7b5f32adaf78f71886ae041e4d10c v1.1.1
-16eb4c51ee97169046340998e850a63c65225b0a v1.2b1
-b45c688756d04fb84d4a0d518fc3d7e3cb25fa8d v1.2b2
-9e82daf7605bad7976a9abc997cb5e0abe434078 v1.2b3
-065e31cf5862e27521cf5725b003aed211f091b2 v1.2b4
-e72257e655454d569468da8b1189e0ec336f3536 v1.2
-e63d83f8275853aaaa3d1972cb86564505e65583 v1.3b1
-7d743c865a9aa6bde8b603e32e0542031bba3c33 v1.3
-4fc85c82cc222554ae6b9c0b87776ed5f2b70c6e v1.4b1
-129f1299d4e97e884bbbbdd00baf101d178973e6 v1.4b2
-44a82ac654a4175569deed8e8a94b0cc8edee08d v1.4b3
-db49494c93dc73de06d5721c74eab533a947a92c v1.4
-062aed8a4ce2c91c81b80e29f02faff1cf5a761b v1.5a1
-c9498ac988372575cf7028b86395b900c9b0a840 v1.5a2
-dc5c968ec992aab3d40a7189df0c443d1c7a1a68 v1.5a3
-746654a0af680c7d9b814b210a026eb91bec9533 v1.5a4
-8ff58b5730f06be08fbbdc2bf592226f7a736201 v1.5b1
-eb78658d819fb0af09a8e6f9bedcb670805ed5f6 v1.5b2
-84461011a1a0ab402e352f06748f29fb5b5559e5 v1.5
-44aba4d26b01fbae0403efe654f9fd0347606732 v1.5.1
-fed63ccbe6dc3ac663bfe97a2f7006b1b28568f9 v1.5.2a1
-21d71f2e27248a0f4e393d0fc321ecf9b89321d2 v1.5.2a2
-f08c7a2a56f80741f5f192fd0ebe0b0967a203cf v1.5.2b1
-8fe7ec4b4fc1518fcac89e6bf674fbbce16150a9 v1.5.2b2
-39fb0dcc83dc375c1565ba65dbce0ed59b1359c9 v1.5.2c1
-61c91c7f101bab3149adfcd5646ae40e048de712 v1.5.2
-605eb9326ffe1fd1e43f40e2338d6652ab449fdf v1.6a1
-011bee8fd9f7f4da457ec71596484fb0882c0614 v1.6a2
-35c4fc1414a59888614b9be784a25f233ba67984 v2.0b1
-55bba197d4870cdae62aeca00e20240a756b84f8 v2.0b2
-e276329cce036a5f9e9d3451256dca5984e543dc v2.0c1
-2fa4e35083e02342ca014bf5bfba46aecb816c31 v2.0
-83459cc191c1c9c8cc6b08ada99c02f0af6ad54f v2.0.1c1
-73de94a722d206a45c0b49d3011c629ead4d62b1 v2.0.1
-b60831eeab5a06dd3c5e8297a99e39297aa8794b v2.1a1
-b382f1f07ec6b2c95551658b30c6139eeb32077a v2.1a2
-d0c830db5e68edd4aaa3401216e610c9ff145826 v2.1b1
-b59a536ae1ef3774fd85c17f623e8926b7b6c095 v2.1b2
-d611276e9ad53b5d32d1e8065e1d811c32f7d96f v2.1c1
-ff065e674af6c9ab895bd9eff7d9e9039a376c7d v2.1c2
-020e95d8180d7943fe54701e1db0a7d7d87e2b1e v2.1
-22bed2dd1b286564dea4d9a67d4ed80dc3225193 v2.1.1c1
-572533a3038d1832c8a200fb8718112dd83758e9 v2.1.1
-f76a425b14e4ee8ade5e9239598538534b3a90a4 v2.1.2c1
-cf31b10e45b48615094cae90e3c5bd072587d8e6 v2.1.2
-7d1504c80fa072b4ba86edda13a13b0a05ae8eab v2.1.3
-08796a137f1ada2462f7a3177306df5f67a767e1 v2.2a3
-765434e7aa019c1bb82f9813b8025830867cbdc0 v2.2.1c1
-11c952c7d4be6638040764910be80bd9c3c17de6 v2.2.1c2
-4045e6e92fafabad42f6c9989b67076fbad452ca v2.2
-394790316aa380d22e4d670bc69a639e725f0300 v2.2.1
-0128313c2855d71753f00fccc60bb39ed92a0c54 v2.2.2b1
-61d531533a86224a1c26917c18013d91f592fbfb v2.2.2
-ac77b91342534d09b1e432e1dc3b70dbbec3a919 v2.2.3c1
-f4f134ad46abd0bc973d3f6094ea56e62ac5c492 v2.2.3
-d054c29647f90bccb8345bd779bca1eecf2dd7f2 v2.3c1
-fce5c9e9abc722394cb2e909b7e2a39080d4448e v2.3c2
-fa65f6527fe134aa97e03e1b2af365d443b67dc7 v2.3.1
-7d7362a71253bff4fc1b0c825c14569778db8c36 v2.3.2c1
-be2533fb10a587c8da89b18c5f381f478d720593 v2.3.2
-d21d4ba2f973467d30794a3d2ac493658180e503 v2.3.3c1
-a8f1286f217129cdb53e320ee125d2c2c80e1377 v2.3.3
-3639861d02646af1da3123c25560c4db3d7edbce v2.3.4c1
-a74f9081c6970878cabe151f969617587915400c v2.3.4
-2639b10daa985b20b3cabec45169d523cac29d68 v2.3.5c1
-d228509424172f977edad4f7860a9c747cd655c8 v2.3.5
-b21427ff26475f9c7f4a701da324e951c2b162fe v2.3.6c1
-3b6fb5439c1061a941401245076c315beafcc787 v2.3.6
-893c7bd4a92bd889a85e691f49188c37b0a72357 v2.3.7c1
-2022943ad450c41d64708fb5caca2d2672b5b8f8 v2.3.7
-92ca658fd420095b6284c9ce6e9082a80285ec9c v2.4a1
-055fc6955f3c6522bfeb7ed4c671c97d5baaaac2 v2.4a2
-186b72550e53533ef6175f6411f932c1298193d7 v2.4a3
-53cff04283855adf88ed0c0fd3698827ca843637 v2.4b1
-7e387a9dcc79954a77695adef8b593da35be1214 v2.4b2
-ff80d8bbef6e13426c8a85d7f9d837b8f8f89834 v2.4c1
-f31e18d313c7a4fc66914b2d27e130a0f72c0b69 v2.4
-1195f9ba3439097cc5b5a367e3ae1b43f157b264 v2.4.1c1
-333cf303543b680ec5e3fdf7e8c9661f9488a50e v2.4.1c2
-f9054d235870029afa33ef945bca7ece99616c10 v2.4.1
-d02d387554e200befceec999b788f4593b434b49 v2.4.2c1
-8d37276fdf077d6d23d963d3f7f95381d00f1926 v2.4.2
-7c5be7c0fdfdbbc0ac1d91c07038bc81412da412 v2.4.3c1
-8cca2492626ce408cd567110811c0692fd1d37dd v2.4.3
-baaeea722b1c97ca03179e5a02a4717e7017ba2a v2.4.4c1
-592ac93532efdfa9a40f47b96b1c1231d2eae0e5 v2.4.4
-1f7d628c70a9aefe762c2c32210876faf8d64c78 v2.4.5c1
-d8df4aa06261de5e4c0aa051e448697d4791e437 v2.4.5
-a6c3c715e2b70740c4b6b4f7dc6f47b16a7e5905 v2.4.6c1
-f3f1f1462c82536bb23796e70c85522144ee24db v2.4.6
-67192da3e69c985bb1272da932d7de6073033fad v2.5a0
-896f9fead17e720ec4a21de3ac214518da84845f v2.5a1
-26d0770f2b7ee289a39a3b55dcec1c1ee65849c5 v2.5a2
-d49b198f545cd82fbf735a22979c332b8a97eecb v2.5b1
-03b6fe57cd6df5bb34ca19f4b760b943186cc806 v2.5b2
-c0abb18299b442e1160d2f958bce18f66850caa9 v2.5b3
-2dc64d570e8d7a4498ab850108c85709c276df93 v2.5c1
-cc3cb3a8237ea545db917d84c49d8a0a5e175cc1 v2.5c2
-c10a71cf16e471425e6d7564e3f7a648f14b702e v2.5
-4777c4007b5b194423953d41b1e5e6adaaba6c5d v2.5.1c1
-490d8c09abcb814950be7bb7a25c3ff2047c7eb5 v2.5.1
-0a8aae575cdddbf3d133fcb4eed91080e3fe41f3 v2.5.2c1
-1d508bbbdb49a2b11629ee186a63928291b08694 v2.5.2
-9530d334d2624219e7ee1aecaf33532e910266bc v2.5.3c1
-3cc056d21c2f4435a534d23345bad9c0a9d9b3cb v2.5.3
-a0a6d9909312ad9c1d844af66e9c91931b93852a v2.5.4
-657f16582943739b906f66f7efad4014492c8b1c v2.5.5c1
-1dc91e9dd5c13a4bc2d3bb7d4b5896ab264f2325 v2.5.5c2
-7098a46f0b75e5aacfaf81d65d72e3613b023532 v2.5.5
-a87c7b96672b69dffef64c59b56fba5bb2059b99 v2.5.6c1
-de34c7b097e8d66b1140c211dbd61d48b31ba483 v2.5.6
-2d0bd095c420b0711000d9be66848f6cfd972b3b v2.6a1
-eec144917a189be11ed5efa35c6604d03bc62bcc v2.6a2
-48e9fb0a721799877f26edaef01ac6e6029b6812 v2.6a3
-2f2f32af8c4ee8f6598f632dc83701a20726d10a v2.6b1
-81ec8263bd6e25c9a8855cd0ce9ae881732972be v2.6b2
-c62862d73f9ec64d655494cf1f61443446f2dea2 v2.6b3
-1ebb2a8cc06c94471d72b5476c2050c4d115a1aa v2.6rc1
-525792097c6308a78a76b7b5f56bbd8a24c26acf v2.6rc2
-95fff5a6a276520b2e7e0f75fe303f49376567a5 v2.6
-cabf303e787aa5cd7209b40e3b2caba3ee75c5a5 v2.6.1
-01f1ae83631ae82bd8598a11c17e3f9b3f56c38a v2.6.2c1
-4feba09a826ba99e5b482707b3d9643eba34156f v2.6.2
-1873542c00000c4b7ce2c7992d1f2d87f866232a v2.6.3rc1
-00c3396d7a8c4ab4c2c841d61013a073ae921d0f v2.6.3
-25aa90a6865b63133962dd64626c53c7b6fd47d5 v2.6.4rc1
-ea1fdafbe4fe2458ac17287e0d6c709ed00fce5c v2.6.4rc2
-8803c3d61da275c71cabe9e9d0274dac9902e2c0 v2.6.4
-c9f68e42ab796a3bab4f8cf3cc69ce10503cb990 v2.6.5rc1
-fa4630916699046357a5ac16884f3fc47bd0eaa6 v2.6.5rc2
-99af4b44e7e490390817a597a542546d749e698e v2.6.5
-c1dc9e7986a2a8e1070ec7bee748520febef382e v2.6.6rc1
-e189dc8fd66154ef46d9cd22584d56669b544ca3 v2.6.6rc2
-9f8771e0905277f8b3c2799113a062fda4164995 v2.6.6
-b4107eb00b4271fb73a9e1b736d4f23460950778 v2.7a1
-adc85ebc7271cc22e24e816782bb2b8d7fa3a6b3 v2.7a2
-4180557b7a9bb9dd5341a18af199f843f199e46e v2.7a3
-d2e1027edde84a02853fcf9790f311f10f46cf0a v2.7a4
-61550a1302b07a731436a84b2d86eb2bab3b2fb0 v2.7b1
-6bb9891d4275bd42a69b3d903d13b687543c915d v2.7b2
-381c5eeb511038d091d8e0808c4b85087ed2f684 v2.7rc1
-13e5b0b2071a2a42067fb03facc931409fa6ba50 v2.7rc2
-2145593d108de62ebf770987a4ac2a57d268c9d1 v2.7
-63d9f00fea0730c1c437a50f64a42b7792bdcbfb v2.7.1rc1
-5395f96588d4f0199d329cb79eb109648dc4ef5e v2.7.1
-f48756685406e8d0fa9d23d841fceb07e36a5656 v2.7.2rc1
-8527427914a29d895bcb30be76a465143993a793 v2.7.2
-b2c6aff96e1251a4f03cf866e7e75fb8232869f2 v2.7.3rc1
-d46c1973d3c407ecaa6a8ee16d3fad3ef506b51f v2.7.3rc2
diff --git a/Demo/comparisons/patterns b/Demo/comparisons/patterns
index f4da846..f4da846 100755..100644
--- a/Demo/comparisons/patterns
+++ b/Demo/comparisons/patterns
diff --git a/Demo/curses/ncurses.py b/Demo/curses/ncurses.py
index 0bdc1a9..0bdc1a9 100644..100755
--- a/Demo/curses/ncurses.py
+++ b/Demo/curses/ncurses.py
diff --git a/Demo/curses/rain.py b/Demo/curses/rain.py
index 9d46e6e..9d46e6e 100644..100755
--- a/Demo/curses/rain.py
+++ b/Demo/curses/rain.py
diff --git a/Demo/curses/tclock.py b/Demo/curses/tclock.py
index 8058d9a..8058d9a 100644..100755
--- a/Demo/curses/tclock.py
+++ b/Demo/curses/tclock.py
diff --git a/Demo/md5test/foo b/Demo/md5test/foo
index f2ba8f8..f2ba8f8 100755..100644
--- a/Demo/md5test/foo
+++ b/Demo/md5test/foo
diff --git a/Demo/newmetaclasses/Eiffel.py b/Demo/newmetaclasses/Eiffel.py
index 730a85d..f3f116f 100644
--- a/Demo/newmetaclasses/Eiffel.py
+++ b/Demo/newmetaclasses/Eiffel.py
@@ -29,7 +29,7 @@ class EiffelBaseMetaClass(type):
pre = dict.get("%s_pre" % m)
post = dict.get("%s_post" % m)
if pre or post:
- dict[k] = cls.make_eiffel_method(dict[m], pre, post)
+ dict[m] = cls.make_eiffel_method(dict[m], pre, post)
class EiffelMetaClass1(EiffelBaseMetaClass):
# an implementation of the "eiffel" meta class that uses nested functions
diff --git a/Demo/scripts/newslist.doc b/Demo/scripts/newslist.doc
index 87fd9ba..87fd9ba 100755..100644
--- a/Demo/scripts/newslist.doc
+++ b/Demo/scripts/newslist.doc
diff --git a/Demo/tix/bitmaps/about.xpm b/Demo/tix/bitmaps/about.xpm
index 33ffcc0..33ffcc0 100755..100644
--- a/Demo/tix/bitmaps/about.xpm
+++ b/Demo/tix/bitmaps/about.xpm
diff --git a/Demo/tix/bitmaps/bold.xbm b/Demo/tix/bitmaps/bold.xbm
index ebff8d1..ebff8d1 100755..100644
--- a/Demo/tix/bitmaps/bold.xbm
+++ b/Demo/tix/bitmaps/bold.xbm
diff --git a/Demo/tix/bitmaps/capital.xbm b/Demo/tix/bitmaps/capital.xbm
index fb4e070..fb4e070 100755..100644
--- a/Demo/tix/bitmaps/capital.xbm
+++ b/Demo/tix/bitmaps/capital.xbm
diff --git a/Demo/tix/bitmaps/centerj.xbm b/Demo/tix/bitmaps/centerj.xbm
index 9d2c064..9d2c064 100755..100644
--- a/Demo/tix/bitmaps/centerj.xbm
+++ b/Demo/tix/bitmaps/centerj.xbm
diff --git a/Demo/tix/bitmaps/combobox.xbm b/Demo/tix/bitmaps/combobox.xbm
index f5947f5..f5947f5 100755..100644
--- a/Demo/tix/bitmaps/combobox.xbm
+++ b/Demo/tix/bitmaps/combobox.xbm
diff --git a/Demo/tix/bitmaps/combobox.xpm b/Demo/tix/bitmaps/combobox.xpm
index d0234ab..d0234ab 100755..100644
--- a/Demo/tix/bitmaps/combobox.xpm
+++ b/Demo/tix/bitmaps/combobox.xpm
diff --git a/Demo/tix/bitmaps/combobox.xpm.1 b/Demo/tix/bitmaps/combobox.xpm.1
index 63792a4..63792a4 100755..100644
--- a/Demo/tix/bitmaps/combobox.xpm.1
+++ b/Demo/tix/bitmaps/combobox.xpm.1
diff --git a/Demo/tix/bitmaps/drivea.xbm b/Demo/tix/bitmaps/drivea.xbm
index 83c636c..83c636c 100755..100644
--- a/Demo/tix/bitmaps/drivea.xbm
+++ b/Demo/tix/bitmaps/drivea.xbm
diff --git a/Demo/tix/bitmaps/drivea.xpm b/Demo/tix/bitmaps/drivea.xpm
index 4d274b9..4d274b9 100755..100644
--- a/Demo/tix/bitmaps/drivea.xpm
+++ b/Demo/tix/bitmaps/drivea.xpm
diff --git a/Demo/tix/bitmaps/exit.xpm b/Demo/tix/bitmaps/exit.xpm
index 505a07b..505a07b 100755..100644
--- a/Demo/tix/bitmaps/exit.xpm
+++ b/Demo/tix/bitmaps/exit.xpm
diff --git a/Demo/tix/bitmaps/filebox.xbm b/Demo/tix/bitmaps/filebox.xbm
index c8f7ac2..c8f7ac2 100755..100644
--- a/Demo/tix/bitmaps/filebox.xbm
+++ b/Demo/tix/bitmaps/filebox.xbm
diff --git a/Demo/tix/bitmaps/filebox.xpm b/Demo/tix/bitmaps/filebox.xpm
index 7377ee6..7377ee6 100755..100644
--- a/Demo/tix/bitmaps/filebox.xpm
+++ b/Demo/tix/bitmaps/filebox.xpm
diff --git a/Demo/tix/bitmaps/italic.xbm b/Demo/tix/bitmaps/italic.xbm
index 169c3cb..169c3cb 100755..100644
--- a/Demo/tix/bitmaps/italic.xbm
+++ b/Demo/tix/bitmaps/italic.xbm
diff --git a/Demo/tix/bitmaps/justify.xbm b/Demo/tix/bitmaps/justify.xbm
index bba660a..bba660a 100755..100644
--- a/Demo/tix/bitmaps/justify.xbm
+++ b/Demo/tix/bitmaps/justify.xbm
diff --git a/Demo/tix/bitmaps/leftj.xbm b/Demo/tix/bitmaps/leftj.xbm
index 5f8e006..5f8e006 100755..100644
--- a/Demo/tix/bitmaps/leftj.xbm
+++ b/Demo/tix/bitmaps/leftj.xbm
diff --git a/Demo/tix/bitmaps/netw.xbm b/Demo/tix/bitmaps/netw.xbm
index a684d65..a684d65 100755..100644
--- a/Demo/tix/bitmaps/netw.xbm
+++ b/Demo/tix/bitmaps/netw.xbm
diff --git a/Demo/tix/bitmaps/netw.xpm b/Demo/tix/bitmaps/netw.xpm
index fff6593..fff6593 100755..100644
--- a/Demo/tix/bitmaps/netw.xpm
+++ b/Demo/tix/bitmaps/netw.xpm
diff --git a/Demo/tix/bitmaps/optmenu.xpm b/Demo/tix/bitmaps/optmenu.xpm
index 63bab81..63bab81 100755..100644
--- a/Demo/tix/bitmaps/optmenu.xpm
+++ b/Demo/tix/bitmaps/optmenu.xpm
diff --git a/Demo/tix/bitmaps/rightj.xbm b/Demo/tix/bitmaps/rightj.xbm
index 1d438e0..1d438e0 100755..100644
--- a/Demo/tix/bitmaps/rightj.xbm
+++ b/Demo/tix/bitmaps/rightj.xbm
diff --git a/Demo/tix/bitmaps/select.xpm b/Demo/tix/bitmaps/select.xpm
index 392e5a0..392e5a0 100755..100644
--- a/Demo/tix/bitmaps/select.xpm
+++ b/Demo/tix/bitmaps/select.xpm
diff --git a/Demo/tix/bitmaps/underline.xbm b/Demo/tix/bitmaps/underline.xbm
index f07bb46..f07bb46 100755..100644
--- a/Demo/tix/bitmaps/underline.xbm
+++ b/Demo/tix/bitmaps/underline.xbm
diff --git a/Demo/tkinter/guido/canvasevents.py b/Demo/tkinter/guido/canvasevents.py
index 74ed76f..74ed76f 100644..100755
--- a/Demo/tkinter/guido/canvasevents.py
+++ b/Demo/tkinter/guido/canvasevents.py
diff --git a/Demo/tkinter/guido/newmenubardemo.py b/Demo/tkinter/guido/newmenubardemo.py
index 57bf13c..57bf13c 100644..100755
--- a/Demo/tkinter/guido/newmenubardemo.py
+++ b/Demo/tkinter/guido/newmenubardemo.py
diff --git a/Demo/tkinter/guido/sortvisu.py b/Demo/tkinter/guido/sortvisu.py
index 9148b73..9148b73 100644..100755
--- a/Demo/tkinter/guido/sortvisu.py
+++ b/Demo/tkinter/guido/sortvisu.py
diff --git a/Demo/turtle/demohelp.txt b/Demo/turtle/demohelp.txt
index d565691..d364e94 100644
--- a/Demo/turtle/demohelp.txt
+++ b/Demo/turtle/demohelp.txt
@@ -2,7 +2,7 @@
----------------------------------------------
- xturtleDemo - Help
+ turtleDemo - Help
----------------------------------------------
@@ -53,23 +53,29 @@
(2) How to add your own demos to the demo repository
- - scriptname: must begin with tdemo_ ,
+ - The script name must begin with tdemo_ ,
so it must have the form tdemo_<your-script-name>.py
- - place: same directory as xturtleDemo.py or some
- subdirectory, the name of which must also begin with
- tdemo_.....
-
- - requirements on source code:
- code must contain a main() function which will
- be executed by the viewer (see provided example scripts)
- main() may return a string which will be displayed
- in the Label below the source code window (when execution
- has finished.)
-
- !! For programs, which are EVENT DRIVEN, main must return
- !! the string "EVENTLOOP". This informs the viewer, that the
- !! script is still running and must be stopped by the user!
-
-
-
+ - The code must contain a main() function which will
+ be executed by the viewer (see provided example scripts).
+ It may return a string which will be displayed in the Label below
+ the source code window (when execution has finished.)
+
+ - In order to run mydemo.py by itself, such as during development,
+ add the following at the end of the file:
+
+ if __name__ == '__main__':
+ main()
+ mainloop() # keep window
+
+ python -m turtledemo.mydemo # will then run it
+
+ - If the demo is EVENT DRIVEN, main must return the string
+ "EVENTLOOP". This informs the demo viewer that the script is
+ still running and must be stopped by the user!
+
+ If an "EVENTLOOP" demo runs by itself, as with clock, which uses
+ ontimer, or minimal_hanoi, which loops by recursion, then the
+ code should catch the turtle.Terminator exception that will be
+ raised when the user presses the STOP button. (Paint is not such
+ a demo; it only acts in response to mouse clicks and movements.)
diff --git a/Demo/turtle/tdemo_I_dontlike_tiltdemo.py b/Demo/turtle/tdemo_I_dontlike_tiltdemo.py
index c9e6e65..c9e6e65 100644..100755
--- a/Demo/turtle/tdemo_I_dontlike_tiltdemo.py
+++ b/Demo/turtle/tdemo_I_dontlike_tiltdemo.py
diff --git a/Demo/turtle/tdemo_bytedesign.py b/Demo/turtle/tdemo_bytedesign.py
index bed671d..bed671d 100644..100755
--- a/Demo/turtle/tdemo_bytedesign.py
+++ b/Demo/turtle/tdemo_bytedesign.py
diff --git a/Demo/turtle/tdemo_clock.py b/Demo/turtle/tdemo_clock.py
index b6280bb..d4e0686 100644..100755
--- a/Demo/turtle/tdemo_clock.py
+++ b/Demo/turtle/tdemo_clock.py
@@ -11,6 +11,7 @@ and time
------------------------------------
"""
from turtle import *
+from turtle import Terminator # not in __all__
from datetime import datetime
mode("logo")
@@ -102,22 +103,25 @@ def tick():
sekunde = t.second + t.microsecond*0.000001
minute = t.minute + sekunde/60.0
stunde = t.hour + minute/60.0
- tracer(False)
- writer.clear()
- writer.home()
- writer.forward(65)
- writer.write(wochentag(t),
- align="center", font=("Courier", 14, "bold"))
- writer.back(150)
- writer.write(datum(t),
- align="center", font=("Courier", 14, "bold"))
- writer.forward(85)
- tracer(True)
- second_hand.setheading(6*sekunde)
- minute_hand.setheading(6*minute)
- hour_hand.setheading(30*stunde)
- tracer(True)
- ontimer(tick, 100)
+ try:
+ tracer(False) # Terminator can occur here
+ writer.clear()
+ writer.home()
+ writer.forward(65)
+ writer.write(wochentag(t),
+ align="center", font=("Courier", 14, "bold"))
+ writer.back(150)
+ writer.write(datum(t),
+ align="center", font=("Courier", 14, "bold"))
+ writer.forward(85)
+ tracer(True)
+ second_hand.setheading(6*sekunde) # or here
+ minute_hand.setheading(6*minute)
+ hour_hand.setheading(30*stunde)
+ tracer(True)
+ ontimer(tick, 100)
+ except Terminator:
+ pass # turtledemo user pressed STOP
def main():
tracer(False)
diff --git a/Demo/turtle/tdemo_fractalcurves.py b/Demo/turtle/tdemo_fractalcurves.py
index 2ac8ecd..2ac8ecd 100644..100755
--- a/Demo/turtle/tdemo_fractalcurves.py
+++ b/Demo/turtle/tdemo_fractalcurves.py
diff --git a/Demo/turtle/tdemo_lindenmayer_indian.py b/Demo/turtle/tdemo_lindenmayer_indian.py
index 92c8cff..92c8cff 100644..100755
--- a/Demo/turtle/tdemo_lindenmayer_indian.py
+++ b/Demo/turtle/tdemo_lindenmayer_indian.py
diff --git a/Demo/turtle/tdemo_minimal_hanoi.py b/Demo/turtle/tdemo_minimal_hanoi.py
index 8a1caa8..3cb9efc 100644..100755
--- a/Demo/turtle/tdemo_minimal_hanoi.py
+++ b/Demo/turtle/tdemo_minimal_hanoi.py
@@ -18,6 +18,7 @@ stretched to rectangles by shapesize()
---------------------------------------
"""
from turtle import *
+from turtle import Terminator # not in __all__
class Disc(Turtle):
def __init__(self, n):
@@ -50,9 +51,12 @@ def hanoi(n, from_, with_, to_):
def play():
onkey(None,"space")
clear()
- hanoi(6, t1, t2, t3)
- write("press STOP button to exit",
- align="center", font=("Courier", 16, "bold"))
+ try:
+ hanoi(6, t1, t2, t3)
+ write("press STOP button to exit",
+ align="center", font=("Courier", 16, "bold"))
+ except Terminator:
+ pass # turtledemo user pressed STOP
def main():
global t1, t2, t3
diff --git a/Demo/turtle/tdemo_paint.py b/Demo/turtle/tdemo_paint.py
index e1d6303..105a06b 100644..100755
--- a/Demo/turtle/tdemo_paint.py
+++ b/Demo/turtle/tdemo_paint.py
@@ -3,11 +3,15 @@
tdemo_paint.py
-A simple eventdriven paint program
+A simple event-driven paint program
-- use left mouse button to move turtle
-- middle mouse button to change color
-- right mouse button do turn filling on/off
+- left mouse button moves turtle
+- middle mouse button changes color
+- right mouse button toggles between pen up
+(no line drawn when the turtle moves) and
+pen down (line is drawn). If pen up follows
+at least two pen-down moves, the polygon that
+includes the starting point is filled.
-------------------------------------------
Play around by clicking into the canvas
using all three mouse buttons.
diff --git a/Demo/turtle/tdemo_peace.py b/Demo/turtle/tdemo_peace.py
index 13044c9..8bfa920 100644..100755
--- a/Demo/turtle/tdemo_peace.py
+++ b/Demo/turtle/tdemo_peace.py
@@ -3,14 +3,10 @@
tdemo_peace.py
-A very simple drawing suitable as a beginner's
-programming example.
-
-Uses only commands, which are also available in
-old turtle.py.
-
-Intentionally no variables are used except for the
-colorloop:
+A simple drawing suitable as a beginner's
+programming example. Aside from the
+peacecolors assignment and the for loop,
+it only uses turtle commands.
"""
from turtle import *
@@ -21,7 +17,7 @@ def main():
"royalblue1", "dodgerblue4")
reset()
- s = Screen()
+ Screen()
up()
goto(-320,-195)
width(70)
@@ -58,7 +54,7 @@ def main():
up()
goto(0,300) # vanish if hideturtle() is not available ;-)
- return "Done!!"
+ return "Done!"
if __name__ == "__main__":
main()
diff --git a/Demo/turtle/tdemo_penrose.py b/Demo/turtle/tdemo_penrose.py
index f5824d7..f5824d7 100644..100755
--- a/Demo/turtle/tdemo_penrose.py
+++ b/Demo/turtle/tdemo_penrose.py
diff --git a/Demo/turtle/tdemo_planet_and_moon.py b/Demo/turtle/tdemo_planet_and_moon.py
index 223d87b..a0280d7 100644..100755
--- a/Demo/turtle/tdemo_planet_and_moon.py
+++ b/Demo/turtle/tdemo_planet_and_moon.py
@@ -12,9 +12,9 @@ very light moon!
Planet has a circular orbit, moon a stable
orbit around the planet.
-You can hold the movement temporarily by pressing
-the left mouse button with mouse over the
-scrollbar of the canvas.
+You can hold the movement temporarily by
+pressing the left mouse button with the
+mouse over the scrollbar of the canvas.
"""
from turtle import Shape, Turtle, mainloop, Vec2D as Vec
@@ -108,6 +108,5 @@ def main():
return "Done!"
if __name__ == '__main__':
- msg = main()
- print msg
+ main()
mainloop()
diff --git a/Demo/turtle/tdemo_tree.py b/Demo/turtle/tdemo_tree.py
index 6fc8735..6c6121a 100644..100755
--- a/Demo/turtle/tdemo_tree.py
+++ b/Demo/turtle/tdemo_tree.py
@@ -11,9 +11,9 @@ Uses:
(1) a tree-generator, where the drawing is
quasi the side-effect, whereas the generator
always yields None.
-(2) Turtle-cloning: At each branching point the
-current pen is cloned. So in the end there
-are 1024 turtles.
+(2) Turtle-cloning: At each branching point
+the current pen is cloned. So in the end
+there are 1024 turtles.
"""
from turtle import Turtle, mainloop
from time import clock
diff --git a/Demo/turtle/tdemo_yinyang.py b/Demo/turtle/tdemo_yinyang.py
index 04dd758..04dd758 100644..100755
--- a/Demo/turtle/tdemo_yinyang.py
+++ b/Demo/turtle/tdemo_yinyang.py
diff --git a/Demo/turtle/turtleDemo.py b/Demo/turtle/turtleDemo.py
index 30b5e5b..88b9b20 100644..100755
--- a/Demo/turtle/turtleDemo.py
+++ b/Demo/turtle/turtleDemo.py
@@ -5,11 +5,17 @@ import os
from Tkinter import *
from idlelib.Percolator import Percolator
from idlelib.ColorDelegator import ColorDelegator
-from idlelib.textView import TextViewer
+from idlelib.textView import view_file
import turtle
import time
+demo_dir = os.getcwd()
+if "turtleDemo.py" not in os.listdir(demo_dir):
+ print "Directory of turtleDemo must be current working directory!"
+ print "But in your case this is", demo_dir
+ sys.exit()
+
STARTUP = 1
READY = 2
RUNNING = 3
@@ -21,12 +27,7 @@ btnfont = ("Arial", 12, 'bold')
txtfont = ('Lucida Console', 8, 'normal')
def getExampleEntries():
- cwd = os.getcwd()
- if "turtleDemo.py" not in os.listdir(cwd):
- print "Directory of turtleDemo must be current working directory!"
- print "But in your case this is", cwd
- sys.exit()
- entries1 = [entry for entry in os.listdir(cwd) if
+ entries1 = [entry for entry in os.listdir(demo_dir) if
entry.startswith("tdemo_") and
not entry.endswith(".pyc")]
entries2 = []
@@ -34,7 +35,7 @@ def getExampleEntries():
if entry.endswith(".py"):
entries2.append(entry)
else:
- path = os.path.join(cwd,entry)
+ path = os.path.join(demo_dir, entry)
sys.path.append(path)
subdir = [entry]
scripts = [script for script in os.listdir(path) if
@@ -44,13 +45,16 @@ def getExampleEntries():
return entries2
def showDemoHelp():
- TextViewer(demo.root, "Help on turtleDemo", "demohelp.txt")
+ view_file(demo.root, "Help on turtleDemo",
+ os.path.join(demo_dir, "demohelp.txt"))
def showAboutDemo():
- TextViewer(demo.root, "About turtleDemo", "about_turtledemo.txt")
+ view_file(demo.root, "About turtleDemo",
+ os.path.join(demo_dir, "about_turtledemo.txt"))
def showAboutTurtle():
- TextViewer(demo.root, "About the new turtle module", "about_turtle.txt")
+ view_file(demo.root, "About the new turtle module.",
+ os.path.join(demo_dir, "about_turtle.txt"))
class DemoWindow(object):
diff --git a/Demo/turtle/turtledemo_two_canvases.py b/Demo/turtle/turtledemo_two_canvases.py
index 5a9831d..5a9831d 100644..100755
--- a/Demo/turtle/turtledemo_two_canvases.py
+++ b/Demo/turtle/turtledemo_two_canvases.py
diff --git a/Doc/ACKS.txt b/Doc/ACKS.txt
deleted file mode 100644
index 7507c6c..0000000
--- a/Doc/ACKS.txt
+++ /dev/null
@@ -1,230 +0,0 @@
-Contributors to the Python Documentation
-----------------------------------------
-
-This section lists people who have contributed in some way to the Python
-documentation. It is probably not complete -- if you feel that you or
-anyone else should be on this list, please let us know (send email to
-docs@python.org), and we'll be glad to correct the problem.
-
-.. acks::
-
- * Aahz
- * Michael Abbott
- * Steve Alexander
- * Jim Ahlstrom
- * Fred Allen
- * A. Amoroso
- * Pehr Anderson
- * Oliver Andrich
- * Heidi Annexstad
- * Jesús Cea Avión
- * Manuel Balsera
- * Daniel Barclay
- * Chris Barker
- * Don Bashford
- * Anthony Baxter
- * Alexander Belopolsky
- * Bennett Benson
- * Jonathan Black
- * Robin Boerdijk
- * Michal Bozon
- * Aaron Brancotti
- * Georg Brandl
- * Keith Briggs
- * Ian Bruntlett
- * Lee Busby
- * Arnaud Calmettes
- * Lorenzo M. Catucci
- * Carl Cerecke
- * Mauro Cicognini
- * Gilles Civario
- * Mike Clarkson
- * Steve Clift
- * Dave Cole
- * Matthew Cowles
- * Jeremy Craven
- * Andrew Dalke
- * Ben Darnell
- * L. Peter Deutsch
- * Robert Donohue
- * Fred L. Drake, Jr.
- * Josip Dzolonga
- * Jeff Epler
- * Michael Ernst
- * Blame Andy Eskilsson
- * Carey Evans
- * Martijn Faassen
- * Carl Feynman
- * Dan Finnie
- * Hernán Martínez Foffani
- * Stefan Franke
- * Jim Fulton
- * Peter Funk
- * Lele Gaifax
- * Matthew Gallagher
- * Gabriel Genellina
- * Ben Gertzfield
- * Nadim Ghaznavi
- * Jonathan Giddy
- * Shelley Gooch
- * Nathaniel Gray
- * Grant Griffin
- * Thomas Guettler
- * Anders Hammarquist
- * Mark Hammond
- * Harald Hanche-Olsen
- * Manus Hand
- * Gerhard Häring
- * Travis B. Hartwell
- * Tim Hatch
- * Janko Hauser
- * Ben Hayden
- * Thomas Heller
- * Bernhard Herzog
- * Magnus L. Hetland
- * Konrad Hinsen
- * Stefan Hoffmeister
- * Albert Hofkamp
- * Gregor Hoffleit
- * Steve Holden
- * Thomas Holenstein
- * Gerrit Holl
- * Rob Hooft
- * Brian Hooper
- * Randall Hopper
- * Michael Hudson
- * Eric Huss
- * Jeremy Hylton
- * Roger Irwin
- * Jack Jansen
- * Philip H. Jensen
- * Pedro Diaz Jimenez
- * Kent Johnson
- * Lucas de Jonge
- * Andreas Jung
- * Robert Kern
- * Jim Kerr
- * Jan Kim
- * Kamil Kisiel
- * Greg Kochanski
- * Guido Kollerie
- * Peter A. Koren
- * Daniel Kozan
- * Andrew M. Kuchling
- * Dave Kuhlman
- * Erno Kuusela
- * Ross Lagerwall
- * Thomas Lamb
- * Detlef Lannert
- * Piers Lauder
- * Glyph Lefkowitz
- * Robert Lehmann
- * Marc-André Lemburg
- * Ross Light
- * Ulf A. Lindgren
- * Everett Lipman
- * Mirko Liss
- * Martin von Löwis
- * Fredrik Lundh
- * Jeff MacDonald
- * John Machin
- * Andrew MacIntyre
- * Vladimir Marangozov
- * Vincent Marchetti
- * Westley Martínez
- * Laura Matson
- * Daniel May
- * Rebecca McCreary
- * Doug Mennella
- * Paolo Milani
- * Skip Montanaro
- * Paul Moore
- * Ross Moore
- * Sjoerd Mullender
- * Dale Nagata
- * Michal Nowikowski
- * Steffen Daode Nurpmeso
- * Ng Pheng Siong
- * Koray Oner
- * Tomas Oppelstrup
- * Denis S. Otkidach
- * Zooko O'Whielacronx
- * Shriphani Palakodety
- * William Park
- * Joonas Paalasmaa
- * Harri Pasanen
- * Bo Peng
- * Tim Peters
- * Benjamin Peterson
- * Christopher Petrilli
- * Justin D. Pettit
- * Chris Phoenix
- * François Pinard
- * Paul Prescod
- * Eric S. Raymond
- * Edward K. Ream
- * Terry J. Reedy
- * Sean Reifschneider
- * Bernhard Reiter
- * Armin Rigo
- * Wes Rishel
- * Armin Ronacher
- * Jim Roskind
- * Guido van Rossum
- * Donald Wallace Rouse II
- * Mark Russell
- * Nick Russo
- * Chris Ryland
- * Constantina S.
- * Hugh Sasse
- * Bob Savage
- * Scott Schram
- * Neil Schemenauer
- * Barry Scott
- * Joakim Sernbrant
- * Justin Sheehy
- * Charlie Shepherd
- * Yue Shuaijie
- * Michael Simcich
- * Ionel Simionescu
- * Michael Sloan
- * Gregory P. Smith
- * Roy Smith
- * Clay Spence
- * Nicholas Spies
- * Tage Stabell-Kulo
- * Frank Stajano
- * Anthony Starks
- * Greg Stein
- * Peter Stoehr
- * Mark Summerfield
- * Reuben Sumner
- * Kalle Svensson
- * Jim Tittsler
- * David Turner
- * Sandro Tosi
- * Ville Vainio
- * Martijn Vries
- * Charles G. Waldman
- * Greg Ward
- * Barry Warsaw
- * Corran Webster
- * Glyn Webster
- * Bob Weiner
- * Eddy Welbourne
- * Jeff Wheeler
- * Mats Wichmann
- * Gerry Wiener
- * Timothy Wild
- * Paul Winkler
- * Collin Winter
- * Blake Winton
- * Dan Wolfe
- * Adam Woodbeck
- * Steven Work
- * Thomas Wouters
- * Ka-Ping Yee
- * Rory Yorke
- * Moshe Zadka
- * Milan Zamazal
- * Cheng Zhang
diff --git a/Doc/Makefile b/Doc/Makefile
index 6e694c6..afdf35f 100644
--- a/Doc/Makefile
+++ b/Doc/Makefile
@@ -169,12 +169,19 @@ serve:
# for development releases: always build
autobuild-dev:
make update
- make dist SPHINXOPTS='-A daily=1'
+ make dist SPHINXOPTS='-A daily=1 -A versionswitcher=1'
+ -make suspicious
-# for stable releases: only build if not in pre-release stage (alpha, beta, rc)
+# for quick rebuilds (HTML only)
+autobuild-html:
+ make html SPHINXOPTS='-A daily=1 -A versionswitcher=1'
+
+# for stable releases: only build if not in pre-release stage (alpha, beta)
+# release candidate downloads are okay, since the stable tree can be in that stage
autobuild-stable:
- @case $(DISTVERSION) in *[abc]*) \
+ @case $(DISTVERSION) in *[ab]*) \
echo "Not building; $(DISTVERSION) is not a release version."; \
exit 1;; \
esac
@make autobuild-dev
+
diff --git a/Doc/README.txt b/Doc/README.txt
index f03da76..b8ff47f 100644
--- a/Doc/README.txt
+++ b/Doc/README.txt
@@ -3,18 +3,17 @@ Python Documentation README
This directory contains the reStructuredText (reST) sources to the Python
documentation. You don't need to build them yourself, prebuilt versions are
-available at http://docs.python.org/download/.
+available at https://docs.python.org/2/download.html
Documentation on the authoring Python documentation, including information about
both style and markup, is available in the "Documenting Python" chapter of the
-documentation. There's also a chapter intended to point out differences to
-those familiar with the previous docs written in LaTeX.
+documentation.
Building the docs
=================
-You need to have Python 2.4 or higher installed; the toolset used to build the
+You need to have Python 2 installed; the toolset used to build the
docs is written in Python. It is called *Sphinx*, it is not included in this
tree, but maintained separately. Also needed are the docutils, supplying the
base markup that Sphinx uses, Jinja, a templating engine, and optionally
@@ -33,6 +32,9 @@ to check out the necessary toolset in the `tools/` subdirectory and build the
HTML output files. To view the generated HTML, point your favorite browser at
the top-level index `build/html/index.html` after running "make".
+On Windows, we try to emulate the Makefile as closely as possible with a
+``make.bat`` file.
+
Available make targets are:
* "html", which builds standalone HTML files for offline viewing.
@@ -65,43 +67,23 @@ Available make targets are:
`tools/sphinxext/pyspecific.py` -- pydoc needs these to show topic and
keyword help.
+ * "suspicious", which checks the parsed markup for text that looks like
+ malformed and thus unconverted reST.
+
A "make update" updates the Subversion checkouts in `tools/`.
Without make
------------
-You'll need to install the Sphinx package, either by checking it out via ::
-
- svn co http://svn.python.org/projects/external/Sphinx-0.6.7/sphinx tools/sphinx
-
-or by installing it from PyPI.
-
-Then, you need to install Docutils, either by checking it out via ::
-
- svn co http://svn.python.org/projects/external/docutils-0.6/docutils tools/docutils
-
-or by installing it from http://docutils.sf.net/.
-
-You also need Jinja2, either by checking it out via ::
-
- svn co http://svn.python.org/projects/external/Jinja-2.3.1/jinja2 tools/jinja2
-
-or by installing it from PyPI.
-
-You can optionally also install Pygments, either as a checkout via ::
-
- svn co http://svn.python.org/projects/external/Pygments-1.3.1/pygments tools/pygments
-
-or from PyPI at http://pypi.python.org/pypi/Pygments.
-
+Install the Sphinx package and its dependencies from PyPI.
-Then, make an output directory, e.g. under `build/`, and run ::
+Then, from the ``Doc`` directory, run ::
- python tools/sphinx-build.py -b<builder> . build/<outputdirectory>
+ sphinx-build -b<builder> . build/<builder>
-where `<builder>` is one of html, text, latex, or htmlhelp (for explanations see
-the make targets above).
+where ``<builder>`` is one of html, text, latex, or htmlhelp (for explanations
+see the make targets above).
Contributing
@@ -127,7 +109,7 @@ The Python source is copyrighted, but you can freely use and copy it
as long as you don't change or remove the copyright notice:
----------------------------------------------------------------------
-Copyright (c) 2000-2012 Python Software Foundation.
+Copyright (c) 2000-2014 Python Software Foundation.
All rights reserved.
Copyright (c) 2000 BeOpen.com.
diff --git a/Doc/about.rst b/Doc/about.rst
index 2c229e6..678168b 100644
--- a/Doc/about.rst
+++ b/Doc/about.rst
@@ -7,14 +7,15 @@ These documents are generated from `reStructuredText`_ sources by `Sphinx`_, a
document processor specifically written for the Python documentation.
.. _reStructuredText: http://docutils.sf.net/rst.html
-.. _Sphinx: http://sphinx.pocoo.org/
+.. _Sphinx: http://sphinx-doc.org/
.. In the online version of these documents, you can submit comments and suggest
changes directly on the documentation pages.
-Development of the documentation and its toolchain takes place on the
-docs@python.org mailing list. We're always looking for volunteers wanting
-to help with the docs, so feel free to send a mail there!
+Development of the documentation and its toolchain is an entirely volunteer
+effort, just like Python itself. If you want to contribute, please take a
+look at the :ref:`reporting-bugs` page for information on how to do so. New
+volunteers are always welcome!
Many thanks go to:
@@ -26,11 +27,13 @@ Many thanks go to:
<http://effbot.org/zone/pyref.htm>`_ project from which Sphinx got many good
ideas.
-See :ref:`reporting-bugs` for information how to report bugs in this
-documentation, or Python itself.
-.. including the ACKS file here so that it can be maintained separately
-.. include:: ACKS.txt
+Contributors to the Python Documentation
+----------------------------------------
+
+Many people have contributed to the Python language, the Python standard
+library, and the Python documentation. See :source:`Misc/ACKS` in the Python
+source distribution for a partial list of contributors.
It is only with the input and contributions of the Python community
that Python has such wonderful documentation -- Thank You!
diff --git a/Doc/bugs.rst b/Doc/bugs.rst
index 3785ccb..847c010 100644
--- a/Doc/bugs.rst
+++ b/Doc/bugs.rst
@@ -13,15 +13,17 @@ Documentation bugs
==================
If you find a bug in this documentation or would like to propose an improvement,
-please send an e-mail to docs@python.org describing the bug and where you found
-it. If you have a suggestion how to fix it, include that as well.
+please submit a bug report on the :ref:`tracker <using-the-tracker>`. If you
+have a suggestion for how to fix it, include that as well.
-docs@python.org is a mailing list run by volunteers; your request will be
-noticed, even if it takes a while to be processed.
+If you're short on time, you can also email your bug report to docs@python.org.
+'docs@' is a mailing list run by volunteers; your request will be noticed,
+though it may take a while to be processed.
-Of course, if you want a more persistent record of your issue, you can use the
-issue tracker for documentation bugs as well.
+.. seealso::
+ `Documentation bugs`_ on the Python issue tracker
+.. _using-the-tracker:
Using the Python issue tracker
==============================
@@ -62,9 +64,6 @@ taken on the bug.
.. seealso::
- `Python Developer's Guide <http://docs.python.org/devguide/>`_
- Detailed description of the issue workflow and developers tools.
-
`How to Report Bugs Effectively <http://www.chiark.greenend.org.uk/~sgtatham/bugs.html>`_
Article which goes into some detail about how to create a useful bug report.
This describes what kind of information is useful and why it is useful.
@@ -73,3 +72,16 @@ taken on the bug.
Information about writing a good bug report. Some of this is specific to the
Mozilla project, but describes general good practices.
+
+Getting started contributing to Python yourself
+===============================================
+
+Beyond just reporting bugs that you find, you are also welcome to submit
+patches to fix them. You can find more information on how to get started
+patching Python in the `Python Developer's Guide`_. If you have questions,
+the `core-mentorship mailing list`_ is a friendly place to get answers to
+any and all questions pertaining to the process of fixing issues in Python.
+
+.. _Documentation bugs: http://bugs.python.org/issue?@filter=status&@filter=components&components=4&status=1&@columns=id,activity,title,status&@sort=-activity
+.. _Python Developer's Guide: http://docs.python.org/devguide/
+.. _core-mentorship mailing list: https://mail.python.org/mailman/listinfo/core-mentorship/
diff --git a/Doc/c-api/allocation.rst b/Doc/c-api/allocation.rst
index cb43cbf..32a414b 100644
--- a/Doc/c-api/allocation.rst
+++ b/Doc/c-api/allocation.rst
@@ -43,7 +43,7 @@ Allocating Objects on the Heap
Allocate a new Python object using the C structure type *TYPE* and the
Python type object *type*. Fields not defined by the Python object header
are not initialized; the object's reference count will be one. The size of
- the memory allocation is determined from the :attr:`tp_basicsize` field of
+ the memory allocation is determined from the :c:member:`~PyTypeObject.tp_basicsize` field of
the type object.
@@ -52,7 +52,7 @@ Allocating Objects on the Heap
Allocate a new Python object using the C structure type *TYPE* and the
Python type object *type*. Fields not defined by the Python object header
are not initialized. The allocated memory allows for the *TYPE* structure
- plus *size* fields of the size given by the :attr:`tp_itemsize` field of
+ plus *size* fields of the size given by the :c:member:`~PyTypeObject.tp_itemsize` field of
*type*. This is useful for implementing objects like tuples, which are
able to determine their size at construction time. Embedding the array of
fields into the same allocation decreases the number of allocations,
@@ -67,7 +67,7 @@ Allocating Objects on the Heap
Releases memory allocated to an object using :c:func:`PyObject_New` or
:c:func:`PyObject_NewVar`. This is normally called from the
- :attr:`tp_dealloc` handler specified in the object's type. The fields of
+ :c:member:`~PyTypeObject.tp_dealloc` handler specified in the object's type. The fields of
the object should not be accessed after this call as the memory is no
longer a valid Python object.
diff --git a/Doc/c-api/buffer.rst b/Doc/c-api/buffer.rst
index 7b6d1ae..74693ac 100644
--- a/Doc/c-api/buffer.rst
+++ b/Doc/c-api/buffer.rst
@@ -21,8 +21,10 @@ first.
Two examples of objects that support the buffer interface are strings and
arrays. The string object exposes the character contents in the buffer
-interface's byte-oriented form. An array can also expose its contents, but it
-should be noted that array elements may be multi-byte values.
+interface's byte-oriented form. An array can only expose its contents via the
+old-style buffer interface. This limitation does not apply to Python 3,
+where :class:`memoryview` objects can be constructed from arrays, too.
+Array elements may be multi-byte values.
An example user of the buffer interface is the file object's :meth:`write`
method. Any object that can export a series of bytes through the buffer
@@ -33,7 +35,7 @@ returning data from the target object.
Starting from version 1.6, Python has been providing Python-level buffer
objects and a C-level buffer API so that any built-in or user-defined type can
expose its characteristics. Both, however, have been deprecated because of
-various shortcomings, and have been officially removed in Python 3.0 in favour
+various shortcomings, and have been officially removed in Python 3 in favour
of a new C-level buffer API and a new Python-level object named
:class:`memoryview`.
diff --git a/Doc/c-api/codec.rst b/Doc/c-api/codec.rst
index 8207ae0..83252af 100644
--- a/Doc/c-api/codec.rst
+++ b/Doc/c-api/codec.rst
@@ -52,19 +52,19 @@ and *NULL* returned.
.. c:function:: PyObject* PyCodec_IncrementalEncoder(const char *encoding, const char *errors)
- Get an :class:`IncrementalEncoder` object for the given *encoding*.
+ Get an :class:`~codecs.IncrementalEncoder` object for the given *encoding*.
.. c:function:: PyObject* PyCodec_IncrementalDecoder(const char *encoding, const char *errors)
- Get an :class:`IncrementalDecoder` object for the given *encoding*.
+ Get an :class:`~codecs.IncrementalDecoder` object for the given *encoding*.
.. c:function:: PyObject* PyCodec_StreamReader(const char *encoding, PyObject *stream, const char *errors)
- Get a :class:`StreamReader` factory function for the given *encoding*.
+ Get a :class:`~codecs.StreamReader` factory function for the given *encoding*.
.. c:function:: PyObject* PyCodec_StreamWriter(const char *encoding, PyObject *stream, const char *errors)
- Get a :class:`StreamWriter` factory function for the given *encoding*.
+ Get a :class:`~codecs.StreamWriter` factory function for the given *encoding*.
Registry API for Unicode encoding error handlers
diff --git a/Doc/c-api/dict.rst b/Doc/c-api/dict.rst
index 3e967bd..3006b6c 100644
--- a/Doc/c-api/dict.rst
+++ b/Doc/c-api/dict.rst
@@ -210,8 +210,11 @@ Dictionary Objects
.. c:function:: int PyDict_Update(PyObject *a, PyObject *b)
- This is the same as ``PyDict_Merge(a, b, 1)`` in C, or ``a.update(b)`` in
- Python. Return ``0`` on success or ``-1`` if an exception was raised.
+ This is the same as ``PyDict_Merge(a, b, 1)`` in C, and is similar to
+ ``a.update(b)`` in Python except that :c:func:`PyDict_Update` doesn't fall
+ back to iterating over a sequence of key value pairs if the second
+ argument has no "keys" attribute. Return ``0`` on success or ``-1`` if an
+ exception was raised.
.. versionadded:: 2.2
diff --git a/Doc/c-api/exceptions.rst b/Doc/c-api/exceptions.rst
index 025b75a..91964d0 100644
--- a/Doc/c-api/exceptions.rst
+++ b/Doc/c-api/exceptions.rst
@@ -192,12 +192,19 @@ is a separate error indicator for each thread.
when the system call returns an error.
-.. c:function:: PyObject* PyErr_SetFromErrnoWithFilename(PyObject *type, const char *filename)
+.. c:function:: PyObject* PyErr_SetFromErrnoWithFilenameObject(PyObject *type, PyObject *filenameObject)
Similar to :c:func:`PyErr_SetFromErrno`, with the additional behavior that if
- *filename* is not *NULL*, it is passed to the constructor of *type* as a third
- parameter. In the case of exceptions such as :exc:`IOError` and :exc:`OSError`,
- this is used to define the :attr:`filename` attribute of the exception instance.
+ *filenameObject* is not *NULL*, it is passed to the constructor of *type* as
+ a third parameter. In the case of exceptions such as :exc:`IOError` and
+ :exc:`OSError`, this is used to define the :attr:`filename` attribute of the
+ exception instance.
+
+
+.. c:function:: PyObject* PyErr_SetFromErrnoWithFilename(PyObject *type, const char *filename)
+
+ Similar to :c:func:`PyErr_SetFromErrnoWithFilenameObject`, but the filename
+ is given as a C string.
.. c:function:: PyObject* PyErr_SetFromWindowsErr(int ierr)
@@ -220,14 +227,29 @@ is a separate error indicator for each thread.
.. versionadded:: 2.3
-.. c:function:: PyObject* PyErr_SetFromWindowsErrWithFilename(int ierr, const char *filename)
+.. c:function:: PyObject* PyErr_SetFromWindowsErrWithFilenameObject(int ierr, PyObject *filenameObject)
Similar to :c:func:`PyErr_SetFromWindowsErr`, with the additional behavior that
- if *filename* is not *NULL*, it is passed to the constructor of
+ if *filenameObject* is not *NULL*, it is passed to the constructor of
:exc:`WindowsError` as a third parameter. Availability: Windows.
-.. c:function:: PyObject* PyErr_SetExcFromWindowsErrWithFilename(PyObject *type, int ierr, char *filename)
+.. c:function:: PyObject* PyErr_SetFromWindowsErrWithFilename(int ierr, const char *filename)
+
+ Similar to :c:func:`PyErr_SetFromWindowsErrWithFilenameObject`, but the
+ filename is given as a C string. Availability: Windows.
+
+
+.. c:function:: PyObject* PyErr_SetExcFromWindowsErrWithFilenameObject(PyObject *type, int ierr, PyObject *filename)
+
+ Similar to :c:func:`PyErr_SetFromWindowsErrWithFilenameObject`, with an
+ additional parameter specifying the exception type to be raised.
+ Availability: Windows.
+
+ .. versionadded:: 2.3
+
+
+.. c:function:: PyObject* PyErr_SetExcFromWindowsErrWithFilename(PyObject *type, int ierr, const char *filename)
Similar to :c:func:`PyErr_SetFromWindowsErrWithFilename`, with an additional
parameter specifying the exception type to be raised. Availability: Windows.
diff --git a/Doc/c-api/file.rst b/Doc/c-api/file.rst
index 20a7a60..bbd4223 100644
--- a/Doc/c-api/file.rst
+++ b/Doc/c-api/file.rst
@@ -111,7 +111,8 @@ change in future releases of Python.
.. index:: single: EOFError (built-in exception)
Equivalent to ``p.readline([n])``, this function reads one line from the
- object *p*. *p* may be a file object or any object with a :meth:`readline`
+ object *p*. *p* may be a file object or any object with a
+ :meth:`~io.IOBase.readline`
method. If *n* is ``0``, exactly one line is read, regardless of the length of
the line. If *n* is greater than ``0``, no more than *n* bytes will be read
from the file; a partial line can be returned. In both cases, an empty string
diff --git a/Doc/c-api/gcsupport.rst b/Doc/c-api/gcsupport.rst
index 2a4fda4..b0a2d5c 100644
--- a/Doc/c-api/gcsupport.rst
+++ b/Doc/c-api/gcsupport.rst
@@ -15,10 +15,10 @@ collection.
.. An example showing the use of these interfaces can be found in "Supporting the
.. Cycle Collector (XXX not found: ../ext/example-cycle-support.html)".
-To create a container type, the :attr:`tp_flags` field of the type object must
+To create a container type, the :c:member:`~PyTypeObject.tp_flags` field of the type object must
include the :const:`Py_TPFLAGS_HAVE_GC` and provide an implementation of the
-:attr:`tp_traverse` handler. If instances of the type are mutable, a
-:attr:`tp_clear` implementation must also be provided.
+:c:member:`~PyTypeObject.tp_traverse` handler. If instances of the type are mutable, a
+:c:member:`~PyTypeObject.tp_clear` implementation must also be provided.
.. data:: Py_TPFLAGS_HAVE_GC
@@ -68,7 +68,7 @@ Constructors for container types must conform to two rules:
Adds the object *op* to the set of container objects tracked by the
collector. The collector can run at unexpected times so objects must be
valid while being tracked. This should be called once all the fields
- followed by the :attr:`tp_traverse` handler become valid, usually near the
+ followed by the :c:member:`~PyTypeObject.tp_traverse` handler become valid, usually near the
end of the constructor.
@@ -97,8 +97,8 @@ rules:
Remove the object *op* from the set of container objects tracked by the
collector. Note that :c:func:`PyObject_GC_Track` can be called again on
this object to add it back to the set of tracked objects. The deallocator
- (:attr:`tp_dealloc` handler) should call this for the object before any of
- the fields used by the :attr:`tp_traverse` handler become invalid.
+ (:c:member:`~PyTypeObject.tp_dealloc` handler) should call this for the object before any of
+ the fields used by the :c:member:`~PyTypeObject.tp_traverse` handler become invalid.
.. c:function:: void _PyObject_GC_UNTRACK(PyObject *op)
@@ -106,19 +106,19 @@ rules:
A macro version of :c:func:`PyObject_GC_UnTrack`. It should not be used for
extension modules.
-The :attr:`tp_traverse` handler accepts a function parameter of this type:
+The :c:member:`~PyTypeObject.tp_traverse` handler accepts a function parameter of this type:
.. c:type:: int (*visitproc)(PyObject *object, void *arg)
- Type of the visitor function passed to the :attr:`tp_traverse` handler.
+ Type of the visitor function passed to the :c:member:`~PyTypeObject.tp_traverse` handler.
The function should be called with an object to traverse as *object* and
- the third parameter to the :attr:`tp_traverse` handler as *arg*. The
+ the third parameter to the :c:member:`~PyTypeObject.tp_traverse` handler as *arg*. The
Python core uses several visitor functions to implement cyclic garbage
detection; it's not expected that users will need to write their own
visitor functions.
-The :attr:`tp_traverse` handler must have the following type:
+The :c:member:`~PyTypeObject.tp_traverse` handler must have the following type:
.. c:type:: int (*traverseproc)(PyObject *self, visitproc visit, void *arg)
@@ -130,15 +130,15 @@ The :attr:`tp_traverse` handler must have the following type:
object argument. If *visit* returns a non-zero value that value should be
returned immediately.
-To simplify writing :attr:`tp_traverse` handlers, a :c:func:`Py_VISIT` macro is
-provided. In order to use this macro, the :attr:`tp_traverse` implementation
+To simplify writing :c:member:`~PyTypeObject.tp_traverse` handlers, a :c:func:`Py_VISIT` macro is
+provided. In order to use this macro, the :c:member:`~PyTypeObject.tp_traverse` implementation
must name its arguments exactly *visit* and *arg*:
.. c:function:: void Py_VISIT(PyObject *o)
Call the *visit* callback, with arguments *o* and *arg*. If *visit* returns
- a non-zero value, then return it. Using this macro, :attr:`tp_traverse`
+ a non-zero value, then return it. Using this macro, :c:member:`~PyTypeObject.tp_traverse`
handlers look like::
static int
@@ -151,7 +151,7 @@ must name its arguments exactly *visit* and *arg*:
.. versionadded:: 2.4
-The :attr:`tp_clear` handler must be of the :c:type:`inquiry` type, or *NULL*
+The :c:member:`~PyTypeObject.tp_clear` handler must be of the :c:type:`inquiry` type, or *NULL*
if the object is immutable.
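Putting the pieces above together, a :c:member:`~PyTypeObject.tp_traverse` handler for a hypothetical container holding two object references might look like this (a sketch only; the ``Pair`` struct is illustrative and not part of this patch)::

    typedef struct {
        PyObject_HEAD
        PyObject *first;
        PyObject *second;
    } PairObject;

    static int
    Pair_traverse(PairObject *self, visitproc visit, void *arg)
    {
        /* Py_VISIT requires the parameters to be named exactly
           "visit" and "arg". */
        Py_VISIT(self->first);
        Py_VISIT(self->second);
        return 0;
    }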
diff --git a/Doc/c-api/index.rst b/Doc/c-api/index.rst
index 12a1ec7..2ce7b98 100644
--- a/Doc/c-api/index.rst
+++ b/Doc/c-api/index.rst
@@ -4,9 +4,6 @@
Python/C API Reference Manual
##################################
-:Release: |version|
-:Date: |today|
-
This manual documents the API used by C and C++ programmers who want to write
extension modules or embed Python. It is a companion to :ref:`extending-index`,
which describes the general principles of extension writing but does not
diff --git a/Doc/c-api/init.rst b/Doc/c-api/init.rst
index 6c58c5d..46fc93f 100644
--- a/Doc/c-api/init.rst
+++ b/Doc/c-api/init.rst
@@ -427,6 +427,9 @@ pointer.
standard :mod:`zlib` and :mod:`hashlib` modules release the GIL when
compressing or hashing data.
+
+.. _gilstate:
+
Non-Python created threads
--------------------------
@@ -531,6 +534,7 @@ code, or when embedding the Python interpreter:
.. index:: module: thread
.. note::
+
When only the main thread exists, no GIL operations are needed. This is a
common situation (most Python programs do not use threads), and the lock
operations slow the interpreter down a bit. Therefore, the lock is not
@@ -905,41 +909,43 @@ Asynchronous Notifications
A mechanism is provided to make asynchronous notifications to the main
interpreter thread. These notifications take the form of a function
-pointer and a void argument.
-
-.. index:: single: setcheckinterval() (in module sys)
+pointer and a void pointer argument.
-Every check interval, when the global interpreter lock is released and
-reacquired, Python will also call any such provided functions. This can be used
-for example by asynchronous IO handlers. The notification can be scheduled from
-a worker thread and the actual call than made at the earliest convenience by the
-main thread where it has possession of the global interpreter lock and can
-perform any Python API calls.
.. c:function:: int Py_AddPendingCall(int (*func)(void *), void *arg)
.. index:: single: Py_AddPendingCall()
- Post a notification to the Python main thread. If successful, *func* will be
- called with the argument *arg* at the earliest convenience. *func* will be
- called having the global interpreter lock held and can thus use the full
- Python API and can take any action such as setting object attributes to
- signal IO completion. It must return 0 on success, or -1 signalling an
- exception. The notification function won't be interrupted to perform another
- asynchronous notification recursively, but it can still be interrupted to
- switch threads if the global interpreter lock is released, for example, if it
- calls back into Python code.
+ Schedule a function to be called from the main interpreter thread. On
+ success, 0 is returned and *func* is queued for being called in the
+ main thread. On failure, -1 is returned without setting any exception.
- This function returns 0 on success in which case the notification has been
- scheduled. Otherwise, for example if the notification buffer is full, it
- returns -1 without setting any exception.
+ When successfully queued, *func* will *eventually* be called from the
+ main interpreter thread with the argument *arg*. It will be called
+ asynchronously with respect to normally running Python code, but with
+ both these conditions met:
- This function can be called on any thread, be it a Python thread or some
- other system thread. If it is a Python thread, it doesn't matter if it holds
- the global interpreter lock or not.
+ * on a :term:`bytecode` boundary;
+ * with the main thread holding the :term:`global interpreter lock`
+ (*func* can therefore use the full C API).
- .. versionadded:: 2.7
+ *func* must return 0 on success, or -1 on failure with an exception
+ set. *func* won't be interrupted to perform another asynchronous
+ notification recursively, but it can still be interrupted to switch
+ threads if the global interpreter lock is released.
+
+ This function doesn't need a current thread state to run, and it doesn't
+ need the global interpreter lock.
+ .. warning::
+ This is a low-level function, only useful for very special cases.
+ There is no guarantee that *func* will be called as quickly as
+ possible. If the main thread is busy executing a system call,
+ *func* won't be called before the system call returns. This
+ function is generally **not** suitable for calling Python code from
+ arbitrary C threads. Instead, use the :ref:`PyGILState API<gilstate>`.
+
+ .. versionadded:: 2.7
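As a minimal sketch of the intended use, a C-level signal or I/O handler could ask the main thread to raise :exc:`KeyboardInterrupt` at the next :term:`bytecode` boundary (the helper name is illustrative)::

    static int
    pending_interrupt(void *arg)
    {
        /* Runs later in the main thread, with the GIL held. */
        PyErr_SetNone(PyExc_KeyboardInterrupt);
        return -1;              /* -1 means "an exception is set" */
    }

    /* Callable from any C thread, with or without the GIL: */
    if (Py_AddPendingCall(pending_interrupt, NULL) < 0) {
        /* the pending-call queue is full; retry later */
    }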
.. _profiling:
diff --git a/Doc/c-api/intro.rst b/Doc/c-api/intro.rst
index 4216881..6414277 100644
--- a/Doc/c-api/intro.rst
+++ b/Doc/c-api/intro.rst
@@ -255,8 +255,10 @@ sets all items of a list (actually, any mutable sequence) to a given item::
PyObject *index = PyInt_FromLong(i);
if (!index)
return -1;
- if (PyObject_SetItem(target, index, item) < 0)
+ if (PyObject_SetItem(target, index, item) < 0) {
+ Py_DECREF(index);
return -1;
+ }
Py_DECREF(index);
}
return 0;
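For context (the surrounding function is abridged by the diff), the corrected loop sits in a helper along these lines; the name ``set_all_items`` is illustrative::

    static int
    set_all_items(PyObject *target, PyObject *item)
    {
        Py_ssize_t i, n = PyObject_Length(target);

        if (n < 0)
            return -1;
        for (i = 0; i < n; i++) {
            PyObject *index = PyInt_FromSsize_t(i);
            if (!index)
                return -1;
            if (PyObject_SetItem(target, index, item) < 0) {
                Py_DECREF(index);   /* the added cleanup on the error path */
                return -1;
            }
            Py_DECREF(index);
        }
        return 0;
    }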
@@ -424,7 +426,7 @@ and lose important information about the exact cause of the error.
.. index:: single: sum_sequence()
A simple example of detecting exceptions and passing them on is shown in the
-:c:func:`sum_sequence` example above. It so happens that that example doesn't
+:c:func:`sum_sequence` example above. It so happens that this example doesn't
need to clean up any owned references when it detects an error. The following
example function shows some error cleanup. First, to remind you why you like
Python, we show the equivalent Python code::
diff --git a/Doc/c-api/iter.rst b/Doc/c-api/iter.rst
index 88ac0c1..8d1567c 100644
--- a/Doc/c-api/iter.rst
+++ b/Doc/c-api/iter.rst
@@ -7,7 +7,7 @@ Iterator Protocol
.. versionadded:: 2.2
-There are only a couple of functions specifically for working with iterators.
+There are two functions specifically for working with iterators.
.. c:function:: int PyIter_Check(PyObject *o)
@@ -17,11 +17,10 @@ There are only a couple of functions specifically for working with iterators.
.. c:function:: PyObject* PyIter_Next(PyObject *o)
- Return the next value from the iteration *o*. If the object is an iterator,
- this retrieves the next value from the iteration, and returns *NULL* with no
- exception set if there are no remaining items. If the object is not an
- iterator, :exc:`TypeError` is raised, or if there is an error in retrieving the
- item, returns *NULL* and passes along the exception.
+ Return the next value from the iteration *o*. The object must be an iterator
+ (it is up to the caller to check this). If there are no remaining values,
+ returns *NULL* with no exception set. If an error occurs while retrieving
+ the item, returns *NULL* and passes along the exception.
To write a loop which iterates over an iterator, the C code should look
something like this::
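The loop itself is elided by the diff context; a minimal sketch of the pattern (``consume`` and ``seq`` are illustrative names) follows::

    static int
    consume(PyObject *seq)
    {
        PyObject *iterator = PyObject_GetIter(seq);
        PyObject *item;

        if (iterator == NULL)
            return -1;                       /* seq is not iterable */

        while ((item = PyIter_Next(iterator)) != NULL) {
            /* ... use item ... */
            Py_DECREF(item);
        }
        Py_DECREF(iterator);

        /* NULL can mean exhaustion or error; distinguish the two. */
        return PyErr_Occurred() ? -1 : 0;
    }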
diff --git a/Doc/c-api/memory.rst b/Doc/c-api/memory.rst
index b80b3d5..5465571 100644
--- a/Doc/c-api/memory.rst
+++ b/Doc/c-api/memory.rst
@@ -98,7 +98,7 @@ memory from the Python heap:
Allocates *n* bytes and returns a pointer of type :c:type:`void\*` to the
allocated memory, or *NULL* if the request fails. Requesting zero bytes returns
- a distinct non-*NULL* pointer if possible, as if :c:func:`PyMem_Malloc(1)` had
+ a distinct non-*NULL* pointer if possible, as if ``PyMem_Malloc(1)`` had
been called instead. The memory will not have been initialized in any way.
@@ -106,7 +106,7 @@ memory from the Python heap:
Resizes the memory block pointed to by *p* to *n* bytes. The contents will be
unchanged to the minimum of the old and the new sizes. If *p* is *NULL*, the
- call is equivalent to :c:func:`PyMem_Malloc(n)`; else if *n* is equal to zero,
+ call is equivalent to ``PyMem_Malloc(n)``; else if *n* is equal to zero,
the memory block is resized but is not freed, and the returned pointer is
non-*NULL*. Unless *p* is *NULL*, it must have been returned by a previous call
to :c:func:`PyMem_Malloc` or :c:func:`PyMem_Realloc`. If the request fails,
@@ -118,7 +118,7 @@ memory from the Python heap:
Frees the memory block pointed to by *p*, which must have been returned by a
previous call to :c:func:`PyMem_Malloc` or :c:func:`PyMem_Realloc`. Otherwise, or
- if :c:func:`PyMem_Free(p)` has been called before, undefined behavior occurs. If
+ if ``PyMem_Free(p)`` has been called before, undefined behavior occurs. If
*p* is *NULL*, no operation is performed.
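Taken together, the three functions follow the familiar malloc/realloc/free discipline; a short sketch with illustrative names::

    static PyObject *
    build_buffer(void)
    {
        char *buf, *tmp;

        buf = (char *) PyMem_Malloc(64);
        if (buf == NULL)
            return PyErr_NoMemory();

        tmp = (char *) PyMem_Realloc(buf, 128);
        if (tmp == NULL) {
            PyMem_Free(buf);     /* the original block is still valid here */
            return PyErr_NoMemory();
        }
        buf = tmp;

        /* ... fill the buffer ... */

        PyMem_Free(buf);
        Py_RETURN_NONE;
    }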
The following type-oriented macros are provided for convenience. Note that
diff --git a/Doc/c-api/objbuffer.rst b/Doc/c-api/objbuffer.rst
index 90dce62..c5228c6 100644
--- a/Doc/c-api/objbuffer.rst
+++ b/Doc/c-api/objbuffer.rst
@@ -8,7 +8,7 @@ Old Buffer Protocol
This section describes the legacy buffer protocol, which has been introduced
in Python 1.6. It is still supported but deprecated in the Python 2.x series.
-Python 3.0 introduces a new buffer protocol which fixes weaknesses and
+Python 3 introduces a new buffer protocol which fixes weaknesses and
shortcomings of the protocol, and has been backported to Python 2.6. See
:ref:`bufferobjects` for more information.
diff --git a/Doc/c-api/object.rst b/Doc/c-api/object.rst
index a02326f..50b83e9 100644
--- a/Doc/c-api/object.rst
+++ b/Doc/c-api/object.rst
@@ -47,8 +47,8 @@ Object Protocol
Generic attribute getter function that is meant to be put into a type
object's ``tp_getattro`` slot. It looks for a descriptor in the dictionary
of classes in the object's MRO as well as an attribute in the object's
- :attr:`__dict__` (if present). As outlined in :ref:`descriptors`, data
- descriptors take preference over instance attributes, while non-data
+ :attr:`~object.__dict__` (if present). As outlined in :ref:`descriptors`,
+ data descriptors take preference over instance attributes, while non-data
descriptors don't. Otherwise, an :exc:`AttributeError` is raised.
@@ -72,8 +72,8 @@ Object Protocol
object's ``tp_setattro`` slot. It looks for a data descriptor in the
dictionary of classes in the object's MRO, and if found it takes preference
over setting the attribute in the instance dictionary. Otherwise, the
- attribute is set in the object's :attr:`__dict__` (if present). Otherwise,
- an :exc:`AttributeError` is raised and ``-1`` is returned.
+ attribute is set in the object's :attr:`~object.__dict__` (if present).
+ Otherwise, an :exc:`AttributeError` is raised and ``-1`` is returned.
.. c:function:: int PyObject_DelAttr(PyObject *o, PyObject *attr_name)
@@ -180,9 +180,9 @@ Object Protocol
be done against every entry in *cls*. The result will be ``1`` when at least one
of the checks returns ``1``, otherwise it will be ``0``. If *inst* is not a
class instance and *cls* is neither a type object, nor a class object, nor a
- tuple, *inst* must have a :attr:`__class__` attribute --- the class relationship
- of the value of that attribute with *cls* will be used to determine the result
- of this function.
+ tuple, *inst* must have a :attr:`~instance.__class__` attribute --- the
+ class relationship of the value of that attribute with *cls* will be used
+ to determine the result of this function.
.. versionadded:: 2.1
@@ -196,9 +196,9 @@ of. If :class:`A` and :class:`B` are class objects, :class:`B` is a subclass of
either is not a class object, a more general mechanism is used to determine the
class relationship of the two objects. When testing if *B* is a subclass of
*A*, if *A* is *B*, :c:func:`PyObject_IsSubclass` returns true. If *A* and *B*
-are different objects, *B*'s :attr:`__bases__` attribute is searched in a
-depth-first fashion for *A* --- the presence of the :attr:`__bases__` attribute
-is considered sufficient for this determination.
+are different objects, *B*'s :attr:`~class.__bases__` attribute is searched in
+a depth-first fashion for *A* --- the presence of the :attr:`~class.__bases__`
+attribute is considered sufficient for this determination.
.. c:function:: int PyObject_IsSubclass(PyObject *derived, PyObject *cls)
diff --git a/Doc/c-api/sequence.rst b/Doc/c-api/sequence.rst
index 2b668a5..653f9ad 100644
--- a/Doc/c-api/sequence.rst
+++ b/Doc/c-api/sequence.rst
@@ -167,10 +167,10 @@ Sequence Protocol
.. c:function:: PyObject* PySequence_Fast(PyObject *o, const char *m)
- Returns the sequence *o* as a tuple, unless it is already a tuple or list, in
- which case *o* is returned. Use :c:func:`PySequence_Fast_GET_ITEM` to access the
- members of the result. Returns *NULL* on failure. If the object is not a
- sequence, raises :exc:`TypeError` with *m* as the message text.
+ Return the sequence *o* as a list, unless it is already a tuple or list, in
+ which case *o* is returned. Use :c:func:`PySequence_Fast_GET_ITEM` to access
+ the members of the result. Returns *NULL* on failure. If the object is not
+ a sequence, raises :exc:`TypeError` with *m* as the message text.
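A short sketch of the usual pattern, with illustrative names; the returned object owns the items, so the borrowed references from :c:func:`PySequence_Fast_GET_ITEM` stay valid until the final :c:func:`Py_DECREF`::

    static Py_ssize_t
    count_true(PyObject *o)
    {
        PyObject *fast = PySequence_Fast(o, "expected a sequence");
        Py_ssize_t i, n, count = 0;

        if (fast == NULL)
            return -1;                        /* TypeError is set */
        n = PySequence_Fast_GET_SIZE(fast);
        for (i = 0; i < n; i++) {
            /* borrowed reference; no Py_DECREF needed */
            PyObject *item = PySequence_Fast_GET_ITEM(fast, i);
            int t = PyObject_IsTrue(item);
            if (t < 0) {
                Py_DECREF(fast);
                return -1;
            }
            count += t;
        }
        Py_DECREF(fast);
        return count;
    }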
.. c:function:: PyObject* PySequence_Fast_GET_ITEM(PyObject *o, Py_ssize_t i)
diff --git a/Doc/c-api/set.rst b/Doc/c-api/set.rst
index 41c4af4..258530b 100644
--- a/Doc/c-api/set.rst
+++ b/Doc/c-api/set.rst
@@ -156,7 +156,7 @@ subtypes but not for instances of :class:`frozenset` or its subtypes.
Return 1 if found and removed, 0 if not found (no action taken), and -1 if an
error is encountered. Does not raise :exc:`KeyError` for missing keys. Raise a
- :exc:`TypeError` if the *key* is unhashable. Unlike the Python :meth:`discard`
+ :exc:`TypeError` if the *key* is unhashable. Unlike the Python :meth:`~set.discard`
method, this function does not automatically convert unhashable sets into
temporary frozensets. Raise :exc:`PyExc_SystemError` if *set* is not an
instance of :class:`set` or its subtype.
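The three-way return value makes the call pattern slightly unusual; a minimal sketch (``myset`` and ``key`` are illustrative)::

    int r = PySet_Discard(myset, key);

    if (r < 0) {
        /* error: e.g. TypeError for an unhashable key; exception is set */
    }
    else if (r == 0) {
        /* key was not present; nothing was removed */
    }
    else {
        /* r == 1: key was found and removed */
    }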
diff --git a/Doc/c-api/string.rst b/Doc/c-api/string.rst
index ecf7050..32dc274 100644
--- a/Doc/c-api/string.rst
+++ b/Doc/c-api/string.rst
@@ -241,7 +241,7 @@ called with a non-string parameter.
.. c:function:: PyObject* PyString_Format(PyObject *format, PyObject *args)
Return a new string object from *format* and *args*. Analogous to ``format %
- args``. The *args* argument must be a tuple.
+ args``. The *args* argument must be a tuple or dict.
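A minimal sketch of the C-level equivalent of ``"%s scored %d" % ("spam", 42)``; all names are illustrative::

    PyObject *fmt = PyString_FromString("%s scored %d");
    PyObject *args = Py_BuildValue("(si)", "spam", 42);
    PyObject *result = NULL;

    if (fmt != NULL && args != NULL)
        result = PyString_Format(fmt, args);   /* new reference or NULL */
    Py_XDECREF(args);
    Py_XDECREF(fmt);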
.. c:function:: void PyString_InternInPlace(PyObject **string)
diff --git a/Doc/c-api/structures.rst b/Doc/c-api/structures.rst
index f5007ac..e31687f 100644
--- a/Doc/c-api/structures.rst
+++ b/Doc/c-api/structures.rst
@@ -293,6 +293,6 @@ definition with the same method name.
.. c:function:: PyObject* Py_FindMethod(PyMethodDef table[], PyObject *ob, char *name)
Return a bound method object for an extension type implemented in C. This
- can be useful in the implementation of a :attr:`tp_getattro` or
- :attr:`tp_getattr` handler that does not use the
+ can be useful in the implementation of a :c:member:`~PyTypeObject.tp_getattro` or
+ :c:member:`~PyTypeObject.tp_getattr` handler that does not use the
:c:func:`PyObject_GenericGetAttr` function.
diff --git a/Doc/c-api/typeobj.rst b/Doc/c-api/typeobj.rst
index 5bda1ff..b545b06 100644
--- a/Doc/c-api/typeobj.rst
+++ b/Doc/c-api/typeobj.rst
@@ -35,7 +35,7 @@ definition found there:
The type object structure extends the :c:type:`PyVarObject` structure. The
:attr:`ob_size` field is used for dynamic types (created by :func:`type_new`,
usually called from a class statement). Note that :c:data:`PyType_Type` (the
-metatype) initializes :attr:`tp_itemsize`, which means that its instances (i.e.
+metatype) initializes :c:member:`~PyTypeObject.tp_itemsize`, which means that its instances (i.e.
type objects) *must* have the :attr:`ob_size` field.
@@ -108,7 +108,7 @@ type objects) *must* have the :attr:`ob_size` field.
should be just the type name. If the module is a submodule of a package, the
full package name is part of the full module name. For example, a type named
:class:`T` defined in module :mod:`M` in subpackage :mod:`Q` in package :mod:`P`
- should have the :attr:`tp_name` initializer ``"P.Q.M.T"``.
+ should have the :c:member:`~PyTypeObject.tp_name` initializer ``"P.Q.M.T"``.
For dynamically allocated type objects, this should just be the type name, and
the module name explicitly stored in the type dict as the value for key
@@ -119,7 +119,7 @@ type objects) *must* have the :attr:`ob_size` field.
attribute, and everything after the last dot is made accessible as the
:attr:`__name__` attribute.
- If no dot is present, the entire :attr:`tp_name` field is made accessible as the
+ If no dot is present, the entire :c:member:`~PyTypeObject.tp_name` field is made accessible as the
:attr:`__name__` attribute, and the :attr:`__module__` attribute is undefined
(unless explicitly set in the dictionary, as explained above). This means your
type will be impossible to pickle.
@@ -133,13 +133,13 @@ type objects) *must* have the :attr:`ob_size` field.
These fields allow calculating the size in bytes of instances of the type.
There are two kinds of types: types with fixed-length instances have a zero
- :attr:`tp_itemsize` field, types with variable-length instances have a non-zero
- :attr:`tp_itemsize` field. For a type with fixed-length instances, all
- instances have the same size, given in :attr:`tp_basicsize`.
+ :c:member:`~PyTypeObject.tp_itemsize` field, types with variable-length instances have a non-zero
+ :c:member:`~PyTypeObject.tp_itemsize` field. For a type with fixed-length instances, all
+ instances have the same size, given in :c:member:`~PyTypeObject.tp_basicsize`.
For a type with variable-length instances, the instances must have an
- :attr:`ob_size` field, and the instance size is :attr:`tp_basicsize` plus N
- times :attr:`tp_itemsize`, where N is the "length" of the object. The value of
+ :attr:`ob_size` field, and the instance size is :c:member:`~PyTypeObject.tp_basicsize` plus N
+ times :c:member:`~PyTypeObject.tp_itemsize`, where N is the "length" of the object. The value of
N is typically stored in the instance's :attr:`ob_size` field. There are
exceptions: for example, long ints use a negative :attr:`ob_size` to indicate a
negative number, and N is ``abs(ob_size)`` there. Also, the presence of an
@@ -152,21 +152,21 @@ type objects) *must* have the :attr:`ob_size` field.
:c:macro:`PyObject_HEAD` or :c:macro:`PyObject_VAR_HEAD` (whichever is used to
declare the instance struct) and this in turn includes the :attr:`_ob_prev` and
:attr:`_ob_next` fields if they are present. This means that the only correct
- way to get an initializer for the :attr:`tp_basicsize` is to use the
+ way to get an initializer for the :c:member:`~PyTypeObject.tp_basicsize` is to use the
``sizeof`` operator on the struct used to declare the instance layout.
The basic size does not include the GC header size (this is new in Python 2.2;
- in 2.1 and 2.0, the GC header size was included in :attr:`tp_basicsize`).
+ in 2.1 and 2.0, the GC header size was included in :c:member:`~PyTypeObject.tp_basicsize`).
These fields are inherited separately by subtypes. If the base type has a
- non-zero :attr:`tp_itemsize`, it is generally not safe to set
- :attr:`tp_itemsize` to a different non-zero value in a subtype (though this
+ non-zero :c:member:`~PyTypeObject.tp_itemsize`, it is generally not safe to set
+ :c:member:`~PyTypeObject.tp_itemsize` to a different non-zero value in a subtype (though this
depends on the implementation of the base type).
A note about alignment: if the variable items require a particular alignment,
- this should be taken care of by the value of :attr:`tp_basicsize`. Example:
- suppose a type implements an array of ``double``. :attr:`tp_itemsize` is
+ this should be taken care of by the value of :c:member:`~PyTypeObject.tp_basicsize`. Example:
+ suppose a type implements an array of ``double``. :c:member:`~PyTypeObject.tp_itemsize` is
``sizeof(double)``. It is the programmer's responsibility that
- :attr:`tp_basicsize` is a multiple of ``sizeof(double)`` (assuming this is the
+ :c:member:`~PyTypeObject.tp_basicsize` is a multiple of ``sizeof(double)`` (assuming this is the
alignment requirement for ``double``).
@@ -182,10 +182,10 @@ type objects) *must* have the :attr:`ob_size` field.
destructor function should free all references which the instance owns, free all
memory buffers owned by the instance (using the freeing function corresponding
to the allocation function used to allocate the buffer), and finally (as its
- last action) call the type's :attr:`tp_free` function. If the type is not
+ last action) call the type's :c:member:`~PyTypeObject.tp_free` function. If the type is not
subtypable (doesn't have the :const:`Py_TPFLAGS_BASETYPE` flag bit set), it is
permissible to call the object deallocator directly instead of via
- :attr:`tp_free`. The object deallocator should be the one used to allocate the
+ :c:member:`~PyTypeObject.tp_free`. The object deallocator should be the one used to allocate the
instance; this is normally :c:func:`PyObject_Del` if the instance was allocated
using :c:func:`PyObject_New` or :c:func:`PyObject_VarNew`, or
:c:func:`PyObject_GC_Del` if the instance was allocated using
@@ -199,26 +199,26 @@ type objects) *must* have the :attr:`ob_size` field.
An optional pointer to the instance print function.
The print function is only called when the instance is printed to a *real* file;
- when it is printed to a pseudo-file (like a :class:`StringIO` instance), the
- instance's :attr:`tp_repr` or :attr:`tp_str` function is called to convert it to
- a string. These are also called when the type's :attr:`tp_print` field is
- *NULL*. A type should never implement :attr:`tp_print` in a way that produces
- different output than :attr:`tp_repr` or :attr:`tp_str` would.
+ when it is printed to a pseudo-file (like a :class:`~StringIO.StringIO` instance), the
+ instance's :c:member:`~PyTypeObject.tp_repr` or :c:member:`~PyTypeObject.tp_str` function is called to convert it to
+ a string. These are also called when the type's :c:member:`~PyTypeObject.tp_print` field is
+ *NULL*. A type should never implement :c:member:`~PyTypeObject.tp_print` in a way that produces
+ different output than :c:member:`~PyTypeObject.tp_repr` or :c:member:`~PyTypeObject.tp_str` would.
The print function is called with the same signature as :c:func:`PyObject_Print`:
``int tp_print(PyObject *self, FILE *file, int flags)``. The *self* argument is
the instance to be printed. The *file* argument is the stdio file to which it
is to be printed. The *flags* argument is composed of flag bits. The only flag
bit currently defined is :const:`Py_PRINT_RAW`. When the :const:`Py_PRINT_RAW`
- flag bit is set, the instance should be printed the same way as :attr:`tp_str`
+ flag bit is set, the instance should be printed the same way as :c:member:`~PyTypeObject.tp_str`
would format it; when the :const:`Py_PRINT_RAW` flag bit is clear, the instance
- should be printed the same was as :attr:`tp_repr` would format it. It should
+ should be printed the same way as :c:member:`~PyTypeObject.tp_repr` would format it. It should
return ``-1`` and set an exception condition when an error occurred during the
comparison.
- It is possible that the :attr:`tp_print` field will be deprecated. In any case,
- it is recommended not to define :attr:`tp_print`, but instead to rely on
- :attr:`tp_repr` and :attr:`tp_str` for printing.
+ It is possible that the :c:member:`~PyTypeObject.tp_print` field will be deprecated. In any case,
+ it is recommended not to define :c:member:`~PyTypeObject.tp_print`, but instead to rely on
+ :c:member:`~PyTypeObject.tp_repr` and :c:member:`~PyTypeObject.tp_str` for printing.
This field is inherited by subtypes.
@@ -228,13 +228,13 @@ type objects) *must* have the :attr:`ob_size` field.
An optional pointer to the get-attribute-string function.
This field is deprecated. When it is defined, it should point to a function
- that acts the same as the :attr:`tp_getattro` function, but taking a C string
+ that acts the same as the :c:member:`~PyTypeObject.tp_getattro` function, but taking a C string
instead of a Python string object to give the attribute name. The signature is
the same as for :c:func:`PyObject_GetAttrString`.
- This field is inherited by subtypes together with :attr:`tp_getattro`: a subtype
- inherits both :attr:`tp_getattr` and :attr:`tp_getattro` from its base type when
- the subtype's :attr:`tp_getattr` and :attr:`tp_getattro` are both *NULL*.
+ This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_getattro`: a subtype
+ inherits both :c:member:`~PyTypeObject.tp_getattr` and :c:member:`~PyTypeObject.tp_getattro` from its base type when
+ the subtype's :c:member:`~PyTypeObject.tp_getattr` and :c:member:`~PyTypeObject.tp_getattro` are both *NULL*.
.. c:member:: setattrfunc PyTypeObject.tp_setattr
@@ -242,13 +242,13 @@ type objects) *must* have the :attr:`ob_size` field.
An optional pointer to the set-attribute-string function.
This field is deprecated. When it is defined, it should point to a function
- that acts the same as the :attr:`tp_setattro` function, but taking a C string
+ that acts the same as the :c:member:`~PyTypeObject.tp_setattro` function, but taking a C string
instead of a Python string object to give the attribute name. The signature is
the same as for :c:func:`PyObject_SetAttrString`.
- This field is inherited by subtypes together with :attr:`tp_setattro`: a subtype
- inherits both :attr:`tp_setattr` and :attr:`tp_setattro` from its base type when
- the subtype's :attr:`tp_setattr` and :attr:`tp_setattro` are both *NULL*.
+ This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_setattro`: a subtype
+ inherits both :c:member:`~PyTypeObject.tp_setattr` and :c:member:`~PyTypeObject.tp_setattro` from its base type when
+ the subtype's :c:member:`~PyTypeObject.tp_setattr` and :c:member:`~PyTypeObject.tp_setattro` are both *NULL*.
.. c:member:: cmpfunc PyTypeObject.tp_compare
@@ -260,10 +260,10 @@ type objects) *must* have the :attr:`ob_size` field.
*other*, and ``-1`` if *self* less than *other*. It should return ``-1`` and
set an exception condition when an error occurred during the comparison.
- This field is inherited by subtypes together with :attr:`tp_richcompare` and
- :attr:`tp_hash`: a subtypes inherits all three of :attr:`tp_compare`,
- :attr:`tp_richcompare`, and :attr:`tp_hash` when the subtype's
- :attr:`tp_compare`, :attr:`tp_richcompare`, and :attr:`tp_hash` are all *NULL*.
+ This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_richcompare` and
+ :c:member:`~PyTypeObject.tp_hash`: a subtype inherits all three of :c:member:`~PyTypeObject.tp_compare`,
+ :c:member:`~PyTypeObject.tp_richcompare`, and :c:member:`~PyTypeObject.tp_hash` when the subtype's
+ :c:member:`~PyTypeObject.tp_compare`, :c:member:`~PyTypeObject.tp_richcompare`, and :c:member:`~PyTypeObject.tp_hash` are all *NULL*.
.. c:member:: reprfunc PyTypeObject.tp_repr
@@ -292,7 +292,7 @@ type objects) *must* have the :attr:`ob_size` field.
objects which implement the number protocol. These fields are documented in
:ref:`number-structs`.
- The :attr:`tp_as_number` field is not inherited, but the contained fields are
+ The :c:member:`~PyTypeObject.tp_as_number` field is not inherited, but the contained fields are
inherited individually.
@@ -302,7 +302,7 @@ type objects) *must* have the :attr:`ob_size` field.
objects which implement the sequence protocol. These fields are documented
in :ref:`sequence-structs`.
- The :attr:`tp_as_sequence` field is not inherited, but the contained fields
+ The :c:member:`~PyTypeObject.tp_as_sequence` field is not inherited, but the contained fields
are inherited individually.
@@ -312,7 +312,7 @@ type objects) *must* have the :attr:`ob_size` field.
objects which implement the mapping protocol. These fields are documented in
:ref:`mapping-structs`.
- The :attr:`tp_as_mapping` field is not inherited, but the contained fields
+ The :c:member:`~PyTypeObject.tp_as_mapping` field is not inherited, but the contained fields
are inherited individually.
@@ -336,14 +336,14 @@ type objects) *must* have the :attr:`ob_size` field.
the Python level will result in the ``tp_hash`` slot being set to
:c:func:`PyObject_HashNotImplemented`.
- When this field is not set, two possibilities exist: if the :attr:`tp_compare`
- and :attr:`tp_richcompare` fields are both *NULL*, a default hash value based on
+ When this field is not set, two possibilities exist: if the :c:member:`~PyTypeObject.tp_compare`
+ and :c:member:`~PyTypeObject.tp_richcompare` fields are both *NULL*, a default hash value based on
the object's address is returned; otherwise, a :exc:`TypeError` is raised.
- This field is inherited by subtypes together with :attr:`tp_richcompare` and
- :attr:`tp_compare`: a subtypes inherits all three of :attr:`tp_compare`,
- :attr:`tp_richcompare`, and :attr:`tp_hash`, when the subtype's
- :attr:`tp_compare`, :attr:`tp_richcompare` and :attr:`tp_hash` are all *NULL*.
+ This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_richcompare` and
+ :c:member:`~PyTypeObject.tp_compare`: a subtype inherits all three of :c:member:`~PyTypeObject.tp_compare`,
+ :c:member:`~PyTypeObject.tp_richcompare`, and :c:member:`~PyTypeObject.tp_hash`, when the subtype's
+ :c:member:`~PyTypeObject.tp_compare`, :c:member:`~PyTypeObject.tp_richcompare` and :c:member:`~PyTypeObject.tp_hash` are all *NULL*.
.. c:member:: ternaryfunc PyTypeObject.tp_call
@@ -381,9 +381,9 @@ type objects) *must* have the :attr:`ob_size` field.
convenient to set this field to :c:func:`PyObject_GenericGetAttr`, which
implements the normal way of looking for object attributes.
- This field is inherited by subtypes together with :attr:`tp_getattr`: a subtype
- inherits both :attr:`tp_getattr` and :attr:`tp_getattro` from its base type when
- the subtype's :attr:`tp_getattr` and :attr:`tp_getattro` are both *NULL*.
+ This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_getattr`: a subtype
+ inherits both :c:member:`~PyTypeObject.tp_getattr` and :c:member:`~PyTypeObject.tp_getattro` from its base type when
+ the subtype's :c:member:`~PyTypeObject.tp_getattr` and :c:member:`~PyTypeObject.tp_getattro` are both *NULL*.
.. c:member:: setattrofunc PyTypeObject.tp_setattro
@@ -394,9 +394,9 @@ type objects) *must* have the :attr:`ob_size` field.
convenient to set this field to :c:func:`PyObject_GenericSetAttr`, which
implements the normal way of setting object attributes.
- This field is inherited by subtypes together with :attr:`tp_setattr`: a subtype
- inherits both :attr:`tp_setattr` and :attr:`tp_setattro` from its base type when
- the subtype's :attr:`tp_setattr` and :attr:`tp_setattro` are both *NULL*.
+ This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_setattr`: a subtype
+ inherits both :c:member:`~PyTypeObject.tp_setattr` and :c:member:`~PyTypeObject.tp_setattro` from its base type when
+ the subtype's :c:member:`~PyTypeObject.tp_setattr` and :c:member:`~PyTypeObject.tp_setattro` are both *NULL*.
.. c:member:: PyBufferProcs* PyTypeObject.tp_as_buffer
@@ -405,7 +405,7 @@ type objects) *must* have the :attr:`ob_size` field.
which implement the buffer interface. These fields are documented in
:ref:`buffer-structs`.
- The :attr:`tp_as_buffer` field is not inherited, but the contained fields are
+ The :c:member:`~PyTypeObject.tp_as_buffer` field is not inherited, but the contained fields are
inherited individually.
@@ -414,8 +414,8 @@ type objects) *must* have the :attr:`ob_size` field.
This field is a bit mask of various flags. Some flags indicate variant
semantics for certain situations; others are used to indicate that certain
fields in the type object (or in the extension structures referenced via
- :attr:`tp_as_number`, :attr:`tp_as_sequence`, :attr:`tp_as_mapping`, and
- :attr:`tp_as_buffer`) that were historically not always present are valid; if
+ :c:member:`~PyTypeObject.tp_as_number`, :c:member:`~PyTypeObject.tp_as_sequence`, :c:member:`~PyTypeObject.tp_as_mapping`, and
+ :c:member:`~PyTypeObject.tp_as_buffer`) that were historically not always present are valid; if
such a flag bit is clear, the type fields it guards must not be accessed and
must be considered to have a zero or *NULL* value instead.
@@ -425,14 +425,14 @@ type objects) *must* have the :attr:`ob_size` field.
inherited if the extension structure is inherited, i.e. the base type's value of
the flag bit is copied into the subtype together with a pointer to the extension
structure. The :const:`Py_TPFLAGS_HAVE_GC` flag bit is inherited together with
- the :attr:`tp_traverse` and :attr:`tp_clear` fields, i.e. if the
+ the :c:member:`~PyTypeObject.tp_traverse` and :c:member:`~PyTypeObject.tp_clear` fields, i.e. if the
:const:`Py_TPFLAGS_HAVE_GC` flag bit is clear in the subtype and the
- :attr:`tp_traverse` and :attr:`tp_clear` fields in the subtype exist (as
+ :c:member:`~PyTypeObject.tp_traverse` and :c:member:`~PyTypeObject.tp_clear` fields in the subtype exist (as
indicated by the :const:`Py_TPFLAGS_HAVE_RICHCOMPARE` flag bit) and have *NULL*
values.
The following bit masks are currently defined; these can be ORed together using
- the ``|`` operator to form the value of the :attr:`tp_flags` field. The macro
+ the ``|`` operator to form the value of the :c:member:`~PyTypeObject.tp_flags` field. The macro
:c:func:`PyType_HasFeature` takes a type and a flags value, *tp* and *f*, and
checks whether ``tp->tp_flags & f`` is non-zero.
@@ -440,13 +440,13 @@ type objects) *must* have the :attr:`ob_size` field.
.. data:: Py_TPFLAGS_HAVE_GETCHARBUFFER
If this bit is set, the :c:type:`PyBufferProcs` struct referenced by
- :attr:`tp_as_buffer` has the :attr:`bf_getcharbuffer` field.
+ :c:member:`~PyTypeObject.tp_as_buffer` has the :attr:`bf_getcharbuffer` field.
.. data:: Py_TPFLAGS_HAVE_SEQUENCE_IN
If this bit is set, the :c:type:`PySequenceMethods` struct referenced by
- :attr:`tp_as_sequence` has the :attr:`sq_contains` field.
+ :c:member:`~PyTypeObject.tp_as_sequence` has the :attr:`sq_contains` field.
.. data:: Py_TPFLAGS_GC
@@ -458,8 +458,8 @@ type objects) *must* have the :attr:`ob_size` field.
.. data:: Py_TPFLAGS_HAVE_INPLACEOPS
If this bit is set, the :c:type:`PySequenceMethods` struct referenced by
- :attr:`tp_as_sequence` and the :c:type:`PyNumberMethods` structure referenced by
- :attr:`tp_as_number` contain the fields for in-place operators. In particular,
+ :c:member:`~PyTypeObject.tp_as_sequence` and the :c:type:`PyNumberMethods` structure referenced by
+ :c:member:`~PyTypeObject.tp_as_number` contain the fields for in-place operators. In particular,
this means that the :c:type:`PyNumberMethods` structure has the fields
:attr:`nb_inplace_add`, :attr:`nb_inplace_subtract`,
:attr:`nb_inplace_multiply`, :attr:`nb_inplace_divide`,
@@ -473,7 +473,7 @@ type objects) *must* have the :attr:`ob_size` field.
.. data:: Py_TPFLAGS_CHECKTYPES
If this bit is set, the binary and ternary operations in the
- :c:type:`PyNumberMethods` structure referenced by :attr:`tp_as_number` accept
+ :c:type:`PyNumberMethods` structure referenced by :c:member:`~PyTypeObject.tp_as_number` accept
arguments of arbitrary object types, and do their own type conversions if
needed. If this bit is clear, those operations require that all arguments have
the current type as their type, and the caller is supposed to perform a coercion
@@ -485,31 +485,31 @@ type objects) *must* have the :attr:`ob_size` field.
.. data:: Py_TPFLAGS_HAVE_RICHCOMPARE
- If this bit is set, the type object has the :attr:`tp_richcompare` field, as
- well as the :attr:`tp_traverse` and the :attr:`tp_clear` fields.
+ If this bit is set, the type object has the :c:member:`~PyTypeObject.tp_richcompare` field, as
+ well as the :c:member:`~PyTypeObject.tp_traverse` and the :c:member:`~PyTypeObject.tp_clear` fields.
.. data:: Py_TPFLAGS_HAVE_WEAKREFS
- If this bit is set, the :attr:`tp_weaklistoffset` field is defined. Instances
- of a type are weakly referenceable if the type's :attr:`tp_weaklistoffset` field
+ If this bit is set, the :c:member:`~PyTypeObject.tp_weaklistoffset` field is defined. Instances
+ of a type are weakly referenceable if the type's :c:member:`~PyTypeObject.tp_weaklistoffset` field
has a value greater than zero.
.. data:: Py_TPFLAGS_HAVE_ITER
- If this bit is set, the type object has the :attr:`tp_iter` and
- :attr:`tp_iternext` fields.
+ If this bit is set, the type object has the :c:member:`~PyTypeObject.tp_iter` and
+ :c:member:`~PyTypeObject.tp_iternext` fields.
.. data:: Py_TPFLAGS_HAVE_CLASS
If this bit is set, the type object has several new fields defined starting in
- Python 2.2: :attr:`tp_methods`, :attr:`tp_members`, :attr:`tp_getset`,
- :attr:`tp_base`, :attr:`tp_dict`, :attr:`tp_descr_get`, :attr:`tp_descr_set`,
- :attr:`tp_dictoffset`, :attr:`tp_init`, :attr:`tp_alloc`, :attr:`tp_new`,
- :attr:`tp_free`, :attr:`tp_is_gc`, :attr:`tp_bases`, :attr:`tp_mro`,
- :attr:`tp_cache`, :attr:`tp_subclasses`, and :attr:`tp_weaklist`.
+ Python 2.2: :c:member:`~PyTypeObject.tp_methods`, :c:member:`~PyTypeObject.tp_members`, :c:member:`~PyTypeObject.tp_getset`,
+ :c:member:`~PyTypeObject.tp_base`, :c:member:`~PyTypeObject.tp_dict`, :c:member:`~PyTypeObject.tp_descr_get`, :c:member:`~PyTypeObject.tp_descr_set`,
+ :c:member:`~PyTypeObject.tp_dictoffset`, :c:member:`~PyTypeObject.tp_init`, :c:member:`~PyTypeObject.tp_alloc`, :c:member:`~PyTypeObject.tp_new`,
+ :c:member:`~PyTypeObject.tp_free`, :c:member:`~PyTypeObject.tp_is_gc`, :c:member:`~PyTypeObject.tp_bases`, :c:member:`~PyTypeObject.tp_mro`,
+ :c:member:`~PyTypeObject.tp_cache`, :c:member:`~PyTypeObject.tp_subclasses`, and :c:member:`~PyTypeObject.tp_weaklist`.
.. data:: Py_TPFLAGS_HEAPTYPE
@@ -547,7 +547,7 @@ type objects) *must* have the :attr:`ob_size` field.
is set, instances must be created using :c:func:`PyObject_GC_New` and
destroyed using :c:func:`PyObject_GC_Del`. More information in section
:ref:`supporting-cycle-detection`. This bit also implies that the
- GC-related fields :attr:`tp_traverse` and :attr:`tp_clear` are present in
+ GC-related fields :c:member:`~PyTypeObject.tp_traverse` and :c:member:`~PyTypeObject.tp_clear` are present in
the type object; but those fields also exist when
:const:`Py_TPFLAGS_HAVE_GC` is clear but
:const:`Py_TPFLAGS_HAVE_RICHCOMPARE` is set.
@@ -582,8 +582,8 @@ The following three fields only exist if the
about Python's garbage collection scheme can be found in section
:ref:`supporting-cycle-detection`.
- The :attr:`tp_traverse` pointer is used by the garbage collector to detect
- reference cycles. A typical implementation of a :attr:`tp_traverse` function
+ The :c:member:`~PyTypeObject.tp_traverse` pointer is used by the garbage collector to detect
+ reference cycles. A typical implementation of a :c:member:`~PyTypeObject.tp_traverse` function
simply calls :c:func:`Py_VISIT` on each of the instance's members that are Python
objects. For example, this is function :c:func:`local_traverse` from the
:mod:`thread` extension module::
@@ -603,15 +603,15 @@ The following three fields only exist if the
On the other hand, even if you know a member can never be part of a cycle, as a
debugging aid you may want to visit it anyway just so the :mod:`gc` module's
- :func:`get_referents` function will include it.
+ :func:`~gc.get_referents` function will include it.
Note that :c:func:`Py_VISIT` requires the *visit* and *arg* parameters to
:c:func:`local_traverse` to have these specific names; don't name them just
anything.
- This field is inherited by subtypes together with :attr:`tp_clear` and the
- :const:`Py_TPFLAGS_HAVE_GC` flag bit: the flag bit, :attr:`tp_traverse`, and
- :attr:`tp_clear` are all inherited from the base type if they are all zero in
+ This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_clear` and the
+ :const:`Py_TPFLAGS_HAVE_GC` flag bit: the flag bit, :c:member:`~PyTypeObject.tp_traverse`, and
+ :c:member:`~PyTypeObject.tp_clear` are all inherited from the base type if they are all zero in
the subtype *and* the subtype has the :const:`Py_TPFLAGS_HAVE_RICHCOMPARE` flag
bit set.
@@ -621,17 +621,17 @@ The following three fields only exist if the
An optional pointer to a clear function for the garbage collector. This is only
used if the :const:`Py_TPFLAGS_HAVE_GC` flag bit is set.
- The :attr:`tp_clear` member function is used to break reference cycles in cyclic
- garbage detected by the garbage collector. Taken together, all :attr:`tp_clear`
+ The :c:member:`~PyTypeObject.tp_clear` member function is used to break reference cycles in cyclic
+ garbage detected by the garbage collector. Taken together, all :c:member:`~PyTypeObject.tp_clear`
functions in the system must combine to break all reference cycles. This is
- subtle, and if in any doubt supply a :attr:`tp_clear` function. For example,
- the tuple type does not implement a :attr:`tp_clear` function, because it's
+ subtle, and if in any doubt supply a :c:member:`~PyTypeObject.tp_clear` function. For example,
+ the tuple type does not implement a :c:member:`~PyTypeObject.tp_clear` function, because it's
possible to prove that no reference cycle can be composed entirely of tuples.
- Therefore the :attr:`tp_clear` functions of other types must be sufficient to
+ Therefore the :c:member:`~PyTypeObject.tp_clear` functions of other types must be sufficient to
break any cycle containing a tuple. This isn't immediately obvious, and there's
- rarely a good reason to avoid implementing :attr:`tp_clear`.
+ rarely a good reason to avoid implementing :c:member:`~PyTypeObject.tp_clear`.
- Implementations of :attr:`tp_clear` should drop the instance's references to
+ Implementations of :c:member:`~PyTypeObject.tp_clear` should drop the instance's references to
those of its members that may be Python objects, and set its pointers to those
members to *NULL*, as in the following example::
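The example referenced above is elided by the diff context; a hedged sketch for a hypothetical object with two members (all names illustrative), using the :c:func:`Py_CLEAR` macro discussed just below, would be::

    static int
    my_clear(MyObject *self)
    {
        /* Drop the references and NULL the pointers in a safe order. */
        Py_CLEAR(self->first);
        Py_CLEAR(self->second);
        return 0;
    }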
@@ -656,18 +656,18 @@ The following three fields only exist if the
so that *self* knows the contained object can no longer be used. The
:c:func:`Py_CLEAR` macro performs the operations in a safe order.
- Because the goal of :attr:`tp_clear` functions is to break reference cycles,
+ Because the goal of :c:member:`~PyTypeObject.tp_clear` functions is to break reference cycles,
it's not necessary to clear contained objects like Python strings or Python
integers, which can't participate in reference cycles. On the other hand, it may
be convenient to clear all contained Python objects, and write the type's
- :attr:`tp_dealloc` function to invoke :attr:`tp_clear`.
+ :c:member:`~PyTypeObject.tp_dealloc` function to invoke :c:member:`~PyTypeObject.tp_clear`.
More information about Python's garbage collection scheme can be found in
section :ref:`supporting-cycle-detection`.
- This field is inherited by subtypes together with :attr:`tp_traverse` and the
- :const:`Py_TPFLAGS_HAVE_GC` flag bit: the flag bit, :attr:`tp_traverse`, and
- :attr:`tp_clear` are all inherited from the base type if they are all zero in
+ This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_traverse` and the
+ :const:`Py_TPFLAGS_HAVE_GC` flag bit: the flag bit, :c:member:`~PyTypeObject.tp_traverse`, and
+ :c:member:`~PyTypeObject.tp_clear` are all inherited from the base type if they are all zero in
the subtype *and* the subtype has the :const:`Py_TPFLAGS_HAVE_RICHCOMPARE` flag
bit set.
@@ -688,13 +688,13 @@ The following three fields only exist if the
comparisons makes sense (e.g. ``==`` and ``!=``, but not ``<`` and
friends), directly raise :exc:`TypeError` in the rich comparison function.
- This field is inherited by subtypes together with :attr:`tp_compare` and
- :attr:`tp_hash`: a subtype inherits all three of :attr:`tp_compare`,
- :attr:`tp_richcompare`, and :attr:`tp_hash`, when the subtype's
- :attr:`tp_compare`, :attr:`tp_richcompare`, and :attr:`tp_hash` are all *NULL*.
+ This field is inherited by subtypes together with :c:member:`~PyTypeObject.tp_compare` and
+ :c:member:`~PyTypeObject.tp_hash`: a subtype inherits all three of :c:member:`~PyTypeObject.tp_compare`,
+ :c:member:`~PyTypeObject.tp_richcompare`, and :c:member:`~PyTypeObject.tp_hash`, when the subtype's
+ :c:member:`~PyTypeObject.tp_compare`, :c:member:`~PyTypeObject.tp_richcompare`, and :c:member:`~PyTypeObject.tp_hash` are all *NULL*.
The following constants are defined to be used as the third argument for
- :attr:`tp_richcompare` and for :c:func:`PyObject_RichCompare`:
+ :c:member:`~PyTypeObject.tp_richcompare` and for :c:func:`PyObject_RichCompare`:
+----------------+------------+
| Constant | Comparison |
@@ -725,26 +725,26 @@ set.
instance structure needs to include a field of type :c:type:`PyObject\*` which is
initialized to *NULL*.
- Do not confuse this field with :attr:`tp_weaklist`; that is the list head for
+ Do not confuse this field with :c:member:`~PyTypeObject.tp_weaklist`; that is the list head for
weak references to the type object itself.
This field is inherited by subtypes, but see the rules listed below. A subtype
may override this offset; this means that the subtype uses a different weak
reference list head than the base type. Since the list head is always found via
- :attr:`tp_weaklistoffset`, this should not be a problem.
+ :c:member:`~PyTypeObject.tp_weaklistoffset`, this should not be a problem.
- When a type defined by a class statement has no :attr:`__slots__` declaration,
+ When a type defined by a class statement has no :attr:`~object.__slots__` declaration,
and none of its base types are weakly referenceable, the type is made weakly
referenceable by adding a weak reference list head slot to the instance layout
+ and setting the :c:member:`~PyTypeObject.tp_weaklistoffset` to that slot's offset.
+ and setting the :c:member:`~PyTypeObject.tp_weaklistoffset` of that slot's offset.
When a type's :attr:`__slots__` declaration contains a slot named
:attr:`__weakref__`, that slot becomes the weak reference list head for
instances of the type, and the slot's offset is stored in the type's
- :attr:`tp_weaklistoffset`.
+ :c:member:`~PyTypeObject.tp_weaklistoffset`.
When a type's :attr:`__slots__` declaration does not contain a slot named
- :attr:`__weakref__`, the type inherits its :attr:`tp_weaklistoffset` from its
+ :attr:`__weakref__`, the type inherits its :c:member:`~PyTypeObject.tp_weaklistoffset` from its
base type.
The next two fields only exist if the :const:`Py_TPFLAGS_HAVE_ITER` flag bit is
@@ -772,7 +772,7 @@ set.
are iterators (although classic instances always have this function, even if
they don't define a :meth:`next` method).
- Iterator types should also define the :attr:`tp_iter` function, and that
+ Iterator types should also define the :c:member:`~PyTypeObject.tp_iter` function, and that
function should return the iterator instance itself (not a new iterator
instance).
@@ -780,7 +780,7 @@ set.
This field is inherited by subtypes.
-The next fields, up to and including :attr:`tp_weaklist`, only exist if the
+The next fields, up to and including :c:member:`~PyTypeObject.tp_weaklist`, only exist if the
:const:`Py_TPFLAGS_HAVE_CLASS` flag bit is set.
@@ -790,7 +790,7 @@ The next fields, up to and including :attr:`tp_weaklist`, only exist if the
structures, declaring regular methods of this type.
For each entry in the array, an entry is added to the type's dictionary (see
- :attr:`tp_dict` below) containing a method descriptor.
+ :c:member:`~PyTypeObject.tp_dict` below) containing a method descriptor.
This field is not inherited by subtypes (methods are inherited through a
different mechanism).
@@ -803,7 +803,7 @@ The next fields, up to and including :attr:`tp_weaklist`, only exist if the
this type.
For each entry in the array, an entry is added to the type's dictionary (see
- :attr:`tp_dict` below) containing a member descriptor.
+ :c:member:`~PyTypeObject.tp_dict` below) containing a member descriptor.
This field is not inherited by subtypes (members are inherited through a
different mechanism).
@@ -815,7 +815,7 @@ The next fields, up to and including :attr:`tp_weaklist`, only exist if the
structures, declaring computed attributes of instances of this type.
For each entry in the array, an entry is added to the type's dictionary (see
- :attr:`tp_dict` below) containing a getset descriptor.
+ :c:member:`~PyTypeObject.tp_dict` below) containing a getset descriptor.
This field is not inherited by subtypes (computed attributes are inherited
through a different mechanism).
@@ -894,7 +894,7 @@ The next fields, up to and including :attr:`tp_weaklist`, only exist if the
the instance variable dictionary; this offset is used by
:c:func:`PyObject_GenericGetAttr`.
- Do not confuse this field with :attr:`tp_dict`; that is the dictionary for
+ Do not confuse this field with :c:member:`~PyTypeObject.tp_dict`; that is the dictionary for
attributes of the type object itself.
If the value of this field is greater than zero, it specifies the offset from
@@ -903,20 +903,20 @@ The next fields, up to and including :attr:`tp_weaklist`, only exist if the
offset is more expensive to use, and should only be used when the instance
structure contains a variable-length part. This is used for example to add an
instance variable dictionary to subtypes of :class:`str` or :class:`tuple`. Note
- that the :attr:`tp_basicsize` field should account for the dictionary added to
+ that the :c:member:`~PyTypeObject.tp_basicsize` field should account for the dictionary added to
the end in that case, even though the dictionary is not included in the basic
object layout. On a system with a pointer size of 4 bytes,
- :attr:`tp_dictoffset` should be set to ``-4`` to indicate that the dictionary is
+ :c:member:`~PyTypeObject.tp_dictoffset` should be set to ``-4`` to indicate that the dictionary is
at the very end of the structure.
The real dictionary offset in an instance can be computed from a negative
- :attr:`tp_dictoffset` as follows::
+ :c:member:`~PyTypeObject.tp_dictoffset` as follows::
dictoffset = tp_basicsize + abs(ob_size)*tp_itemsize + tp_dictoffset
if dictoffset is not aligned on sizeof(void*):
round up to sizeof(void*)
- where :attr:`tp_basicsize`, :attr:`tp_itemsize` and :attr:`tp_dictoffset` are
+ where :c:member:`~PyTypeObject.tp_basicsize`, :c:member:`~PyTypeObject.tp_itemsize` and :c:member:`~PyTypeObject.tp_dictoffset` are
taken from the type object, and :attr:`ob_size` is taken from the instance. The
absolute value is taken because long ints use the sign of :attr:`ob_size` to
store the sign of the number. (There's never a need to do this calculation
@@ -925,17 +925,17 @@ The next fields, up to and including :attr:`tp_weaklist`, only exist if the
This field is inherited by subtypes, but see the rules listed below. A subtype
may override this offset; this means that the subtype instances store the
dictionary at a different offset than the base type. Since the dictionary is
- always found via :attr:`tp_dictoffset`, this should not be a problem.
+ always found via :c:member:`~PyTypeObject.tp_dictoffset`, this should not be a problem.
- When a type defined by a class statement has no :attr:`__slots__` declaration,
+ When a type defined by a class statement has no :attr:`~object.__slots__` declaration,
and none of its base types has an instance variable dictionary, a dictionary
- slot is added to the instance layout and the :attr:`tp_dictoffset` is set to
+ slot is added to the instance layout and the :c:member:`~PyTypeObject.tp_dictoffset` is set to
that slot's offset.
When a type defined by a class statement has a :attr:`__slots__` declaration,
- the type inherits its :attr:`tp_dictoffset` from its base type.
+ the type inherits its :c:member:`~PyTypeObject.tp_dictoffset` from its base type.
- (Adding a slot named :attr:`__dict__` to the :attr:`__slots__` declaration does
+ (Adding a slot named :attr:`~object.__dict__` to the :attr:`__slots__` declaration does
+ not have the expected effect; it just causes confusion. Maybe this should be
added as a feature just like :attr:`__weakref__` though.)
@@ -957,15 +957,15 @@ The next fields, up to and including :attr:`tp_weaklist`, only exist if the
arguments represent positional and keyword arguments of the call to
:meth:`__init__`.
- The :attr:`tp_init` function, if not *NULL*, is called when an instance is
- created normally by calling its type, after the type's :attr:`tp_new` function
- has returned an instance of the type. If the :attr:`tp_new` function returns an
+ The :c:member:`~PyTypeObject.tp_init` function, if not *NULL*, is called when an instance is
+ created normally by calling its type, after the type's :c:member:`~PyTypeObject.tp_new` function
+ has returned an instance of the type. If the :c:member:`~PyTypeObject.tp_new` function returns an
instance of some other type that is not a subtype of the original type, no
- :attr:`tp_init` function is called; if :attr:`tp_new` returns an instance of a
- subtype of the original type, the subtype's :attr:`tp_init` is called. (VERSION
+ :c:member:`~PyTypeObject.tp_init` function is called; if :c:member:`~PyTypeObject.tp_new` returns an instance of a
+ subtype of the original type, the subtype's :c:member:`~PyTypeObject.tp_init` is called. (VERSION
NOTE: described here is what is implemented in Python 2.2.1 and later. In
- Python 2.2, the :attr:`tp_init` of the type of the object returned by
- :attr:`tp_new` was always called, if not *NULL*.)
+ Python 2.2, the :c:member:`~PyTypeObject.tp_init` of the type of the object returned by
+ :c:member:`~PyTypeObject.tp_new` was always called, if not *NULL*.)
This field is inherited by subtypes.
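
The same rule is visible at the Python level through :meth:`__new__` and :meth:`__init__`.
The sketch below is an assumed illustration (the ``Weird`` class is made up): when
:meth:`__new__` returns an object that is not an instance of the class or a subclass,
:meth:`__init__` is never invoked::

   class Weird(object):
       def __new__(cls, *args):
           return 42                       # an instance of an unrelated type (int)

       def __init__(self, *args):
           print "__init__ called"         # never reached for Weird(...)

   obj = Weird()
   print obj                               # 42, and "__init__ called" was not printed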
@@ -982,14 +982,14 @@ The next fields, up to and including :attr:`tp_weaklist`, only exist if the
initialization. It should return a pointer to a block of memory of adequate
length for the instance, suitably aligned, and initialized to zeros, but with
:attr:`ob_refcnt` set to ``1`` and :attr:`ob_type` set to the type argument. If
- the type's :attr:`tp_itemsize` is non-zero, the object's :attr:`ob_size` field
+ the type's :c:member:`~PyTypeObject.tp_itemsize` is non-zero, the object's :attr:`ob_size` field
should be initialized to *nitems* and the length of the allocated memory block
should be ``tp_basicsize + nitems*tp_itemsize``, rounded up to a multiple of
``sizeof(void*)``; otherwise, *nitems* is not used and the length of the block
- should be :attr:`tp_basicsize`.
+ should be :c:member:`~PyTypeObject.tp_basicsize`.
Do not use this function to do any other instance initialization, not even to
- allocate additional memory; that should be done by :attr:`tp_new`.
+ allocate additional memory; that should be done by :c:member:`~PyTypeObject.tp_new`.
This field is inherited by static subtypes, but not by dynamic subtypes
(subtypes created by a class statement); in the latter, this field is always set
@@ -1011,20 +1011,20 @@ The next fields, up to and including :attr:`tp_weaklist`, only exist if the
The subtype argument is the type of the object being created; the *args* and
*kwds* arguments represent positional and keyword arguments of the call to the
- type. Note that subtype doesn't have to equal the type whose :attr:`tp_new`
+ type. Note that subtype doesn't have to equal the type whose :c:member:`~PyTypeObject.tp_new`
function is called; it may be a subtype of that type (but not an unrelated
type).
- The :attr:`tp_new` function should call ``subtype->tp_alloc(subtype, nitems)``
+ The :c:member:`~PyTypeObject.tp_new` function should call ``subtype->tp_alloc(subtype, nitems)``
to allocate space for the object, and then do only as much further
initialization as is absolutely necessary. Initialization that can safely be
- ignored or repeated should be placed in the :attr:`tp_init` handler. A good
+ ignored or repeated should be placed in the :c:member:`~PyTypeObject.tp_init` handler. A good
rule of thumb is that for immutable types, all initialization should take place
- in :attr:`tp_new`, while for mutable types, most initialization should be
- deferred to :attr:`tp_init`.
+ in :c:member:`~PyTypeObject.tp_new`, while for mutable types, most initialization should be
+ deferred to :c:member:`~PyTypeObject.tp_init`.
This field is inherited by subtypes, except it is not inherited by static types
- whose :attr:`tp_base` is *NULL* or ``&PyBaseObject_Type``. The latter exception
+ whose :c:member:`~PyTypeObject.tp_base` is *NULL* or ``&PyBaseObject_Type``. The latter exception
is a precaution so that old extension types don't become callable simply by
being linked with Python 2.2.
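
A hedged Python-level analogue of this rule of thumb (do all the work for immutable
types in :meth:`__new__`, defer the rest to :meth:`__init__`); the ``Point`` and ``Bag``
names are invented for the example::

   class Point(tuple):
       # Immutable: the value must be fixed in __new__, before the object exists.
       def __new__(cls, x, y):
           return super(Point, cls).__new__(cls, (x, y))

   class Bag(list):
       # Mutable: initialization can safely be deferred to __init__.
       def __init__(self, items=()):
           super(Bag, self).__init__(items)

   print Point(1, 2)        # (1, 2)
   print Bag([1, 2, 3])     # [1, 2, 3]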
@@ -1057,7 +1057,7 @@ The next fields, up to and including :attr:`tp_weaklist`, only exist if the
The garbage collector needs to know whether a particular object is collectible
or not. Normally, it is sufficient to look at the object's type's
- :attr:`tp_flags` field, and check the :const:`Py_TPFLAGS_HAVE_GC` flag bit. But
+ :c:member:`~PyTypeObject.tp_flags` field, and check the :const:`Py_TPFLAGS_HAVE_GC` flag bit. But
some types have a mixture of statically and dynamically allocated instances, and
the statically allocated instances are not collectible. Such types should
define this function; it should return ``1`` for a collectible instance, and
@@ -1129,7 +1129,7 @@ subtypes.
.. c:member:: PyTypeObject* PyTypeObject.tp_next
- Pointer to the next type object with a non-zero :attr:`tp_allocs` field.
+ Pointer to the next type object with a non-zero :c:member:`~PyTypeObject.tp_allocs` field.
Also, note that, in a garbage collected Python, tp_dealloc may be called from
any Python thread, not just the thread which created the object (if the object
@@ -1227,7 +1227,7 @@ on the flag bit :const:`Py_TPFLAGS_CHECKTYPES`:
- If the :const:`Py_TPFLAGS_CHECKTYPES` flag is set, binary and ternary
functions must check the type of all their operands, and implement the
necessary conversions (at least one of the operands is an instance of the
- defined type). This is the recommended way; with Python 3.0 coercion will
+ defined type). This is the recommended way; with Python 3 coercion will
disappear completely.
If the operation is not defined for the given operands, binary and ternary
@@ -1289,13 +1289,14 @@ Sequence Object Structures
This function is used by :c:func:`PySequence_Concat` and has the same
signature. It is also used by the ``+`` operator, after trying the numeric
- addition via the :attr:`tp_as_number.nb_add` slot.
+ addition via the :c:member:`~PyTypeObject.tp_as_number.nb_add` slot.
.. c:member:: ssizeargfunc PySequenceMethods.sq_repeat
This function is used by :c:func:`PySequence_Repeat` and has the same
signature. It is also used by the ``*`` operator, after trying numeric
- multiplication via the :attr:`tp_as_number.nb_mul` slot.
+ multiplication via the :c:member:`~PyTypeObject.tp_as_number.nb_multiply`
+ slot.
.. c:member:: ssizeargfunc PySequenceMethods.sq_item
@@ -1348,14 +1349,14 @@ data as a set of chunks of data, where each chunk is specified as a
pointer/length pair. These chunks are called :dfn:`segments` and are presumed
to be non-contiguous in memory.
-If an object does not export the buffer interface, then its :attr:`tp_as_buffer`
+If an object does not export the buffer interface, then its :c:member:`~PyTypeObject.tp_as_buffer`
member in the :c:type:`PyTypeObject` structure should be *NULL*. Otherwise, the
-:attr:`tp_as_buffer` will point to a :c:type:`PyBufferProcs` structure.
+:c:member:`~PyTypeObject.tp_as_buffer` will point to a :c:type:`PyBufferProcs` structure.
.. note::
It is very important that your :c:type:`PyTypeObject` structure uses
- :const:`Py_TPFLAGS_DEFAULT` for the value of the :attr:`tp_flags` member rather
+ :const:`Py_TPFLAGS_DEFAULT` for the value of the :c:member:`~PyTypeObject.tp_flags` member rather
than ``0``. This tells the Python runtime that your :c:type:`PyBufferProcs`
structure contains the :attr:`bf_getcharbuffer` slot. Older versions of Python
did not have this member, so a new Python interpreter using an old extension
@@ -1385,7 +1386,7 @@ member in the :c:type:`PyTypeObject` structure should be *NULL*. Otherwise, the
The last slot is :attr:`bf_getcharbuffer`, of type :c:type:`getcharbufferproc`.
This slot will only be present if the :const:`Py_TPFLAGS_HAVE_GETCHARBUFFER`
- flag is present in the :attr:`tp_flags` field of the object's
+ flag is present in the :c:member:`~PyTypeObject.tp_flags` field of the object's
:c:type:`PyTypeObject`. Before using this slot, the caller should test whether it
is present by using the :c:func:`PyType_HasFeature` function. If the flag is
present, :attr:`bf_getcharbuffer` may be *NULL*, indicating that the object's
diff --git a/Doc/c-api/unicode.rst b/Doc/c-api/unicode.rst
index 73f6fe6..ddeaaa2 100644
--- a/Doc/c-api/unicode.rst
+++ b/Doc/c-api/unicode.rst
@@ -252,6 +252,8 @@ APIs:
.. % because not all compilers support the %z width modifier -- we fake it
.. % when necessary via interpolating PY_FORMAT_SIZE_T.
+ .. tabularcolumns:: |l|l|L|
+
+-------------------+---------------------+--------------------------------+
| Format Characters | Type | Comment |
+===================+=====================+================================+
@@ -453,9 +455,9 @@ These are the generic codec APIs:
Encode the :c:type:`Py_UNICODE` buffer *s* of the given *size* and return a Python
string object. *encoding* and *errors* have the same meaning as the parameters
- of the same name in the Unicode :meth:`encode` method. The codec to be used is
- looked up using the Python codec registry. Return *NULL* if an exception was
- raised by the codec.
+ of the same name in the Unicode :meth:`~unicode.encode` method. The codec
+ to be used is looked up using the Python codec registry. Return *NULL* if
+ an exception was raised by the codec.
.. versionchanged:: 2.5
This function used an :c:type:`int` type for *size*. This might require
diff --git a/Doc/c-api/veryhigh.rst b/Doc/c-api/veryhigh.rst
index 4ce3b03..6107665 100644
--- a/Doc/c-api/veryhigh.rst
+++ b/Doc/c-api/veryhigh.rst
@@ -265,7 +265,7 @@ the same library that the Python runtime is using.
frame *f* is executed, interpreting bytecode and executing calls as needed.
The additional *throwflag* parameter can mostly be ignored - if true, then
it causes an exception to immediately be thrown; this is used for the
- :meth:`throw` methods of generator objects.
+ :meth:`~generator.throw` methods of generator objects.
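
For reference, the generator-level behaviour that *throwflag* supports looks like this
from pure Python (a small assumed example, not part of the original text)::

   def gen():
       try:
           yield 1
       except ValueError:
           yield "caught"

   g = gen()
   print g.next()               # 1
   print g.throw(ValueError)    # exception raised at the paused yield -> "caught"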
.. c:function:: int PyEval_MergeCompilerFlags(PyCompilerFlags *cf)
diff --git a/Doc/copyright.rst b/Doc/copyright.rst
index 9a245c8..be47e8a 100644
--- a/Doc/copyright.rst
+++ b/Doc/copyright.rst
@@ -4,7 +4,7 @@ Copyright
Python and this documentation is:
-Copyright © 2001-2012 Python Software Foundation. All rights reserved.
+Copyright © 2001-2014 Python Software Foundation. All rights reserved.
Copyright © 2000 BeOpen.com. All rights reserved.
diff --git a/Doc/data/refcounts.dat b/Doc/data/refcounts.dat
index 1fc896f..06c19d0 100644
--- a/Doc/data/refcounts.dat
+++ b/Doc/data/refcounts.dat
@@ -932,7 +932,7 @@ PyObject_CallMethod::...::
PyObject_CallMethodObjArgs:PyObject*::+1:
PyObject_CallMethodObjArgs:PyObject*:o:0:
-PyObject_CallMethodObjArgs:char*:name::
+PyObject_CallMethodObjArgs:PyObject*:name:0:
PyObject_CallMethodObjArgs::...::
PyObject_CallObject:PyObject*::+1:
diff --git a/Doc/distutils/apiref.rst b/Doc/distutils/apiref.rst
index 692d5cf..d1201ef 100644
--- a/Doc/distutils/apiref.rst
+++ b/Doc/distutils/apiref.rst
@@ -26,6 +26,8 @@ setup script). Indirectly provides the :class:`distutils.dist.Distribution` and
The setup function takes a large number of arguments. These are laid out in the
following table.
+ .. tabularcolumns:: |l|L|L|
+
+--------------------+--------------------------------+-------------------------------------------------------------+
| argument name | value | type |
+====================+================================+=============================================================+
@@ -48,7 +50,10 @@ setup script). Indirectly provides the :class:`distutils.dist.Distribution` and
+--------------------+--------------------------------+-------------------------------------------------------------+
| *maintainer* | The name of the current | a string |
| | maintainer, if different from | |
- | | the author | |
+ | | the author. Note that if | |
+ | | the maintainer is provided, | |
+ | | distutils will use it as the | |
+ | | author in :file:`PKG-INFO` | |
+--------------------+--------------------------------+-------------------------------------------------------------+
| *maintainer_email* | The email address of the | a string |
| | current maintainer, if | |
@@ -122,6 +127,8 @@ setup script). Indirectly provides the :class:`distutils.dist.Distribution` and
*stop_after* tells :func:`setup` when to stop processing; possible values:
+ .. tabularcolumns:: |l|L|
+
+---------------+---------------------------------------------+
| value | description |
+===============+=============================================+
@@ -162,6 +169,8 @@ the full reference.
The Extension class describes a single C or C++ extension module in a setup
script. It accepts the following keyword arguments in its constructor
+ .. tabularcolumns:: |l|L|l|
+
+------------------------+--------------------------------+---------------------------+
| argument name | value | type |
+========================+================================+===========================+
@@ -444,7 +453,9 @@ This module provides the following functions.
Define a preprocessor macro for all compilations driven by this compiler object.
The optional parameter *value* should be a string; if it is not supplied, then
the macro will be defined without an explicit value and the exact outcome
- depends on the compiler used (XXX true? does ANSI say anything about this?)
+ depends on the compiler used.
+
+ .. XXX true? does ANSI say anything about this?
.. method:: CCompiler.undefine_macro(name)
@@ -598,7 +609,9 @@ This module provides the following functions.
*output_libname* should be a library name, not a filename; the filename will be
inferred from the library name. *output_dir* is the directory where the library
- file will be put. XXX defaults to what?
+ file will be put.
+
+ .. XXX defaults to what?
*debug* is a boolean; if true, debugging information will be included in the
library (note that on most platforms, it is the compile step where this matters:
@@ -716,32 +729,31 @@ This module provides the following functions.
.. method:: CCompiler.execute(func, args[, msg=None, level=1])
- Invokes :func:`distutils.util.execute` This method invokes a Python function
+ Invokes :func:`distutils.util.execute`. This method invokes a Python function
*func* with the given arguments *args*, after logging and taking into account
- the *dry_run* flag. XXX see also.
+ the *dry_run* flag.
.. method:: CCompiler.spawn(cmd)
Invokes :func:`distutils.util.spawn`. This invokes an external process to run
- the given command. XXX see also.
+ the given command.
.. method:: CCompiler.mkpath(name[, mode=511])
Invokes :func:`distutils.dir_util.mkpath`. This creates a directory and any
- missing ancestor directories. XXX see also.
+ missing ancestor directories.
.. method:: CCompiler.move_file(src, dst)
- Invokes :meth:`distutils.file_util.move_file`. Renames *src* to *dst*. XXX see
- also.
+ Invokes :meth:`distutils.file_util.move_file`. Renames *src* to *dst*.
.. method:: CCompiler.announce(msg[, level=1])
- Write a message using :func:`distutils.log.debug`. XXX see also.
+ Write a message using :func:`distutils.log.debug`.
.. method:: CCompiler.warn(msg)
@@ -869,8 +881,6 @@ tarballs or zipfiles.
prefix of all files and directories in the archive. *root_dir* and *base_dir*
both default to the current directory. Returns the name of the archive file.
- .. XXX This should be changed to support bz2 files.
-
.. function:: make_tarball(base_name, base_dir[, compress='gzip', verbose=0, dry_run=0])
@@ -882,8 +892,6 @@ tarballs or zipfiles.
possibly plus the appropriate compression extension (:file:`.gz`, :file:`.bz2`
or :file:`.Z`). Return the output filename.
- .. XXX This should be replaced with calls to the :mod:`tarfile` module.
-
.. function:: make_zipfile(base_name, base_dir[, verbose=0, dry_run=0])
@@ -974,20 +982,28 @@ directories.
Copy an entire directory tree *src* to a new location *dst*. Both *src* and
*dst* must be directory names. If *src* is not a directory, raise
:exc:`DistutilsFileError`. If *dst* does not exist, it is created with
- :func:`mkpath`. The end result of the copy is that every file in *src* is
- copied to *dst*, and directories under *src* are recursively copied to *dst*.
+ :func:`mkpath`. The end result of the copy is that every file in *src* is
+ copied to *dst*, and directories under *src* are recursively copied to *dst*.
Return the list of files that were copied or might have been copied, using their
output name. The return value is unaffected by *update* or *dry_run*: it is
simply the list of all files under *src*, with the names changed to be under
*dst*.
- *preserve_mode* and *preserve_times* are the same as for :func:`copy_file` in
- :mod:`distutils.file_util`; note that they only apply to regular files, not to
+ *preserve_mode* and *preserve_times* are the same as for
+ :func:`distutils.file_util.copy_file`; note that they only apply to
+ regular files, not to
directories. If *preserve_symlinks* is true, symlinks will be copied as
symlinks (on platforms that support them!); otherwise (the default), the
destination of the symlink will be copied. *update* and *verbose* are the same
as for :func:`copy_file`.
+ Files in *src* that begin with :file:`.nfs` are skipped (more information on
+ these files is available in answer D2 of the `NFS FAQ page
+ <http://nfs.sourceforge.net/#section_d>`_).
+
+ .. versionchanged:: 2.7.4
+ NFS files are ignored.
+
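
A minimal usage sketch of :func:`distutils.dir_util.copy_tree`, assuming a throw-away
``src`` tree created on the spot (the directory and file names are hypothetical)::

   from distutils.dir_util import copy_tree, mkpath

   mkpath('src/sub')                          # build a tiny tree to copy
   open('src/sub/data.txt', 'w').close()

   copied = copy_tree('src', 'dst', preserve_symlinks=1, update=1)
   print copied                               # e.g. ['dst/sub/data.txt']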
.. function:: remove_tree(directory[, verbose=0, dry_run=0])
@@ -995,8 +1011,6 @@ directories.
errors are ignored (apart from being reported to ``sys.stdout`` if *verbose* is
true).
-.. XXX Some of this could be replaced with the shutil module?
-
:mod:`distutils.file_util` --- Single file operations
=====================================================
@@ -1110,8 +1124,6 @@ other utility module.
* ``macosx-10.6-intel``
- .. % XXX isn't this also provided by some other non-distutils module?
-
.. function:: convert_path(pathname)
@@ -1155,15 +1167,6 @@ other utility module.
underscore. No { } or ( ) style quoting is available.
-.. function:: grok_environment_error(exc[, prefix='error: '])
-
- Generate a useful error message from an :exc:`EnvironmentError` (:exc:`IOError`
- or :exc:`OSError`) exception object. Handles Python 1.5.1 and later styles,
- and does what it can to deal with exception objects that don't have a filename
- (which happens when the error is due to a two-file operation, such as
- :func:`rename` or :func:`link`). Returns the error message as a string
- prefixed with *prefix*.
-
.. function:: split_quoted(s)
@@ -1246,8 +1249,8 @@ other utility module.
built/installed/distributed
-This module provides the :class:`Distribution` class, which represents the
-module distribution being built/installed/distributed.
+This module provides the :class:`~distutils.core.Distribution` class, which
+represents the module distribution being built/installed/distributed.
:mod:`distutils.extension` --- The Extension class
@@ -1311,8 +1314,6 @@ provides the following additional features:
the "negative alias" of :option:`--verbose`, then :option:`--quiet` on the
command line sets *verbose* to false.
-.. XXX Should be replaced with :mod:`optparse`.
-
.. function:: fancy_getopt(options, negative_opt, object, args)
@@ -1329,8 +1330,6 @@ provides the following additional features:
Wraps *text* to less than *width* characters wide.
- .. XXX Should be replaced with :mod:`textwrap` (which is available in Python
- 2.3 and later).
.. class:: FancyGetopt([option_table=None])
@@ -1394,10 +1393,6 @@ filesystem and building lists of files.
:synopsis: A simple logging mechanism, 282-style
-.. XXX Should be replaced with standard :mod:`logging` module.
-
-
-
:mod:`distutils.spawn` --- Spawn a sub-process
==============================================
@@ -1559,6 +1554,8 @@ lines, and joining lines with backslashes.
The options are all boolean, and affect the values returned by :meth:`readline`
+ .. tabularcolumns:: |l|L|l|
+
+------------------+--------------------------------+---------+
| option name | description | default |
+==================+================================+=========+
@@ -1701,8 +1698,8 @@ This module supplies the abstract base class :class:`Command`.
options, is the :meth:`run` method, which must also be implemented by every
command class.
- The class constructor takes a single argument *dist*, a :class:`Distribution`
- instance.
+ The class constructor takes a single argument *dist*, a
+ :class:`~distutils.core.Distribution` instance.
Creating a new Distutils command
@@ -1894,9 +1891,6 @@ Subclasses of :class:`Command` must define the following methods.
:synopsis: Build the .py/.pyc files of a package
-.. % todo
-
-
:mod:`distutils.command.build_scripts` --- Build the scripts of a package
=========================================================================
@@ -1913,8 +1907,12 @@ Subclasses of :class:`Command` must define the following methods.
.. module:: distutils.command.clean
:synopsis: Clean a package build area
+This command removes the temporary files created by :command:`build`
+and its subcommands, like intermediary compiled object files. With
+the ``--all`` option, the complete build directory will be removed.
-.. % todo
+Extension modules built :ref:`in place <distutils-build-ext-inplace>`
+will not be cleaned, as they are not in the build directory.
:mod:`distutils.command.config` --- Perform package configuration
diff --git a/Doc/distutils/configfile.rst b/Doc/distutils/configfile.rst
index 890047c..ac79671 100644
--- a/Doc/distutils/configfile.rst
+++ b/Doc/distutils/configfile.rst
@@ -69,6 +69,8 @@ universal :option:`--help` option, e.g. ::
Note that an option spelled :option:`--foo-bar` on the command-line is spelled
:option:`foo_bar` in configuration files.
+.. _distutils-build-ext-inplace:
+
For example, say you want your extensions to be built "in-place"---that is, you
have an extension :mod:`pkg.ext`, and you want the compiled extension file
(:file:`ext.so` on Unix, say) to be put in the same source directory as your
diff --git a/Doc/distutils/examples.rst b/Doc/distutils/examples.rst
index b495928..3c6c7bc 100644
--- a/Doc/distutils/examples.rst
+++ b/Doc/distutils/examples.rst
@@ -193,9 +193,6 @@ then the corresponding setup script would be ::
packages=['foobar', 'foobar.subfoo'],
)
-(Again, the empty string in :option:`package_dir` stands for the current
-directory.)
-
.. _single-ext:
diff --git a/Doc/distutils/index.rst b/Doc/distutils/index.rst
index ace8280..1a6f04c 100644
--- a/Doc/distutils/index.rst
+++ b/Doc/distutils/index.rst
@@ -6,14 +6,22 @@
:Authors: Greg Ward, Anthony Baxter
:Email: distutils-sig@python.org
-:Release: |version|
-:Date: |today|
This document describes the Python Distribution Utilities ("Distutils") from
the module developer's point of view, describing how to use the Distutils to
make Python modules and extensions easily available to a wider audience with
very little overhead for build/release/install mechanics.
+.. note::
+
+ This guide only covers the basic tools for building and distributing
+ extensions that are provided as part of this version of Python. Third
+ party tools offer easier to use and more secure alternatives. Refer to the
+ `quick recommendations section
+ <https://python-packaging-user-guide.readthedocs.org/en/latest/current.html>`__
+ in the Python Packaging User Guide for more information.
+
+
.. toctree::
:maxdepth: 2
:numbered:
@@ -24,7 +32,6 @@ very little overhead for build/release/install mechanics.
sourcedist.rst
builtdist.rst
packageindex.rst
- uploading.rst
examples.rst
extending.rst
commandref.rst
diff --git a/Doc/distutils/packageindex.rst b/Doc/distutils/packageindex.rst
index 1498394..1d724e2 100644
--- a/Doc/distutils/packageindex.rst
+++ b/Doc/distutils/packageindex.rst
@@ -1,12 +1,33 @@
+.. index::
+ single: Python Package Index (PyPI)
+ single: PyPI; (see Python Package Index (PyPI))
+
.. _package-index:
-**********************************
-Registering with the Package Index
-**********************************
+*******************************
+The Python Package Index (PyPI)
+*******************************
+
+The `Python Package Index (PyPI)`_ holds :ref:`meta-data <meta-data>`
+describing distributions packaged with distutils, as well as package data like
+distribution files if the package author wishes.
+
+Distutils exposes two commands for submitting package data to PyPI: the
+:ref:`register <package-register>` command for submitting meta-data to PyPI
+and the :ref:`upload <package-upload>` command for submitting distribution
+files. Both commands read configuration data from a special file called the
+:ref:`.pypirc file <pypirc>`. PyPI :ref:`displays a home page
+<package-display>` for each package created from the ``long_description``
+submitted by the :command:`register` command.
+
+
+.. _package-register:
-The Python Package Index (PyPI) holds meta-data describing distributions
-packaged with distutils. The distutils command :command:`register` is used to
-submit your distribution's meta-data to the index. It is invoked as follows::
+Registering Packages
+====================
+
+The distutils command :command:`register` is used to submit your distribution's
+meta-data to the index. It is invoked as follows::
python setup.py register
@@ -43,10 +64,58 @@ the web interface. They may also designate other users as Owners or Maintainers.
Maintainers may edit the package information, but not designate other Owners or
Maintainers.
-By default PyPI will list all versions of a given package. To hide certain
-versions, the Hidden property should be set to yes. This must be edited through
-the web interface.
+By default PyPI displays only the newest version of a given package. The web
+interface lets one change this default behavior and manually select which
+versions to display and hide.
+
+
+.. _package-upload:
+
+Uploading Packages
+==================
+
+.. versionadded:: 2.5
+
+The distutils command :command:`upload` pushes the distribution files to PyPI.
+
+The command is invoked immediately after building one or more distribution
+files. For example, the command ::
+
+ python setup.py sdist bdist_wininst upload
+
+will cause the source distribution and the Windows installer to be uploaded to
+PyPI. Note that these will be uploaded even if they are built using an earlier
+invocation of :file:`setup.py`, but that only distributions named on the command
+line for the invocation including the :command:`upload` command are uploaded.
+
+The :command:`upload` command uses the username, password, and repository URL
+from the :file:`$HOME/.pypirc` file (see section :ref:`pypirc` for more on this
+file). If a :command:`register` command was previously called in the same command,
+and if the password was entered in the prompt, :command:`upload` will reuse the
+entered password. This is useful if you do not want to store a clear text
+password in the :file:`$HOME/.pypirc` file.
+
+You can specify another PyPI server with the ``--repository=url`` option::
+
+ python setup.py sdist bdist_wininst upload -r http://example.com/pypi
+
+See section :ref:`pypirc` for more on defining several servers.
+You can use the ``--sign`` option to tell :command:`upload` to sign each
+uploaded file using GPG (GNU Privacy Guard). The :program:`gpg` program must
+be available for execution on the system :envvar:`PATH`. You can also specify
+which key to use for signing using the ``--identity=name`` option.
+
+Other :command:`upload` options include ``--repository=url`` or
+``--repository=section`` where *url* is the url of the server and
+*section* the name of the section in :file:`$HOME/.pypirc`, and
+``--show-response`` (which displays the full response text from the PyPI
+server for help in debugging upload problems).
+
+
+.. index::
+ single: .pypirc file
+ single: Python Package Index (PyPI); .pypirc file
.. _pypirc:
@@ -102,3 +171,45 @@ For convenience, the name of the section that describes the repository
may also be used::
python setup.py register -r other
+
+
+.. _package-display:
+
+PyPI package display
+====================
+
+The ``long_description`` field plays a special role at PyPI. It is used by
+the server to display a home page for the registered package.
+
+If you use the `reStructuredText <http://docutils.sourceforge.net/rst.html>`_
+syntax for this field, PyPI will parse it and display an HTML output for
+the package home page.
+
+The ``long_description`` field can be attached to a text file located
+in the package::
+
+ from distutils.core import setup
+
+ with open('README.txt') as file:
+ long_description = file.read()
+
+ setup(name='Distutils',
+ long_description=long_description)
+
+In that case, :file:`README.txt` is a regular reStructuredText text file located
+in the root of the package beside :file:`setup.py`.
+
+To prevent registering broken reStructuredText content, you can use the
+:program:`rst2html` program that is provided by the :mod:`docutils` package and
+check the ``long_description`` from the command line::
+
+ $ python setup.py --long-description | rst2html.py > output.html
+
+:mod:`docutils` will display a warning if there's something wrong with your
+syntax. Because PyPI applies additional checks (e.g. by passing ``--no-raw``
+to ``rst2html.py`` in the command above), being able to run the command above
+without warnings does not guarantee that PyPI will convert the content
+successfully.
+
+
+.. _Python Package Index (PyPI): http://pypi.python.org/
diff --git a/Doc/distutils/setupscript.rst b/Doc/distutils/setupscript.rst
index 165bfcd..15f130f 100644
--- a/Doc/distutils/setupscript.rst
+++ b/Doc/distutils/setupscript.rst
@@ -139,7 +139,8 @@ directories, libraries to link with, etc.).
All of this is done through another keyword argument to :func:`setup`, the
:option:`ext_modules` option. :option:`ext_modules` is just a list of
-:class:`Extension` instances, each of which describes a single extension module.
+:class:`~distutils.core.Extension` instances, each of which describes a
+single extension module.
Suppose your distribution includes a single extension, called :mod:`foo` and
implemented by :file:`foo.c`. If no additional instructions to the
compiler/linker are needed, describing this extension is quite simple::
@@ -165,8 +166,8 @@ following sections.
Extension names and packages
----------------------------
-The first argument to the :class:`Extension` constructor is always the name of
-the extension, including any package names. For example, ::
+The first argument to the :class:`~distutils.core.Extension` constructor is
+always the name of the extension, including any package names. For example, ::
Extension('foo', ['src/foo1.c', 'src/foo2.c'])
@@ -196,7 +197,8 @@ will compile :file:`foo.c` to the extension :mod:`pkg.foo`, and :file:`bar.c` to
Extension source files
----------------------
-The second argument to the :class:`Extension` constructor is a list of source
+The second argument to the :class:`~distutils.core.Extension` constructor is
+a list of source
files. Since the Distutils currently only support C, C++, and Objective-C
extensions, these are normally C/C++/Objective-C source files. (Be sure to use
appropriate extensions to distinguish C++\ source files: :file:`.cc` and
@@ -232,9 +234,9 @@ linked into the executable.
Preprocessor options
--------------------
-Three optional arguments to :class:`Extension` will help if you need to specify
-include directories to search or preprocessor macros to define/undefine:
-``include_dirs``, ``define_macros``, and ``undef_macros``.
+Three optional arguments to :class:`~distutils.core.Extension` will help if
+you need to specify include directories to search or preprocessor macros to
+define/undefine: ``include_dirs``, ``define_macros``, and ``undef_macros``.
For example, if your extension requires header files in the :file:`include`
directory under your distribution root, use the ``include_dirs`` option::
@@ -598,7 +600,8 @@ Notes:
It is recommended that versions take the form *major.minor[.patch[.sub]]*.
(3)
- Either the author or the maintainer must be identified.
+ Either the author or the maintainer must be identified. If maintainer is
+ provided, distutils lists it as the author in :file:`PKG-INFO`.
(4)
These fields should not be used if your package is to be compatible with Python
@@ -606,8 +609,9 @@ Notes:
<http://pypi.python.org/pypi>`_.
(5)
- The ``long_description`` field is used by PyPI when you are registering a
- package, to build its home page.
+ The ``long_description`` field is used by PyPI when you are
+ :ref:`registering <package-register>` a package, to
+ :ref:`build its home page <package-display>`.
(6)
The ``license`` field is a text indicating the license covering the
@@ -680,6 +684,8 @@ include the following code fragment in your :file:`setup.py` before the
DistributionMetadata.download_url = None
+.. _debug-setup-script:
+
Debugging the setup script
==========================
@@ -695,7 +701,8 @@ installation is broken because they don't read all the way down to the bottom
and see that it's a permission problem.
On the other hand, this doesn't help the developer to find the cause of the
-failure. For this purpose, the DISTUTILS_DEBUG environment variable can be set
+failure. For this purpose, the :envvar:`DISTUTILS_DEBUG` environment variable can be set
to anything except an empty string, and distutils will now print detailed
-information what it is doing, and prints the full traceback in case an exception
-occurs.
+information about what it is doing, dump the full traceback when an exception
+occurs, and print the whole command line when an external program (like a C
+compiler) fails.
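
One way to enable this from a wrapper script is simply to export the variable before
invoking the setup script; the snippet below is an assumed sketch (``setup.py`` stands
for whatever script you are debugging)::

   import os
   import subprocess

   env = dict(os.environ)
   env['DISTUTILS_DEBUG'] = '1'               # any non-empty string turns debugging on
   subprocess.call(['python', 'setup.py', 'build'], env=env)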
diff --git a/Doc/distutils/sourcedist.rst b/Doc/distutils/sourcedist.rst
index a9858d0..b1695a2 100644
--- a/Doc/distutils/sourcedist.rst
+++ b/Doc/distutils/sourcedist.rst
@@ -51,8 +51,7 @@ Notes:
of the standard Python library since Python 1.6)
(4)
- requires the :program:`compress` program. Notice that this format is now
- pending for deprecation and will be removed in the future versions of Python.
+ requires the :program:`compress` program.
When using any ``tar`` format (``gztar``, ``bztar``, ``ztar`` or
``tar``) under Unix, you can specify the ``owner`` and ``group`` names
diff --git a/Doc/distutils/uploading.rst b/Doc/distutils/uploading.rst
index 936402b..4bce699 100644
--- a/Doc/distutils/uploading.rst
+++ b/Doc/distutils/uploading.rst
@@ -1,77 +1,7 @@
-.. _package-upload:
+:orphan:
***************************************
Uploading Packages to the Package Index
***************************************
-.. versionadded:: 2.5
-
-The Python Package Index (PyPI) not only stores the package info, but also the
-package data if the author of the package wishes to. The distutils command
-:command:`upload` pushes the distribution files to PyPI.
-
-The command is invoked immediately after building one or more distribution
-files. For example, the command ::
-
- python setup.py sdist bdist_wininst upload
-
-will cause the source distribution and the Windows installer to be uploaded to
-PyPI. Note that these will be uploaded even if they are built using an earlier
-invocation of :file:`setup.py`, but that only distributions named on the command
-line for the invocation including the :command:`upload` command are uploaded.
-
-The :command:`upload` command uses the username, password, and repository URL
-from the :file:`$HOME/.pypirc` file (see section :ref:`pypirc` for more on this
-file). If a :command:`register` command was previously called in the same command,
-and if the password was entered in the prompt, :command:`upload` will reuse the
-entered password. This is useful if you do not want to store a clear text
-password in the :file:`$HOME/.pypirc` file.
-
-You can specify another PyPI server with the :option:`--repository=*url*` option::
-
- python setup.py sdist bdist_wininst upload -r http://example.com/pypi
-
-See section :ref:`pypirc` for more on defining several servers.
-
-You can use the :option:`--sign` option to tell :command:`upload` to sign each
-uploaded file using GPG (GNU Privacy Guard). The :program:`gpg` program must
-be available for execution on the system :envvar:`PATH`. You can also specify
-which key to use for signing using the :option:`--identity=*name*` option.
-
-Other :command:`upload` options include :option:`--repository=<url>` or
-:option:`--repository=<section>` where *url* is the url of the server and
-*section* the name of the section in :file:`$HOME/.pypirc`, and
-:option:`--show-response` (which displays the full response text from the PyPI
-server for help in debugging upload problems).
-
-PyPI package display
-====================
-
-The ``long_description`` field plays a special role at PyPI. It is used by
-the server to display a home page for the registered package.
-
-If you use the `reStructuredText <http://docutils.sourceforge.net/rst.html>`_
-syntax for this field, PyPI will parse it and display an HTML output for
-the package home page.
-
-The ``long_description`` field can be attached to a text file located
-in the package::
-
- from distutils.core import setup
-
- with open('README.txt') as file:
- long_description = file.read()
-
- setup(name='Distutils',
- long_description=long_description)
-
-In that case, :file:`README.txt` is a regular reStructuredText text file located
-in the root of the package besides :file:`setup.py`.
-
-To prevent registering broken reStructuredText content, you can use the
-:program:`rst2html` program that is provided by the :mod:`docutils` package
-and check the ``long_description`` from the command line::
-
- $ python setup.py --long-description | rst2html.py > output.html
-
-:mod:`docutils` will display a warning if there's something wrong with your syntax.
+The contents of this page have moved to the section :ref:`package-index`.
diff --git a/Doc/extending/building.rst b/Doc/extending/building.rst
index f4d95b2..08b0cc2 100644
--- a/Doc/extending/building.rst
+++ b/Doc/extending/building.rst
@@ -58,8 +58,9 @@ distutils; this section explains building extension modules only.
It is common to pre-compute arguments to :func:`setup`, to better structure the
driver script. In the example above, the\ ``ext_modules`` argument to
:func:`setup` is a list of extension modules, each of which is an instance of
-the :class:`Extension`. In the example, the instance defines an extension named
-``demo`` which is build by compiling a single source file, :file:`demo.c`.
+the :class:`~distutils.extension.Extension`. In the example, the instance
+defines an extension named ``demo`` which is built by compiling a single source
+file, :file:`demo.c`.
In many cases, building an extension is more complex, since additional
preprocessor defines and libraries may be needed. This is demonstrated in the
diff --git a/Doc/extending/embedding.rst b/Doc/extending/embedding.rst
index 4bd0199..981e1d5 100644
--- a/Doc/extending/embedding.rst
+++ b/Doc/extending/embedding.rst
@@ -61,6 +61,7 @@ perform some operation on a file. ::
int
main(int argc, char *argv[])
{
+ Py_SetProgramName(argv[0]); /* optional but recommended */
Py_Initialize();
PyRun_SimpleString("from time import time,ctime\n"
"print 'Today is',ctime(time())\n");
@@ -68,9 +69,11 @@ perform some operation on a file. ::
return 0;
}
-The above code first initializes the Python interpreter with
+The :c:func:`Py_SetProgramName` function should be called before
+:c:func:`Py_Initialize` to inform the interpreter about paths to Python run-time
+libraries. Next, the Python interpreter is initialized with
:c:func:`Py_Initialize`, followed by the execution of a hard-coded Python script
-that print the date and time. Afterwards, the :c:func:`Py_Finalize` call shuts
+that prints the date and time. Afterwards, the :c:func:`Py_Finalize` call shuts
the interpreter down, followed by the end of the program. In a real program,
you may want to get the Python script from another source, perhaps a text-editor
routine, a file, or a database. Getting the Python code from a file can better
@@ -137,7 +140,9 @@ The code to run a function defined in a Python script is:
This code loads a Python script using ``argv[1]``, and calls the function named
in ``argv[2]``. Its integer arguments are the other values of the ``argv``
array. If you compile and link this program (let's call the finished executable
-:program:`call`), and use it to execute a Python script, such as::
+:program:`call`), and use it to execute a Python script, such as:
+
+.. code-block:: python
def multiply(a,b):
print "Will compute", a, "times", b
@@ -226,7 +231,9 @@ following two statements directly after :c:func:`Py_Initialize`::
These two lines initialize the ``numargs`` variable, and make the
:func:`emb.numargs` function accessible to the embedded Python interpreter.
-With these extensions, the Python script can do things like ::
+With these extensions, the Python script can do things like
+
+.. code-block:: python
import emb
print "Number of arguments", emb.numargs()
@@ -251,35 +258,55 @@ program. There is no need to recompile Python itself using C++.
.. _link-reqs:
-Linking Requirements
-====================
-
-While the :program:`configure` script shipped with the Python sources will
-correctly build Python to export the symbols needed by dynamically linked
-extensions, this is not automatically inherited by applications which embed the
-Python library statically, at least on Unix. This is an issue when the
-application is linked to the static runtime library (:file:`libpython.a`) and
-needs to load dynamic extensions (implemented as :file:`.so` files).
-
-The problem is that some entry points are defined by the Python runtime solely
-for extension modules to use. If the embedding application does not use any of
-these entry points, some linkers will not include those entries in the symbol
-table of the finished executable. Some additional options are needed to inform
-the linker not to remove these symbols.
-
-Determining the right options to use for any given platform can be quite
-difficult, but fortunately the Python configuration already has those values.
-To retrieve them from an installed Python interpreter, start an interactive
-interpreter and have a short session like this::
-
- >>> import distutils.sysconfig
- >>> distutils.sysconfig.get_config_var('LINKFORSHARED')
- '-Xlinker -export-dynamic'
+Compiling and Linking under Unix-like systems
+=============================================
-.. index:: module: distutils.sysconfig
+It is not necessarily trivial to find the right flags to pass to your
+compiler (and linker) in order to embed the Python interpreter into your
+application, particularly because Python needs to load library modules
+implemented as C dynamic extensions (:file:`.so` files) linked against
+it.
+
+To find out the required compiler and linker flags, you can execute the
+:file:`python{X.Y}-config` script which is generated as part of the
+installation process (a :file:`python-config` script may also be
+available). This script has several options, of which the following will
+be directly useful to you:
+
+* ``pythonX.Y-config --cflags`` will give you the recommended flags when
+ compiling::
+
+ $ /opt/bin/python2.7-config --cflags
+ -I/opt/include/python2.7 -fno-strict-aliasing -DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes
+
+* ``pythonX.Y-config --ldflags`` will give you the recommended flags when
+ linking::
+
+ $ /opt/bin/python2.7-config --ldflags
+ -L/opt/lib/python2.7/config -lpthread -ldl -lutil -lm -lpython2.7 -Xlinker -export-dynamic
+
+.. note::
+ To avoid confusion between several Python installations (and especially
+ between the system Python and your own compiled Python), it is recommended
+ that you use the absolute path to :file:`python{X.Y}-config`, as in the above
+ example.
+
+If this procedure doesn't work for you (it is not guaranteed to work for
+all Unix-like platforms; however, we welcome :ref:`bug reports <reporting-bugs>`)
+you will have to read your system's documentation about dynamic linking and/or
+examine Python's :file:`Makefile` (use :func:`sysconfig.get_makefile_filename`
+to find its location) and compilation
+options. In this case, the :mod:`sysconfig` module is a useful tool to
+programmatically extract the configuration values that you will want to
+combine together. For example:
+
+.. code-block:: python
+
+ >>> import sysconfig
+ >>> sysconfig.get_config_var('LIBS')
+ '-lpthread -ldl -lutil'
+ >>> sysconfig.get_config_var('LINKFORSHARED')
+ '-Xlinker -export-dynamic'
-The contents of the string presented will be the options that should be used.
-If the string is empty, there's no need to add any additional options. The
-:const:`LINKFORSHARED` definition corresponds to the variable of the same name
-in Python's top-level :file:`Makefile`.
+.. XXX similar documentation for Windows missing
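
As a rough sketch of combining :mod:`sysconfig` values into a link line by hand (the
exact set of variables you need differs per platform; this is an assumption for a
Unix-like build, not a recipe from the document)::

   import sysconfig

   pyver = sysconfig.get_config_var('VERSION')            # e.g. '2.7'
   libpl = sysconfig.get_config_var('LIBPL')              # directory holding libpython2.7.a
   libs = sysconfig.get_config_var('LIBS') or ''
   syslibs = sysconfig.get_config_var('SYSLIBS') or ''
   linkforshared = sysconfig.get_config_var('LINKFORSHARED') or ''

   print '-L%s -lpython%s %s %s %s' % (libpl, pyver, libs, syslibs, linkforshared)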
diff --git a/Doc/extending/extending.rst b/Doc/extending/extending.rst
index eb18a46..8e8c3ab 100644
--- a/Doc/extending/extending.rst
+++ b/Doc/extending/extending.rst
@@ -372,6 +372,8 @@ to :c:func:`Py_Initialize`::
/* Add a static module */
initspam();
+ ...
+
An example may be found in the file :file:`Demo/embed/demo.c` in the Python
source distribution.
@@ -510,7 +512,7 @@ or more format codes between parentheses. For example::
value of the Python function. :c:func:`PyObject_CallObject` is
"reference-count-neutral" with respect to its arguments. In the example a new
tuple was created to serve as the argument list, which is :c:func:`Py_DECREF`\
--ed immediately after the call.
+-ed immediately after the :c:func:`PyObject_CallObject` call.
The return value of :c:func:`PyObject_CallObject` is "new": either it is a brand
new object, or it is an existing object whose reference count has been
@@ -843,9 +845,9 @@ the cycle itself.
The cycle detector is able to detect garbage cycles and can reclaim them so long
as there are no finalizers implemented in Python (:meth:`__del__` methods).
When there are such finalizers, the detector exposes the cycles through the
-:mod:`gc` module (specifically, the
-``garbage`` variable in that module). The :mod:`gc` module also exposes a way
-to run the detector (the :func:`collect` function), as well as configuration
+:mod:`gc` module (specifically, the :attr:`~gc.garbage` variable in that module).
+The :mod:`gc` module also exposes a way to run the detector (the
+:func:`~gc.collect` function), as well as configuration
interfaces and the ability to disable the detector at runtime. The cycle
detector is considered an optional component; though it is included by default,
it can be disabled at build time using the :option:`--without-cycle-gc` option
diff --git a/Doc/extending/index.rst b/Doc/extending/index.rst
index 92e6132..44a7f92 100644
--- a/Doc/extending/index.rst
+++ b/Doc/extending/index.rst
@@ -4,16 +4,13 @@
Extending and Embedding the Python Interpreter
##################################################
-:Release: |version|
-:Date: |today|
-
This document describes how to write modules in C or C++ to extend the Python
-interpreter with new modules. Those modules can define new functions but also
-new object types and their methods. The document also describes how to embed
-the Python interpreter in another application, for use as an extension language.
-Finally, it shows how to compile and link extension modules so that they can be
-loaded dynamically (at run time) into the interpreter, if the underlying
-operating system supports this feature.
+interpreter with new modules. Those modules can not only define new functions
+but also new object types and their methods. The document also describes how
+to embed the Python interpreter in another application, for use as an extension
+language. Finally, it shows how to compile and link extension modules so that
+they can be loaded dynamically (at run time) into the interpreter, if the
+underlying operating system supports this feature.
This document assumes basic knowledge about Python. For an informal
introduction to the language, see :ref:`tutorial-index`. :ref:`reference-index`
@@ -24,6 +21,15 @@ Python) that give the language its wide application range.
For a detailed description of the whole Python/C API, see the separate
:ref:`c-api-index`.
+.. note::
+
+ This guide only covers the basic tools for creating extensions provided
+ as part of this version of CPython. Third party tools may offer simpler
+ alternatives. Refer to the `binary extensions section
+ <https://python-packaging-user-guide.readthedocs.org/en/latest/extensions.html>`__
+ in the Python Packaging User Guide for more information.
+
+
.. toctree::
:maxdepth: 2
:numbered:
diff --git a/Doc/extending/newtypes.rst b/Doc/extending/newtypes.rst
index f18814f..d76aa24 100644
--- a/Doc/extending/newtypes.rst
+++ b/Doc/extending/newtypes.rst
@@ -150,11 +150,11 @@ This is so that Python knows how much memory to allocate when you call
.. note::
If you want your type to be subclassable from Python, and your type has the same
- :attr:`tp_basicsize` as its base type, you may have problems with multiple
+ :c:member:`~PyTypeObject.tp_basicsize` as its base type, you may have problems with multiple
inheritance. A Python subclass of your type will have to list your type first
- in its :attr:`__bases__`, or else it will not be able to call your type's
+ in its :attr:`~class.__bases__`, or else it will not be able to call your type's
:meth:`__new__` method without getting an error. You can avoid this problem by
- ensuring that your type has a larger value for :attr:`tp_basicsize` than its
+ ensuring that your type has a larger value for :c:member:`~PyTypeObject.tp_basicsize` than its
base type does. Most of the time, this will be true anyway, because either your
base type will be :class:`object`, or else you will be adding data members to
your base type, and therefore increasing its size.
@@ -174,7 +174,7 @@ to :const:`Py_TPFLAGS_DEFAULT`. ::
All types should include this constant in their flags. It enables all of the
members defined by the current version of Python.
-We provide a doc string for the type in :attr:`tp_doc`. ::
+We provide a doc string for the type in :c:member:`~PyTypeObject.tp_doc`. ::
"Noddy objects", /* tp_doc */
@@ -183,12 +183,12 @@ from the others. We aren't going to implement any of these in this version of
the module. We'll expand this example later to have more interesting behavior.
For now, all we want to be able to do is to create new :class:`Noddy` objects.
-To enable object creation, we have to provide a :attr:`tp_new` implementation.
+To enable object creation, we have to provide a :c:member:`~PyTypeObject.tp_new` implementation.
In this case, we can just use the default implementation provided by the API
function :c:func:`PyType_GenericNew`. We'd like to just assign this to the
-:attr:`tp_new` slot, but we can't, for portability sake, On some platforms or
+:c:member:`~PyTypeObject.tp_new` slot, but we can't, for portability's sake. On some platforms or
compilers, we can't statically initialize a structure member with a function
-defined in another C module, so, instead, we'll assign the :attr:`tp_new` slot
+defined in another C module, so, instead, we'll assign the :c:member:`~PyTypeObject.tp_new` slot
in the module initialization function just before calling
:c:func:`PyType_Ready`::
@@ -283,13 +283,13 @@ allocation and deallocation. At a minimum, we need a deallocation method::
self->ob_type->tp_free((PyObject*)self);
}
-which is assigned to the :attr:`tp_dealloc` member::
+which is assigned to the :c:member:`~PyTypeObject.tp_dealloc` member::
(destructor)Noddy_dealloc, /*tp_dealloc*/
This method decrements the reference counts of the two Python attributes. We use
:c:func:`Py_XDECREF` here because the :attr:`first` and :attr:`last` members
-could be *NULL*. It then calls the :attr:`tp_free` member of the object's type
+could be *NULL*. It then calls the :c:member:`~PyTypeObject.tp_free` member of the object's type
to free the object's memory. Note that the object's type might not be
:class:`NoddyType`, because the object may be an instance of a subclass.
@@ -323,7 +323,7 @@ strings, so we provide a new method::
return (PyObject *)self;
}
-and install it in the :attr:`tp_new` member::
+and install it in the :c:member:`~PyTypeObject.tp_new` member::
Noddy_new, /* tp_new */
@@ -344,16 +344,16 @@ created. New methods always accept positional and keyword arguments, but they
often ignore the arguments, leaving the argument handling to initializer
methods. Note that if the type supports subclassing, the type passed may not be
the type being defined. The new method calls the tp_alloc slot to allocate
-memory. We don't fill the :attr:`tp_alloc` slot ourselves. Rather
+memory. We don't fill the :c:member:`~PyTypeObject.tp_alloc` slot ourselves. Rather
:c:func:`PyType_Ready` fills it for us by inheriting it from our base class,
which is :class:`object` by default. Most types use the default allocation.
.. note::
- If you are creating a co-operative :attr:`tp_new` (one that calls a base type's
- :attr:`tp_new` or :meth:`__new__`), you must *not* try to determine what method
+ If you are creating a co-operative :c:member:`~PyTypeObject.tp_new` (one that calls a base type's
+ :c:member:`~PyTypeObject.tp_new` or :meth:`__new__`), you must *not* try to determine what method
to call using method resolution order at runtime. Always statically determine
- what type you are going to call, and call its :attr:`tp_new` directly, or via
+ what type you are going to call, and call its :c:member:`~PyTypeObject.tp_new` directly, or via
``type->tp_base->tp_new``. If you do not do this, Python subclasses of your
type that also inherit from other Python-defined classes may not work correctly.
(Specifically, you may not be able to create instances of such subclasses
@@ -390,11 +390,11 @@ We provide an initialization function::
return 0;
}
-by filling the :attr:`tp_init` slot. ::
+by filling the :c:member:`~PyTypeObject.tp_init` slot. ::
(initproc)Noddy_init, /* tp_init */
-The :attr:`tp_init` slot is exposed in Python as the :meth:`__init__` method. It
+The :c:member:`~PyTypeObject.tp_init` slot is exposed in Python as the :meth:`__init__` method. It
is used to initialize an object after it's created. Unlike the new method, we
can't guarantee that the initializer is called. The initializer isn't called
when unpickling objects and it can be overridden. Our initializer accepts
@@ -424,7 +424,7 @@ reference counts. When don't we have to do this?
* when we know that deallocation of the object [#]_ will not cause any calls
back into our type's code
-* when decrementing a reference count in a :attr:`tp_dealloc` handler when
+* when decrementing a reference count in a :c:member:`~PyTypeObject.tp_dealloc` handler when
garbage collection is not supported [#]_
We want to expose our instance variables as attributes. There are a
@@ -440,7 +440,7 @@ number of ways to do that. The simplest way is to define member definitions::
{NULL} /* Sentinel */
};
-and put the definitions in the :attr:`tp_members` slot::
+and put the definitions in the :c:member:`~PyTypeObject.tp_members` slot::
Noddy_members, /* tp_members */
@@ -516,7 +516,7 @@ definitions::
{NULL} /* Sentinel */
};
-and assign them to the :attr:`tp_methods` slot::
+and assign them to the :c:member:`~PyTypeObject.tp_methods` slot::
Noddy_methods, /* tp_methods */
@@ -611,7 +611,7 @@ We create an array of :c:type:`PyGetSetDef` structures::
{NULL} /* Sentinel */
};
-and register it in the :attr:`tp_getset` slot::
+and register it in the :c:member:`~PyTypeObject.tp_getset` slot::
Noddy_getseters, /* tp_getset */
@@ -628,7 +628,7 @@ We also remove the member definitions for these attributes::
{NULL} /* Sentinel */
};
-We also need to update the :attr:`tp_init` handler to only allow strings [#]_ to
+We also need to update the :c:member:`~PyTypeObject.tp_init` handler to only allow strings [#]_ to
be passed::
static int
@@ -747,7 +747,7 @@ simplified::
.. note::
- Note that the :attr:`tp_traverse` implementation must name its arguments exactly
+ Note that the :c:member:`~PyTypeObject.tp_traverse` implementation must name its arguments exactly
*visit* and *arg* in order to use :c:func:`Py_VISIT`. This is to encourage
uniformity across these boring implementations.
@@ -784,7 +784,7 @@ its reference count. We do this because, as was discussed earlier, if the
reference count drops to zero, we might cause code to run that calls back into
the object. In addition, because we now support garbage collection, we also
have to worry about code being run that triggers garbage collection. If garbage
-collection is run, our :attr:`tp_traverse` handler could get called. We can't
+collection is run, our :c:member:`~PyTypeObject.tp_traverse` handler could get called. We can't
take a chance of having :c:func:`Noddy_traverse` called when a member's reference
count has dropped to zero and its value hasn't been set to *NULL*.
@@ -804,8 +804,8 @@ Finally, we add the :const:`Py_TPFLAGS_HAVE_GC` flag to the class flags::
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /*tp_flags*/
-That's pretty much it. If we had written custom :attr:`tp_alloc` or
-:attr:`tp_free` slots, we'd need to modify them for cyclic-garbage collection.
+That's pretty much it. If we had written custom :c:member:`~PyTypeObject.tp_alloc` or
+:c:member:`~PyTypeObject.tp_free` slots, we'd need to modify them for cyclic-garbage collection.
Most extensions will use the versions automatically provided.
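At the Python level, the effect of the cyclic collector that these slots support can be sketched as follows (class and attribute names are arbitrary)::

    import gc

    class Node(object):
        pass

    a, b = Node(), Node()
    a.other, b.other = b, a   # build a reference cycle between the two objects
    del a, b                  # reference counting alone cannot reclaim the cycle
    print gc.collect()        # the cyclic collector finds and frees the garbage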
@@ -864,8 +864,8 @@ the :attr:`__init__` method of the base type.
This pattern is important when writing a type with custom :attr:`new` and
:attr:`dealloc` methods. The :attr:`new` method should not actually create the
-memory for the object with :attr:`tp_alloc`, that will be handled by the base
-class when calling its :attr:`tp_new`.
+memory for the object with :c:member:`~PyTypeObject.tp_alloc`, that will be handled by the base
+class when calling its :c:member:`~PyTypeObject.tp_new`.
When filling out the :c:func:`PyTypeObject` for the :class:`Shoddy` type, you see
a slot for :c:func:`tp_base`. Due to cross platform compiler issues, you can't
@@ -890,8 +890,8 @@ the module's :c:func:`init` function. ::
}
Before calling :c:func:`PyType_Ready`, the type structure must have the
-:attr:`tp_base` slot filled in. When we are deriving a new type, it is not
-necessary to fill out the :attr:`tp_alloc` slot with :c:func:`PyType_GenericNew`
+:c:member:`~PyTypeObject.tp_base` slot filled in. When we are deriving a new type, it is not
+necessary to fill out the :c:member:`~PyTypeObject.tp_alloc` slot with :c:func:`PyType_GenericNew`
-- the allocate function from the base type will be inherited.
After that, calling :c:func:`PyType_Ready` and adding the type object to the
@@ -934,7 +934,7 @@ that will be helpful in such a situation! ::
These fields tell the runtime how much memory to allocate when new objects of
this type are created. Python has some built-in support for variable length
-structures (think: strings, lists) which is where the :attr:`tp_itemsize` field
+structures (think: strings, lists) which is where the :c:member:`~PyTypeObject.tp_itemsize` field
comes in. This will be dealt with later. ::
char *tp_doc;
@@ -1032,13 +1032,13 @@ that creating a temporary string object to be written to a file is too
expensive.
These handlers are all optional, and most types at most need to implement the
-:attr:`tp_str` and :attr:`tp_repr` handlers. ::
+:c:member:`~PyTypeObject.tp_str` and :c:member:`~PyTypeObject.tp_repr` handlers. ::
reprfunc tp_repr;
reprfunc tp_str;
printfunc tp_print;
-The :attr:`tp_repr` handler should return a string object containing a
+The :c:member:`~PyTypeObject.tp_repr` handler should return a string object containing a
representation of the instance for which it is called. Here is a simple
example::
@@ -1049,15 +1049,15 @@ example::
obj->obj_UnderlyingDatatypePtr->size);
}
-If no :attr:`tp_repr` handler is specified, the interpreter will supply a
-representation that uses the type's :attr:`tp_name` and a uniquely-identifying
+If no :c:member:`~PyTypeObject.tp_repr` handler is specified, the interpreter will supply a
+representation that uses the type's :c:member:`~PyTypeObject.tp_name` and a uniquely-identifying
value for the object.
-The :attr:`tp_str` handler is to :func:`str` what the :attr:`tp_repr` handler
+The :c:member:`~PyTypeObject.tp_str` handler is to :func:`str` what the :c:member:`~PyTypeObject.tp_repr` handler
described above is to :func:`repr`; that is, it is called when Python code calls
:func:`str` on an instance of your object. Its implementation is very similar
-to the :attr:`tp_repr` function, but the resulting string is intended for human
-consumption. If :attr:`tp_str` is not specified, the :attr:`tp_repr` handler is
+to the :c:member:`~PyTypeObject.tp_repr` function, but the resulting string is intended for human
+consumption. If :c:member:`~PyTypeObject.tp_str` is not specified, the :c:member:`~PyTypeObject.tp_repr` handler is
used instead.
Here is a simple example::
@@ -1152,7 +1152,7 @@ type object to create :term:`descriptor`\s which are placed in the dictionary of
type object. Each descriptor controls access to one attribute of the instance
object. Each of the tables is optional; if all three are *NULL*, instances of
the type will only have attributes that are inherited from their base type, and
-should leave the :attr:`tp_getattro` and :attr:`tp_setattro` fields *NULL* as
+should leave the :c:member:`~PyTypeObject.tp_getattro` and :c:member:`~PyTypeObject.tp_setattro` fields *NULL* as
well, allowing the base type to handle attributes.
The tables are declared as three fields of the type object::
@@ -1161,7 +1161,7 @@ The tables are declared as three fields of the type object::
struct PyMemberDef *tp_members;
struct PyGetSetDef *tp_getset;
-If :attr:`tp_methods` is not *NULL*, it must refer to an array of
+If :c:member:`~PyTypeObject.tp_methods` is not *NULL*, it must refer to an array of
:c:type:`PyMethodDef` structures. Each entry in the table is an instance of this
structure::
@@ -1225,13 +1225,13 @@ combined using bitwise-OR.
single: WRITE_RESTRICTED
single: RESTRICTED
-An interesting advantage of using the :attr:`tp_members` table to build
+An interesting advantage of using the :c:member:`~PyTypeObject.tp_members` table to build
descriptors that are used at runtime is that any attribute defined this way can
have an associated doc string simply by providing the text in the table. An
application can use the introspection API to retrieve the descriptor from the
class object, and get the doc string using its :attr:`__doc__` attribute.
-As with the :attr:`tp_methods` table, a sentinel entry with a :attr:`name` value
+As with the :c:member:`~PyTypeObject.tp_methods` table, a sentinel entry with a :attr:`name` value
of *NULL* is required.
.. XXX Descriptors need to be explained in more detail somewhere, but not here.
@@ -1257,7 +1257,7 @@ portable to older versions of Python, and explains how the handler functions are
called, so that if you do need to extend their functionality, you'll understand
what needs to be done.
-The :attr:`tp_getattr` handler is called when the object requires an attribute
+The :c:member:`~PyTypeObject.tp_getattr` handler is called when the object requires an attribute
look-up. It is called in the same situations where the :meth:`__getattr__`
method of a class would be called.
@@ -1265,7 +1265,7 @@ A likely way to handle this is (1) to implement a set of functions (such as
:c:func:`newdatatype_getSize` and :c:func:`newdatatype_setSize` in the example
below), (2) provide a method table listing these functions, and (3) provide a
getattr function that returns the result of a lookup in that table. The method
-table uses the same structure as the :attr:`tp_methods` field of the type
+table uses the same structure as the :c:member:`~PyTypeObject.tp_methods` field of the type
object.
Here is an example::
@@ -1284,11 +1284,11 @@ Here is an example::
return Py_FindMethod(newdatatype_methods, (PyObject *)obj, name);
}
-The :attr:`tp_setattr` handler is called when the :meth:`__setattr__` or
+The :c:member:`~PyTypeObject.tp_setattr` handler is called when the :meth:`__setattr__` or
:meth:`__delattr__` method of a class instance would be called. When an
attribute should be deleted, the third parameter will be *NULL*. Here is an
example that simply raises an exception; if this were really all you wanted, the
-:attr:`tp_setattr` handler should be set to *NULL*. ::
+:c:member:`~PyTypeObject.tp_setattr` handler should be set to *NULL*. ::
static int
newdatatype_setattr(newdatatypeobject *obj, char *name, PyObject *v)
@@ -1305,7 +1305,7 @@ Object Comparison
cmpfunc tp_compare;
-The :attr:`tp_compare` handler is called when comparisons are needed and the
+The :c:member:`~PyTypeObject.tp_compare` handler is called when comparisons are needed and the
object does not implement the specific rich comparison method which matches the
requested comparison. (It is always used if defined and the
:c:func:`PyObject_Compare` or :c:func:`PyObject_Cmp` functions are used, or if
@@ -1316,7 +1316,7 @@ allowed to return arbitrary negative or positive integers for less than and
greater than, respectively; as of Python 2.2, this is no longer allowed. In the
future, other return values may be assigned a different meaning.)
-A :attr:`tp_compare` handler may raise an exception. In this case it should
+A :c:member:`~PyTypeObject.tp_compare` handler may raise an exception. In this case it should
return a negative value. The caller has to test for the exception using
:c:func:`PyErr_Occurred`.
@@ -1360,9 +1360,9 @@ that the slots are present and should be checked by the interpreter. (The flag
bit does not indicate that the slot values are non-*NULL*. The flag may be set
to indicate the presence of a slot, but a slot may still be unfilled.) ::
- PyNumberMethods tp_as_number;
- PySequenceMethods tp_as_sequence;
- PyMappingMethods tp_as_mapping;
+ PyNumberMethods *tp_as_number;
+ PySequenceMethods *tp_as_sequence;
+ PyMappingMethods *tp_as_mapping;
If you wish your object to be able to act like a number, a sequence, or a
mapping object, then you place the address of a structure that implements the C
@@ -1391,7 +1391,7 @@ instance of your data type. Here is a moderately pointless example::
This function is called when an instance of your data type is "called", for
example, if ``obj1`` is an instance of your data type and the Python script
-contains ``obj1('hello')``, the :attr:`tp_call` handler is invoked.
+contains ``obj1('hello')``, the :c:member:`~PyTypeObject.tp_call` handler is invoked.
This function takes three arguments:
@@ -1480,7 +1480,7 @@ those objects which do not benefit by weak referencing (such as numbers).
For an object to be weakly referencable, the extension must include a
:c:type:`PyObject\*` field in the instance structure for the use of the weak
reference mechanism; it must be initialized to *NULL* by the object's
-constructor. It must also set the :attr:`tp_weaklistoffset` field of the
+constructor. It must also set the :c:member:`~PyTypeObject.tp_weaklistoffset` field of the
corresponding type object to the offset of the field. For example, the instance
type is defined with the following structure::
@@ -1521,9 +1521,8 @@ The type constructor is responsible for initializing the weak reference list to
}
The only further addition is that the destructor needs to call the weak
-reference manager to clear any weak references. This should be done before any
-other parts of the destruction have occurred, but is only required if the weak
-reference list is non-*NULL*::
+reference manager to clear any weak references. This is only required if the
+weak reference list is non-*NULL*::
static void
instance_dealloc(PyInstanceObject *inst)
@@ -1567,7 +1566,7 @@ might be something like the following::
.. [#] This is true when we know that the object is a basic type, like a string or a
float.
-.. [#] We relied on this in the :attr:`tp_dealloc` handler in this example, because our
+.. [#] We relied on this in the :c:member:`~PyTypeObject.tp_dealloc` handler in this example, because our
type doesn't support garbage collection. Even if a type supports garbage
collection, there are calls that can be made to "untrack" the object from
garbage collection, however, these calls are advanced and not covered here.
diff --git a/Doc/faq/design.rst b/Doc/faq/design.rst
index 962b4ef..017c6d4 100644
--- a/Doc/faq/design.rst
+++ b/Doc/faq/design.rst
@@ -225,7 +225,7 @@ The major reason is history. Functions were used for those operations that were
generic for a group of types and which were intended to work even for objects
that didn't have methods at all (e.g. tuples). It is also convenient to have a
function that can readily be applied to an amorphous collection of objects when
-you use the functional features of Python (``map()``, ``apply()`` et al).
+you use the functional features of Python (``map()``, ``zip()`` et al).
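For instance, ``len()`` applies uniformly to objects of quite different types, so it can be mapped over a mixed collection (an illustrative sketch)::

    >>> map(len, ["three", (1, 2, 3), {"a": 1}])
    [5, 3, 1]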
In fact, implementing ``len()``, ``max()``, ``min()`` as a built-in function is
actually less code than implementing them as methods for each type. One can
@@ -297,8 +297,9 @@ use the ``join()`` function from the string module, which allows you to write ::
How fast are exceptions?
------------------------
-A try/except block is extremely efficient. Actually catching an exception is
-expensive. In versions of Python prior to 2.0 it was common to use this idiom::
+A try/except block is extremely efficient if no exceptions are raised. Actually
+catching an exception is expensive. In versions of Python prior to 2.0 it was
+common to use this idiom::
try:
value = mydict[key]
@@ -309,11 +310,10 @@ expensive. In versions of Python prior to 2.0 it was common to use this idiom::
This only made sense when you expected the dict to have the key almost all the
time. If that wasn't the case, you coded it like this::
- if mydict.has_key(key):
+ if key in mydict:
value = mydict[key]
else:
- mydict[key] = getvalue(key)
- value = mydict[key]
+ value = mydict[key] = getvalue(key)
.. note::
@@ -370,25 +370,22 @@ support for C.
Answer 2: Fortunately, there is `Stackless Python <http://www.stackless.com>`_,
which has a completely redesigned interpreter loop that avoids the C stack.
-It's still experimental but looks very promising. Although it is binary
-compatible with standard Python, it's still unclear whether Stackless will make
-it into the core -- maybe it's just too revolutionary.
-Why can't lambda forms contain statements?
-------------------------------------------
+Why can't lambda expressions contain statements?
+------------------------------------------------
-Python lambda forms cannot contain statements because Python's syntactic
+Python lambda expressions cannot contain statements because Python's syntactic
framework can't handle statements nested inside expressions. However, in
Python, this is not a serious problem. Unlike lambda forms in other languages,
where they add functionality, Python lambdas are only a shorthand notation if
you're too lazy to define a function.
Functions are already first class objects in Python, and can be declared in a
-local scope. Therefore the only advantage of using a lambda form instead of a
+local scope. Therefore the only advantage of using a lambda instead of a
locally-defined function is that you don't need to invent a name for the
function -- but that's just a local variable to which the function object (which
-is exactly the same type of object that a lambda form yields) is assigned!
+is exactly the same type of object that a lambda expression yields) is assigned!
Can Python be compiled to machine code, C or some other language?
@@ -685,7 +682,8 @@ Python 2.6 adds an :mod:`abc` module that lets you define Abstract Base Classes
(ABCs). You can then use :func:`isinstance` and :func:`issubclass` to check
whether an instance or a class implements a particular ABC. The
:mod:`collections` module defines a set of useful ABCs such as
-:class:`Iterable`, :class:`Container`, and :class:`MutableMapping`.
+:class:`~collections.Iterable`, :class:`~collections.Container`, and
+:class:`~collections.MutableMapping`.
For Python, many of the advantages of interface specifications can be obtained
by an appropriate test discipline for components. There is also a tool,
@@ -757,7 +755,7 @@ of each call to the function, and return the cached value if the same value is
requested again. This is called "memoizing", and can be implemented like this::
# Callers will never provide a third parameter for this function.
- def expensive (arg1, arg2, _cache={}):
+ def expensive(arg1, arg2, _cache={}):
if (arg1, arg2) in _cache:
return _cache[(arg1, arg2)]
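A complete version of the pattern might look like the following sketch; the body of ``expensive`` is only a placeholder computation::

    def expensive(arg1, arg2, _cache={}):
        if (arg1, arg2) in _cache:
            return _cache[(arg1, arg2)]

        # Placeholder for the real, expensive computation
        result = arg1 ** arg2
        _cache[(arg1, arg2)] = result
        return result

    print expensive(2, 10)   # computed and stored: 1024
    print expensive(2, 10)   # answered from the cache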
@@ -782,7 +780,7 @@ languages. For example::
try:
...
- if (condition): raise label() # goto label
+ if condition: raise label() # goto label
...
except label: # where to goto
pass
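Put together, a self-contained version of the trick might look like this (the function and the data are made up)::

    class label(Exception):
        pass                       # declare a "label" by declaring an exception

    def first_negative(numbers):
        try:
            for n in numbers:
                if n < 0:
                    raise label()  # goto label
            return None
        except label:              # where to goto
            return n

    print first_negative([3, 1, -4, 1, 5])   # prints -4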
@@ -913,8 +911,8 @@ There are several reasons to allow this.
When you have a literal value for a list, tuple, or dictionary spread across
multiple lines, it's easier to add more elements because you don't have to
-remember to add a comma to the previous line. The lines can also be sorted in
-your editor without creating a syntax error.
+remember to add a comma to the previous line. The lines can also be reordered
+without creating a syntax error.
Accidentally omitting the comma can lead to errors that are hard to diagnose.
For example::
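    # Illustrative sketch: the comma missing after "fie" makes Python join the
    # two adjacent string literals instead of reporting an error.
    x = [
        "fee",
        "fie"
        "foo",
        "fum"
    ]
    # x == ['fee', 'fiefoo', 'fum'] -- three elements, not four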
diff --git a/Doc/faq/extending.rst b/Doc/faq/extending.rst
index b79d716..4f3cabf 100644
--- a/Doc/faq/extending.rst
+++ b/Doc/faq/extending.rst
@@ -2,7 +2,9 @@
Extending/Embedding FAQ
=======================
-.. contents::
+.. only:: html
+
+ .. contents::
.. highlight:: c
diff --git a/Doc/faq/general.rst b/Doc/faq/general.rst
index df43196..aa8075c 100644
--- a/Doc/faq/general.rst
+++ b/Doc/faq/general.rst
@@ -4,7 +4,10 @@
General Python FAQ
==================
-.. contents::
+.. only:: html
+
+ .. contents::
+
General Information
===================
@@ -178,8 +181,8 @@ at http://docs.python.org/. PDF, plain text, and downloadable HTML versions are
also available at http://docs.python.org/download.html.
The documentation is written in reStructuredText and processed by `the Sphinx
-documentation tool <http://sphinx.pocoo.org/>`__. The reStructuredText source
-for the documentation is part of the Python source distribution.
+documentation tool <http://sphinx-doc.org/>`__. The reStructuredText source for
+the documentation is part of the Python source distribution.
I've never programmed before. Is there a Python tutorial?
@@ -265,9 +268,13 @@ Python references; or perhaps search for "Python" and "language".
Where in the world is www.python.org located?
---------------------------------------------
-It's currently in Amsterdam, graciously hosted by `XS4ALL
-<http://www.xs4all.nl>`_. Thanks to Thomas Wouters for his work in arranging
-python.org's hosting.
+The Python project's infrastructure is located all over the world.
+`www.python.org <http://www.python.org>`_ is currently in Amsterdam, graciously
+hosted by `XS4ALL <http://www.xs4all.nl>`_. `Upfront Systems
+<http://www.upfrontsystems.co.za>`_ hosts `bugs.python.org
+<http://bugs.python.org>`_. Most other Python services like `PyPI
+<https://pypi.python.org>`_ and hg.python.org are hosted by `Oregon State
+University Open Source Lab <https://osuosl.org>`_.
Why is it called Python?
@@ -464,7 +471,8 @@ that is written in Python using Tkinter. PythonWin is a Windows-specific IDE.
Emacs users will be happy to know that there is a very good Python mode for
Emacs. All of these programming environments provide syntax highlighting,
auto-indenting, and access to the interactive interpreter while coding. Consult
-http://www.python.org/editors/ for a full list of Python editing environments.
+`the Python wiki <https://wiki.python.org/moin/PythonEditors>`_ for a full list
+of Python editing environments.
If you want to discuss Python's use in education, you may be interested in
joining `the edu-sig mailing list
diff --git a/Doc/faq/gui.rst b/Doc/faq/gui.rst
index 50a30b0..42d95bd 100644
--- a/Doc/faq/gui.rst
+++ b/Doc/faq/gui.rst
@@ -4,7 +4,9 @@
Graphic User Interface FAQ
==========================
-.. contents::
+.. only:: html
+
+ .. contents::
What platform-independent GUI toolkits exist for Python?
========================================================
@@ -19,15 +21,15 @@ Tkinter
Standard builds of Python include an object-oriented interface to the Tcl/Tk
widget set, called Tkinter. This is probably the easiest to install and use.
For more info about Tk, including pointers to the source, see the Tcl/Tk home
-page at http://www.tcl.tk. Tcl/Tk is fully portable to the MacOS, Windows, and
-Unix platforms.
+page at http://www.tcl.tk. Tcl/Tk is fully portable to the Mac OS X, Windows,
+and Unix platforms.
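A minimal Tkinter program on Python 2 can be sketched as follows (the label text is arbitrary)::

    import Tkinter

    root = Tkinter.Tk()
    Tkinter.Label(root, text="Hello from Tkinter").pack()
    root.mainloop()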
wxWidgets
---------
wxWidgets (http://www.wxwidgets.org) is a free, portable GUI class
library written in C++ that provides a native look and feel on a
-number of platforms, with Windows, MacOS X, GTK, X11, all listed as
+number of platforms, with Windows, Mac OS X, GTK, X11, all listed as
current stable targets. Language bindings are available for a number
of languages including Python, Perl, Ruby, etc.
@@ -45,13 +47,15 @@ well as in freeware or shareware.
Qt
---
-There are bindings available for the Qt toolkit (`PyQt
-<http://www.riverbankcomputing.co.uk/software/pyqt/>`_) and for KDE (`PyKDE <http://www.riverbankcomputing.co.uk/software/pykde/intro>`__). If
-you're writing open source software, you don't need to pay for PyQt, but if you
-want to write proprietary applications, you must buy a PyQt license from
-`Riverbank Computing <http://www.riverbankcomputing.co.uk>`_ and (up to Qt 4.4;
-Qt 4.5 upwards is licensed under the LGPL license) a Qt license from `Trolltech
-<http://www.trolltech.com>`_.
+There are bindings available for the Qt toolkit (using either `PyQt
+<http://www.riverbankcomputing.co.uk/software/pyqt/>`_ or `PySide
+<http://www.pyside.org/>`_) and for KDE (`PyKDE <http://www.riverbankcomputing.co.uk/software/pykde/intro>`_).
+PyQt is currently more mature than PySide, but you must buy a PyQt license from
+`Riverbank Computing <http://www.riverbankcomputing.co.uk/software/pyqt/license>`_
+if you want to write proprietary applications. PySide is free for all applications.
+
+Qt 4.5 upwards is licensed under the LGPL license; also, commercial licenses
+are available from `Nokia <http://qt.nokia.com/>`_.
Gtk+
----
@@ -84,13 +88,9 @@ For OpenGL bindings, see `PyOpenGL <http://pyopengl.sourceforge.net>`_.
What platform-specific GUI toolkits exist for Python?
========================================================
-`The Mac port <http://python.org/download/mac>`_ by Jack Jansen has a rich and
-ever-growing set of modules that support the native Mac toolbox calls. The port
-supports MacOS X's Carbon libraries.
-
By installing the `PyObjc Objective-C bridge
-<http://pyobjc.sourceforge.net>`_, Python programs can use MacOS X's
-Cocoa libraries. See the documentation that comes with the Mac port.
+<http://pyobjc.sourceforge.net>`_, Python programs can use Mac OS X's
+Cocoa libraries.
:ref:`Pythonwin <windows-faq>` by Mark Hammond includes an interface to the
Microsoft Foundation Classes and a Python programming environment
diff --git a/Doc/faq/index.rst b/Doc/faq/index.rst
index caba425..46ed3db 100644
--- a/Doc/faq/index.rst
+++ b/Doc/faq/index.rst
@@ -1,10 +1,9 @@
+.. _faq-index:
+
###################################
Python Frequently Asked Questions
###################################
-:Release: |version|
-:Date: |today|
-
.. toctree::
:maxdepth: 1
diff --git a/Doc/faq/library.rst b/Doc/faq/library.rst
index 5b77eb8..0d80f76 100644
--- a/Doc/faq/library.rst
+++ b/Doc/faq/library.rst
@@ -4,7 +4,9 @@
Library and Extension FAQ
=========================
-.. contents::
+.. only:: html
+
+ .. contents::
General Library Questions
=========================
@@ -14,7 +16,7 @@ How do I find a module or application to perform task X?
Check :ref:`the Library Reference <library-index>` to see if there's a relevant
standard library module. (Eventually you'll learn what's in the standard
-library and will able to skip this step.)
+library and will be able to skip this step.)
For third-party packages, search the `Python Package Index
<http://pypi.python.org/pypi>`_ or try `Google <http://www.google.com>`_ or
@@ -28,7 +30,7 @@ Where is the math.py (socket.py, regex.py, etc.) source file?
If you can't find a source file for a module it may be a built-in or
dynamically loaded module implemented in C, C++ or other compiled language.
In this case you may not have the source file or it may be something like
-mathmodule.c, somewhere in a C source directory (not on the Python Path).
+:file:`mathmodule.c`, somewhere in a C source directory (not on the Python Path).
There are (at least) three kinds of modules in Python:
@@ -60,18 +62,18 @@ as the very first line of your file, using the pathname for where the Python
interpreter is installed on your platform.
If you would like the script to be independent of where the Python interpreter
-lives, you can use the "env" program. Almost all Unix variants support the
-following, assuming the Python interpreter is in a directory on the user's
-$PATH::
+lives, you can use the :program:`env` program. Almost all Unix variants support
+the following, assuming the Python interpreter is in a directory on the user's
+:envvar:`PATH`::
#!/usr/bin/env python
-*Don't* do this for CGI scripts. The $PATH variable for CGI scripts is often
-very minimal, so you need to use the actual absolute pathname of the
+*Don't* do this for CGI scripts. The :envvar:`PATH` variable for CGI scripts is
+often very minimal, so you need to use the actual absolute pathname of the
interpreter.
-Occasionally, a user's environment is so full that the /usr/bin/env program
-fails; or there's no env program at all. In that case, you can try the
+Occasionally, a user's environment is so full that the :program:`/usr/bin/env`
+program fails; or there's no env program at all. In that case, you can try the
following hack (due to Alex Rezinsky)::
#! /bin/sh
@@ -91,12 +93,12 @@ Is there a curses/termcap package for Python?
.. XXX curses *is* built by default, isn't it?
-For Unix variants: The standard Python source distribution comes with a curses
-module in the ``Modules/`` subdirectory, though it's not compiled by default
-(note that this is not available in the Windows distribution -- there is no
-curses module for Windows).
+For Unix variants the standard Python source distribution comes with a curses
+module in the :source:`Modules` subdirectory, though it's not compiled by default.
+(Note that this is not available in the Windows distribution -- there is no
+curses module for Windows.)
-The curses module supports basic curses features as well as many additional
+The :mod:`curses` module supports basic curses features as well as many additional
functions from ncurses and SYSV curses such as colour, alternative character set
support, pads, and mouse support. This means the module isn't compatible with
operating systems that only have BSD curses, but there don't seem to be any
@@ -110,7 +112,7 @@ Is there an equivalent to C's onexit() in Python?
-------------------------------------------------
The :mod:`atexit` module provides a register function that is similar to C's
-onexit.
+:c:func:`onexit`.
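A minimal sketch of its use (the handler name is arbitrary)::

    import atexit

    def cleanup():
        print "running exit handler"

    atexit.register(cleanup)       # cleanup() runs at normal interpreter exit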
Why don't my signal handlers work?
@@ -140,8 +142,8 @@ the expected output given in the docstring.
The :mod:`unittest` module is a fancier testing framework modelled on Java and
Smalltalk testing frameworks.
-For testing, it helps to write the program so that it may be easily tested by
-using good modular design. Your program should have almost all functionality
+To make testing easier, you should use good modular design in your program.
+Your program should have almost all functionality
encapsulated in either functions or class methods -- and this sometimes has the
surprising and delightful effect of making the program run faster (because local
variable accesses are faster than global accesses). Furthermore the program
@@ -157,7 +159,7 @@ at the bottom of the main module of your program.
Once your program is organized as a tractable collection of functions and class
behaviours you should write test functions that exercise the behaviours. A test
-suite can be associated with each module which automates a sequence of tests.
+suite that automates a sequence of tests can be associated with each module.
This sounds like a lot of work, but since Python is so terse and flexible it's
surprisingly easy. You can make coding much more pleasant and fun by writing
your test functions in parallel with the "production code", since this makes it
@@ -186,7 +188,7 @@ docstrings is `epydoc <http://epydoc.sf.net/>`_. `Sphinx
How do I get a single keypress at a time?
-----------------------------------------
-For Unix variants: There are several solutions. It's straightforward to do this
+For Unix variants there are several solutions. It's straightforward to do this
using curses, but curses is a fairly large module to learn. Here's a solution
without curses::
@@ -273,7 +275,7 @@ A simple fix is to add a tiny sleep to the start of the run function::
time.sleep(10)
-Instead of trying to guess how long a :func:`time.sleep` delay will be enough,
+Instead of trying to guess a good delay value for :func:`time.sleep`,
it's better to use some kind of semaphore mechanism. One idea is to use the
:mod:`Queue` module to create a queue object, let each thread append a token to
the queue when it finishes, and let the main thread read as many tokens from the
@@ -284,10 +286,10 @@ How do I parcel out work among a bunch of worker threads?
---------------------------------------------------------
Use the :mod:`Queue` module to create a queue containing a list of jobs. The
-:class:`~Queue.Queue` class maintains a list of objects with ``.put(obj)`` to
-add an item to the queue and ``.get()`` to return an item. The class will take
-care of the locking necessary to ensure that each job is handed out exactly
-once.
+:class:`~Queue.Queue` class maintains a list of objects and has a ``.put(obj)``
+method that adds items to the queue and a ``.get()`` method to return them.
+The class will take care of the locking necessary to ensure that each job is
+handed out exactly once.
Here's a trivial example::
@@ -296,7 +298,7 @@ Here's a trivial example::
# The worker thread gets jobs off the queue. When the queue is empty, it
# assumes there will be no more work and exits.
# (Realistically workers will run until terminated.)
- def worker ():
+ def worker():
print 'Running worker'
time.sleep(0.1)
while True:
@@ -329,6 +331,8 @@ Here's a trivial example::
When run, this will produce the following output:
+.. code-block:: none
+
Running worker
Running worker
Running worker
@@ -343,15 +347,15 @@ When run, this will produce the following output:
Worker <Thread(worker 1, started)> running with argument 5
...
-Consult the module's documentation for more details; the ``Queue`` class
-provides a featureful interface.
+Consult the module's documentation for more details; the :class:`~Queue.Queue`
+class provides a featureful interface.
What kinds of global value mutation are thread-safe?
----------------------------------------------------
-A global interpreter lock (GIL) is used internally to ensure that only one
-thread runs in the Python VM at a time. In general, Python offers to switch
+A :term:`global interpreter lock` (GIL) is used internally to ensure that only
+one thread runs in the Python VM at a time. In general, Python offers to switch
among threads only between bytecode instructions; how frequently it switches can
be set via :func:`sys.setcheckinterval`. Each bytecode instruction and
therefore all the C implementation code reached from each instruction is
@@ -396,7 +400,7 @@ Can't we get rid of the Global Interpreter Lock?
.. XXX mention multiprocessing
.. XXX link to dbeazley's talk about GIL?
-The Global Interpreter Lock (GIL) is often seen as a hindrance to Python's
+The :term:`global interpreter lock` (GIL) is often seen as a hindrance to Python's
deployment on high-end multiprocessor server machines, because a multi-threaded
Python program effectively only uses one CPU, due to the insistence that
(almost) all Python code can only run while the GIL is held.
@@ -459,7 +463,7 @@ To rename a file, use ``os.rename(old_path, new_path)``.
To truncate a file, open it using ``f = open(filename, "r+")``, and use
``f.truncate(offset)``; offset defaults to the current seek position. There's
also ``os.ftruncate(fd, offset)`` for files opened with :func:`os.open`, where
-``fd`` is the file descriptor (a small integer).
+*fd* is the file descriptor (a small integer).
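Putting these together, a short sketch (the file names are hypothetical)::

    import os

    os.rename("old_name.txt", "new_name.txt")

    f = open("new_name.txt", "r+")
    f.truncate(0)      # cut the file down to zero bytes
    f.close()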
The :mod:`shutil` module also contains a number of functions to work on files
including :func:`~shutil.copyfile`, :func:`~shutil.copytree`, and
@@ -493,7 +497,7 @@ The '>' in the format string forces big-endian data; the letter 'h' reads one
"short integer" (2 bytes), and 'l' reads one "long integer" (4 bytes) from the
string.
-For data that is more regular (e.g. a homogeneous list of ints or thefloats),
+For data that is more regular (e.g. a homogeneous list of ints or floats),
you can also use the :mod:`array` module.
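A hedged sketch of unpacking such a header (the file name is hypothetical)::

    import struct

    with open("record.dat", "rb") as f:
        data = f.read()

    # '>' forces big-endian; 'h' reads a 2-byte short, 'l' a 4-byte long
    start, length = struct.unpack(">hl", data[:6])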
@@ -503,7 +507,7 @@ I can't seem to use os.read() on a pipe created with os.popen(); why?
:func:`os.read` is a low-level function which takes a file descriptor, a small
integer representing the opened file. :func:`os.popen` creates a high-level
file object, the same type returned by the built-in :func:`open` function.
-Thus, to read n bytes from a pipe p created with :func:`os.popen`, you need to
+Thus, to read *n* bytes from a pipe *p* created with :func:`os.popen`, you need to
use ``p.read(n)``.
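A short sketch, assuming a Unix-like system where ``ls`` is available::

    import os

    p = os.popen("ls -l")    # p is a file object, not a raw file descriptor
    chunk = p.read(64)       # so read from it with p.read(n), not os.read()
    p.close()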
@@ -522,9 +526,9 @@ Use the :mod:`popen2` module. For example::
Warning: in general it is unwise to do this because you can easily cause a
deadlock where your process is blocked waiting for output from the child while
-the child is blocked waiting for input from you. This can be caused because the
-parent expects the child to output more text than it does, or it can be caused
-by data being stuck in stdio buffers due to lack of flushing. The Python parent
+the child is blocked waiting for input from you. This can be caused by the
+parent expecting the child to output more text than it does or by data being
+stuck in stdio buffers due to lack of flushing. The Python parent
can of course explicitly flush the data it sends to the child before it reads
any output, but if the child is a naive C program it may have been written to
never explicitly flush its output, even if it is interactive, since flushing is
@@ -544,8 +548,8 @@ place to insert such a call would be before calling ``popen2`` again.
In many cases, all you really need is to run some data through a command and get
the result back. Unless the amount of data is very large, the easiest way to do
this is to write it to a temporary file and run the command with that temporary
-file as input. The standard module :mod:`tempfile` exports a ``mktemp()``
-function to generate unique temporary file names. ::
+file as input. The standard module :mod:`tempfile` exports a
+:func:`~tempfile.mktemp` function to generate unique temporary file names. ::
import tempfile
import os
@@ -636,7 +640,7 @@ and client-side web systems.
.. XXX check if wiki page is still up to date
A summary of available frameworks is maintained by Paul Boddie at
-http://wiki.python.org/moin/WebProgramming .
+http://wiki.python.org/moin/WebProgramming\ .
Cameron Laird maintains a useful set of pages about Python web technologies at
http://phaseit.net/claird/comp.lang.python/web_python.
@@ -673,15 +677,12 @@ Yes. Here's a simple example that uses httplib::
sys.stdout.write(httpobj.getfile().read())
Note that in general for percent-encoded POST operations, query strings must be
-quoted using :func:`urllib.quote`. For example to send name="Guy Steele, Jr."::
+quoted using :func:`urllib.urlencode`. For example, to send
+``name=Guy Steele, Jr.``::
- >>> from urllib import quote
- >>> x = quote("Guy Steele, Jr.")
- >>> x
- 'Guy%20Steele,%20Jr.'
- >>> query_string = "name="+x
- >>> query_string
- 'name=Guy%20Steele,%20Jr.'
+ >>> import urllib
+ >>> urllib.urlencode({'name': 'Guy Steele, Jr.'})
+ 'name=Guy+Steele%2C+Jr.'
What module should I use to help with generating HTML?
@@ -689,19 +690,8 @@ What module should I use to help with generating HTML?
.. XXX add modern template languages
-There are many different modules available:
-
-* HTMLgen is a class library of objects corresponding to all the HTML 3.2 markup
- tags. It's used when you are writing in Python and wish to synthesize HTML
- pages for generating a web or for CGI forms, etc.
-
-* DocumentTemplate and Zope Page Templates are two different systems that are
- part of Zope.
-
-* Quixote's PTL uses Python syntax to assemble strings of text.
-
-Consult the `Web Programming wiki pages
-<http://wiki.python.org/moin/WebProgramming>`_ for more links.
+You can find a collection of useful links on the `Web Programming wiki page
+<http://wiki.python.org/moin/WebProgramming>`_.
How do I send mail from a Python script?
@@ -730,7 +720,7 @@ work on any host that supports an SMTP listener. ::
server.quit()
A Unix-only alternative uses sendmail. The location of the sendmail program
-varies between systems; sometimes it is ``/usr/lib/sendmail``, sometime
+varies between systems; sometimes it is ``/usr/lib/sendmail``, sometimes
``/usr/sbin/sendmail``. The sendmail manual page will help you out. Here's
some sample code::
@@ -797,7 +787,7 @@ A more awkward way of doing things is to use pickle's little sister, marshal.
The :mod:`marshal` module provides very fast ways to store noncircular basic
Python types to files and strings, and back again. Although marshal does not do
fancy things like store instances or handle shared references properly, it does
-run extremely fast. For example loading a half megabyte of data may take less
+run extremely fast. For example, loading a half megabyte of data may take less
than a third of a second. This often beats doing something more complex and
general such as using gdbm with pickle/shelve.
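A small sketch of round-tripping data with :mod:`marshal` (the data is made up)::

    import marshal

    table = {"name": "cache", "ids": range(1000)}

    with open("table.marshal", "wb") as f:
        marshal.dump(table, f)

    with open("table.marshal", "rb") as f:
        restored = marshal.load(f)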
@@ -807,9 +797,9 @@ Why is cPickle so slow?
.. XXX update this, default protocol is 2/3
-The default format used by the pickle module is a slow one that results in
-readable pickles. Making it the default, but it would break backward
-compatibility::
+By default :mod:`pickle` uses a relatively old and slow format for backward
+compatibility. You can however specify other protocol versions that are
+faster::
largeString = 'z' * (100 * 1024)
myPickle = cPickle.dumps(largeString, protocol=1)
diff --git a/Doc/faq/programming.rst b/Doc/faq/programming.rst
index a0898c5..0f3a331 100644
--- a/Doc/faq/programming.rst
+++ b/Doc/faq/programming.rst
@@ -4,7 +4,9 @@
Programming FAQ
===============
-.. contents::
+.. only:: html
+
+ .. contents::
General Questions
=================
@@ -147,7 +149,7 @@ There is a page on the wiki devoted to `performance tips
<http://wiki.python.org/moin/PythonSpeed/PerformanceTips>`_.
Guido van Rossum has written up an anecdote related to optimization at
-http://www.python.org/doc/essays/list2str.html.
+http://www.python.org/doc/essays/list2str.
One thing to notice is that function and (especially) method calls are rather
expensive; if you have designed a purely OO interface with lots of tiny
@@ -352,6 +354,58 @@ an imported module. This clutter would defeat the usefulness of the ``global``
declaration for identifying side-effects.
+Why do lambdas defined in a loop with different values all return the same result?
+----------------------------------------------------------------------------------
+
+Assume you use a for loop to define a few different lambdas (or even plain
+functions), e.g.::
+
+ >>> squares = []
+ >>> for x in range(5):
+ ... squares.append(lambda: x**2)
+
+This gives you a list that contains 5 lambdas that calculate ``x**2``. You
+might expect that, when called, they would return, respectively, ``0``, ``1``,
+``4``, ``9``, and ``16``. However, when you actually try you will see that
+they all return ``16``::
+
+ >>> squares[2]()
+ 16
+ >>> squares[4]()
+ 16
+
+This happens because ``x`` is not local to the lambdas, but is defined in
+the outer scope, and it is accessed when the lambda is called --- not when it
+is defined. At the end of the loop, the value of ``x`` is ``4``, so all the
+functions now return ``4**2``, i.e. ``16``. You can also verify this by
+changing the value of ``x`` and see how the results of the lambdas change::
+
+ >>> x = 8
+ >>> squares[2]()
+ 64
+
+In order to avoid this, you need to save the values in variables local to the
+lambdas, so that they don't rely on the value of the global ``x``::
+
+ >>> squares = []
+ >>> for x in range(5):
+ ... squares.append(lambda n=x: n**2)
+
+Here, ``n=x`` creates a new variable ``n`` local to the lambda and computed
+when the lambda is defined so that it has the same value that ``x`` had at
+that point in the loop. This means that the value of ``n`` will be ``0``
+in the first lambda, ``1`` in the second, ``2`` in the third, and so on.
+Therefore each lambda will now return the correct result::
+
+ >>> squares[2]()
+ 4
+ >>> squares[4]()
+ 16
+
+Note that this behaviour is not peculiar to lambdas, but applies to regular
+functions too.
+
+
How do I share global variables across modules?
------------------------------------------------
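The usual approach is to keep the shared state in one dedicated module and import that module wherever it is needed; a minimal sketch with hypothetical module names::

    # config.py
    debug = False

    # main.py
    import config
    config.debug = True

    # anywhere_else.py
    import config
    if config.debug:
        print "debugging is enabled"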
@@ -469,6 +523,31 @@ In the unlikely case that you care about Python versions older than 2.0, use
apply(g, (x,)+args, kwargs)
+.. index::
+ single: argument; difference from parameter
+ single: parameter; difference from argument
+
+.. _faq-argument-vs-parameter:
+
+What is the difference between arguments and parameters?
+--------------------------------------------------------
+
+:term:`Parameters <parameter>` are defined by the names that appear in a
+function definition, whereas :term:`arguments <argument>` are the values
+actually passed to a function when calling it. Parameters define what types of
+arguments a function can accept. For example, given the function definition::
+
+ def func(foo, bar=None, **kwargs):
+ pass
+
+*foo*, *bar* and *kwargs* are parameters of ``func``. However, when calling
+``func``, for example::
+
+ func(42, bar=314, extra=somevar)
+
+the values ``42``, ``314``, and ``somevar`` are arguments.
+
+
How do I write a function with output parameters (call by reference)?
---------------------------------------------------------------------
@@ -669,11 +748,11 @@ Comma is not an operator in Python. Consider this session::
Since the comma is not an operator, but a separator between expressions the
above is evaluated as if you had entered::
- >>> ("a" in "b"), "a"
+ ("a" in "b"), "a"
not::
- >>> "a" in ("b", "a")
+ "a" in ("b", "a")
The same is true of the various assignment operators (``=``, ``+=`` etc). They
are not truly operators but syntactic delimiters in assignment statements.
@@ -692,52 +771,6 @@ Yes, this feature was added in Python 2.5. The syntax would be as follows::
For versions previous to 2.5 the answer would be 'No'.
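As a concrete illustration of the 2.5 syntax (the values are arbitrary)::

    age = 20
    status = "adult" if age >= 18 else "minor"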
-.. XXX remove rest?
-
-In many cases you can mimic ``a ? b : c`` with ``a and b or c``, but there's a
-flaw: if *b* is zero (or empty, or ``None`` -- anything that tests false) then
-*c* will be selected instead. In many cases you can prove by looking at the
-code that this can't happen (e.g. because *b* is a constant or has a type that
-can never be false), but in general this can be a problem.
-
-Tim Peters (who wishes it was Steve Majewski) suggested the following solution:
-``(a and [b] or [c])[0]``. Because ``[b]`` is a singleton list it is never
-false, so the wrong path is never taken; then applying ``[0]`` to the whole
-thing gets the *b* or *c* that you really wanted. Ugly, but it gets you there
-in the rare cases where it is really inconvenient to rewrite your code using
-'if'.
-
-The best course is usually to write a simple ``if...else`` statement. Another
-solution is to implement the ``?:`` operator as a function::
-
- def q(cond, on_true, on_false):
- if cond:
- if not isfunction(on_true):
- return on_true
- else:
- return on_true()
- else:
- if not isfunction(on_false):
- return on_false
- else:
- return on_false()
-
-In most cases you'll pass b and c directly: ``q(a, b, c)``. To avoid evaluating
-b or c when they shouldn't be, encapsulate them within a lambda function, e.g.:
-``q(a, lambda: b, lambda: c)``.
-
-It has been asked *why* Python has no if-then-else expression. There are
-several answers: many languages do just fine without one; it can easily lead to
-less readable code; no sufficiently "Pythonic" syntax has been discovered; a
-search of the standard library found remarkably few places where using an
-if-then-else expression would make the code more understandable.
-
-In 2002, :pep:`308` was written proposing several possible syntaxes and the
-community was asked to vote on the issue. The vote was inconclusive. Most
-people liked one of the syntaxes, but also hated other syntaxes; many votes
-implied that people preferred no ternary operator rather than having a syntax
-they hated.
-
Is it possible to write obfuscated one-liners in Python?
--------------------------------------------------------
@@ -864,6 +897,7 @@ How do I modify a string in place?
You can't, because strings are immutable. If you need an object with this
ability, try converting the string to a list or use the array module::
+ >>> import io
>>> s = "Hello, world"
>>> a = list(s)
>>> print a
@@ -876,8 +910,8 @@ ability, try converting the string to a list or use the array module::
>>> a = array.array('c', s)
>>> print a
array('c', 'Hello, world')
- >>> a[0] = 'y' ; print a
- array('c', 'yello world')
+ >>> a[0] = 'y'; print a
+ array('c', 'yello, world')
>>> a.tostring()
'yello, world'
@@ -1139,7 +1173,7 @@ How do I create a multidimensional list?
You probably tried to make a multidimensional array like this::
- A = [[None] * 2] * 3
+ >>> A = [[None] * 2] * 3
This looks correct if you print it::
@@ -1171,7 +1205,7 @@ use a list comprehension::
A = [[None] * w for i in range(h)]
Or, you can use an extension that provides a matrix datatype; `Numeric Python
-<http://numpy.scipy.org/>`_ is the best known.
+<http://www.numpy.org/>`_ is the best known.
How do I apply a method to a sequence of objects?
@@ -1190,6 +1224,92 @@ More generically, you can try the following function::
return map(apply, methods, [arguments]*nobjects)
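In straightforward cases a plain list comprehension is usually clearer; a sketch with made-up names::

    class Greeter(object):
        def greet(self, name):
            return "Hello, %s" % name

    objects = [Greeter(), Greeter(), Greeter()]
    results = [obj.greet("world") for obj in objects]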
+Why does a_tuple[i] += ['item'] raise an exception when the addition works?
+---------------------------------------------------------------------------
+
+This is because of a combination of the fact that augmented assignment
+operators are *assignment* operators, and the difference between mutable and
+immutable objects in Python.
+
+This discussion applies in general when augmented assignment operators are
+applied to elements of a tuple that point to mutable objects, but we'll use
+a ``list`` and ``+=`` as our exemplar.
+
+If you wrote::
+
+ >>> a_tuple = (1, 2)
+ >>> a_tuple[0] += 1
+ Traceback (most recent call last):
+ ...
+ TypeError: 'tuple' object does not support item assignment
+
+The reason for the exception should be immediately clear: ``1`` is added to the
+object ``a_tuple[0]`` points to (``1``), producing the result object, ``2``,
+but when we attempt to assign the result of the computation, ``2``, to element
+``0`` of the tuple, we get an error because we can't change what an element of
+a tuple points to.
+
+Under the covers, what this augmented assignment statement is doing is
+approximately this::
+
+ >>> result = a_tuple[0] + 1
+ >>> a_tuple[0] = result
+ Traceback (most recent call last):
+ ...
+ TypeError: 'tuple' object does not support item assignment
+
+It is the assignment part of the operation that produces the error, since a
+tuple is immutable.
+
+When you write something like::
+
+ >>> a_tuple = (['foo'], 'bar')
+ >>> a_tuple[0] += ['item']
+ Traceback (most recent call last):
+ ...
+ TypeError: 'tuple' object does not support item assignment
+
+The exception is a bit more surprising, and even more surprising is the fact
+that even though there was an error, the append worked::
+
+ >>> a_tuple[0]
+ ['foo', 'item']
+
+To see why this happens, you need to know that (a) if an object implements an
+``__iadd__`` magic method, it gets called when the ``+=`` augmented assignment
+is executed, and its return value is what gets used in the assignment statement;
+and (b) for lists, ``__iadd__`` is equivalent to calling ``extend`` on the list
+and returning the list. That's why we say that for lists, ``+=`` is a
+"shorthand" for ``list.extend``::
+
+ >>> a_list = []
+ >>> a_list += [1]
+ >>> a_list
+ [1]
+
+This is equivalent to::
+
+ >>> result = a_list.__iadd__([1])
+ >>> a_list = result
+
+The object pointed to by a_list has been mutated, and the pointer to the
+mutated object is assigned back to ``a_list``. The end result of the
+assignment is a no-op, since it is a pointer to the same object that ``a_list``
+was previously pointing to, but the assignment still happens.
+
+Thus, in our tuple example what is happening is equivalent to::
+
+ >>> result = a_tuple[0].__iadd__(['item'])
+ >>> a_tuple[0] = result
+ Traceback (most recent call last):
+ ...
+ TypeError: 'tuple' object does not support item assignment
+
+The ``__iadd__`` succeeds, and thus the list is extended, but even though
+``result`` points to the same object that ``a_tuple[0]`` already points to,
+that final assignment still results in an error, because tuples are immutable.
+
+
Dictionaries
============
@@ -1604,6 +1724,32 @@ You can program the class's constructor to keep track of all instances by
keeping a list of weak references to each instance.
+Why does the result of ``id()`` appear to be not unique?
+--------------------------------------------------------
+
+The :func:`id` builtin returns an integer that is guaranteed to be unique during
+the lifetime of the object. Since in CPython, this is the object's memory
+address, it happens frequently that after an object is deleted from memory, the
+next freshly created object is allocated at the same position in memory. This
+is illustrated by this example:
+
+>>> id(1000)
+13901272
+>>> id(2000)
+13901272
+
+The two ids belong to different integer objects that are created before, and
+deleted immediately after execution of the ``id()`` call. To be sure that
+objects whose id you want to examine are still alive, create another reference
+to the object:
+
+>>> a = 1000; b = 2000
+>>> id(a)
+13901272
+>>> id(b)
+13891296
+
+
Modules
=======
@@ -1621,13 +1767,13 @@ file is automatic if you're importing a module and Python has the ability
(permissions, free space, etc...) to write the compiled module back to the
directory.
-Running Python on a top level script is not considered an import and no ``.pyc``
-will be created. For example, if you have a top-level module ``abc.py`` that
-imports another module ``xyz.py``, when you run abc, ``xyz.pyc`` will be created
-since xyz is imported, but no ``abc.pyc`` file will be created since ``abc.py``
-isn't being imported.
+Running Python on a top level script is not considered an import and no
+``.pyc`` will be created. For example, if you have a top-level module
+``foo.py`` that imports another module ``xyz.py``, when you run ``foo``,
+``xyz.pyc`` will be created since ``xyz`` is imported, but no ``foo.pyc`` file
+will be created since ``foo.py`` isn't being imported.
-If you need to create abc.pyc -- that is, to create a .pyc file for a module
+If you need to create ``foo.pyc`` -- that is, to create a ``.pyc`` file for a module
that is not imported -- you can, using the :mod:`py_compile` and
:mod:`compileall` modules.
@@ -1635,9 +1781,9 @@ The :mod:`py_compile` module can manually compile any module. One way is to use
the ``compile()`` function in that module interactively::
>>> import py_compile
- >>> py_compile.compile('abc.py')
+ >>> py_compile.compile('foo.py') # doctest: +SKIP
-This will write the ``.pyc`` to the same location as ``abc.py`` (or you can
+This will write the ``.pyc`` to the same location as ``foo.py`` (or you can
override that with the optional parameter ``cfile``).
You can also automatically compile all files in a directory or directories using
diff --git a/Doc/faq/windows.rst b/Doc/faq/windows.rst
index dbb7bb8..0379bac 100644
--- a/Doc/faq/windows.rst
+++ b/Doc/faq/windows.rst
@@ -6,16 +6,16 @@
Python on Windows FAQ
=====================
-.. contents::
+.. only:: html
+
+ .. contents::
How do I run a Python program under Windows?
--------------------------------------------
This is not necessarily a straightforward question. If you are already familiar
with running programs from the Windows command line then everything will seem
-obvious; otherwise, you might need a little more guidance. There are also
-differences between Windows 95, 98, NT, ME, 2000 and XP which can add to the
-confusion.
+obvious; otherwise, you might need a little more guidance.
.. sidebar:: |Python Development on XP|_
:subtitle: `Python Development on XP`_
@@ -32,7 +32,7 @@ confusion.
Unless you use some sort of integrated development environment, you will end up
*typing* Windows commands into what is variously referred to as a "DOS window"
or "Command prompt window". Usually you can create such a window from your
-Start menu; under Windows 2000 the menu selection is :menuselection:`Start -->
+Start menu; under Windows 7 the menu selection is :menuselection:`Start -->
Programs --> Accessories --> Command Prompt`. You should be able to recognize
when you have started such a window because you will see a Windows "command
prompt", which usually looks like this::
@@ -42,23 +42,27 @@ prompt", which usually looks like this::
The letter may be different, and there might be other things after it, so you
might just as easily see something like::
- D:\Steve\Projects\Python>
+ D:\YourName\Projects\Python>
depending on how your computer has been set up and what else you have recently
done with it. Once you have started such a window, you are well on the way to
running Python programs.
You need to realize that your Python scripts have to be processed by another
-program called the Python interpreter. The interpreter reads your script,
+program called the Python *interpreter*. The interpreter reads your script,
compiles it into bytecodes, and then executes the bytecodes to run your
program. So, how do you arrange for the interpreter to handle your Python?
First, you need to make sure that your command window recognises the word
"python" as an instruction to start the interpreter. If you have opened a
command window, you should try entering the command ``python`` and hitting
-return. You should then see something like::
+return::
+
+ C:\Users\YourName> python
+
+You should then see something like::
- Python 2.2 (#28, Dec 21 2001, 12:21:22) [MSC 32 bit (Intel)] on win32
+ Python 2.7.3 (default, Apr 10 2012, 22.71:26) [MSC v.1500 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license" for more information.
>>>
@@ -78,7 +82,7 @@ key down while you enter a Z, then hit the "Enter" key to get back to your
Windows command prompt.
You may also find that you have a Start-menu entry such as :menuselection:`Start
---> Programs --> Python 2.2 --> Python (command line)` that results in you
+--> Programs --> Python 2.7 --> Python (command line)` that results in you
seeing the ``>>>`` prompt in a new window. If so, the window will disappear
after you enter the Ctrl-Z character; Windows is running a single "python"
command in the window, and closes it when you terminate the interpreter.
@@ -86,8 +90,7 @@ command in the window, and closes it when you terminate the interpreter.
If the ``python`` command, instead of displaying the interpreter prompt ``>>>``,
gives you a message like::
- 'python' is not recognized as an internal or external command,
- operable program or batch file.
+ 'python' is not recognized as an internal or external command, operable program or batch file.
.. sidebar:: |Adding Python to DOS Path|_
:subtitle: `Adding Python to DOS Path`_
@@ -116,115 +119,33 @@ then the command ::
dir C:\py*
will probably tell you where it is installed; the usual location is something
-like ``C:\Python23``. Otherwise you will be reduced to a search of your whole
+like ``C:\Python27``. Otherwise you will be reduced to a search of your whole
disk ... use :menuselection:`Tools --> Find` or hit the :guilabel:`Search`
button and look for "python.exe". Supposing you discover that Python is
-installed in the ``C:\Python23`` directory (the default at the time of writing),
+installed in the ``C:\Python27`` directory (the default at the time of writing),
you should make sure that entering the command ::
- c:\Python23\python
+ c:\Python27\python
starts up the interpreter as above (and don't forget you'll need a "CTRL-Z" and
-an "Enter" to get out of it). Once you have verified the directory, you need to
-add it to the start-up routines your computer goes through. For older versions
-of Windows the easiest way to do this is to edit the ``C:\AUTOEXEC.BAT``
-file. You would want to add a line like the following to ``AUTOEXEC.BAT``::
-
- PATH C:\Python23;%PATH%
-
-For Windows NT, 2000 and (I assume) XP, you will need to add a string such as ::
-
- ;C:\Python23
-
-to the current setting for the PATH environment variable, which you will find in
-the properties window of "My Computer" under the "Advanced" tab. Note that if
-you have sufficient privilege you might get a choice of installing the settings
-either for the Current User or for System. The latter is preferred if you want
-everybody to be able to run Python on the machine.
-
-If you aren't confident doing any of these manipulations yourself, ask for help!
-At this stage you may want to reboot your system to make absolutely sure the new
-setting has taken effect. You probably won't need to reboot for Windows NT, XP
-or 2000. You can also avoid it in earlier versions by editing the file
-``C:\WINDOWS\COMMAND\CMDINIT.BAT`` instead of ``AUTOEXEC.BAT``.
-
-You should now be able to start a new command window, enter ``python`` at the
-``C:\>`` (or whatever) prompt, and see the ``>>>`` prompt that indicates the
-Python interpreter is reading interactive commands.
-
-Let's suppose you have a program called ``pytest.py`` in directory
-``C:\Steve\Projects\Python``. A session to run that program might look like
-this::
-
- C:\> cd \Steve\Projects\Python
- C:\Steve\Projects\Python> python pytest.py
-
-Because you added a file name to the command to start the interpreter, when it
-starts up it reads the Python script in the named file, compiles it, executes
-it, and terminates, so you see another ``C:\>`` prompt. You might also have
-entered ::
-
- C:\> python \Steve\Projects\Python\pytest.py
-
-if you hadn't wanted to change your current directory.
-
-Under NT, 2000 and XP you may well find that the installation process has also
-arranged that the command ``pytest.py`` (or, if the file isn't in the current
-directory, ``C:\Steve\Projects\Python\pytest.py``) will automatically recognize
-the ".py" extension and run the Python interpreter on the named file. Using this
-feature is fine, but *some* versions of Windows have bugs which mean that this
-form isn't exactly equivalent to using the interpreter explicitly, so be
-careful.
-
-The important things to remember are:
-
-1. Start Python from the Start Menu, or make sure the PATH is set correctly so
- Windows can find the Python interpreter. ::
-
- python
-
- should give you a '>>>' prompt from the Python interpreter. Don't forget the
- CTRL-Z and ENTER to terminate the interpreter (and, if you started the window
- from the Start Menu, make the window disappear).
-
-2. Once this works, you run programs with commands::
-
- python {program-file}
-
-3. When you know the commands to use you can build Windows shortcuts to run the
- Python interpreter on any of your scripts, naming particular working
- directories, and adding them to your menus. Take a look at ::
-
- python --help
-
- if your needs are complex.
-
-4. Interactive mode (where you see the ``>>>`` prompt) is best used for checking
- that individual statements and expressions do what you think they will, and
- for developing code by experiment.
+an "Enter" to get out of it). Once you have verified the directory, you can
+add it to the system path to make it easier to start Python by just running
+the ``python`` command. This is currently an option in the installer as of
+CPython 2.7.
+More information about environment variables can be found on the
+:ref:`Using Python on Windows <setting-envvars>` page.
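+
+For example, once the directory is on your path, a quick way to confirm which
+interpreter the ``python`` command starts is a one-liner like this (the prompt
+and install directory shown are only illustrative)::
+
+   C:\Users\YourName> python -c "import sys; print(sys.executable)"
+   C:\Python27\python.exe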
How do I make Python scripts executable?
----------------------------------------
-On Windows 2000, the standard Python installer already associates the .py
+On Windows, the standard Python installer already associates the .py
extension with a file type (Python.File) and gives that file type an open
command that runs the interpreter (``D:\Program Files\Python\python.exe "%1"
%*``). This is enough to make scripts executable from the command prompt as
'foo.py'. If you'd rather be able to execute the script by simply typing 'foo'
with no extension you need to add .py to the PATHEXT environment variable.
-On Windows NT, the steps taken by the installer as described above allow you to
-run a script with 'foo.py', but a longtime bug in the NT command processor
-prevents you from redirecting the input or output of any script executed in this
-way. This is often important.
-
-The incantation for making a Python script executable under WinNT is to give the
-file an extension of .cmd and add the following as the first line::
-
- @setlocal enableextensions & python -x %~f0 %* & goto :EOF
-
-
Why does Python sometimes take so long to start?
------------------------------------------------
@@ -242,22 +163,11 @@ McAfee, when configured to scan all file system read activity, is a particular
offender.
-Where is Freeze for Windows?
-----------------------------
-
-"Freeze" is a program that allows you to ship a Python program as a single
-stand-alone executable file. It is *not* a compiler; your programs don't run
-any faster, but they are more easily distributable, at least to platforms with
-the same OS and CPU. Read the README file of the freeze program for more
-disclaimers.
-
-You can use freeze on Windows, but you must download the source tree (see
-http://www.python.org/download/source). The freeze program is in the
-``Tools\freeze`` subdirectory of the source tree.
-
-You need the Microsoft VC++ compiler, and you probably need to build Python.
-The required project files are in the PCbuild directory.
+How do I make an executable from a Python script?
+-------------------------------------------------
+See http://www.py2exe.org/ for a distutils extension that allows you
+to create console and GUI executables from Python code.
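+
+A typical py2exe ``setup.py`` looks something like this (a sketch based on
+py2exe's documented usage; ``myscript.py`` is a placeholder for your own
+script)::
+
+   from distutils.core import setup
+   import py2exe  # registers the "py2exe" distutils command
+
+   setup(console=['myscript.py'])
+
+Running ``python setup.py py2exe`` should then build the executable and its
+supporting files in a ``dist`` subdirectory.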
Is a ``*.pyd`` file the same as a DLL?
--------------------------------------
@@ -288,7 +198,7 @@ Embedding the Python interpreter in a Windows app can be summarized as follows:
be a DLL to handle importing modules that are themselves DLL's. (This is the
first key undocumented fact.) Instead, link to :file:`python{NN}.dll`; it is
typically installed in ``C:\Windows\System``. *NN* is the Python version, a
- number such as "23" for Python 2.3.
+ number such as "27" for Python 2.7.
You can link to Python in two different ways. Load-time linking means
linking against :file:`python{NN}.lib`, while run-time linking means linking
@@ -333,7 +243,7 @@ Embedding the Python interpreter in a Windows app can be summarized as follows:
...
Py_Initialize(); // Initialize Python.
initmyAppc(); // Initialize (import) the helper class.
- PyRun_SimpleString("import myApp") ; // Import the shadow class.
+ PyRun_SimpleString("import myApp"); // Import the shadow class.
5. There are two problems with Python's C API which will become apparent if you
use a compiler other than MSVC, the compiler used to build pythonNN.dll.
@@ -372,47 +282,6 @@ Embedding the Python interpreter in a Windows app can be summarized as follows:
object that supports read and write, so all you need is a Python object
(defined in your extension module) that contains read() and write() methods.
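+
+   A minimal Python-side sketch of the output half of such an object (the
+   class name and the list-based storage are only illustrative; a real
+   embedding application would forward the text to its own output window)::
+
+      import sys
+
+      class GuiOutput(object):
+          """Stand-in for the object your extension module would define."""
+          def __init__(self):
+              self.lines = []
+
+          def write(self, text):
+              # A real application would route this to its UI instead.
+              self.lines.append(text)
+
+      sys.stdout = sys.stderr = GuiOutput()
+      print "hello from embedded Python"   # captured by the object, not a console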
-
-How do I use Python for CGI?
-----------------------------
-
-On the Microsoft IIS server or on the Win95 MS Personal Web Server you set up
-Python in the same way that you would set up any other scripting engine.
-
-Run regedt32 and go to::
-
- HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\W3SVC\Parameters\ScriptMap
-
-and enter the following line (making any specific changes that your system may
-need)::
-
- .py :REG_SZ: c:\<path to python>\python.exe -u %s %s
-
-This line will allow you to call your script with a simple reference like:
-``http://yourserver/scripts/yourscript.py`` provided "scripts" is an
-"executable" directory for your server (which it usually is by default). The
-:option:`-u` flag specifies unbuffered and binary mode for stdin - needed when
-working with binary data.
-
-In addition, it is recommended that using ".py" may not be a good idea for the
-file extensions when used in this context (you might want to reserve ``*.py``
-for support modules and use ``*.cgi`` or ``*.cgp`` for "main program" scripts).
-
-In order to set up Internet Information Services 5 to use Python for CGI
-processing, please see the following links:
-
- http://www.e-coli.net/pyiis_server.html (for Win2k Server)
- http://www.e-coli.net/pyiis.html (for Win2k pro)
-
-Configuring Apache is much simpler. In the Apache configuration file
-``httpd.conf``, add the following line at the end of the file::
-
- ScriptInterpreterSource Registry
-
-Then, give your Python CGI-scripts the extension .py and put them in the cgi-bin
-directory.
-
-
How do I keep editors from inserting tabs into my Python source?
----------------------------------------------------------------
@@ -456,116 +325,6 @@ with the additional feature of being able to send CTRL+C and CTRL+BREAK
to console subprocesses which are designed to handle those signals. See
:func:`os.kill` for further details.
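+
+A minimal sketch of using this (assuming Python 2.7 on Windows, where
+:func:`os.kill` gained this support; ``worker.py`` stands in for a
+long-running console script of your own)::
+
+   import os
+   import signal
+   import subprocess
+   import time
+
+   # Start the child in its own process group so the event reaches only it.
+   child = subprocess.Popen(["python", "worker.py"],
+                            creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
+   time.sleep(5)
+   os.kill(child.pid, signal.CTRL_BREAK_EVENT)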
-
-Why does os.path.isdir() fail on NT shared directories?
--------------------------------------------------------
-
-The solution appears to be always append the "\\" on the end of shared
-drives.
-
- >>> import os
- >>> os.path.isdir( '\\\\rorschach\\public')
- 0
- >>> os.path.isdir( '\\\\rorschach\\public\\')
- 1
-
-It helps to think of share points as being like drive letters. Example::
-
- k: is not a directory
- k:\ is a directory
- k:\media is a directory
- k:\media\ is not a directory
-
-The same rules apply if you substitute "k:" with "\\conky\foo"::
-
- \\conky\foo is not a directory
- \\conky\foo\ is a directory
- \\conky\foo\media is a directory
- \\conky\foo\media\ is not a directory
-
-
-cgi.py (or other CGI programming) doesn't work sometimes on NT or win95!
-------------------------------------------------------------------------
-
-Be sure you have the latest python.exe, that you are using python.exe rather
-than a GUI version of Python and that you have configured the server to execute
-::
-
- "...\python.exe -u ..."
-
-for the CGI execution. The :option:`-u` (unbuffered) option on NT and Win95
-prevents the interpreter from altering newlines in the standard input and
-output. Without it post/multipart requests will seem to have the wrong length
-and binary (e.g. GIF) responses may get garbled (resulting in broken images, PDF
-files, and other binary downloads failing).
-
-
-Why doesn't os.popen() work in PythonWin on NT?
------------------------------------------------
-
-The reason that os.popen() doesn't work from within PythonWin is due to a bug in
-Microsoft's C Runtime Library (CRT). The CRT assumes you have a Win32 console
-attached to the process.
-
-You should use the win32pipe module's popen() instead which doesn't depend on
-having an attached Win32 console.
-
-Example::
-
- import win32pipe
- f = win32pipe.popen('dir /c c:\\')
- print f.readlines()
- f.close()
-
-
-Why doesn't os.popen()/win32pipe.popen() work on Win9x?
--------------------------------------------------------
-
-There is a bug in Win9x that prevents os.popen/win32pipe.popen* from
-working. The good news is there is a way to work around this problem. The
-Microsoft Knowledge Base article that you need to lookup is: Q150956. You will
-find links to the knowledge base at: http://support.microsoft.com/.
-
-
-PyRun_SimpleFile() crashes on Windows but not on Unix; why?
------------------------------------------------------------
-
-This is very sensitive to the compiler vendor, version and (perhaps) even
-options. If the FILE* structure in your embedding program isn't the same as is
-assumed by the Python interpreter it won't work.
-
-The Python 1.5.* DLLs (``python15.dll``) are all compiled with MS VC++ 5.0 and
-with multithreading-DLL options (``/MD``).
-
-If you can't change compilers or flags, try using :c:func:`Py_RunSimpleString`.
-A trick to get it to run an arbitrary file is to construct a call to
-:func:`execfile` with the name of your file as argument.
-
-Also note that you can not mix-and-match Debug and Release versions. If you
-wish to use the Debug Multithreaded DLL, then your module *must* have ``_d``
-appended to the base name.
-
-
-Importing _tkinter fails on Windows 95/98: why?
-------------------------------------------------
-
-Sometimes, the import of _tkinter fails on Windows 95 or 98, complaining with a
-message like the following::
-
- ImportError: DLL load failed: One of the library files needed
- to run this application cannot be found.
-
-It could be that you haven't installed Tcl/Tk, but if you did install Tcl/Tk,
-and the Wish application works correctly, the problem may be that its installer
-didn't manage to edit the autoexec.bat file correctly. It tries to add a
-statement that changes the PATH environment variable to include the Tcl/Tk 'bin'
-subdirectory, but sometimes this edit doesn't quite work. Opening it with
-notepad usually reveals what the problem is.
-
-(One additional hint, noted by David Szafranski: you can't use long filenames
-here; e.g. use ``C:\PROGRA~1\Tcl\bin`` instead of ``C:\Program Files\Tcl\bin``.)
-
-
How do I extract the downloaded documentation on Windows?
---------------------------------------------------------
@@ -577,38 +336,3 @@ Simply rename the downloaded file to have the .TGZ extension, and WinZip will be
able to handle it. (If your copy of WinZip doesn't, get a newer one from
http://www.winzip.com.)
-
-Missing cw3215mt.dll (or missing cw3215.dll)
---------------------------------------------
-
-Sometimes, when using Tkinter on Windows, you get an error that cw3215mt.dll or
-cw3215.dll is missing.
-
-Cause: you have an old Tcl/Tk DLL built with cygwin in your path (probably
-``C:\Windows``). You must use the Tcl/Tk DLLs from the standard Tcl/Tk
-installation (Python 1.5.2 comes with one).
-
-
-Warning about CTL3D32 version from installer
---------------------------------------------
-
-The Python installer issues a warning like this::
-
- This version uses CTL3D32.DLL which is not the correct version.
- This version is used for windows NT applications only.
-
-Tim Peters:
-
- This is a Microsoft DLL, and a notorious source of problems. The message
- means what it says: you have the wrong version of this DLL for your operating
- system. The Python installation did not cause this -- something else you
- installed previous to this overwrote the DLL that came with your OS (probably
- older shareware of some sort, but there's no way to tell now). If you search
- for "CTL3D32" using any search engine (AltaVista, for example), you'll find
- hundreds and hundreds of web pages complaining about the same problem with
- all sorts of installation programs. They'll point you to ways to get the
- correct version reinstalled on your system (since Python doesn't cause this,
- we can't fix it).
-
-David A Burton has written a little program to fix this. Go to
-http://www.burtonsys.com/downloads.html and click on "ctl3dfix.zip".
diff --git a/Doc/glossary.rst b/Doc/glossary.rst
index 36a912c..b5e8171 100644
--- a/Doc/glossary.rst
+++ b/Doc/glossary.rst
@@ -39,16 +39,34 @@ Glossary
create your own ABCs with the :mod:`abc` module.
argument
- A value passed to a function or method, assigned to a named local
- variable in the function body. A function or method may have both
- positional arguments and keyword arguments in its definition.
- Positional and keyword arguments may be variable-length: ``*`` accepts
- or passes (if in the function definition or call) several positional
- arguments in a list, while ``**`` does the same for keyword arguments
- in a dictionary.
+ A value passed to a :term:`function` (or :term:`method`) when calling the
+ function. There are two types of arguments:
+
+ * :dfn:`keyword argument`: an argument preceded by an identifier (e.g.
+ ``name=``) in a function call or passed as a value in a dictionary
+ preceded by ``**``. For example, ``3`` and ``5`` are both keyword
+ arguments in the following calls to :func:`complex`::
+
+ complex(real=3, imag=5)
+ complex(**{'real': 3, 'imag': 5})
+
+ * :dfn:`positional argument`: an argument that is not a keyword argument.
+ Positional arguments can appear at the beginning of an argument list
+ and/or be passed as elements of an :term:`iterable` preceded by ``*``.
+ For example, ``3`` and ``5`` are both positional arguments in the
+ following calls::
+
+ complex(3, 5)
+ complex(*(3, 5))
- Any expression may be used within the argument list, and the evaluated
- value is passed to the local variable.
+ Arguments are assigned to the named local variables in a function body.
+ See the :ref:`calls` section for the rules governing this assignment.
+ Syntactically, any expression can be used to represent an argument; the
+ evaluated value is assigned to the local variable.
+
+ See also the :term:`parameter` glossary entry and the FAQ question on
+ :ref:`the difference between arguments and parameters
+ <faq-argument-vs-parameter>`.
attribute
A value associated with an object which is referenced by name using
@@ -59,6 +77,14 @@ Glossary
Benevolent Dictator For Life, a.k.a. `Guido van Rossum
<http://www.python.org/~guido/>`_, Python's creator.
+ bytes-like object
+ An object that supports the :ref:`buffer protocol <bufferobjects>`,
+ like :class:`str`, :class:`bytearray` or :class:`memoryview`.
+ Bytes-like objects can be used for various operations that expect
+ binary data, such as compression, saving to a binary file or sending
+ over a socket. Some operations need the binary data to be mutable,
+ in which case not every bytes-like object is suitable.
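+
+ For example (a small illustrative session), a :class:`bytearray` exposes a
+ writable buffer while a :class:`str` provides a read-only one::
+
+    >>> memoryview(bytearray(b"abc")).readonly
+    False
+    >>> memoryview(b"abc").readonly
+    True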
+
bytecode
Python source code is compiled into bytecode, the internal representation
of a Python program in the CPython interpreter. The bytecode is also
@@ -80,7 +106,7 @@ Glossary
classic class
Any class which does not inherit from :class:`object`. See
- :term:`new-style class`. Classic classes will be removed in Python 3.0.
+ :term:`new-style class`. Classic classes have been removed in Python 3.
coercion
The implicit conversion of an instance of one type to another during an
@@ -152,9 +178,9 @@ Glossary
For more information about descriptors' methods, see :ref:`descriptors`.
dictionary
- An associative array, where arbitrary keys are mapped to values. The keys
- can be any object with :meth:`__hash__` function and :meth:`__eq__`
- methods. Called a hash in Perl.
+ An associative array, where arbitrary keys are mapped to values. The
+ keys can be any object with :meth:`__hash__` and :meth:`__eq__` methods.
+ Called a hash in Perl.
docstring
A string literal which appears as the first expression in a class,
@@ -200,7 +226,7 @@ Glossary
An object exposing a file-oriented API (with methods such as
:meth:`read()` or :meth:`write()`) to an underlying resource. Depending
on the way it was created, a file object can mediate access to a real
- on-disk file or to another other type of storage or communication device
+ on-disk file or to another type of storage or communication device
(for example standard input/output, in-memory buffers, sockets, pipes,
etc.). File objects are also called :dfn:`file-like objects` or
:dfn:`streams`.
@@ -227,8 +253,9 @@ Glossary
function
A series of statements which returns some value to a caller. It can also
- be passed zero or more arguments which may be used in the execution of
- the body. See also :term:`argument` and :term:`method`.
+ be passed zero or more :term:`arguments <argument>` which may be used in
+ the execution of the body. See also :term:`parameter`, :term:`method`,
+ and the :ref:`function` section.
__future__
A pseudo-module which programmers can use to enable new language features
@@ -311,7 +338,8 @@ Glossary
All of Python's immutable built-in objects are hashable, while no mutable
containers (such as lists or dictionaries) are. Objects which are
instances of user-defined classes are hashable by default; they all
- compare unequal, and their hash value is their :func:`id`.
+ compare unequal (except with themselves), and their hash value is their
+ :func:`id`.
IDLE
An Integrated Development Environment for Python. IDLE is a basic editor
@@ -337,6 +365,10 @@ Glossary
fraction. Integer division can be forced by using the ``//`` operator
instead of the ``/`` operator. See also :term:`__future__`.
+ importing
+ The process by which Python code in one module is made available to
+ Python code in another module.
+
importer
An object that both finds and loads a module; both a
:term:`finder` and :term:`loader` object.
@@ -359,17 +391,17 @@ Glossary
slowly. See also :term:`interactive`.
iterable
- An object capable of returning its members one at a
- time. Examples of iterables include all sequence types (such as
- :class:`list`, :class:`str`, and :class:`tuple`) and some non-sequence
- types like :class:`dict` and :class:`file` and objects of any classes you
- define with an :meth:`__iter__` or :meth:`__getitem__` method. Iterables
- can be used in a :keyword:`for` loop and in many other places where a
- sequence is needed (:func:`zip`, :func:`map`, ...). When an iterable
- object is passed as an argument to the built-in function :func:`iter`, it
- returns an iterator for the object. This iterator is good for one pass
- over the set of values. When using iterables, it is usually not necessary
- to call :func:`iter` or deal with iterator objects yourself. The ``for``
+ An object capable of returning its members one at a time. Examples of
+ iterables include all sequence types (such as :class:`list`, :class:`str`,
+ and :class:`tuple`) and some non-sequence types like :class:`dict`
+ and :class:`file` and objects of any classes you define
+ with an :meth:`__iter__` or :meth:`__getitem__` method. Iterables can be
+ used in a :keyword:`for` loop and in many other places where a sequence is
+ needed (:func:`zip`, :func:`map`, ...). When an iterable object is passed
+ as an argument to the built-in function :func:`iter`, it returns an
+ iterator for the object. This iterator is good for one pass over the set
+ of values. When using iterables, it is usually not necessary to call
+ :func:`iter` or deal with iterator objects yourself. The ``for``
statement does that automatically for you, creating a temporary unnamed
variable to hold the iterator for the duration of the loop. See also
:term:`iterator`, :term:`sequence`, and :term:`generator`.
@@ -406,16 +438,13 @@ Glossary
:meth:`str.lower` method can serve as a key function for case insensitive
sorts. Alternatively, an ad-hoc key function can be built from a
:keyword:`lambda` expression such as ``lambda r: (r[0], r[2])``. Also,
- the :mod:`operator` module provides three key function constuctors:
+ the :mod:`operator` module provides three key function constructors:
:func:`~operator.attrgetter`, :func:`~operator.itemgetter`, and
:func:`~operator.methodcaller`. See the :ref:`Sorting HOW TO
<sortinghowto>` for examples of how to create and use key functions.
keyword argument
- Arguments which are preceded with a ``variable_name=`` in the call.
- The variable name designates the local name in the function to which the
- value is assigned. ``**`` is used to accept or pass a dictionary of
- keyword arguments. See :term:`argument`.
+ See :term:`argument`.
lambda
An anonymous inline function consisting of a single :term:`expression`
@@ -484,6 +513,13 @@ Glossary
for a member during lookup. See `The Python 2.3 Method Resolution Order
<http://www.python.org/download/releases/2.3/mro/>`_.
+ module
+ An object that serves as an organizational unit of Python code. Modules
+ have a namespace containing arbitrary Python objects. Modules are loaded
+ into Python by the process of :term:`importing`.
+
+ See also :term:`package`.
+
MRO
See :term:`method resolution order`.
@@ -527,7 +563,7 @@ Glossary
new-style class
Any class which inherits from :class:`object`. This includes all built-in
types like :class:`list` and :class:`dict`. Only new-style classes can
- use Python's newer, versatile features like :attr:`__slots__`,
+ use Python's newer, versatile features like :attr:`~object.__slots__`,
descriptors, properties, and :meth:`__getattribute__`.
More information can be found in :ref:`newstyle`.
@@ -537,12 +573,51 @@ Glossary
(methods). Also the ultimate base class of any :term:`new-style
class`.
+ package
+ A Python :term:`module` which can contain submodules or, recursively,
+ subpackages. Technically, a package is a Python module with an
+ ``__path__`` attribute.
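+
+ For example (illustrative; the exact path depends on your installation),
+ the standard library's :mod:`email` package has such an attribute::
+
+    >>> import email
+    >>> email.__path__
+    ['/usr/lib/python2.7/email']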
+
+ parameter
+ A named entity in a :term:`function` (or method) definition that
+ specifies an :term:`argument` (or in some cases, arguments) that the
+ function can accept. There are four types of parameters:
+
+ * :dfn:`positional-or-keyword`: specifies an argument that can be passed
+ either :term:`positionally <argument>` or as a :term:`keyword argument
+ <argument>`. This is the default kind of parameter, for example *foo*
+ and *bar* in the following::
+
+ def func(foo, bar=None): ...
+
+ * :dfn:`positional-only`: specifies an argument that can be supplied only
+ by position. Python has no syntax for defining positional-only
+ parameters. However, some built-in functions have positional-only
+ parameters (e.g. :func:`abs`).
+
+ * :dfn:`var-positional`: specifies that an arbitrary sequence of
+ positional arguments can be provided (in addition to any positional
+ arguments already accepted by other parameters). Such a parameter can
+ be defined by prepending the parameter name with ``*``, for example
+ *args* in the following::
+
+ def func(*args, **kwargs): ...
+
+ * :dfn:`var-keyword`: specifies that arbitrarily many keyword arguments
+ can be provided (in addition to any keyword arguments already accepted
+ by other parameters). Such a parameter can be defined by prepending
+ the parameter name with ``**``, for example *kwargs* in the example
+ above.
+
+ Parameters can specify both optional and required arguments, as well as
+ default values for some optional arguments.
+
+ See also the :term:`argument` glossary entry, the FAQ question on
+ :ref:`the difference between arguments and parameters
+ <faq-argument-vs-parameter>`, and the :ref:`function` section.
+
positional argument
- The arguments assigned to local names inside a function or method,
- determined by the order in which they were given in the call. ``*`` is
- used to either accept multiple positional arguments (when in the
- definition), or pass several arguments as a list to a function. See
- :term:`argument`.
+ See :term:`argument`.
Python 3000
Nickname for the Python 3.x release line (coined long ago when the release
@@ -605,7 +680,7 @@ Glossary
statement
A statement is part of a suite (a "block" of code). A statement is either
- an :term:`expression` or a one of several constructs with a keyword, such
+ an :term:`expression` or one of several constructs with a keyword, such
as :keyword:`if`, :keyword:`while` or :keyword:`for`.
struct sequence
@@ -628,7 +703,15 @@ Glossary
type
The type of a Python object determines what kind of object it is; every
object has a type. An object's type is accessible as its
- :attr:`__class__` attribute or can be retrieved with ``type(obj)``.
+ :attr:`~instance.__class__` attribute or can be retrieved with
+ ``type(obj)``.
+
+ universal newlines
+ A manner of interpreting text streams in which all of the following are
+ recognized as ending a line: the Unix end-of-line convention ``'\n'``,
+ the Windows convention ``'\r\n'``, and the old Macintosh convention
+ ``'\r'``. See :pep:`278` and :pep:`3116`, as well as
+ :func:`str.splitlines` for an additional use.
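+
+ For example (an illustrative session), :func:`str.splitlines` recognizes
+ all three conventions::
+
+    >>> "one\ntwo\r\nthree\r".splitlines()
+    ['one', 'two', 'three']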
view
The objects returned from :meth:`dict.viewkeys`, :meth:`dict.viewvalues`,
diff --git a/Doc/howto/advocacy.rst b/Doc/howto/advocacy.rst
deleted file mode 100644
index e67e201..0000000
--- a/Doc/howto/advocacy.rst
+++ /dev/null
@@ -1,356 +0,0 @@
-*************************
- Python Advocacy HOWTO
-*************************
-
-:Author: A.M. Kuchling
-:Release: 0.03
-
-
-.. topic:: Abstract
-
- It's usually difficult to get your management to accept open source software,
- and Python is no exception to this rule. This document discusses reasons to use
- Python, strategies for winning acceptance, facts and arguments you can use, and
- cases where you *shouldn't* try to use Python.
-
-
-Reasons to Use Python
-=====================
-
-There are several reasons to incorporate a scripting language into your
-development process, and this section will discuss them, and why Python has some
-properties that make it a particularly good choice.
-
-
-Programmability
----------------
-
-Programs are often organized in a modular fashion. Lower-level operations are
-grouped together, and called by higher-level functions, which may in turn be
-used as basic operations by still further upper levels.
-
-For example, the lowest level might define a very low-level set of functions for
-accessing a hash table. The next level might use hash tables to store the
-headers of a mail message, mapping a header name like ``Date`` to a value such
-as ``Tue, 13 May 1997 20:00:54 -0400``. A yet higher level may operate on
-message objects, without knowing or caring that message headers are stored in a
-hash table, and so forth.
-
-Often, the lowest levels do very simple things; they implement a data structure
-such as a binary tree or hash table, or they perform some simple computation,
-such as converting a date string to a number. The higher levels then contain
-logic connecting these primitive operations. Using the approach, the primitives
-can be seen as basic building blocks which are then glued together to produce
-the complete product.
-
-Why is this design approach relevant to Python? Because Python is well suited
-to functioning as such a glue language. A common approach is to write a Python
-module that implements the lower level operations; for the sake of speed, the
-implementation might be in C, Java, or even Fortran. Once the primitives are
-available to Python programs, the logic underlying higher level operations is
-written in the form of Python code. The high-level logic is then more
-understandable, and easier to modify.
-
-John Ousterhout wrote a paper that explains this idea at greater length,
-entitled "Scripting: Higher Level Programming for the 21st Century". I
-recommend that you read this paper; see the references for the URL. Ousterhout
-is the inventor of the Tcl language, and therefore argues that Tcl should be
-used for this purpose; he only briefly refers to other languages such as Python,
-Perl, and Lisp/Scheme, but in reality, Ousterhout's argument applies to
-scripting languages in general, since you could equally write extensions for any
-of the languages mentioned above.
-
-
-Prototyping
------------
-
-In *The Mythical Man-Month*, Fredrick Brooks suggests the following rule when
-planning software projects: "Plan to throw one away; you will anyway." Brooks
-is saying that the first attempt at a software design often turns out to be
-wrong; unless the problem is very simple or you're an extremely good designer,
-you'll find that new requirements and features become apparent once development
-has actually started. If these new requirements can't be cleanly incorporated
-into the program's structure, you're presented with two unpleasant choices:
-hammer the new features into the program somehow, or scrap everything and write
-a new version of the program, taking the new features into account from the
-beginning.
-
-Python provides you with a good environment for quickly developing an initial
-prototype. That lets you get the overall program structure and logic right, and
-you can fine-tune small details in the fast development cycle that Python
-provides. Once you're satisfied with the GUI interface or program output, you
-can translate the Python code into C++, Fortran, Java, or some other compiled
-language.
-
-Prototyping means you have to be careful not to use too many Python features
-that are hard to implement in your other language. Using ``eval()``, or regular
-expressions, or the :mod:`pickle` module, means that you're going to need C or
-Java libraries for formula evaluation, regular expressions, and serialization,
-for example. But it's not hard to avoid such tricky code, and in the end the
-translation usually isn't very difficult. The resulting code can be rapidly
-debugged, because any serious logical errors will have been removed from the
-prototype, leaving only more minor slip-ups in the translation to track down.
-
-This strategy builds on the earlier discussion of programmability. Using Python
-as glue to connect lower-level components has obvious relevance for constructing
-prototype systems. In this way Python can help you with development, even if
-end users never come in contact with Python code at all. If the performance of
-the Python version is adequate and corporate politics allow it, you may not need
-to do a translation into C or Java, but it can still be faster to develop a
-prototype and then translate it, instead of attempting to produce the final
-version immediately.
-
-One example of this development strategy is Microsoft Merchant Server. Version
-1.0 was written in pure Python, by a company that subsequently was purchased by
-Microsoft. Version 2.0 began to translate the code into C++, shipping with some
-C++code and some Python code. Version 3.0 didn't contain any Python at all; all
-the code had been translated into C++. Even though the product doesn't contain
-a Python interpreter, the Python language has still served a useful purpose by
-speeding up development.
-
-This is a very common use for Python. Past conference papers have also
-described this approach for developing high-level numerical algorithms; see
-David M. Beazley and Peter S. Lomdahl's paper "Feeding a Large-scale Physics
-Application to Python" in the references for a good example. If an algorithm's
-basic operations are things like "Take the inverse of this 4000x4000 matrix",
-and are implemented in some lower-level language, then Python has almost no
-additional performance cost; the extra time required for Python to evaluate an
-expression like ``m.invert()`` is dwarfed by the cost of the actual computation.
-It's particularly good for applications where seemingly endless tweaking is
-required to get things right. GUI interfaces and Web sites are prime examples.
-
-The Python code is also shorter and faster to write (once you're familiar with
-Python), so it's easier to throw it away if you decide your approach was wrong;
-if you'd spent two weeks working on it instead of just two hours, you might
-waste time trying to patch up what you've got out of a natural reluctance to
-admit that those two weeks were wasted. Truthfully, those two weeks haven't
-been wasted, since you've learnt something about the problem and the technology
-you're using to solve it, but it's human nature to view this as a failure of
-some sort.
-
-
-Simplicity and Ease of Understanding
-------------------------------------
-
-Python is definitely *not* a toy language that's only usable for small tasks.
-The language features are general and powerful enough to enable it to be used
-for many different purposes. It's useful at the small end, for 10- or 20-line
-scripts, but it also scales up to larger systems that contain thousands of lines
-of code.
-
-However, this expressiveness doesn't come at the cost of an obscure or tricky
-syntax. While Python has some dark corners that can lead to obscure code, there
-are relatively few such corners, and proper design can isolate their use to only
-a few classes or modules. It's certainly possible to write confusing code by
-using too many features with too little concern for clarity, but most Python
-code can look a lot like a slightly-formalized version of human-understandable
-pseudocode.
-
-In *The New Hacker's Dictionary*, Eric S. Raymond gives the following definition
-for "compact":
-
-.. epigraph::
-
- Compact *adj.* Of a design, describes the valuable property that it can all be
- apprehended at once in one's head. This generally means the thing created from
- the design can be used with greater facility and fewer errors than an equivalent
- tool that is not compact. Compactness does not imply triviality or lack of
- power; for example, C is compact and FORTRAN is not, but C is more powerful than
- FORTRAN. Designs become non-compact through accreting features and cruft that
- don't merge cleanly into the overall design scheme (thus, some fans of Classic C
- maintain that ANSI C is no longer compact).
-
- (From http://www.catb.org/~esr/jargon/html/C/compact.html)
-
-In this sense of the word, Python is quite compact, because the language has
-just a few ideas, which are used in lots of places. Take namespaces, for
-example. Import a module with ``import math``, and you create a new namespace
-called ``math``. Classes are also namespaces that share many of the properties
-of modules, and have a few of their own; for example, you can create instances
-of a class. Instances? They're yet another namespace. Namespaces are currently
-implemented as Python dictionaries, so they have the same methods as the
-standard dictionary data type: .keys() returns all the keys, and so forth.
-
-This simplicity arises from Python's development history. The language syntax
-derives from different sources; ABC, a relatively obscure teaching language, is
-one primary influence, and Modula-3 is another. (For more information about ABC
-and Modula-3, consult their respective Web sites at http://www.cwi.nl/~steven/abc/
-and http://www.m3.org.) Other features have come from C, Icon,
-Algol-68, and even Perl. Python hasn't really innovated very much, but instead
-has tried to keep the language small and easy to learn, building on ideas that
-have been tried in other languages and found useful.
-
-Simplicity is a virtue that should not be underestimated. It lets you learn the
-language more quickly, and then rapidly write code -- code that often works the
-first time you run it.
-
-
-Java Integration
-----------------
-
-If you're working with Java, Jython (http://www.jython.org/) is definitely worth
-your attention. Jython is a re-implementation of Python in Java that compiles
-Python code into Java bytecodes. The resulting environment has very tight,
-almost seamless, integration with Java. It's trivial to access Java classes
-from Python, and you can write Python classes that subclass Java classes.
-Jython can be used for prototyping Java applications in much the same way
-CPython is used, and it can also be used for test suites for Java code, or
-embedded in a Java application to add scripting capabilities.
-
-
-Arguments and Rebuttals
-=======================
-
-Let's say that you've decided upon Python as the best choice for your
-application. How can you convince your management, or your fellow developers,
-to use Python? This section lists some common arguments against using Python,
-and provides some possible rebuttals.
-
-**Python is freely available software that doesn't cost anything. How good can
-it be?**
-
-Very good, indeed. These days Linux and Apache, two other pieces of open source
-software, are becoming more respected as alternatives to commercial software,
-but Python hasn't had all the publicity.
-
-Python has been around for several years, with many users and developers.
-Accordingly, the interpreter has been used by many people, and has gotten most
-of the bugs shaken out of it. While bugs are still discovered at intervals,
-they're usually either quite obscure (they'd have to be, for no one to have run
-into them before) or they involve interfaces to external libraries. The
-internals of the language itself are quite stable.
-
-Having the source code should be viewed as making the software available for
-peer review; people can examine the code, suggest (and implement) improvements,
-and track down bugs. To find out more about the idea of open source code, along
-with arguments and case studies supporting it, go to http://www.opensource.org.
-
-**Who's going to support it?**
-
-Python has a sizable community of developers, and the number is still growing.
-The Internet community surrounding the language is an active one, and is worth
-being considered another one of Python's advantages. Most questions posted to
-the comp.lang.python newsgroup are quickly answered by someone.
-
-Should you need to dig into the source code, you'll find it's clear and
-well-organized, so it's not very difficult to write extensions and track down
-bugs yourself. If you'd prefer to pay for support, there are companies and
-individuals who offer commercial support for Python.
-
-**Who uses Python for serious work?**
-
-Lots of people; one interesting thing about Python is the surprising diversity
-of applications that it's been used for. People are using Python to:
-
-* Run Web sites
-
-* Write GUI interfaces
-
-* Control number-crunching code on supercomputers
-
-* Make a commercial application scriptable by embedding the Python interpreter
- inside it
-
-* Process large XML data sets
-
-* Build test suites for C or Java code
-
-Whatever your application domain is, there's probably someone who's used Python
-for something similar. Yet, despite being useable for such high-end
-applications, Python's still simple enough to use for little jobs.
-
-See http://wiki.python.org/moin/OrganizationsUsingPython for a list of some of
-the organizations that use Python.
-
-**What are the restrictions on Python's use?**
-
-They're practically nonexistent. Consult the :file:`Misc/COPYRIGHT` file in the
-source distribution, or the section :ref:`history-and-license` for the full
-language, but it boils down to three conditions:
-
-* You have to leave the copyright notice on the software; if you don't include
- the source code in a product, you have to put the copyright notice in the
- supporting documentation.
-
-* Don't claim that the institutions that have developed Python endorse your
- product in any way.
-
-* If something goes wrong, you can't sue for damages. Practically all software
- licenses contain this condition.
-
-Notice that you don't have to provide source code for anything that contains
-Python or is built with it. Also, the Python interpreter and accompanying
-documentation can be modified and redistributed in any way you like, and you
-don't have to pay anyone any licensing fees at all.
-
-**Why should we use an obscure language like Python instead of well-known
-language X?**
-
-I hope this HOWTO, and the documents listed in the final section, will help
-convince you that Python isn't obscure, and has a healthily growing user base.
-One word of advice: always present Python's positive advantages, instead of
-concentrating on language X's failings. People want to know why a solution is
-good, rather than why all the other solutions are bad. So instead of attacking
-a competing solution on various grounds, simply show how Python's virtues can
-help.
-
-
-Useful Resources
-================
-
-http://www.pythonology.com/success
- The Python Success Stories are a collection of stories from successful users of
- Python, with the emphasis on business and corporate users.
-
-.. http://www.fsbassociates.com/books/pythonchpt1.htm
- The first chapter of \emph{Internet Programming with Python} also
- examines some of the reasons for using Python. The book is well worth
- buying, but the publishers have made the first chapter available on
- the Web.
-
-http://www.tcl.tk/doc/scripting.html
- John Ousterhout's white paper on scripting is a good argument for the utility of
- scripting languages, though naturally enough, he emphasizes Tcl, the language he
- developed. Most of the arguments would apply to any scripting language.
-
-http://www.python.org/workshops/1997-10/proceedings/beazley.html
- The authors, David M. Beazley and Peter S. Lomdahl, describe their use of
- Python at Los Alamos National Laboratory. It's another good example of how
- Python can help get real work done. This quotation from the paper has been
- echoed by many people:
-
- .. epigraph::
-
- Originally developed as a large monolithic application for massively parallel
- processing systems, we have used Python to transform our application into a
- flexible, highly modular, and extremely powerful system for performing
- simulation, data analysis, and visualization. In addition, we describe how
- Python has solved a number of important problems related to the development,
- debugging, deployment, and maintenance of scientific software.
-
-http://pythonjournal.cognizor.com/pyj1/Everitt-Feit_interview98-V1.html
- This interview with Andy Feit, discussing Infoseek's use of Python, can be used
- to show that choosing Python didn't introduce any difficulties into a company's
- development process, and provided some substantial benefits.
-
-.. http://www.python.org/psa/Commercial.html
- Robin Friedrich wrote this document on how to support Python's use in
- commercial projects.
-
-http://www.python.org/workshops/1997-10/proceedings/stein.ps
- For the 6th Python conference, Greg Stein presented a paper that traced Python's
- adoption and usage at a startup called eShop, and later at Microsoft.
-
-http://www.opensource.org
- Management may be doubtful of the reliability and usefulness of software that
- wasn't written commercially. This site presents arguments that show how open
- source software can have considerable advantages over closed-source software.
-
-http://www.faqs.org/docs/Linux-mini/Advocacy.html
- The Linux Advocacy mini-HOWTO was the inspiration for this document, and is also
- well worth reading for general suggestions on winning acceptance for a new
- technology, such as Linux or Python. In general, you won't make much progress
- by simply attacking existing systems and complaining about their inadequacies;
- this often ends up looking like unfocused whining. It's much better to point
- out some of the many areas where Python is an improvement over other systems.
-
diff --git a/Doc/howto/argparse.rst b/Doc/howto/argparse.rst
new file mode 100644
index 0000000..f110a55
--- /dev/null
+++ b/Doc/howto/argparse.rst
@@ -0,0 +1,764 @@
+*****************
+Argparse Tutorial
+*****************
+
+:author: Tshepang Lekhonkhobe
+
+.. _argparse-tutorial:
+
+This tutorial is intended to be a gentle introduction to :mod:`argparse`, the
+recommended command-line parsing module in the Python standard library.
+
+.. note::
+
+ There are two other modules that fulfill the same task, namely
+ :mod:`getopt` (an equivalent for :c:func:`getopt` from the C
+ language) and the deprecated :mod:`optparse`.
+ Note also that :mod:`argparse` is based on :mod:`optparse`,
+ and therefore very similar in terms of usage.
+
+
+Concepts
+========
+
+Let's show the sort of functionality that we are going to explore in this
+introductory tutorial by making use of the :command:`ls` command:
+
+.. code-block:: sh
+
+ $ ls
+ cpython devguide prog.py pypy rm-unused-function.patch
+ $ ls pypy
+ ctypes_configure demo dotviewer include lib_pypy lib-python ...
+ $ ls -l
+ total 20
+ drwxr-xr-x 19 wena wena 4096 Feb 18 18:51 cpython
+ drwxr-xr-x 4 wena wena 4096 Feb 8 12:04 devguide
+ -rwxr-xr-x 1 wena wena 535 Feb 19 00:05 prog.py
+ drwxr-xr-x 14 wena wena 4096 Feb 7 00:59 pypy
+ -rw-r--r-- 1 wena wena 741 Feb 18 01:01 rm-unused-function.patch
+ $ ls --help
+ Usage: ls [OPTION]... [FILE]...
+ List information about the FILEs (the current directory by default).
+ Sort entries alphabetically if none of -cftuvSUX nor --sort is specified.
+ ...
+
+A few concepts we can learn from the four commands:
+
+* The :command:`ls` command is useful when run without any options at all. It defaults
+ to displaying the contents of the current directory.
+
+* If we want more than it provides by default, we tell it a bit more. In
+ this case, we want it to display a different directory, ``pypy``.
+ What we did is specify what is known as a positional argument. It's named so
+ because the program should know what to do with the value, solely based on
+ where it appears on the command line. This concept is more relevant
+ to a command like :command:`cp`, whose most basic usage is ``cp SRC DEST``.
+ The first position is *what you want copied,* and the second
+ position is *where you want it copied to*.
+
+* Now, say we want to change the behaviour of the program. In our example,
+ we display more info for each file instead of just showing the file names.
+ The ``-l`` in that case is known as an optional argument.
+
+* That's a snippet of the help text. It's very useful in that you can
+ come across a program you have never used before, and can figure out
+ how it works simply by reading its help text.
+
+
+The basics
+==========
+
+Let us start with a very simple example which does (almost) nothing::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.parse_args()
+
+Following is a result of running the code:
+
+.. code-block:: sh
+
+ $ python prog.py
+ $ python prog.py --help
+ usage: prog.py [-h]
+
+ optional arguments:
+ -h, --help show this help message and exit
+ $ python prog.py --verbose
+ usage: prog.py [-h]
+ prog.py: error: unrecognized arguments: --verbose
+ $ python prog.py foo
+ usage: prog.py [-h]
+ prog.py: error: unrecognized arguments: foo
+
+Here is what is happening:
+
+* Running the script without any options results in nothing displayed to
+ stdout. Not so useful.
+
+* The second one starts to display the usefulness of the :mod:`argparse`
+ module. We have done almost nothing, but already we get a nice help message.
+
+* The ``--help`` option, which can also be shortened to ``-h``, is the only
+ option we get for free (i.e. no need to specify it). Specifying anything
+ else results in an error. But even then, we do get a useful usage message,
+ also for free.
+
+
+Introducing Positional arguments
+================================
+
+An example::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("echo")
+ args = parser.parse_args()
+ print args.echo
+
+And running the code:
+
+.. code-block:: sh
+
+ $ python prog.py
+ usage: prog.py [-h] echo
+ prog.py: error: the following arguments are required: echo
+ $ python prog.py --help
+ usage: prog.py [-h] echo
+
+ positional arguments:
+ echo
+
+ optional arguments:
+ -h, --help show this help message and exit
+ $ python prog.py foo
+ foo
+
+Here is what's happening:
+
+* We've added the :meth:`add_argument` method, which is what we use to specify
+ which command-line options the program is willing to accept. In this case,
+ I've named it ``echo`` so that it's in line with its function.
+
+* Calling our program now requires us to specify an option.
+
+* The :meth:`parse_args` method actually returns some data from the
+ options specified, in this case, ``echo``.
+
+* The variable is some form of 'magic' that :mod:`argparse` performs for free
+ (i.e. no need to specify which variable that value is stored in).
+ You will also notice that its name matches the string argument given
+ to the method, ``echo``.
+
+Note however that, although the help display looks nice and all, it currently
+is not as helpful as it can be. For example we see that we got ``echo`` as a
+positional argument, but we don't know what it does, other than by guessing or
+by reading the source code. So, let's make it a bit more useful::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("echo", help="echo the string you use here")
+ args = parser.parse_args()
+ print args.echo
+
+And we get:
+
+.. code-block:: sh
+
+ $ python prog.py -h
+ usage: prog.py [-h] echo
+
+ positional arguments:
+ echo echo the string you use here
+
+ optional arguments:
+ -h, --help show this help message and exit
+
+Now, how about doing something even more useful::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("square", help="display a square of a given number")
+ args = parser.parse_args()
+ print args.square**2
+
+Following is a result of running the code:
+
+.. code-block:: sh
+
+ $ python prog.py 4
+ Traceback (most recent call last):
+ File "prog.py", line 5, in <module>
+ print args.square**2
+ TypeError: unsupported operand type(s) for ** or pow(): 'str' and 'int'
+
+That didn't go so well. That's because :mod:`argparse` treats the options we
+give it as strings, unless we tell it otherwise. So, let's tell
+:mod:`argparse` to treat that input as an integer::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("square", help="display a square of a given number",
+ type=int)
+ args = parser.parse_args()
+ print args.square**2
+
+Following is a result of running the code:
+
+.. code-block:: sh
+
+ $ python prog.py 4
+ 16
+ $ python prog.py four
+ usage: prog.py [-h] square
+ prog.py: error: argument square: invalid int value: 'four'
+
+That went well. The program now even helpfully quits on illegal input
+before proceeding.
+
+
+Introducing Optional arguments
+==============================
+
+So far, we have been playing with positional arguments. Let us
+have a look at how to add optional ones::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--verbosity", help="increase output verbosity")
+ args = parser.parse_args()
+ if args.verbosity:
+ print "verbosity turned on"
+
+And the output:
+
+.. code-block:: sh
+
+ $ python prog.py --verbosity 1
+ verbosity turned on
+ $ python prog.py
+ $ python prog.py --help
+ usage: prog.py [-h] [--verbosity VERBOSITY]
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --verbosity VERBOSITY
+ increase output verbosity
+ $ python prog.py --verbosity
+ usage: prog.py [-h] [--verbosity VERBOSITY]
+ prog.py: error: argument --verbosity: expected one argument
+
+Here is what is happening:
+
+* The program is written so as to display something when ``--verbosity`` is
+ specified and display nothing when not.
+
+* To show that the option is actually optional, there is no error when running
+ the program without it. Note that by default, if an optional argument isn't
+ used, the relevant variable, in this case :attr:`args.verbosity`, is
+ given ``None`` as a value, which is the reason it fails the truth
+ test of the :keyword:`if` statement.
+
+* The help message is a bit different.
+
+* When using the ``--verbosity`` option, one must also specify some value,
+ any value.
+
+The above example accepts arbitrary values for ``--verbosity``, but for
+our simple program, only two values are actually useful, ``True`` or ``False``.
+Let's modify the code accordingly::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--verbose", help="increase output verbosity",
+ action="store_true")
+ args = parser.parse_args()
+ if args.verbose:
+ print "verbosity turned on"
+
+And the output:
+
+.. code-block:: sh
+
+ $ python prog.py --verbose
+ verbosity turned on
+ $ python prog.py --verbose 1
+ usage: prog.py [-h] [--verbose]
+ prog.py: error: unrecognized arguments: 1
+ $ python prog.py --help
+ usage: prog.py [-h] [--verbose]
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --verbose increase output verbosity
+
+Here is what is happening:
+
+* The option is now more of a flag than something that requires a value.
+ We even changed the name of the option to match that idea.
+ Note that we now specify a new keyword, ``action``, and give it the value
+ ``"store_true"``. This means that, if the option is specified,
+ assign the value ``True`` to :data:`args.verbose`.
+ Not specifying it implies ``False``.
+
+* It complains when you specify a value, in the true spirit of what flags
+  actually are.
+
+* Notice the different help text.
+
+
+Short options
+-------------
+
+If you are familiar with command line usage,
+you will notice that I haven't yet touched on the topic of short
+versions of the options. It's quite simple::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-v", "--verbose", help="increase output verbosity",
+ action="store_true")
+ args = parser.parse_args()
+ if args.verbose:
+ print "verbosity turned on"
+
+And here goes:
+
+.. code-block:: sh
+
+ $ python prog.py -v
+ verbosity turned on
+ $ python prog.py --help
+ usage: prog.py [-h] [-v]
+
+ optional arguments:
+ -h, --help show this help message and exit
+ -v, --verbose increase output verbosity
+
+Note that the new ability is also reflected in the help text.
+
+
+Combining Positional and Optional arguments
+===========================================
+
+Our program keeps growing in complexity::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("square", type=int,
+ help="display a square of a given number")
+ parser.add_argument("-v", "--verbose", action="store_true",
+ help="increase output verbosity")
+ args = parser.parse_args()
+ answer = args.square**2
+ if args.verbose:
+ print "the square of {} equals {}".format(args.square, answer)
+ else:
+ print answer
+
+And now the output:
+
+.. code-block:: sh
+
+ $ python prog.py
+ usage: prog.py [-h] [-v] square
+ prog.py: error: the following arguments are required: square
+ $ python prog.py 4
+ 16
+ $ python prog.py 4 --verbose
+ the square of 4 equals 16
+ $ python prog.py --verbose 4
+ the square of 4 equals 16
+
+* We've brought back a positional argument, hence the complaint.
+
+* Note that the order does not matter.
+
+How about we give this program of ours back the ability to have
+multiple verbosity values, and actually get to use them::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("square", type=int,
+ help="display a square of a given number")
+ parser.add_argument("-v", "--verbosity", type=int,
+ help="increase output verbosity")
+ args = parser.parse_args()
+ answer = args.square**2
+ if args.verbosity == 2:
+ print "the square of {} equals {}".format(args.square, answer)
+ elif args.verbosity == 1:
+ print "{}^2 == {}".format(args.square, answer)
+ else:
+ print answer
+
+And the output:
+
+.. code-block:: sh
+
+ $ python prog.py 4
+ 16
+ $ python prog.py 4 -v
+ usage: prog.py [-h] [-v VERBOSITY] square
+ prog.py: error: argument -v/--verbosity: expected one argument
+ $ python prog.py 4 -v 1
+ 4^2 == 16
+ $ python prog.py 4 -v 2
+ the square of 4 equals 16
+ $ python prog.py 4 -v 3
+ 16
+
+These all look good except the last one, which exposes a bug in our program.
+Let's fix it by restricting the values the ``--verbosity`` option can accept::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("square", type=int,
+ help="display a square of a given number")
+ parser.add_argument("-v", "--verbosity", type=int, choices=[0, 1, 2],
+ help="increase output verbosity")
+ args = parser.parse_args()
+ answer = args.square**2
+ if args.verbosity == 2:
+ print "the square of {} equals {}".format(args.square, answer)
+ elif args.verbosity == 1:
+ print "{}^2 == {}".format(args.square, answer)
+ else:
+ print answer
+
+And the output:
+
+.. code-block:: sh
+
+ $ python prog.py 4 -v 3
+ usage: prog.py [-h] [-v {0,1,2}] square
+ prog.py: error: argument -v/--verbosity: invalid choice: 3 (choose from 0, 1, 2)
+ $ python prog.py 4 -h
+ usage: prog.py [-h] [-v {0,1,2}] square
+
+ positional arguments:
+ square display a square of a given number
+
+ optional arguments:
+ -h, --help show this help message and exit
+ -v {0,1,2}, --verbosity {0,1,2}
+ increase output verbosity
+
+Note that the change is reflected both in the error message and in the
+help string.
+
+Now, let's use a different approach to playing with verbosity, which is pretty
+common. It also matches the way the CPython executable handles its own
+verbosity argument (check the output of ``python --help``)::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("square", type=int,
+ help="display the square of a given number")
+ parser.add_argument("-v", "--verbosity", action="count",
+ help="increase output verbosity")
+ args = parser.parse_args()
+ answer = args.square**2
+ if args.verbosity == 2:
+ print "the square of {} equals {}".format(args.square, answer)
+ elif args.verbosity == 1:
+ print "{}^2 == {}".format(args.square, answer)
+ else:
+ print answer
+
+We have introduced another action, "count",
+to count the number of occurrences of a specific optional argument:
+
+.. code-block:: sh
+
+ $ python prog.py 4
+ 16
+ $ python prog.py 4 -v
+ 4^2 == 16
+ $ python prog.py 4 -vv
+ the square of 4 equals 16
+ $ python prog.py 4 --verbosity --verbosity
+ the square of 4 equals 16
+ $ python prog.py 4 -v 1
+ usage: prog.py [-h] [-v] square
+ prog.py: error: unrecognized arguments: 1
+ $ python prog.py 4 -h
+ usage: prog.py [-h] [-v] square
+
+ positional arguments:
+ square display a square of a given number
+
+ optional arguments:
+ -h, --help show this help message and exit
+ -v, --verbosity increase output verbosity
+ $ python prog.py 4 -vvv
+ 16
+
+* Yes, it's now more of a flag (similar to ``action="store_true"`` in the
+ previous version of our script). That should explain the complaint.
+
+* It also behaves similarly to the "store_true" action.
+
+* Now here's a demonstration of what the "count" action gives. You've probably
+ seen this sort of usage before.
+
+* And, just like the "store_true" action, if you don't specify the ``-v`` flag,
+ that flag is considered to have the value ``None``.
+
+* As should be expected, when we specify the long form of the flag, we get
+ the same output.
+
+* Sadly, our help output isn't very informative on the new ability our script
+ has acquired, but that can always be fixed by improving the documentation for
+ our script (e.g. via the ``help`` keyword argument; see the sketch after
+ this list).
+
+* That last output exposes a bug in our program.
+
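+As an aside on the help output mentioned above, one possible improvement is
+simply a more descriptive ``help`` string; the exact wording below is just an
+illustration, not part of the tutorial's program::
+
+    import argparse
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-v", "--verbosity", action="count",
+                        help="increase output verbosity (may be repeated, e.g. -vv)")
+    parser.print_help()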
+
+Now let's fix the counting bug itself::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("square", type=int,
+ help="display a square of a given number")
+ parser.add_argument("-v", "--verbosity", action="count",
+ help="increase output verbosity")
+ args = parser.parse_args()
+ answer = args.square**2
+
+ # bugfix: replace == with >=
+ if args.verbosity >= 2:
+ print "the square of {} equals {}".format(args.square, answer)
+ elif args.verbosity >= 1:
+ print "{}^2 == {}".format(args.square, answer)
+ else:
+ print answer
+
+And this is what it gives:
+
+.. code-block:: sh
+
+ $ python prog.py 4 -vvv
+ the square of 4 equals 16
+ $ python prog.py 4 -vvvv
+ the square of 4 equals 16
+ $ python prog.py 4
+ Traceback (most recent call last):
+ File "prog.py", line 11, in <module>
+ if args.verbosity >= 2:
+ TypeError: unorderable types: NoneType() >= int()
+
+* The first output went well, and fixes the bug we had before.
+ That is, we want any value >= 2 to be as verbose as possible.
+
+* The third output is not so good.
+
+Let's fix that bug::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("square", type=int,
+ help="display a square of a given number")
+ parser.add_argument("-v", "--verbosity", action="count", default=0,
+ help="increase output verbosity")
+ args = parser.parse_args()
+ answer = args.square**2
+ if args.verbosity >= 2:
+ print "the square of {} equals {}".format(args.square, answer)
+ elif args.verbosity >= 1:
+ print "{}^2 == {}".format(args.square, answer)
+ else:
+ print answer
+
+We've just introduced yet another keyword, ``default``.
+We've set it to ``0`` in order to make it comparable to the other int values.
+Remember that by default,
+if an optional argument isn't specified,
+it gets the ``None`` value, and that cannot be compared to an int value
+(hence the :exc:`TypeError` exception).
+
+And:
+
+.. code-block:: sh
+
+ $ python prog.py 4
+ 16
+
+You can go quite far just with what we've learned so far,
+and we have only scratched the surface.
+The :mod:`argparse` module is very powerful,
+and we'll explore a bit more of it before we end this tutorial.
+
+
+Getting a little more advanced
+==============================
+
+What if we wanted to expand our tiny program to perform other powers,
+not just squares::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("x", type=int, help="the base")
+ parser.add_argument("y", type=int, help="the exponent")
+ parser.add_argument("-v", "--verbosity", action="count", default=0)
+ args = parser.parse_args()
+ answer = args.x**args.y
+ if args.verbosity >= 2:
+ print "{} to the power {} equals {}".format(args.x, args.y, answer)
+ elif args.verbosity >= 1:
+ print "{}^{} == {}".format(args.x, args.y, answer)
+ else:
+ print answer
+
+Output:
+
+.. code-block:: sh
+
+ $ python prog.py
+ usage: prog.py [-h] [-v] x y
+ prog.py: error: the following arguments are required: x, y
+ $ python prog.py -h
+ usage: prog.py [-h] [-v] x y
+
+ positional arguments:
+ x the base
+ y the exponent
+
+ optional arguments:
+ -h, --help show this help message and exit
+ -v, --verbosity
+ $ python prog.py 4 2 -v
+ 4^2 == 16
+
+
+Notice that so far we've been using verbosity level to *change* the text
+that gets displayed. The following example instead uses verbosity level
+to display *more* text::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument("x", type=int, help="the base")
+ parser.add_argument("y", type=int, help="the exponent")
+ parser.add_argument("-v", "--verbosity", action="count", default=0)
+ args = parser.parse_args()
+ answer = args.x**args.y
+ if args.verbosity >= 2:
+ print "Running '{}'".format(__file__)
+ if args.verbosity >= 1:
+ print "{}^{} ==".format(args.x, args.y),
+ print answer
+
+Output:
+
+.. code-block:: sh
+
+ $ python prog.py 4 2
+ 16
+ $ python prog.py 4 2 -v
+ 4^2 == 16
+ $ python prog.py 4 2 -vv
+ Running 'prog.py'
+ 4^2 == 16
+
+
+Conflicting options
+-------------------
+
+So far, we have been working with two methods of an
+:class:`argparse.ArgumentParser` instance. Let's introduce a third one,
+:meth:`add_mutually_exclusive_group`. It allows us to specify options that
+conflict with each other. Let's also change the rest of the program so that
+the new functionality makes more sense:
+we'll introduce the ``--quiet`` option,
+which will be the opposite of the ``--verbose`` one::
+
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument("-v", "--verbose", action="store_true")
+ group.add_argument("-q", "--quiet", action="store_true")
+ parser.add_argument("x", type=int, help="the base")
+ parser.add_argument("y", type=int, help="the exponent")
+ args = parser.parse_args()
+ answer = args.x**args.y
+
+ if args.quiet:
+ print answer
+ elif args.verbose:
+ print "{} to the power {} equals {}".format(args.x, args.y, answer)
+ else:
+ print "{}^{} == {}".format(args.x, args.y, answer)
+
+Our program is now simpler, and we've lost some functionality for the sake of
+demonstration. Anyway, here's the output:
+
+.. code-block:: sh
+
+ $ python prog.py 4 2
+ 4^2 == 16
+ $ python prog.py 4 2 -q
+ 16
+ $ python prog.py 4 2 -v
+ 4 to the power 2 equals 16
+ $ python prog.py 4 2 -vq
+ usage: prog.py [-h] [-v | -q] x y
+ prog.py: error: argument -q/--quiet: not allowed with argument -v/--verbose
+ $ python prog.py 4 2 -v --quiet
+ usage: prog.py [-h] [-v | -q] x y
+ prog.py: error: argument -q/--quiet: not allowed with argument -v/--verbose
+
+That should be easy to follow. I've added that last output so you can see the
+sort of flexibility you get, i.e. mixing long form options with short form
+ones.
+
+Before we conclude, you probably want to tell your users the main purpose of
+your program, just in case they don't know::
+
+ import argparse
+
+ parser = argparse.ArgumentParser(description="calculate X to the power of Y")
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument("-v", "--verbose", action="store_true")
+ group.add_argument("-q", "--quiet", action="store_true")
+ parser.add_argument("x", type=int, help="the base")
+ parser.add_argument("y", type=int, help="the exponent")
+ args = parser.parse_args()
+ answer = args.x**args.y
+
+ if args.quiet:
+ print answer
+ elif args.verbose:
+ print "{} to the power {} equals {}".format(args.x, args.y, answer)
+ else:
+ print "{}^{} == {}".format(args.x, args.y, answer)
+
+Note the slight difference in the usage text. Note the ``[-v | -q]``,
+which tells us that we can either use ``-v`` or ``-q``,
+but not both at the same time:
+
+.. code-block:: sh
+
+ $ python prog.py --help
+ usage: prog.py [-h] [-v | -q] x y
+
+ calculate X to the power of Y
+
+ positional arguments:
+ x the base
+ y the exponent
+
+ optional arguments:
+ -h, --help show this help message and exit
+ -v, --verbose
+ -q, --quiet
+
+
+Conclusion
+==========
+
+The :mod:`argparse` module offers a lot more than shown here.
+Its docs are quite detailed and thorough, and full of examples.
+Having gone through this tutorial, you should be able to digest them easily
+without feeling overwhelmed.
diff --git a/Doc/howto/cporting.rst b/Doc/howto/cporting.rst
index 7ef7537..1ad77d6 100644
--- a/Doc/howto/cporting.rst
+++ b/Doc/howto/cporting.rst
@@ -2,27 +2,28 @@
.. _cporting-howto:
-********************************
-Porting Extension Modules to 3.0
-********************************
+*************************************
+Porting Extension Modules to Python 3
+*************************************
:author: Benjamin Peterson
.. topic:: Abstract
- Although changing the C-API was not one of Python 3.0's objectives, the many
- Python level changes made leaving 2.x's API intact impossible. In fact, some
- changes such as :func:`int` and :func:`long` unification are more obvious on
- the C level. This document endeavors to document incompatibilities and how
- they can be worked around.
+ Although changing the C-API was not one of Python 3's objectives,
+ the many Python-level changes made leaving Python 2's API intact
+ impossible. In fact, some changes such as :func:`int` and
+ :func:`long` unification are more obvious on the C level. This
+ document endeavors to document incompatibilities and how they can
+ be worked around.
Conditional compilation
=======================
-The easiest way to compile only some code for 3.0 is to check if
-:c:macro:`PY_MAJOR_VERSION` is greater than or equal to 3. ::
+The easiest way to compile only some code for Python 3 is to check
+if :c:macro:`PY_MAJOR_VERSION` is greater than or equal to 3. ::
#if PY_MAJOR_VERSION >= 3
#define IS_PY3K
@@ -35,7 +36,7 @@ conditional blocks.
Changes to Object APIs
======================
-Python 3.0 merged together some types with similar functions while cleanly
+Python 3 merged together some types with similar functions while cleanly
separating others.
@@ -43,14 +44,14 @@ str/unicode Unification
-----------------------
-Python 3.0's :func:`str` (``PyString_*`` functions in C) type is equivalent to
-2.x's :func:`unicode` (``PyUnicode_*``). The old 8-bit string type has become
-:func:`bytes`. Python 2.6 and later provide a compatibility header,
+Python 3's :func:`str` (``PyString_*`` functions in C) type is equivalent to
+Python 2's :func:`unicode` (``PyUnicode_*``). The old 8-bit string type has
+become :func:`bytes`. Python 2.6 and later provide a compatibility header,
:file:`bytesobject.h`, mapping ``PyBytes`` names to ``PyString`` ones. For best
-compatibility with 3.0, :c:type:`PyUnicode` should be used for textual data and
+compatibility with Python 3, :c:type:`PyUnicode` should be used for textual data and
:c:type:`PyBytes` for binary data. It's also important to remember that
-:c:type:`PyBytes` and :c:type:`PyUnicode` in 3.0 are not interchangeable like
-:c:type:`PyString` and :c:type:`PyUnicode` are in 2.x. The following example
+:c:type:`PyBytes` and :c:type:`PyUnicode` in Python 3 are not interchangeable like
+:c:type:`PyString` and :c:type:`PyUnicode` are in Python 2. The following example
shows best practices with regards to :c:type:`PyUnicode`, :c:type:`PyString`,
and :c:type:`PyBytes`. ::
@@ -94,36 +95,20 @@ and :c:type:`PyBytes`. ::
long/int Unification
--------------------
-In Python 3.0, there is only one integer type. It is called :func:`int` on the
-Python level, but actually corresponds to 2.x's :func:`long` type. In the
-C-API, ``PyInt_*`` functions are replaced by their ``PyLong_*`` neighbors. The
-best course of action here is using the ``PyInt_*`` functions aliased to
-``PyLong_*`` found in :file:`intobject.h`. The abstract ``PyNumber_*`` APIs
-can also be used in some cases. ::
-
- #include "Python.h"
- #include "intobject.h"
-
- static PyObject *
- add_ints(PyObject *self, PyObject *args) {
- int one, two;
- PyObject *result;
-
- if (!PyArg_ParseTuple(args, "ii:add_ints", &one, &two))
- return NULL;
-
- return PyInt_FromLong(one + two);
- }
-
+Python 3 has only one integer type, :func:`int`. But it actually
+corresponds to Python 2's :func:`long` type--the :func:`int` type
+used in Python 2 was removed. In the C-API, ``PyInt_*`` functions
+are replaced by their ``PyLong_*`` equivalents.
Module initialization and state
===============================
-Python 3.0 has a revamped extension module initialization system. (See
-:pep:`3121`.) Instead of storing module state in globals, they should be stored
-in an interpreter specific structure. Creating modules that act correctly in
-both 2.x and 3.0 is tricky. The following simple example demonstrates how. ::
+Python 3 has a revamped extension module initialization system. (See
+:pep:`3121`.) Instead of storing module state in globals, they should
+be stored in an interpreter specific structure. Creating modules that
+act correctly in both Python 2 and Python 3 is tricky. The following
+simple example demonstrates how. ::
#include "Python.h"
@@ -223,15 +208,18 @@ If you're currently using CObjects, and you want to migrate to 3.1 or newer,
you'll need to switch to Capsules.
:c:type:`CObject` was deprecated in 3.1 and 2.7 and completely removed in
Python 3.2. If you only support 2.7, or 3.1 and above, you
-can simply switch to :c:type:`Capsule`. If you need to support 3.0 or
-versions of Python earlier than 2.7 you'll have to support both CObjects
-and Capsules.
+can simply switch to :c:type:`Capsule`. If you need to support Python 3.0,
+or versions of Python earlier than 2.7,
+you'll have to support both CObjects and Capsules.
+(Note that Python 3.0 is no longer supported, and it is not recommended
+for production use.)
The following example header file :file:`capsulethunk.h` may
-solve the problem for you;
-simply write your code against the :c:type:`Capsule` API, include
-this header file after ``"Python.h"``, and you'll automatically use CObjects
-in Python 3.0 or versions earlier than 2.7.
+solve the problem for you. Simply write your code against the
+:c:type:`Capsule` API and include this header file after
+:file:`Python.h`. Your code will automatically use Capsules
+in versions of Python with Capsules, and switch to CObjects
+when Capsules are unavailable.
:file:`capsulethunk.h` simulates Capsules using CObjects. However,
:c:type:`CObject` provides no place to store the capsule's "name". As a
@@ -246,16 +234,16 @@ behave slightly differently from real Capsules. Specifically:
* :c:func:`PyCapsule_GetName` always returns NULL.
- * :c:func:`PyCapsule_SetName` always throws an exception and
+ * :c:func:`PyCapsule_SetName` always raises an exception and
returns failure. (Since there's no way to store a name
in a CObject, noisy failure of :c:func:`PyCapsule_SetName`
was deemed preferable to silent failure here. If this is
- inconveient, feel free to modify your local
+ inconvenient, feel free to modify your local
copy as you see fit.)
You can find :file:`capsulethunk.h` in the Python source distribution
-in the :file:`Doc/includes` directory. We also include it here for
-your reference; here is :file:`capsulethunk.h`:
+as :source:`Doc/includes/capsulethunk.h`. We also include it here for
+your convenience:
.. literalinclude:: ../includes/capsulethunk.h
@@ -266,5 +254,5 @@ Other options
If you are writing a new extension module, you might consider `Cython
<http://www.cython.org>`_. It translates a Python-like language to C. The
-extension modules it creates are compatible with Python 3.x and 2.x.
+extension modules it creates are compatible with Python 3 and Python 2.
diff --git a/Doc/howto/curses.rst b/Doc/howto/curses.rst
index 71e640c..74c1f2a 100644
--- a/Doc/howto/curses.rst
+++ b/Doc/howto/curses.rst
@@ -118,7 +118,7 @@ function to restore the terminal to its original operating mode. ::
A common problem when debugging a curses application is to get your terminal
messed up when the application dies without restoring the terminal to its
previous state. In Python this commonly happens when your code is buggy and
-raises an uncaught exception. Keys are no longer be echoed to the screen when
+raises an uncaught exception. Keys are no longer echoed to the screen when
you type them, for example, which makes using the shell difficult.
In Python you can avoid these complications and make debugging much easier by
@@ -144,8 +144,8 @@ window, but you might wish to divide the screen into smaller windows, in order
to redraw or clear them separately. The :func:`newwin` function creates a new
window of a given size, returning the new window object. ::
- begin_x = 20 ; begin_y = 7
- height = 5 ; width = 40
+ begin_x = 20; begin_y = 7
+ height = 5; width = 40
win = curses.newwin(height, width, begin_y, begin_x)
A word about the coordinate system used in curses: coordinates are always passed
@@ -184,11 +184,13 @@ displayed. ::
# explained in the next section
for y in range(0, 100):
for x in range(0, 100):
- try: pad.addch(y,x, ord('a') + (x*x+y*y) % 26 )
- except curses.error: pass
+ try:
+ pad.addch(y,x, ord('a') + (x*x+y*y) % 26)
+ except curses.error:
+ pass
# Displays a section of the pad in the middle of the screen
- pad.refresh( 0,0, 5,5, 20,75)
+ pad.refresh(0,0, 5,5, 20,75)
The :func:`refresh` call displays a section of the pad in the rectangle
extending from coordinate (5,5) to coordinate (20,75) on the screen; the upper
@@ -271,7 +273,7 @@ application are commonly shown in reverse video; a text viewer may need to
highlight certain words. curses supports this by allowing you to specify an
attribute for each cell on the screen.
-An attribute is a integer, each bit representing a different attribute. You can
+An attribute is an integer, each bit representing a different attribute. You can
try to display text with multiple attribute bits set, but curses doesn't
guarantee that all the possible combinations are available, or that they're all
visually distinct. That depends on the ability of the terminal being used, so
@@ -300,7 +302,7 @@ could code::
curses.A_REVERSE)
stdscr.refresh()
-The curses library also supports color on those terminals that provide it, The
+The curses library also supports color on those terminals that provide it. The
most common such terminal is probably the Linux console, followed by color
xterms.
@@ -321,7 +323,7 @@ again, such combinations are not guaranteed to work on all terminals.
An example, which displays a line of text using color pair 1::
- stdscr.addstr( "Pretty text", curses.color_pair(1) )
+ stdscr.addstr("Pretty text", curses.color_pair(1))
stdscr.refresh()
As I said before, a color pair consists of a foreground and background color.
@@ -343,7 +345,7 @@ When you change a color pair, any text already displayed using that color pair
will change to the new colors. You can also display new text in this color
with::
- stdscr.addstr(0,0, "RED ALERT!", curses.color_pair(1) )
+ stdscr.addstr(0,0, "RED ALERT!", curses.color_pair(1))
Very fancy terminals can change the definitions of the actual colors to a given
RGB value. This lets you change color 1, which is usually red, to purple or
@@ -381,9 +383,12 @@ your program will look something like this::
while 1:
c = stdscr.getch()
- if c == ord('p'): PrintDocument()
- elif c == ord('q'): break # Exit the while()
- elif c == curses.KEY_HOME: x = y = 0
+ if c == ord('p'):
+ PrintDocument()
+ elif c == ord('q'):
+ break # Exit the while()
+ elif c == curses.KEY_HOME:
+ x = y = 0
The :mod:`curses.ascii` module supplies ASCII class membership functions that
take either integer or 1-character-string arguments; these may be useful in
@@ -433,4 +438,3 @@ If you write an interesting little program, feel free to contribute it as
another demo. We can always use more of them!
The ncurses FAQ: http://invisible-island.net/ncurses/ncurses.faq.html
-
diff --git a/Doc/howto/descriptor.rst b/Doc/howto/descriptor.rst
index ce4b6bb..2a323c7 100644
--- a/Doc/howto/descriptor.rst
+++ b/Doc/howto/descriptor.rst
@@ -124,7 +124,7 @@ The important points to remember are:
The object returned by ``super()`` also has a custom :meth:`__getattribute__`
method for invoking descriptors. The call ``super(B, obj).m()`` searches
``obj.__class__.__mro__`` for the base class ``A`` immediately following ``B``
-and then returns ``A.__dict__['m'].__get__(obj, A)``. If not a descriptor,
+and then returns ``A.__dict__['m'].__get__(obj, B)``. If not a descriptor,
``m`` is returned unchanged. If not in the dictionary, ``m`` reverts to a
search using :meth:`object.__getattribute__`.
@@ -167,7 +167,7 @@ descriptor is useful for monitoring just a few chosen attributes::
return self.val
def __set__(self, obj, val):
- print 'Updating' , self.name
+ print 'Updating', self.name
self.val = val
>>> class MyClass(object):
@@ -218,25 +218,36 @@ here is a pure Python equivalent::
self.fget = fget
self.fset = fset
self.fdel = fdel
+ if doc is None and fget is not None:
+ doc = fget.__doc__
self.__doc__ = doc
def __get__(self, obj, objtype=None):
if obj is None:
return self
if self.fget is None:
- raise AttributeError, "unreadable attribute"
+ raise AttributeError("unreadable attribute")
return self.fget(obj)
def __set__(self, obj, value):
if self.fset is None:
- raise AttributeError, "can't set attribute"
+ raise AttributeError("can't set attribute")
self.fset(obj, value)
def __delete__(self, obj):
if self.fdel is None:
- raise AttributeError, "can't delete attribute"
+ raise AttributeError("can't delete attribute")
self.fdel(obj)
+ def getter(self, fget):
+ return type(self)(fget, self.fset, self.fdel, self.__doc__)
+
+ def setter(self, fset):
+ return type(self)(self.fget, fset, self.fdel, self.__doc__)
+
+ def deleter(self, fdel):
+ return type(self)(self.fget, self.fset, fdel, self.__doc__)
+
The :func:`property` builtin helps whenever a user interface has granted
attribute access and then subsequent changes require the intervention of a
method.
@@ -398,7 +409,7 @@ is to create alternate class constructors. In Python 2.3, the classmethod
:func:`dict.fromkeys` creates a new dictionary from a list of keys. The pure
Python equivalent is::
- class Dict:
+ class Dict(object):
. . .
def fromkeys(klass, iterable, value=None):
"Emulate dict_fromkeys() in Objects/dictobject.c"
diff --git a/Doc/howto/functional.rst b/Doc/howto/functional.rst
index 9636c6c..c6d6a56 100644
--- a/Doc/howto/functional.rst
+++ b/Doc/howto/functional.rst
@@ -244,9 +244,9 @@ Built-in functions such as :func:`max` and :func:`min` can take a single
iterator argument and will return the largest or smallest element. The ``"in"``
and ``"not in"`` operators also support iterators: ``X in iterator`` is true if
X is found in the stream returned by the iterator. You'll run into obvious
-problems if the iterator is infinite; ``max()``, ``min()``, and ``"not in"``
+problems if the iterator is infinite; ``max()``, ``min()``
will never return, and if the element X never appears in the stream, the
-``"in"`` operator won't return either.
+``"in"`` and ``"not in"`` operators won't return either.
Note that you can only go forward in an iterator; there's no way to get the
previous element, reset the iterator, or make a copy of it. Iterator objects
@@ -587,7 +587,8 @@ And here's an example of changing the counter:
Because ``yield`` will often be returning ``None``, you should always check for
this case. Don't just use its value in expressions unless you're sure that the
-``send()`` method will be the only method used resume your generator function.
+``send()`` method will be the only method used to resume your generator
+function.
In addition to ``send()``, there are two other new methods on generators:
@@ -743,8 +744,8 @@ the constructed list's ``.sort()`` method. ::
Python wiki at http://wiki.python.org/moin/HowTo/Sorting.)
The ``any(iter)`` and ``all(iter)`` built-ins look at the truth values of an
-iterable's contents. :func:`any` returns True if any element in the iterable is
-a true value, and :func:`all` returns True if all of the elements are true
+iterable's contents. :func:`any` returns ``True`` if any element in the iterable is
+a true value, and :func:`all` returns ``True`` if all of the elements are true
values:
>>> any([0,1,0])
diff --git a/Doc/howto/index.rst b/Doc/howto/index.rst
index 94ecc9a..e4c95b1 100644
--- a/Doc/howto/index.rst
+++ b/Doc/howto/index.rst
@@ -13,7 +13,6 @@ Currently, the HOWTOs are:
.. toctree::
:maxdepth: 1
- advocacy.rst
pyporting.rst
cporting.rst
curses.rst
@@ -28,4 +27,5 @@ Currently, the HOWTOs are:
unicode.rst
urllib2.rst
webservers.rst
+ argparse.rst
diff --git a/Doc/howto/logging-cookbook.rst b/Doc/howto/logging-cookbook.rst
index 5e02fdb..07b04f9 100644
--- a/Doc/howto/logging-cookbook.rst
+++ b/Doc/howto/logging-cookbook.rst
@@ -97,11 +97,11 @@ The output looks like this::
Multiple handlers and formatters
--------------------------------
-Loggers are plain Python objects. The :func:`addHandler` method has no minimum
-or maximum quota for the number of handlers you may add. Sometimes it will be
-beneficial for an application to log all messages of all severities to a text
-file while simultaneously logging errors or above to the console. To set this
-up, simply configure the appropriate handlers. The logging calls in the
+Loggers are plain Python objects. The :meth:`~Logger.addHandler` method has no
+minimum or maximum quota for the number of handlers you may add. Sometimes it
+will be beneficial for an application to log all messages of all severities to a
+text file while simultaneously logging errors or above to the console. To set
+this up, simply configure the appropriate handlers. The logging calls in the
application code will remain unchanged. Here is a slight modification to the
previous simple module-based configuration example::
@@ -295,17 +295,17 @@ the receiving end. A simple way of doing this is attaching a
logger2.warning('Jail zesty vixen who grabbed pay from quack.')
logger2.error('The five boxing wizards jump quickly.')
-At the receiving end, you can set up a receiver using the :mod:`socketserver`
+At the receiving end, you can set up a receiver using the :mod:`SocketServer`
module. Here is a basic working example::
import pickle
import logging
import logging.handlers
- import socketserver
+ import SocketServer
import struct
- class LogRecordStreamHandler(socketserver.StreamRequestHandler):
+ class LogRecordStreamHandler(SocketServer.StreamRequestHandler):
"""Handler for a streaming logging request.
This basically logs the record using whatever logging policy is
@@ -347,7 +347,7 @@ module. Here is a basic working example::
# cycles and network bandwidth!
logger.handle(record)
- class LogRecordSocketReceiver(socketserver.ThreadingTCPServer):
+ class LogRecordSocketReceiver(SocketServer.ThreadingTCPServer):
"""
Simple TCP socket-based logging receiver suitable for testing.
"""
@@ -357,7 +357,7 @@ module. Here is a basic working example::
def __init__(self, host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
- socketserver.ThreadingTCPServer.__init__(self, (host, port), handler)
+ SocketServer.ThreadingTCPServer.__init__(self, (host, port), handler)
self.abort = 0
self.timeout = 1
self.logname = None
@@ -395,8 +395,9 @@ printed on the console; on the server side, you should see something like::
Note that there are some security issues with pickle in some scenarios. If
these affect you, you can use an alternative serialization scheme by overriding
-the :meth:`makePickle` method and implementing your alternative there, as
-well as adapting the above script to use your alternative serialization.
+the :meth:`~handlers.SocketHandler.makePickle` method and implementing your
+alternative there, as well as adapting the above script to use your alternative
+serialization.
.. _context-info:
@@ -404,6 +405,8 @@ well as adapting the above script to use your alternative serialization.
Adding contextual information to your logging output
----------------------------------------------------
+.. currentmodule:: logging
+
Sometimes you want logging output to contain contextual information in
addition to the parameters passed to the logging call. For example, in a
networked application, it may be desirable to log client-specific information
@@ -445,9 +448,9 @@ information in the delegated call. Here's a snippet from the code of
msg, kwargs = self.process(msg, kwargs)
self.logger.debug(msg, *args, **kwargs)
-The :meth:`process` method of :class:`LoggerAdapter` is where the contextual
-information is added to the logging output. It's passed the message and
-keyword arguments of the logging call, and it passes back (potentially)
+The :meth:`~LoggerAdapter.process` method of :class:`LoggerAdapter` is where the
+contextual information is added to the logging output. It's passed the message
+and keyword arguments of the logging call, and it passes back (potentially)
modified versions of these to use in the call to the underlying logger. The
default implementation of this method leaves the message alone, but inserts
an 'extra' key in the keyword argument whose value is the dict-like object
@@ -459,70 +462,32 @@ merged into the :class:`LogRecord` instance's __dict__, allowing you to use
customized strings with your :class:`Formatter` instances which know about
the keys of the dict-like object. If you need a different method, e.g. if you
want to prepend or append the contextual information to the message string,
-you just need to subclass :class:`LoggerAdapter` and override :meth:`process`
-to do what you need. Here's an example script which uses this class, which
-also illustrates what dict-like behaviour is needed from an arbitrary
-'dict-like' object for use in the constructor::
+you just need to subclass :class:`LoggerAdapter` and override
+:meth:`~LoggerAdapter.process` to do what you need. Here is a simple example::
- import logging
+ class CustomAdapter(logging.LoggerAdapter):
+ """
+ This example adapter expects the passed in dict-like object to have a
+ 'connid' key, whose value in brackets is prepended to the log message.
+ """
+ def process(self, msg, kwargs):
+ return '[%s] %s' % (self.extra['connid'], msg), kwargs
- class ConnInfo:
- """
- An example class which shows how an arbitrary class can be used as
- the 'extra' context information repository passed to a LoggerAdapter.
- """
+which you can use like this::
- def __getitem__(self, name):
- """
- To allow this instance to look like a dict.
- """
- from random import choice
- if name == 'ip':
- result = choice(['127.0.0.1', '192.168.0.1'])
- elif name == 'user':
- result = choice(['jim', 'fred', 'sheila'])
- else:
- result = self.__dict__.get(name, '?')
- return result
+ logger = logging.getLogger(__name__)
+ adapter = CustomAdapter(logger, {'connid': some_conn_id})
- def __iter__(self):
- """
- To allow iteration over keys, which will be merged into
- the LogRecord dict before formatting and output.
- """
- keys = ['ip', 'user']
- keys.extend(self.__dict__.keys())
- return keys.__iter__()
+Then any events that you log to the adapter will have the value of
+``some_conn_id`` prepended to the log messages.
- if __name__ == '__main__':
- from random import choice
- levels = (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL)
- a1 = logging.LoggerAdapter(logging.getLogger('a.b.c'),
- { 'ip' : '123.231.231.123', 'user' : 'sheila' })
- logging.basicConfig(level=logging.DEBUG,
- format='%(asctime)-15s %(name)-5s %(levelname)-8s IP: %(ip)-15s User: %(user)-8s %(message)s')
- a1.debug('A debug message')
- a1.info('An info message with %s', 'some parameters')
- a2 = logging.LoggerAdapter(logging.getLogger('d.e.f'), ConnInfo())
- for x in range(10):
- lvl = choice(levels)
- lvlname = logging.getLevelName(lvl)
- a2.log(lvl, 'A message at %s level with %d %s', lvlname, 2, 'parameters')
-
-When this script is run, the output should look something like this::
+Using objects other than dicts to pass contextual information
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- 2008-01-18 14:49:54,023 a.b.c DEBUG IP: 123.231.231.123 User: sheila A debug message
- 2008-01-18 14:49:54,023 a.b.c INFO IP: 123.231.231.123 User: sheila An info message with some parameters
- 2008-01-18 14:49:54,023 d.e.f CRITICAL IP: 192.168.0.1 User: jim A message at CRITICAL level with 2 parameters
- 2008-01-18 14:49:54,033 d.e.f INFO IP: 192.168.0.1 User: jim A message at INFO level with 2 parameters
- 2008-01-18 14:49:54,033 d.e.f WARNING IP: 192.168.0.1 User: sheila A message at WARNING level with 2 parameters
- 2008-01-18 14:49:54,033 d.e.f ERROR IP: 127.0.0.1 User: fred A message at ERROR level with 2 parameters
- 2008-01-18 14:49:54,033 d.e.f ERROR IP: 127.0.0.1 User: sheila A message at ERROR level with 2 parameters
- 2008-01-18 14:49:54,033 d.e.f WARNING IP: 192.168.0.1 User: sheila A message at WARNING level with 2 parameters
- 2008-01-18 14:49:54,033 d.e.f WARNING IP: 192.168.0.1 User: jim A message at WARNING level with 2 parameters
- 2008-01-18 14:49:54,033 d.e.f INFO IP: 192.168.0.1 User: fred A message at INFO level with 2 parameters
- 2008-01-18 14:49:54,033 d.e.f WARNING IP: 192.168.0.1 User: sheila A message at WARNING level with 2 parameters
- 2008-01-18 14:49:54,033 d.e.f WARNING IP: 127.0.0.1 User: jim A message at WARNING level with 2 parameters
+You don't need to pass an actual dict to a :class:`LoggerAdapter` - you could
+pass an instance of a class which implements ``__getitem__`` and ``__iter__`` so
+that it looks like a dict to logging. This would be useful if you want to
+generate values dynamically (whereas the values in a dict would be constant).
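+
+For example, a minimal sketch of such a dict-like class might look like this
+(the ``ip`` key and the format string are illustrative assumptions)::
+
+    import logging
+    from random import choice
+
+    class ConnInfo(object):
+        """Looks enough like a dict to logging: supports item access and
+        iteration over its keys, generating a value on each lookup."""
+        def __getitem__(self, name):
+            if name == 'ip':
+                return choice(['127.0.0.1', '192.168.0.1'])
+            return '?'
+
+        def __iter__(self):
+            return iter(['ip'])
+
+    logging.basicConfig(format='IP: %(ip)-15s %(message)s')
+    adapter = logging.LoggerAdapter(logging.getLogger(__name__), ConnInfo())
+    adapter.warning('connection event')
+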
.. _filters-contextual:
@@ -607,25 +572,23 @@ threads in a single process *is* supported, logging to a single file from
*multiple processes* is *not* supported, because there is no standard way to
serialize access to a single file across multiple processes in Python. If you
need to log to a single file from multiple processes, one way of doing this is
-to have all the processes log to a :class:`SocketHandler`, and have a separate
-process which implements a socket server which reads from the socket and logs
-to file. (If you prefer, you can dedicate one thread in one of the existing
-processes to perform this function.) :ref:`This section <network-logging>`
-documents this approach in more detail and includes a working socket receiver
-which can be used as a starting point for you to adapt in your own
-applications.
+to have all the processes log to a :class:`~handlers.SocketHandler`, and have a
+separate process which implements a socket server which reads from the socket
+and logs to file. (If you prefer, you can dedicate one thread in one of the
+existing processes to perform this function.)
+:ref:`This section <network-logging>` documents this approach in more detail and
+includes a working socket receiver which can be used as a starting point for you
+to adapt in your own applications.
If you are using a recent version of Python which includes the
:mod:`multiprocessing` module, you could write your own handler which uses the
-:class:`Lock` class from this module to serialize access to the file from
-your processes. The existing :class:`FileHandler` and subclasses do not make
-use of :mod:`multiprocessing` at present, though they may do so in the future.
-Note that at present, the :mod:`multiprocessing` module does not provide
+:class:`~multiprocessing.Lock` class from this module to serialize access to the
+file from your processes. The existing :class:`FileHandler` and subclasses do
+not make use of :mod:`multiprocessing` at present, though they may do so in the
+future. Note that at present, the :mod:`multiprocessing` module does not provide
working lock functionality on all platforms (see
http://bugs.python.org/issue3770).
-.. currentmodule:: logging.handlers
-
Using file rotation
-------------------
@@ -637,7 +600,7 @@ Sometimes you want to let a log file grow to a certain size, then open a new
file and log to that. You may want to keep a certain number of these files, and
when that many files have been created, rotate the files so that the number of
files and the size of the files both remain bounded. For this usage pattern, the
-logging package provides a :class:`RotatingFileHandler`::
+logging package provides a :class:`~handlers.RotatingFileHandler`::
import glob
import logging
@@ -688,7 +651,7 @@ An example dictionary-based configuration
Below is an example of a logging configuration dictionary - it's taken from
the `documentation on the Django project <https://docs.djangoproject.com/en/1.3/topics/logging/#configuring-logging>`_.
-This dictionary is passed to :func:`~logging.config.dictConfig` to put the configuration into effect::
+This dictionary is passed to :func:`~config.dictConfig` to put the configuration into effect::
LOGGING = {
'version': 1,
@@ -743,5 +706,351 @@ This dictionary is passed to :func:`~logging.config.dictConfig` to put the confi
}
For more information about this configuration, you can see the `relevant
-section <https://docs.djangoproject.com/en/1.3/topics/logging/#configuring-logging>`_
+section <https://docs.djangoproject.com/en/1.6/topics/logging/#configuring-logging>`_
of the Django documentation.
+
+Inserting a BOM into messages sent to a SysLogHandler
+-----------------------------------------------------
+
+`RFC 5424 <http://tools.ietf.org/html/rfc5424>`_ requires that a
+Unicode message be sent to a syslog daemon as a set of bytes which have the
+following structure: an optional pure-ASCII component, followed by a UTF-8 Byte
+Order Mark (BOM), followed by Unicode encoded using UTF-8. (See the `relevant
+section of the specification <http://tools.ietf.org/html/rfc5424#section-6>`_.)
+
+In Python 2.6 and 2.7, code was added to
+:class:`~logging.handlers.SysLogHandler` to insert a BOM into the message, but
+unfortunately, it was implemented incorrectly, with the BOM appearing at the
+beginning of the message and hence not allowing any pure-ASCII component to
+appear before it.
+
+As this behaviour is broken, the incorrect BOM insertion code is being removed
+from Python 2.7.4 and later. However, it is not being replaced, and if you
+want to produce RFC 5424-compliant messages which include a BOM, an optional
+pure-ASCII sequence before it and arbitrary Unicode after it, encoded using
+UTF-8, then you need to do the following:
+
+#. Attach a :class:`~logging.Formatter` instance to your
+ :class:`~logging.handlers.SysLogHandler` instance, with a format string
+ such as::
+
+ u'ASCII section\ufeffUnicode section'
+
+ The Unicode code point ``u'\ufeff'``, when encoded using UTF-8, will be
+ encoded as a UTF-8 BOM -- the byte-string ``'\xef\xbb\xbf'``.
+
+#. Replace the ASCII section with whatever placeholders you like, but make sure
+ that the data that appears in there after substitution is always ASCII (that
+ way, it will remain unchanged after UTF-8 encoding).
+
+#. Replace the Unicode section with whatever placeholders you like; if the data
+ which appears there after substitution contains characters outside the ASCII
+ range, that's fine -- it will be encoded using UTF-8.
+
+If the formatted message is Unicode, it *will* be encoded using UTF-8 encoding
+by ``SysLogHandler``. If you follow the above rules, you should be able to
+produce RFC 5424-compliant messages. If you don't, logging may not complain,
+but your messages will not be RFC 5424-compliant, and your syslog daemon may
+complain.
+
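+Putting these rules together, a minimal sketch might look like the following;
+the ``/dev/log`` address and the ``myapp`` prefix are assumptions made for
+illustration only::
+
+    import logging
+    import logging.handlers
+
+    logger = logging.getLogger('syslog_example')
+    handler = logging.handlers.SysLogHandler(address='/dev/log')
+    # pure-ASCII section, then the BOM (u'\ufeff'), then the Unicode section
+    handler.setFormatter(logging.Formatter(u'myapp: \ufeff%(message)s'))
+    logger.addHandler(handler)
+    logger.error(u'caf\xe9 not found')
+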
+
+Implementing structured logging
+-------------------------------
+
+Although most logging messages are intended for reading by humans, and thus not
+readily machine-parseable, there might be circumstances where you want to output
+messages in a structured format which *is* capable of being parsed by a program
+(without needing complex regular expressions to parse the log message). This is
+straightforward to achieve using the logging package. There are a number of
+ways in which this could be achieved, but the following is a simple approach
+which uses JSON to serialise the event in a machine-parseable manner::
+
+ import json
+ import logging
+
+ class StructuredMessage(object):
+ def __init__(self, message, **kwargs):
+ self.message = message
+ self.kwargs = kwargs
+
+ def __str__(self):
+ return '%s >>> %s' % (self.message, json.dumps(self.kwargs))
+
+ _ = StructuredMessage # optional, to improve readability
+
+ logging.basicConfig(level=logging.INFO, format='%(message)s')
+ logging.info(_('message 1', foo='bar', bar='baz', num=123, fnum=123.456))
+
+If the above script is run, it prints::
+
+ message 1 >>> {"fnum": 123.456, "num": 123, "bar": "baz", "foo": "bar"}
+
+Note that the order of items might be different according to the version of
+Python used.
+
+If you need more specialised processing, you can use a custom JSON encoder,
+as in the following complete example::
+
+ from __future__ import unicode_literals
+
+ import json
+ import logging
+
+ # This next bit is to ensure the script runs unchanged on 2.x and 3.x
+ try:
+ unicode
+ except NameError:
+ unicode = str
+
+ class Encoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, set):
+ return tuple(o)
+ elif isinstance(o, unicode):
+ return o.encode('unicode_escape').decode('ascii')
+ return super(Encoder, self).default(o)
+
+ class StructuredMessage(object):
+ def __init__(self, message, **kwargs):
+ self.message = message
+ self.kwargs = kwargs
+
+ def __str__(self):
+ s = Encoder().encode(self.kwargs)
+ return '%s >>> %s' % (self.message, s)
+
+ _ = StructuredMessage # optional, to improve readability
+
+ def main():
+ logging.basicConfig(level=logging.INFO, format='%(message)s')
+ logging.info(_('message 1', set_value=set([1, 2, 3]), snowman='\u2603'))
+
+ if __name__ == '__main__':
+ main()
+
+When the above script is run, it prints::
+
+ message 1 >>> {"snowman": "\u2603", "set_value": [1, 2, 3]}
+
+Note that the order of items might be different according to the version of
+Python used.
+
+
+.. _custom-handlers:
+
+.. currentmodule:: logging.config
+
+Customizing handlers with :func:`dictConfig`
+--------------------------------------------
+
+There are times when you want to customize logging handlers in particular ways,
+and if you use :func:`dictConfig` you may be able to do this without
+subclassing. As an example, consider that you may want to set the ownership of a
+log file. On POSIX, this is easily done using :func:`shutil.chown`, but the file
+handlers in the stdlib don't offer built-in support. You can customize handler
+creation using a plain function such as::
+
+ def owned_file_handler(filename, mode='a', encoding=None, owner=None):
+ if owner:
+ if not os.path.exists(filename):
+ open(filename, 'a').close()
+ shutil.chown(filename, *owner)
+ return logging.FileHandler(filename, mode, encoding)
+
+You can then specify, in a logging configuration passed to :func:`dictConfig`,
+that a logging handler be created by calling this function::
+
+ LOGGING = {
+ 'version': 1,
+ 'disable_existing_loggers': False,
+ 'formatters': {
+ 'default': {
+ 'format': '%(asctime)s %(levelname)s %(name)s %(message)s'
+ },
+ },
+ 'handlers': {
+ 'file':{
+ # The values below are popped from this dictionary and
+ # used to create the handler, set the handler's level and
+ # its formatter.
+ '()': owned_file_handler,
+ 'level':'DEBUG',
+ 'formatter': 'default',
+ # The values below are passed to the handler creator callable
+ # as keyword arguments.
+ 'owner': ['pulse', 'pulse'],
+ 'filename': 'chowntest.log',
+ 'mode': 'w',
+ 'encoding': 'utf-8',
+ },
+ },
+ 'root': {
+ 'handlers': ['file'],
+ 'level': 'DEBUG',
+ },
+ }
+
+In this example I am setting the ownership using the ``pulse`` user and group,
+just for the purposes of illustration. Putting it together into a working
+script, ``chowntest.py``::
+
+ import logging, logging.config, os, shutil
+
+ def owned_file_handler(filename, mode='a', encoding=None, owner=None):
+ if owner:
+ if not os.path.exists(filename):
+ open(filename, 'a').close()
+ shutil.chown(filename, *owner)
+ return logging.FileHandler(filename, mode, encoding)
+
+ LOGGING = {
+ 'version': 1,
+ 'disable_existing_loggers': False,
+ 'formatters': {
+ 'default': {
+ 'format': '%(asctime)s %(levelname)s %(name)s %(message)s'
+ },
+ },
+ 'handlers': {
+ 'file':{
+ # The values below are popped from this dictionary and
+ # used to create the handler, set the handler's level and
+ # its formatter.
+ '()': owned_file_handler,
+ 'level':'DEBUG',
+ 'formatter': 'default',
+ # The values below are passed to the handler creator callable
+ # as keyword arguments.
+ 'owner': ['pulse', 'pulse'],
+ 'filename': 'chowntest.log',
+ 'mode': 'w',
+ 'encoding': 'utf-8',
+ },
+ },
+ 'root': {
+ 'handlers': ['file'],
+ 'level': 'DEBUG',
+ },
+ }
+
+ logging.config.dictConfig(LOGGING)
+ logger = logging.getLogger('mylogger')
+ logger.debug('A debug message')
+
+To run this, you will probably need to run as ``root``::
+
+ $ sudo python3.3 chowntest.py
+ $ cat chowntest.log
+ 2013-11-05 09:34:51,128 DEBUG mylogger A debug message
+ $ ls -l chowntest.log
+ -rw-r--r-- 1 pulse pulse 55 2013-11-05 09:34 chowntest.log
+
+Note that this example uses Python 3.3 because that's where :func:`shutil.chown`
+makes an appearance. This approach should work with any Python version that
+supports :func:`dictConfig` - namely, Python 2.7, 3.2 or later. With pre-3.3
+versions, you would need to implement the actual ownership change using e.g.
+:func:`os.chown`.
+
+In practice, the handler-creating function may be in a utility module somewhere
+in your project. Instead of the line in the configuration::
+
+ '()': owned_file_handler,
+
+you could use e.g.::
+
+ '()': 'ext://project.util.owned_file_handler',
+
+where ``project.util`` can be replaced with the actual name of the package
+where the function resides. In the above working script, using
+``'ext://__main__.owned_file_handler'`` should work. Here, the actual callable
+is resolved by :func:`dictConfig` from the ``ext://`` specification.
+
+This example hopefully also points the way to how you could implement other
+types of file change - e.g. setting specific POSIX permission bits - in the
+same way, using :func:`os.chmod`.
+
+Of course, the approach could also be extended to types of handler other than a
+:class:`~logging.FileHandler` - for example, one of the rotating file handlers,
+or a different type of handler altogether.
+
+
+.. _filters-dictconfig:
+
+Configuring filters with :func:`dictConfig`
+-------------------------------------------
+
+You *can* configure filters using :func:`~logging.config.dictConfig`, though it
+might not be obvious at first glance how to do it (hence this recipe). Since
+:class:`~logging.Filter` is the only filter class included in the standard
+library, and it is unlikely to cater to many requirements (it's only there as a
+base class), you will typically need to define your own :class:`~logging.Filter`
+subclass with an overridden :meth:`~logging.Filter.filter` method. To do this,
+specify the ``()`` key in the configuration dictionary for the filter,
+specifying a callable which will be used to create the filter (a class is the
+most obvious, but you can provide any callable which returns a
+:class:`~logging.Filter` instance). Here is a complete example::
+
+ import logging
+ import logging.config
+ import sys
+
+ class MyFilter(logging.Filter):
+ def __init__(self, param=None):
+ self.param = param
+
+ def filter(self, record):
+ if self.param is None:
+ allow = True
+ else:
+ allow = self.param not in record.msg
+ if allow:
+ record.msg = 'changed: ' + record.msg
+ return allow
+
+ LOGGING = {
+ 'version': 1,
+ 'filters': {
+ 'myfilter': {
+ '()': MyFilter,
+ 'param': 'noshow',
+ }
+ },
+ 'handlers': {
+ 'console': {
+ 'class': 'logging.StreamHandler',
+ 'filters': ['myfilter']
+ }
+ },
+ 'root': {
+ 'level': 'DEBUG',
+ 'handlers': ['console']
+ },
+ }
+
+ if __name__ == '__main__':
+ logging.config.dictConfig(LOGGING)
+ logging.debug('hello')
+ logging.debug('hello - noshow')
+
+This example shows how you can pass configuration data to the callable which
+constructs the instance, in the form of keyword parameters. When run, the above
+script will print::
+
+ changed: hello
+
+which shows that the filter is working as configured.
+
+A couple of extra points to note:
+
+* If you can't refer to the callable directly in the configuration (e.g. if it
+ lives in a different module, and you can't import it directly where the
+ configuration dictionary is), you can use the form ``ext://...`` as described
+ in :ref:`logging-config-dict-externalobj`. For example, you could have used
+ the text ``'ext://__main__.MyFilter'`` instead of ``MyFilter`` in the above
+ example.
+
+* As well as for filters, this technique can also be used to configure custom
+ handlers and formatters. See :ref:`logging-config-dict-userdef` for more
+ information on how logging supports using user-defined objects in its
+ configuration, and see the other cookbook recipe :ref:`custom-handlers` above.
+
diff --git a/Doc/howto/logging.rst b/Doc/howto/logging.rst
index 029a0ab..fdb6c53 100644
--- a/Doc/howto/logging.rst
+++ b/Doc/howto/logging.rst
@@ -63,6 +63,8 @@ The logging functions are named after the level or severity of the events
they are used to track. The standard levels and their applicability are
described below (in increasing order of severity):
+.. tabularcolumns:: |l|L|
+
+--------------+---------------------------------------------+
| Level | When it's used |
+==============+=============================================+
@@ -120,7 +122,8 @@ Logging to a file
^^^^^^^^^^^^^^^^^
A very common situation is that of recording logging events in a file, so let's
-look at that next::
+look at that next. Be sure to try the following in a newly-started Python
+interpreter, and don't just continue from the session described above::
import logging
logging.basicConfig(filename='example.log',level=logging.DEBUG)
@@ -330,6 +333,9 @@ of components: loggers, handlers, filters, and formatters.
to output.
* Formatters specify the layout of log records in the final output.
+Log event information is passed between loggers, handlers, filters and
+formatters in a :class:`LogRecord` instance.
+
Logging is performed by calling methods on instances of the :class:`Logger`
class (hereafter called :dfn:`loggers`). Each instance has a name, and they are
conceptually arranged in a namespace hierarchy using dots (periods) as
@@ -374,6 +380,13 @@ You can change this by passing a format string to :func:`basicConfig` with the
*format* keyword argument. For all options regarding how a format string is
constructed, see :ref:`formatter-objects`.
+Logging Flow
+^^^^^^^^^^^^
+
+The flow of log event information in loggers and handlers is illustrated in the
+following diagram.
+
+.. image:: logging_flow.png
Loggers
^^^^^^^
@@ -457,12 +470,13 @@ Handlers
:class:`~logging.Handler` objects are responsible for dispatching the
appropriate log messages (based on the log messages' severity) to the handler's
-specified destination. Logger objects can add zero or more handler objects to
-themselves with an :func:`addHandler` method. As an example scenario, an
-application may want to send all log messages to a log file, all log messages
-of error or higher to stdout, and all messages of critical to an email address.
-This scenario requires three individual handlers where each handler is
-responsible for sending messages of a specific severity to a specific location.
+specified destination. :class:`Logger` objects can add zero or more handler
+objects to themselves with an :meth:`~Logger.addHandler` method. As an example
+scenario, an application may want to send all log messages to a log file, all
+log messages of error or higher to stdout, and all messages of critical to an
+email address. This scenario requires three individual handlers where each
+handler is responsible for sending messages of a specific severity to a specific
+location.
The standard library includes quite a few handler types (see
:ref:`useful-handlers`); the tutorials use mainly :class:`StreamHandler` and
@@ -473,16 +487,17 @@ themselves with. The only handler methods that seem relevant for application
developers who are using the built-in handler objects (that is, not creating
custom handlers) are the following configuration methods:
-* The :meth:`Handler.setLevel` method, just as in logger objects, specifies the
+* The :meth:`~Handler.setLevel` method, just as in logger objects, specifies the
lowest severity that will be dispatched to the appropriate destination. Why
are there two :func:`setLevel` methods? The level set in the logger
determines which severity of messages it will pass to its handlers. The level
set in each handler determines which messages that handler will send on.
-* :func:`setFormatter` selects a Formatter object for this handler to use.
+* :meth:`~Handler.setFormatter` selects a Formatter object for this handler to
+ use.
-* :func:`addFilter` and :func:`removeFilter` respectively configure and
- deconfigure filter objects on handlers.
+* :meth:`~Handler.addFilter` and :meth:`~Handler.removeFilter` respectively
+ configure and deconfigure filter objects on handlers.
Application code should not directly instantiate and use instances of
:class:`Handler`. Instead, the :class:`Handler` class is a base class that
@@ -642,6 +657,21 @@ You can see that the config file approach has a few advantages over the Python
code approach, mainly separation of configuration and code and the ability of
noncoders to easily modify the logging properties.
+.. warning:: The :func:`fileConfig` function takes a default parameter,
+ ``disable_existing_loggers``, which defaults to ``True`` for reasons of
+ backward compatibility. This may or may not be what you want, since it
+ will cause any loggers existing before the :func:`fileConfig` call to
+ be disabled unless they (or an ancestor) are explicitly named in the
+ configuration. Please refer to the reference documentation for more
+ information, and specify ``False`` for this parameter if you wish.
+
+ The dictionary passed to :func:`dictConfig` can also specify a Boolean
+ value with key ``disable_existing_loggers``, which if not specified
+ explicitly in the dictionary also defaults to being interpreted as
+ ``True``. This leads to the logger-disabling behaviour described above,
+ which may not be what you want - in which case, provide the key
+ explicitly with a value of ``False``.
+
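
As an illustrative sketch (the handler layout is minimal and the config file
path is a placeholder), passing the key explicitly looks like::

   import logging.config

   # dictConfig: name the key explicitly so pre-existing loggers stay enabled.
   logging.config.dictConfig({
       'version': 1,
       'disable_existing_loggers': False,
       'handlers': {'console': {'class': 'logging.StreamHandler'}},
       'root': {'handlers': ['console'], 'level': 'WARNING'},
   })

   # fileConfig: the same idea via a keyword argument ('logging.conf' is a
   # placeholder path to an INI-style configuration file).
   # logging.config.fileConfig('logging.conf', disable_existing_loggers=False)
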
.. currentmodule:: logging
Note that the class names referenced in config files need to be either relative
@@ -713,12 +743,11 @@ Configuring Logging for a Library
When developing a library which uses logging, you should take care to
document how the library uses logging - for example, the names of loggers
used. Some consideration also needs to be given to its logging configuration.
-If the using application does not use logging, and library code makes logging
-calls, then (as described in the previous section) events of severity
-``WARNING`` and greater will be printed to ``sys.stderr``. This is regarded as
-the best default behaviour.
+If the using application does not configure logging, and library code makes
+logging calls, then (as described in the previous section) an error message
+will be printed to ``sys.stderr``.
-If for some reason you *don't* want these messages printed in the absence of
+If for some reason you *don't* want this message printed in the absence of
any logging configuration, you can attach a do-nothing handler to the top-level
logger for your library. This avoids the message being printed, since a handler
will always be found for the library's events: it just doesn't produce any
@@ -730,7 +759,7 @@ handlers, as normal.
A do-nothing handler is included in the logging package:
:class:`~logging.NullHandler` (since Python 2.7). An instance of this handler
could be added to the top-level logger of the logging namespace used by the
-library (*if* you want to prevent your library's logged events being output to
+library (*if* you want to prevent an error message being output to
``sys.stderr`` in the absence of logging configuration). If all logging by a
library *foo* is done using loggers with names matching 'foo.x', 'foo.x.y',
etc. then the code::
@@ -742,13 +771,14 @@ should have the desired effect. If an organisation produces a number of
libraries, then the logger name specified can be 'orgname.foo' rather than
just 'foo'.
-**PLEASE NOTE:** It is strongly advised that you *do not add any handlers other
-than* :class:`~logging.NullHandler` *to your library's loggers*. This is
-because the configuration of handlers is the prerogative of the application
-developer who uses your library. The application developer knows their target
-audience and what handlers are most appropriate for their application: if you
-add handlers 'under the hood', you might well interfere with their ability to
-carry out unit tests and deliver logs which suit their requirements.
+.. note:: It is strongly advised that you *do not add any handlers other
+ than* :class:`~logging.NullHandler` *to your library's loggers*. This is
+ because the configuration of handlers is the prerogative of the application
+ developer who uses your library. The application developer knows their
+ target audience and what handlers are most appropriate for their
+ application: if you add handlers 'under the hood', you might well interfere
+ with their ability to carry out unit tests and deliver logs which suit their
+ requirements.
Logging Levels
@@ -891,16 +921,16 @@ Logged messages are formatted for presentation through instances of the
use with the % operator and a dictionary.
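
For instance (the format fields and logger name are chosen purely for
illustration), a formatter is attached to a handler like this::

   import logging
   import sys

   formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
   handler = logging.StreamHandler(sys.stdout)
   handler.setFormatter(formatter)

   logger = logging.getLogger('formatted_example')
   logger.addHandler(handler)
   logger.warning('formatted output')
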
For formatting multiple messages in a batch, instances of
-:class:`BufferingFormatter` can be used. In addition to the format string (which
-is applied to each message in the batch), there is provision for header and
-trailer format strings.
+:class:`~handlers.BufferingFormatter` can be used. In addition to the format
+string (which is applied to each message in the batch), there is provision for
+header and trailer format strings.
When filtering based on logger level and/or handler level is not enough,
instances of :class:`Filter` can be added to both :class:`Logger` and
-:class:`Handler` instances (through their :meth:`addFilter` method). Before
-deciding to process a message further, both loggers and handlers consult all
-their filters for permission. If any filter returns a false value, the message
-is not processed further.
+:class:`Handler` instances (through their :meth:`~Handler.addFilter` method).
+Before deciding to process a message further, both loggers and handlers consult
+all their filters for permission. If any filter returns a false value, the
+message is not processed further.
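
For illustration only (the filter class, logger name and messages are
invented), a custom filter attached to a handler might look like::

   import logging
   import sys

   class NoParseErrors(logging.Filter):
       """Drop records whose formatted message mentions 'parse error'."""
       def filter(self, record):
           # A false return value stops the record from being processed further.
           return 'parse error' not in record.getMessage()

   handler = logging.StreamHandler(sys.stdout)
   handler.addFilter(NoParseErrors())

   logger = logging.getLogger('filter_example')
   logger.addHandler(handler)
   logger.warning('parse error in input line 3')   # dropped by the filter
   logger.warning('processing finished')           # emitted
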
The basic :class:`Filter` functionality allows filtering by specific logger
name. If this feature is used, messages sent to the named logger and its
@@ -918,19 +948,20 @@ in production. This is so that errors which occur while handling logging events
cause the application using logging to terminate prematurely.
:class:`SystemExit` and :class:`KeyboardInterrupt` exceptions are never
-swallowed. Other exceptions which occur during the :meth:`emit` method of a
-:class:`Handler` subclass are passed to its :meth:`handleError` method.
+swallowed. Other exceptions which occur during the :meth:`~Handler.emit` method
+of a :class:`Handler` subclass are passed to its :meth:`~Handler.handleError`
+method.
-The default implementation of :meth:`handleError` in :class:`Handler` checks
-to see if a module-level variable, :data:`raiseExceptions`, is set. If set, a
-traceback is printed to :data:`sys.stderr`. If not set, the exception is swallowed.
+The default implementation of :meth:`~Handler.handleError` in :class:`Handler`
+checks to see if a module-level variable, :data:`raiseExceptions`, is set. If
+set, a traceback is printed to :data:`sys.stderr`. If not set, the exception is
+swallowed.
-**Note:** The default value of :data:`raiseExceptions` is ``True``. This is because
-during development, you typically want to be notified of any exceptions that
-occur. It's advised that you set :data:`raiseExceptions` to ``False`` for production
-usage.
+.. note:: The default value of :data:`raiseExceptions` is ``True``. This is
+ because during development, you typically want to be notified of any
+ exceptions that occur. It's advised that you set :data:`raiseExceptions` to
+ ``False`` for production usage.
-.. currentmodule:: logging
.. _arbitrary-object-messages:
@@ -940,11 +971,11 @@ Using arbitrary objects as messages
In the preceding sections and examples, it has been assumed that the message
passed when logging the event is a string. However, this is not the only
possibility. You can pass an arbitrary object as a message, and its
-:meth:`__str__` method will be called when the logging system needs to convert
-it to a string representation. In fact, if you want to, you can avoid
+:meth:`~object.__str__` method will be called when the logging system needs to
+convert it to a string representation. In fact, if you want to, you can avoid
computing a string representation altogether - for example, the
-:class:`SocketHandler` emits an event by pickling it and sending it over the
-wire.
+:class:`~handlers.SocketHandler` emits an event by pickling it and sending it
+over the wire.
Optimization
@@ -953,9 +984,10 @@ Optimization
Formatting of message arguments is deferred until it cannot be avoided.
However, computing the arguments passed to the logging method can also be
expensive, and you may want to avoid doing it if the logger will just throw
-away your event. To decide what to do, you can call the :meth:`isEnabledFor`
-method which takes a level argument and returns true if the event would be
-created by the Logger for that level of call. You can write code like this::
+away your event. To decide what to do, you can call the
+:meth:`~Logger.isEnabledFor` method which takes a level argument and returns
+true if the event would be created by the Logger for that level of call.
+You can write code like this::
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Message with %s, %s', expensive_func1(),
@@ -964,6 +996,15 @@ created by the Logger for that level of call. You can write code like this::
so that if the logger's threshold is set above ``DEBUG``, the calls to
:func:`expensive_func1` and :func:`expensive_func2` are never made.
+.. note:: In some cases, :meth:`~Logger.isEnabledFor` can itself be more
+ expensive than you'd like (e.g. for deeply nested loggers where an explicit
+ level is only set high up in the logger hierarchy). In such cases (or if you
+ want to avoid calling a method in tight loops), you can cache the result of a
+ call to :meth:`~Logger.isEnabledFor` in a local or instance variable, and use
+ that instead of calling the method each time. Such a cached value would only
+ need to be recomputed when the logging configuration changes dynamically
+ while the application is running (which is not all that common).
+
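
A hedged sketch of such caching (the logger name and function are purely
illustrative)::

   import logging

   logger = logging.getLogger('deeply.nested.module.logger')

   # Computed once; would only need recomputing if the logging configuration
   # changed while the application is running.
   debug_enabled = logger.isEnabledFor(logging.DEBUG)

   def process(items):
       for item in items:
           if debug_enabled:
               logger.debug('processing %r', item)
           # ... actual work on item ...
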
There are other optimizations which can be made for specific applications which
need more precise control over what logging information is collected. Here's a
list of things you can do to avoid processing during logging which you don't
@@ -973,6 +1014,11 @@ need:
| What you don't want to collect | How to avoid collecting it |
+===============================================+========================================+
| Information about where calls were made from. | Set ``logging._srcfile`` to ``None``. |
+| | This avoids calling |
+| | :func:`sys._getframe`, which may help |
+| | to speed up your code in environments |
+| | like PyPy (which can't speed up code |
+| | that uses :func:`sys._getframe`). |
+-----------------------------------------------+----------------------------------------+
| Threading information. | Set ``logging.logThreads`` to ``0``. |
+-----------------------------------------------+----------------------------------------+
diff --git a/Doc/howto/logging_flow.png b/Doc/howto/logging_flow.png
new file mode 100755
index 0000000..a883823
--- /dev/null
+++ b/Doc/howto/logging_flow.png
Binary files differ
diff --git a/Doc/howto/pyporting.rst b/Doc/howto/pyporting.rst
index 309f3f7..9d7e859 100644
--- a/Doc/howto/pyporting.rst
+++ b/Doc/howto/pyporting.rst
@@ -10,238 +10,211 @@ Porting Python 2 Code to Python 3
With Python 3 being the future of Python while Python 2 is still in active
use, it is good to have your project available for both major releases of
- Python. This guide is meant to help you choose which strategy works best
- for your project to support both Python 2 & 3 along with how to execute
- that strategy.
+ Python. This guide is meant to help you figure out how best to support both
+ Python 2 & 3 simultaneously.
If you are looking to port an extension module instead of pure Python code,
please see :ref:`cporting-howto`.
+ If you would like to read one core Python developer's take on why Python 3
+ came into existence, you can read Nick Coghlan's `Python 3 Q & A`_.
-Choosing a Strategy
-===================
-
-When a project makes the decision that it's time to support both Python 2 & 3,
-a decision needs to be made as to how to go about accomplishing that goal.
-The chosen strategy will depend on how large the project's existing
-codebase is and how much divergence you want from your Python 2 codebase from
-your Python 3 one (e.g., starting a new version with Python 3).
-
-If your project is brand-new or does not have a large codebase, then you may
-want to consider writing/porting :ref:`all of your code for Python 3
-and use 3to2 <use_3to2>` to port your code for Python 2.
-
-If you would prefer to maintain a codebase which is semantically **and**
-syntactically compatible with Python 2 & 3 simultaneously, you can write
-:ref:`use_same_source`. While this tends to lead to somewhat non-idiomatic
-code, it does mean you keep a rapid development process for you, the developer.
-
-Finally, you do have the option of :ref:`using 2to3 <use_2to3>` to translate
-Python 2 code into Python 3 code (with some manual help). This can take the
-form of branching your code and using 2to3 to start a Python 3 branch. You can
-also have users perform the translation as installation time automatically so
-that you only have to maintain a Python 2 codebase.
-
-Regardless of which approach you choose, porting is not as hard or
-time-consuming as you might initially think. You can also tackle the problem
-piece-meal as a good portion of porting is simply updating your code to follow
-current best practices in a Python 2/3 compatible way.
-
-
-Universal Bits of Advice
-------------------------
-
-Regardless of what strategy you pick, there are a few things you should
-consider.
-
-One is make sure you have a robust test suite. You need to make sure everything
-continues to work, just like when you support a new minor version of Python.
-This means making sure your test suite is thorough and is ported properly
-between Python 2 & 3. You will also most likely want to use something like tox_
-to automate testing between both a Python 2 and Python 3 VM.
-
-Two, once your project has Python 3 support, make sure to add the proper
-classifier on the Cheeseshop_ (PyPI_). To have your project listed as Python 3
-compatible it must have the
-`Python 3 classifier <http://pypi.python.org/pypi?:action=browse&c=533>`_
-(from
-http://techspot.zzzeek.org/2011/01/24/zzzeek-s-guide-to-python-3-porting/)::
-
- setup(
- name='Your Library',
- version='1.0',
- classifiers=[
- # make sure to use :: Python *and* :: Python :: 3 so
- # that pypi can list the package on the python 3 page
- 'Programming Language :: Python',
- 'Programming Language :: Python :: 3'
- ],
- packages=['yourlibrary'],
- # make sure to add custom_fixers to the MANIFEST.in
- include_package_data=True,
- # ...
- )
-
-
-Doing so will cause your project to show up in the
-`Python 3 packages list
-<http://pypi.python.org/pypi?:action=browse&c=533&show=all>`_. You will know
-you set the classifier properly as visiting your project page on the Cheeseshop
-will show a Python 3 logo in the upper-left corner of the page.
-
-Three, the six_ project provides a library which helps iron out differences
-between Python 2 & 3. If you find there is a sticky point that is a continual
-point of contention in your translation or maintenance of code, consider using
-a source-compatible solution relying on six. If you have to create your own
-Python 2/3 compatible solution, you can use ``sys.version_info[0] >= 3`` as a
-guard.
-
-Four, read all the approaches. Just because some bit of advice applies to one
-approach more than another doesn't mean that some advice doesn't apply to other
-strategies.
-
-Five, drop support for older Python versions if possible. `Python 2.5`_
+ If you prefer to read a (free) book on porting a project to Python 3,
+ consider reading `Porting to Python 3`_ by Lennart Regebro which should cover
+ much of what is discussed in this HOWTO.
+
+ For help with porting, you can email the python-porting_ mailing list with
+ questions.
+
+The Short Version
+=================
+
+* Decide what's the oldest version of Python 2 you want to support (if at all)
+* Make sure you have a thorough test suite and use continuous integration
+ testing to make sure you stay compatible with the versions of Python you care
+ about
+* If you have dependencies, check their Python 3 status using caniusepython3
+ (`command-line tool <https://pypi.python.org/pypi/caniusepython3>`__,
+ `web app <https://caniusepython3.com/>`__)
+
+With that done, your options are:
+
+* If you are dropping Python 2 support, use 2to3_ to port to Python 3
+* If you are keeping Python 2 support, then start writing Python 2/3-compatible
+ code starting **TODAY**
+
+ + If you have dependencies that have not been ported, reach out to them to port
+ their project while working to make your code compatible with Python 3 so
+ you're ready when your dependencies are all ported
+ + If all your dependencies have been ported (or you have none), go ahead and
+ port to Python 3
+
+* If you are creating a new project that wants to have 2/3 compatibility,
+ code in Python 3 and then backport to Python 2
+
+
+Before You Begin
+================
+
+If your project is on the Cheeseshop_/PyPI_, make sure it has the proper
+`trove classifiers`_ to signify what versions of Python it **currently**
+supports. At minimum you should specify the major version(s), e.g.
+``Programming Language :: Python :: 2`` if your project currently only supports
+Python 2. It is preferable that you be as specific as possible by listing every
+major/minor version of Python that you support, e.g. if your project supports
+Python 2.6 and 2.7, then you want the classifiers of::
+
+ Programming Language :: Python :: 2
+ Programming Language :: Python :: 2.6
+ Programming Language :: Python :: 2.7
+
+Once your project supports Python 3 you will want to go back and add the
+appropriate classifiers for Python 3 as well. This is important as setting the
+``Programming Language :: Python :: 3`` classifier will lead to your project
+being listed under the `Python 3 Packages`_ section of PyPI.
+
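
For example, a minimal ``setup.py`` sketch (the project name, version and
supported releases are placeholders) carrying such classifiers might look
like::

   from distutils.core import setup

   setup(
       name='YourProject',
       version='1.0',
       packages=['yourproject'],
       classifiers=[
           'Programming Language :: Python',
           'Programming Language :: Python :: 2',
           'Programming Language :: Python :: 2.6',
           'Programming Language :: Python :: 2.7',
           # Add these once the project actually supports Python 3:
           # 'Programming Language :: Python :: 3',
           # 'Programming Language :: Python :: 3.3',
       ],
   )
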
+Make sure you have a robust test suite. You need to
+make sure everything continues to work, just like when you support a new
+minor/feature release of Python. This means making sure your test suite is
+thorough and is ported properly between Python 2 & 3 (consider using coverage_
+to measure that you have effective test coverage). You will also most likely
+want to use something like tox_ to automate testing between all of your
+supported versions of Python. You will also want to **port your tests first** so
+that you can make sure that you detect breakage during the transition. Tests also
+tend to be simpler than the code they are testing so it gives you an idea of how
+easy it can be to port code.
+
+Drop support for older Python versions if possible. `Python 2.5`_
introduced a lot of useful syntax and libraries which have become idiomatic
in Python 3. `Python 2.6`_ introduced future statements which makes
compatibility much easier if you are going from Python 2 to 3.
-`Python 2.7`_ continues the trend in the stdlib. So choose the newest version
+`Python 2.7`_ continues the trend in the stdlib. Choose the newest version
of Python which you believe can be your minimum support version
and work from there.
+Target the newest version of Python 3 that you can. Beyond just the usual
+bugfixes, compatibility has continued to improve between Python 2 and 3 as time
+has passed. E.g. Python 3.3 added back the ``u`` prefix for
+strings, making source-compatible Python code easier to write.
-.. _tox: http://codespeak.net/tox/
-.. _Cheeseshop:
-.. _PyPI: http://pypi.python.org/
-.. _six: http://packages.python.org/six
-.. _Python 2.7: http://www.python.org/2.7.x
-.. _Python 2.6: http://www.python.org/2.6.x
-.. _Python 2.5: http://www.python.org/2.5.x
-.. _Python 2.4: http://www.python.org/2.4.x
-.. _Python 2.3: http://www.python.org/2.3.x
-.. _Python 2.2: http://www.python.org/2.2.x
+Writing Source-Compatible Python 2/3 Code
+=========================================
-.. _use_3to2:
+Over the years the Python community has discovered that the easiest way to
+support both Python 2 and 3 in parallel is to write Python code that works in
+either version. While this might sound counter-intuitive at first, it actually
+is not difficult and typically only requires following some select
+(non-idiomatic) practices and using some key projects to help make bridging
+between Python 2 and 3 easier.
-Python 3 and 3to2
-=================
+Projects to Consider
+--------------------
-If you are starting a new project or your codebase is small enough, you may
-want to consider writing your code for Python 3 and backporting to Python 2
-using 3to2_. Thanks to Python 3 being more strict about things than Python 2
-(e.g., bytes vs. strings), the source translation can be easier and more
-straightforward than from Python 2 to 3. Plus it gives you more direct
-experience developing in Python 3 which, since it is the future of Python, is a
-good thing long-term.
+The lowest level library for supporting Python 2 & 3 simultaneously is six_.
+Reading through its documentation will give you an idea of where exactly the
+Python language changed between versions 2 & 3 and thus what you will want the
+library to help you continue to support.
-A drawback of this approach is that 3to2 is a third-party project. This means
-that the Python core developers (and thus this guide) can make no promises
-about how well 3to2 works at any time. There is nothing to suggest, though,
-that 3to2 is not a high-quality project.
+To help automate porting your code over to using six, you can use
+modernize_. This project will attempt to rewrite your code to be as modern as
+possible while using six to smooth out any differences between Python 2 & 3.
+If you want to write your compatible code to feel more like Python 3 there is
+the future_ project. It tries to provide backports of objects from Python 3 so
+that you can use them from Python 2-compatible code, e.g. replacing the
+``bytes`` type from Python 2 with the one from Python 3.
+It also provides a translation script like modernize (its translation code is
+actually partially based on it) to help start working with a pre-existing code
+base. It is also unique in that its translation script will also port Python 3
+code backwards as well as Python 2 code forwards.
-.. _3to2: https://bitbucket.org/amentajo/lib3to2/overview
+Tips & Tricks
+-------------
-.. _use_2to3:
-
-Python 2 and 2to3
-=================
-
-Included with Python since 2.6, the 2to3_ tool (and :mod:`lib2to3` module)
-helps with porting Python 2 to Python 3 by performing various source
-translations. This is a perfect solution for projects which wish to branch
-their Python 3 code from their Python 2 codebase and maintain them as
-independent codebases. You can even begin preparing to use this approach
-today by writing future-compatible Python code which works cleanly in
-Python 2 in conjunction with 2to3; all steps outlined below will work
-with Python 2 code up to the point when the actual use of 2to3 occurs.
-
-Use of 2to3 as an on-demand translation step at install time is also possible,
-preventing the need to maintain a separate Python 3 codebase, but this approach
-does come with some drawbacks. While users will only have to pay the
-translation cost once at installation, you as a developer will need to pay the
-cost regularly during development. If your codebase is sufficiently large
-enough then the translation step ends up acting like a compilation step,
-robbing you of the rapid development process you are used to with Python.
-Obviously the time required to translate a project will vary, so do an
-experimental translation just to see how long it takes to evaluate whether you
-prefer this approach compared to using :ref:`use_same_source` or simply keeping
-a separate Python 3 codebase.
-
-Below are the typical steps taken by a project which uses a 2to3-based approach
-to supporting Python 2 & 3.
-
+To help with writing source-compatible code using one of the projects mentioned
+in `Projects to Consider`_, consider following the suggestions below. Some of
+them are handled by the suggested projects, so if you do use one of them then
+read their documentation first to see which suggestions will be taken care of
+for you.
Support Python 2.7
-------------------
+//////////////////
As a first step, make sure that your project is compatible with `Python 2.7`_.
This is just good to do as Python 2.7 is the last release of Python 2 and thus
will be used for a rather long time. It also allows for use of the ``-3`` flag
-to Python to help discover places in your code which 2to3 cannot handle but are
-known to cause issues.
+to Python to help discover places in your code where compatibility might be an
+issue (the ``-3`` flag is in Python 2.6 but Python 2.7 adds more warnings).
Try to Support `Python 2.6`_ and Newer Only
--------------------------------------------
+///////////////////////////////////////////
While not possible for all projects, if you can support `Python 2.6`_ and newer
**only**, your life will be much easier. Various future statements, stdlib
additions, etc. exist only in Python 2.6 and later which greatly assist in
-porting to Python 3. But if you project must keep support for `Python 2.5`_ (or
-even `Python 2.4`_) then it is still possible to port to Python 3.
+supporting Python 3. But if your project must keep support for `Python 2.5`_ then
+it is still possible to simultaneously support Python 3.
Below are the benefits you gain if you only have to support Python 2.6 and
newer. Some of these options are personal choice while others are
**strongly** recommended (the ones that are more for personal choice are
labeled as such). If you continue to support older versions of Python then you
-at least need to watch out for situations that these solutions fix.
+at least need to watch out for situations that these solutions fix and handle
+them appropriately (which is where library help from e.g. six_ comes in handy).
``from __future__ import print_function``
'''''''''''''''''''''''''''''''''''''''''
-This is a personal choice. 2to3 handles the translation from the print
-statement to the print function rather well so this is an optional step. This
-future statement does help, though, with getting used to typing
-``print('Hello, World')`` instead of ``print 'Hello, World'``.
+It will not only get you used to typing ``print()`` as a function instead of a
+statement, but it will also give you the various benefits the function has over
+the Python 2 statement (six_ provides a function if you support Python 2.5 or
+older).
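
A minimal illustration (nothing here beyond the future statement itself)::

   from __future__ import print_function
   import sys

   # Works the same way on Python 2.6+ and Python 3.
   print('Hello, World')
   print('something odd happened', file=sys.stderr)
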
``from __future__ import unicode_literals``
'''''''''''''''''''''''''''''''''''''''''''
-Another personal choice. You can always mark what you want to be a (unicode)
-string with a ``u`` prefix to get the same effect. But regardless of whether
-you use this future statement or not, you **must** make sure you know exactly
-which Python 2 strings you want to be bytes, and which are to be strings. This
-means you should, **at minimum** mark all strings that are meant to be text
-strings with a ``u`` prefix if you do not use this future statement.
+If you choose to use this future statement then all string literals in
+Python 2 will be assumed to be Unicode (as is already the case in Python 3).
+If you choose not to use this future statement then you should mark all of your
+text strings with a ``u`` prefix and only support Python 3.3 or newer. But you
+are **strongly** advised to do one or the other (six_ provides a function in
+case you don't want to use the future statement **and** you want to support
+Python 3.2 or older).
-Bytes literals
-''''''''''''''
+Bytes/string literals
+'''''''''''''''''''''
-This is a **very** important one. The ability to prefix Python 2 strings that
-are meant to contain bytes with a ``b`` prefix help to very clearly delineate
-what is and is not a Python 3 string. When you run 2to3 on code, all Python 2
-strings become Python 3 strings **unless** they are prefixed with ``b``.
+This is a **very** important one. Prefix Python 2 strings that
+are meant to contain bytes with a ``b`` prefix to very clearly delineate
+what is and is not a Python 3 text string (six_ provides a function to use for
+Python 2.5 compatibility).
+
+This point cannot be stressed enough: make sure you know what all of your string
+literals in Python 2 are meant to be in Python 3. Any string literal that
+should be treated as bytes should have the ``b`` prefix. Any string literal
+that should be Unicode/text in Python 2 should either have the ``u`` literal
+(supported, but ignored, in Python 3.3 and later) or you should have
+``from __future__ import unicode_literals`` at the top of the file. But the key
+point is you should know how Python 3 will treat every one of your string
+literals and you should mark them as appropriate.
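
A hedged sketch of what explicitly marked literals look like (the module
contents are invented for illustration)::

   # -*- coding: utf-8 -*-
   from __future__ import unicode_literals

   # With the future statement above, this is text (unicode) on Python 2 and 3.
   GREETING = 'hello'

   # Data that really is bytes should say so explicitly in either version.
   MAGIC_HEADER = b'\x89PNG'

   # Without unicode_literals you would mark the text form explicitly instead:
   # GREETING = u'hello'   # the u prefix is accepted again from Python 3.3 on
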
There are some differences between byte literals in Python 2 and those in
Python 3 thanks to the bytes type just being an alias to ``str`` in Python 2.
-Probably the biggest "gotcha" is that indexing results in different values. In
-Python 2, the value of ``b'py'[1]`` is ``'y'``, while in Python 3 it's ``121``.
-You can avoid this disparity by always slicing at the size of a single element:
-``b'py'[1:2]`` is ``'y'`` in Python 2 and ``b'y'`` in Python 3 (i.e., close
-enough).
+See the `Handle Common "Gotchas"`_ section for what to watch out for.
-You cannot concatenate bytes and strings in Python 3. But since in Python
-2 has bytes aliased to ``str``, it will succeed: ``b'a' + u'b'`` works in
-Python 2, but ``b'a' + 'b'`` in Python 3 is a :exc:`TypeError`. A similar issue
-also comes about when doing comparisons between bytes and strings.
+``from __future__ import absolute_import``
+''''''''''''''''''''''''''''''''''''''''''
+Discussed in more detail below, but you should use this future statement to
+prevent yourself from accidentally using implicit relative imports.
Supporting `Python 2.5`_ and Newer Only
----------------------------------------
+///////////////////////////////////////
If you are supporting `Python 2.5`_ and newer there are still some features of
Python that you can utilize.
@@ -251,7 +224,7 @@ Python that you can utilize.
''''''''''''''''''''''''''''''''''''''''''
Implicit relative imports (e.g., importing ``spam.bacon`` from within
-``spam.eggs`` with the statement ``import bacon``) does not work in Python 3.
+``spam.eggs`` with the statement ``import bacon``) do not work in Python 3.
This future statement moves away from that and allows the use of explicit
relative imports (e.g., ``from . import bacon``).
@@ -261,16 +234,74 @@ implicit ones. In `Python 2.6`_ explicit relative imports are available without
the statement, but you still want the __future__ statement to prevent implicit
relative imports. In `Python 2.7`_ the __future__ statement is not needed. In
other words, unless you are only supporting Python 2.7 or a version earlier
-than Python 2.5, use the __future__ statement.
+than Python 2.5, use this __future__ statement.
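
As a sketch reusing the package names from the example above (``spam``,
``eggs`` and ``bacon``), a module inside the package would then read::

   # spam/eggs.py
   from __future__ import absolute_import

   # Explicit relative import of spam.bacon; a bare "import bacon" would now
   # be treated as an absolute import and fail unless a top-level module
   # named "bacon" exists.
   from . import bacon
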
+
+Mark all Unicode strings with a ``u`` prefix
+'''''''''''''''''''''''''''''''''''''''''''''
+
+While Python 2.6 has a ``__future__`` statement to automatically cause Python 2
+to treat all string literals as Unicode, Python 2.5 does not have that shortcut.
+This means you should go through and mark all string literals with a ``u``
+prefix to turn them explicitly into text strings where appropriate and only
+support Python 3.3 or newer. Otherwise use a project like six_ which provides a
+function to pass all text string literals through.
+
+
+Capturing the Currently Raised Exception
+''''''''''''''''''''''''''''''''''''''''
+
+In Python 2.5 and earlier the syntax to access the current exception is::
+
+ try:
+ raise Exception()
+ except Exception, exc:
+ # Current exception is 'exc'.
+ pass
+
+This syntax changed in Python 3 (and backported to `Python 2.6`_ and later)
+to::
+
+ try:
+ raise Exception()
+ except Exception as exc:
+ # Current exception is 'exc'.
+ # In Python 3, 'exc' is restricted to the block; in Python 2.6/2.7 it will "leak".
+ pass
+
+Because of this syntax change you must change how you capture the current
+exception in Python 2.5 and earlier to::
+
+ try:
+ raise Exception()
+ except Exception:
+ import sys
+ exc = sys.exc_info()[1]
+ # Current exception is 'exc'.
+ pass
+
+You can get more information about the raised exception from
+:func:`sys.exc_info` than simply the current exception instance, but you most
+likely don't need it.
+
+.. note::
+ In Python 3, the traceback is attached to the exception instance
+ through the ``__traceback__`` attribute. If the instance is saved in
+ a local variable that persists outside of the ``except`` block, the
+ traceback will create a reference cycle with the current frame and its
+ dictionary of local variables. This will delay reclaiming dead
+ resources until the next cyclic :term:`garbage collection` pass.
+
+ In Python 2, this problem only occurs if you save the traceback itself
+ (e.g. the third element of the tuple returned by :func:`sys.exc_info`)
+ in a variable.
Handle Common "Gotchas"
------------------------
+///////////////////////
-There are a few things that just consistently come up as sticking points for
-people which 2to3 cannot handle automatically or can easily be done in Python 2
-to help modernize your code.
+These are things to watch out for no matter what version of Python 2 you are
+supporting; they are not syntactic considerations.
``from __future__ import division``
@@ -327,9 +358,9 @@ One of the biggest issues people have when porting code to Python 3 is handling
the bytes/string dichotomy. Because Python 2 allowed the ``str`` type to hold
textual data, people have over the years been rather loose in their delineation
of what ``str`` instances held text compared to bytes. In Python 3 you cannot
-be so care-free anymore and need to properly handle the difference. The key
-handling this issue to make sure that **every** string literal in your
-Python 2 code is either syntactically of functionally marked as either bytes or
+be so care-free anymore and need to properly handle the difference. The key to
+handling this issue is to make sure that **every** string literal in your
+Python 2 code is either syntactically or functionally marked as either bytes or
text data. After this is done you then need to make sure your APIs are designed
to either handle a specific type or made to be properly polymorphic.
@@ -343,7 +374,7 @@ newer, this can be accomplished by marking bytes literals with a ``b`` prefix
and then designating textual data with a ``u`` prefix or using the
``unicode_literals`` future statement.
-If your project supports versions of Python pre-dating 2.6, then you should use
+If your project supports versions of Python predating 2.6, then you should use
the six_ project and its ``b()`` function to denote bytes literals. For text
literals you can either use six's ``u()`` function or use a ``u`` prefix.
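
If six_ is used here, the calls are just (an illustrative sketch of its
documented ``b()`` and ``u()`` helpers; six is a third-party project)::

   import six

   raw = six.b('\x00\x01binary payload')   # bytes on both Python 2 and 3
   text = six.u('caf\u00e9')               # text (unicode) on both versions

   assert isinstance(raw, six.binary_type)
   assert isinstance(text, six.text_type)
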
@@ -436,14 +467,7 @@ methods which have unpredictable results (e.g., infinite recursion if you
happen to use the ``unicode(self).encode('utf8')`` idiom as the body of your
``__str__()`` method).
-There are two ways to solve this issue. One is to use a custom 2to3 fixer. The
-blog post at http://lucumr.pocoo.org/2011/1/22/forwards-compatible-python/
-specifies how to do this. That will allow 2to3 to change all instances of ``def
-__unicode(self): ...`` to ``def __str__(self): ...``. This does require you
-define your ``__str__()`` method in Python 2 before your ``__unicode__()``
-method.
-
-The other option is to use a mixin class. This allows you to only define a
+You can use a mixin class to work around this. This allows you to only define a
``__unicode__()`` method for your class and let the mixin derive
``__str__()`` for you (code from
http://lucumr.pocoo.org/2011/1/22/forwards-compatible-python/)::
@@ -486,6 +510,7 @@ sequence containing all arguments passed to the :meth:`__init__` method.
Even better is to use the documented attributes the exception provides.
+
Don't use ``__getslice__`` & Friends
''''''''''''''''''''''''''''''''''''
@@ -497,189 +522,62 @@ friends.
Updating doctests
'''''''''''''''''
-2to3_ will attempt to generate fixes for doctests that it comes across. It's
-not perfect, though. If you wrote a monolithic set of doctests (e.g., a single
-docstring containing all of your doctests), you should at least consider
-breaking the doctests up into smaller pieces to make it more manageable to fix.
-Otherwise it might very well be worth your time and effort to port your tests
-to :mod:`unittest`.
+Don't forget to make them Python 2/3 compatible as well. If you wrote a
+monolithic set of doctests (e.g., a single docstring containing all of your
+doctests), you should at least consider breaking the doctests up into smaller
+pieces to make it more manageable to fix. Otherwise it might very well be worth
+your time and effort to port your tests to :mod:`unittest`.
+
+
+Update ``map`` for imbalanced input sequences
+'''''''''''''''''''''''''''''''''''''''''''''
+
+With Python 2, when ``map`` was given more than one input sequence it would pad
+the shorter sequences with ``None`` values, returning a sequence as long as the
+longest input sequence.
+With Python 3, if the input sequences to ``map`` are of unequal length, ``map``
+will stop at the termination of the shortest of the sequences. For full
+compatibility with ``map`` from Python 2.x, pad the inputs with
+:func:`itertools.zip_longest` and unpack each padded tuple with
+:func:`itertools.starmap`, e.g. ``map(func, *sequences)`` becomes
+``list(itertools.starmap(func, itertools.zip_longest(*sequences)))``.
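
A hedged sketch of the behavioural difference (runnable under Python 3, where
the function is named ``zip_longest``; the helper and data are invented)::

   import itertools

   def pair(a, b):
       return (a, b)

   names = ['ann', 'bob', 'cho']
   scores = [10, 20]

   # Python 3: map() stops at the end of the shortest input.
   print(list(map(pair, names, scores)))
   # -> [('ann', 10), ('bob', 20)]

   # Padding the shorter input restores the Python 2 behaviour.
   print(list(itertools.starmap(pair, itertools.zip_longest(names, scores))))
   # -> [('ann', 10), ('bob', 20), ('cho', None)]
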
Eliminate ``-3`` Warnings
-------------------------
When you run your application's test suite, run it using the ``-3`` flag passed
to Python. This will cause various warnings to be raised during execution about
-things that 2to3 cannot handle automatically (e.g., modules that have been
-removed). Try to eliminate those warnings to make your code even more portable
-to Python 3.
-
-
-Run 2to3
---------
-
-Once you have made your Python 2 code future-compatible with Python 3, it's
-time to use 2to3_ to actually port your code.
-
-
-Manually
-''''''''
-
-To manually convert source code using 2to3_, you use the ``2to3`` script that
-is installed with Python 2.6 and later.::
-
- 2to3 <directory or file to convert>
-
-This will cause 2to3 to write out a diff with all of the fixers applied for the
-converted source code. If you would like 2to3 to go ahead and apply the changes
-you can pass it the ``-w`` flag::
-
- 2to3 -w <stuff to convert>
+things that are semantic changes between Python 2 and 3. Try to eliminate those
+warnings to make your code even more portable to Python 3.
-There are other flags available to control exactly which fixers are applied,
-etc.
+Alternative Approaches
+======================
-During Installation
-'''''''''''''''''''
-
-When a user installs your project for Python 3, you can have either
-:mod:`distutils` or Distribute_ run 2to3_ on your behalf.
-For distutils, use the following idiom::
-
- try: # Python 3
- from distutils.command.build_py import build_py_2to3 as build_py
- except ImportError: # Python 2
- from distutils.command.build_py import build_py
-
- setup(cmdclass = {'build_py': build_py},
- # ...
- )
-
-For Distribute::
-
- setup(use_2to3=True,
- # ...
- )
-
-This will allow you to not have to distribute a separate Python 3 version of
-your project. It does require, though, that when you perform development that
-you at least build your project and use the built Python 3 source for testing.
-
-
-Verify & Test
--------------
-
-At this point you should (hopefully) have your project converted in such a way
-that it works in Python 3. Verify it by running your unit tests and making sure
-nothing has gone awry. If you miss something then figure out how to fix it in
-Python 3, backport to your Python 2 code, and run your code through 2to3 again
-to verify the fix transforms properly.
-
-
-.. _2to3: http://docs.python.org/py3k/library/2to3.html
-.. _Distribute: http://packages.python.org/distribute/
-
-
-.. _use_same_source:
-
-Python 2/3 Compatible Source
-============================
-
-While it may seem counter-intuitive, you can write Python code which is
-source-compatible between Python 2 & 3. It does lead to code that is not
-entirely idiomatic Python (e.g., having to extract the currently raised
-exception from ``sys.exc_info()[1]``), but it can be run under Python 2
-**and** Python 3 without using 2to3_ as a translation step (although the tool
-should be used to help find potential portability problems). This allows you to
-continue to have a rapid development process regardless of whether you are
-developing under Python 2 or Python 3. Whether this approach or using
-:ref:`use_2to3` works best for you will be a per-project decision.
-
-To get a complete idea of what issues you will need to deal with, see the
-`What's New in Python 3.0`_. Others have reorganized the data in other formats
-such as http://docs.pythonsprints.com/python3_porting/py-porting.html .
-
-The following are some steps to take to try to support both Python 2 & 3 from
-the same source code.
+While supporting Python 2 & 3 simultaneously is typically the preferred choice,
+so that code can continue to improve and work for the largest number of users,
+your life may be easier if you only have to support one major version of
+Python going forward.
+Supporting Only Python 3 Going Forward From Python 2 Code
+---------------------------------------------------------
-.. _What's New in Python 3.0: http://docs.python.org/release/3.0/whatsnew/3.0.html
+If you have Python 2 code but going forward only want to improve it as Python 3
+code, then you can use 2to3_ to translate your Python 2 code to Python 3 code.
+This is only recommended, though, if your current version of your project is
+going into maintenance mode and you want all new features to be exclusive to
+Python 3.
-Follow The Steps for Using 2to3_
---------------------------------
+Backporting Python 3 code to Python 2
+-------------------------------------
-All of the steps outlined in how to
-:ref:`port Python 2 code with 2to3 <use_2to3>` apply
-to creating a Python 2/3 codebase. This includes trying only support Python 2.6
-or newer (the :mod:`__future__` statements work in Python 3 without issue),
-eliminating warnings that are triggered by ``-3``, etc.
-
-You should even consider running 2to3_ over your code (without committing the
-changes). This will let you know where potential pain points are within your
-code so that you can fix them properly before they become an issue.
-
-
-Use six_
---------
-
-The six_ project contains many things to help you write portable Python code.
-You should make sure to read its documentation from beginning to end and use
-any and all features it provides. That way you will minimize any mistakes you
-might make in writing cross-version code.
-
-
-Capturing the Currently Raised Exception
-----------------------------------------
-
-One change between Python 2 and 3 that will require changing how you code (if
-you support `Python 2.5`_ and earlier) is
-accessing the currently raised exception. In Python 2.5 and earlier the syntax
-to access the current exception is::
-
- try:
- raise Exception()
- except Exception, exc:
- # Current exception is 'exc'
- pass
-
-This syntax changed in Python 3 (and backported to `Python 2.6`_ and later)
-to::
-
- try:
- raise Exception()
- except Exception as exc:
- # Current exception is 'exc'
- # In Python 3, 'exc' is restricted to the block; Python 2.6 will "leak"
- pass
-
-Because of this syntax change you must change to capturing the current
-exception to::
-
- try:
- raise Exception()
- except Exception:
- import sys
- exc = sys.exc_info()[1]
- # Current exception is 'exc'
- pass
-
-You can get more information about the raised exception from
-:func:`sys.exc_info` than simply the current exception instance, but you most
-likely don't need it.
-
-.. note::
- In Python 3, the traceback is attached to the exception instance
- through the ``__traceback__`` attribute. If the instance is saved in
- a local variable that persists outside of the ``except`` block, the
- traceback will create a reference cycle with the current frame and its
- dictionary of local variables. This will delay reclaiming dead
- resources until the next cyclic :term:`garbage collection` pass.
-
- In Python 2, this problem only occurs if you save the traceback itself
- (e.g. the third element of the tuple returned by :func:`sys.exc_info`)
- in a variable.
+If you have Python 3 code and have little interest in supporting Python 2 you
+can use 3to2_ to translate from Python 3 code to Python 2 code. This is only
+recommended if you don't plan to heavily support Python 2 users. Otherwise
+write your code for Python 3 and then backport as far back as you want. This
+is typically easier than going from Python 2 to 3 as you will have worked out
+any difficulties such as the bytes/string distinction.
Other Resources
@@ -687,17 +585,41 @@ Other Resources
The authors of the following blog posts, wiki pages, and books deserve special
thanks for making public their tips for porting Python 2 code to Python 3 (and
-thus helping provide information for this document):
+thus helping provide information for this document and its various revisions
+over the years):
+* http://wiki.python.org/moin/PortingPythonToPy3k
* http://python3porting.com/
* http://docs.pythonsprints.com/python3_porting/py-porting.html
* http://techspot.zzzeek.org/2011/01/24/zzzeek-s-guide-to-python-3-porting/
* http://dabeaz.blogspot.com/2011/01/porting-py65-and-my-superboard-to.html
* http://lucumr.pocoo.org/2011/1/22/forwards-compatible-python/
* http://lucumr.pocoo.org/2010/2/11/porting-to-python-3-a-guide/
-* http://wiki.python.org/moin/PortingPythonToPy3k
+* https://wiki.ubuntu.com/Python/3
If you feel there is something missing from this document that should be added,
please email the python-porting_ mailing list.
+
+
+.. _2to3: http://docs.python.org/2/library/2to3.html
+.. _3to2: https://pypi.python.org/pypi/3to2
+.. _Cheeseshop: PyPI_
+.. _coverage: https://pypi.python.org/pypi/coverage
+.. _future: http://python-future.org/
+.. _modernize: https://github.com/mitsuhiko/python-modernize
+.. _Porting to Python 3: http://python3porting.com/
+.. _PyPI: http://pypi.python.org/
+.. _Python 2.2: http://www.python.org/2.2.x
+.. _Python 2.5: http://www.python.org/2.5.x
+.. _Python 2.6: http://www.python.org/2.6.x
+.. _Python 2.7: http://www.python.org/2.7.x
+.. _Python 3.3: http://www.python.org/3.3.x
+.. _Python 3 Packages: https://pypi.python.org/pypi?:action=browse&c=533&show=all
+.. _Python 3 Q & A: http://ncoghlan-devs-python-notes.readthedocs.org/en/latest/python3/questions_and_answers.html
.. _python-porting: http://mail.python.org/mailman/listinfo/python-porting
+.. _six: https://pypi.python.org/pypi/six
+.. _tox: https://pypi.python.org/pypi/tox
+.. _trove classifiers: https://pypi.python.org/pypi?%3Aaction=list_classifiers
+
diff --git a/Doc/howto/regex.rst b/Doc/howto/regex.rst
index 1523c48..2f552e3 100644
--- a/Doc/howto/regex.rst
+++ b/Doc/howto/regex.rst
@@ -265,7 +265,7 @@ performing string substitutions. ::
>>> import re
>>> p = re.compile('ab*')
- >>> print p
+ >>> p #doctest: +ELLIPSIS
<_sre.SRE_Pattern object at 0x...>
:func:`re.compile` also accepts an optional *flags* argument, used to enable
@@ -359,13 +359,13 @@ for a complete listing.
+------------------+-----------------------------------------------+
:meth:`match` and :meth:`search` return ``None`` if no match can be found. If
-they're successful, a ``MatchObject`` instance is returned, containing
-information about the match: where it starts and ends, the substring it matched,
-and more.
+they're successful, a :ref:`match object <match-objects>` instance is returned,
+containing information about the match: where it starts and ends, the substring
+it matched, and more.
You can learn about this by interactively experimenting with the :mod:`re`
module. If you have Tkinter available, you may also want to look at
-:file:`Tools/scripts/redemo.py`, a demonstration program included with the
+:source:`Tools/scripts/redemo.py`, a demonstration program included with the
Python distribution. It allows you to enter REs and strings, and displays
whether the RE matches or fails. :file:`redemo.py` can be quite useful when
trying to debug a complicated RE. Phil Schwartz's `Kodos
@@ -378,7 +378,7 @@ Python interpreter, import the :mod:`re` module, and compile a RE::
Python 2.2.2 (#1, Feb 10 2003, 12:57:01)
>>> import re
>>> p = re.compile('[a-z]+')
- >>> p
+ >>> p #doctest: +ELLIPSIS
<_sre.SRE_Pattern object at 0x...>
Now, you can try matching various strings against the RE ``[a-z]+``. An empty
@@ -392,16 +392,16 @@ interpreter to print no output. You can explicitly print the result of
None
Now, let's try it on a string that it should match, such as ``tempo``. In this
-case, :meth:`match` will return a :class:`MatchObject`, so you should store the
-result in a variable for later use. ::
+case, :meth:`match` will return a :ref:`match object <match-objects>`, so you
+should store the result in a variable for later use. ::
>>> m = p.match('tempo')
- >>> print m
+ >>> m #doctest: +ELLIPSIS
<_sre.SRE_Match object at 0x...>
-Now you can query the :class:`MatchObject` for information about the matching
-string. :class:`MatchObject` instances also have several methods and
-attributes; the most important ones are:
+Now you can query the :ref:`match object <match-objects>` for information
+about the matching string. :ref:`match object <match-objects>` instances
+also have several methods and attributes; the most important ones are:
+------------------+--------------------------------------------+
| Method/Attribute | Purpose |
@@ -435,15 +435,16 @@ case. ::
>>> print p.match('::: message')
None
- >>> m = p.search('::: message') ; print m
+ >>> m = p.search('::: message'); print m #doctest: +ELLIPSIS
<_sre.SRE_Match object at 0x...>
>>> m.group()
'message'
>>> m.span()
(4, 11)
-In actual programs, the most common style is to store the :class:`MatchObject`
-in a variable, and then check if it was ``None``. This usually looks like::
+In actual programs, the most common style is to store the
+:ref:`match object <match-objects>` in a variable, and then check if it was
+``None``. This usually looks like::
p = re.compile( ... )
m = p.match( 'string goes here' )
@@ -460,12 +461,12 @@ Two pattern methods return all of the matches for a pattern.
['12', '11', '10']
:meth:`findall` has to create the entire list before it can be returned as the
-result. The :meth:`finditer` method returns a sequence of :class:`MatchObject`
-instances as an :term:`iterator`. [#]_ ::
+result. The :meth:`finditer` method returns a sequence of
+:ref:`match object <match-objects>` instances as an :term:`iterator`. [#]_ ::
>>> iterator = p.finditer('12 drummers drumming, 11 ... 10 ...')
- >>> iterator
- <callable-iterator object at 0x401833ac>
+ >>> iterator #doctest: +ELLIPSIS
+ <callable-iterator object at 0x...>
>>> for match in iterator:
... print match.span()
...
@@ -482,11 +483,11 @@ You don't have to create a pattern object and call its methods; the
:func:`search`, :func:`findall`, :func:`sub`, and so forth. These functions
take the same arguments as the corresponding pattern method, with
the RE string added as the first argument, and still return either ``None`` or a
-:class:`MatchObject` instance. ::
+:ref:`match object <match-objects>` instance. ::
>>> print re.match(r'From\s+', 'Fromage amk')
None
- >>> re.match(r'From\s+', 'From amk Thu May 14 19:12:10 1998')
+ >>> re.match(r'From\s+', 'From amk Thu May 14 19:12:10 1998') #doctest: +ELLIPSIS
<_sre.SRE_Match object at 0x...>
Under the hood, these functions simply create a pattern object for you
@@ -501,7 +502,7 @@ more convenient. If a program contains a lot of regular expressions, or re-uses
the same ones in several locations, then it might be worthwhile to collect all
the definitions in one place, in a section of code that compiles all the REs
ahead of time. To take an example from the standard library, here's an extract
-from :file:`xmllib.py`::
+from the deprecated :mod:`xmllib` module::
ref = re.compile( ... )
entityref = re.compile( ... )
@@ -687,7 +688,7 @@ given location, they can obviously be matched an infinite number of times.
For example, if you wish to match the word ``From`` only at the beginning of a
line, the RE to use is ``^From``. ::
- >>> print re.search('^From', 'From Here to Eternity')
+ >>> print re.search('^From', 'From Here to Eternity') #doctest: +ELLIPSIS
<_sre.SRE_Match object at 0x...>
>>> print re.search('^From', 'Reciting From Memory')
None
@@ -699,11 +700,11 @@ given location, they can obviously be matched an infinite number of times.
Matches at the end of a line, which is defined as either the end of the string,
or any location followed by a newline character. ::
- >>> print re.search('}$', '{block}')
+ >>> print re.search('}$', '{block}') #doctest: +ELLIPSIS
<_sre.SRE_Match object at 0x...>
>>> print re.search('}$', '{block} ')
None
- >>> print re.search('}$', '{block}\n')
+ >>> print re.search('}$', '{block}\n') #doctest: +ELLIPSIS
<_sre.SRE_Match object at 0x...>
To match a literal ``'$'``, use ``\$`` or enclose it inside a character class,
@@ -728,7 +729,7 @@ given location, they can obviously be matched an infinite number of times.
match when it's contained inside another word. ::
>>> p = re.compile(r'\bclass\b')
- >>> print p.search('no class at all')
+ >>> print p.search('no class at all') #doctest: +ELLIPSIS
<_sre.SRE_Match object at 0x...>
>>> print p.search('the declassified algorithm')
None
@@ -746,7 +747,7 @@ given location, they can obviously be matched an infinite number of times.
>>> p = re.compile('\bclass\b')
>>> print p.search('no class at all')
None
- >>> print p.search('\b' + 'class' + '\b')
+ >>> print p.search('\b' + 'class' + '\b') #doctest: +ELLIPSIS
<_sre.SRE_Match object at 0x...>
Second, inside a character class, where there's no use for this assertion,
@@ -791,9 +792,9 @@ Groups indicated with ``'('``, ``')'`` also capture the starting and ending
index of the text that they match; this can be retrieved by passing an argument
to :meth:`group`, :meth:`start`, :meth:`end`, and :meth:`span`. Groups are
numbered starting with 0. Group 0 is always present; it's the whole RE, so
-:class:`MatchObject` methods all have group 0 as their default argument. Later
-we'll see how to express groups that don't capture the span of text that they
-match. ::
+:ref:`match object <match-objects>` methods all have group 0 as their default
+argument. Later we'll see how to express groups that don't capture the span
+of text that they match. ::
>>> p = re.compile('(a)b')
>>> m = p.match('ab')
@@ -913,10 +914,10 @@ numbers, groups can be referenced by a name.
The syntax for a named group is one of the Python-specific extensions:
``(?P<name>...)``. *name* is, obviously, the name of the group. Named groups
also behave exactly like capturing groups, and additionally associate a name
-with a group. The :class:`MatchObject` methods that deal with capturing groups
-all accept either integers that refer to the group by number or strings that
-contain the desired group's name. Named groups are still given numbers, so you
-can retrieve information about a group in two ways::
+with a group. The :ref:`match object <match-objects>` methods that deal with
+capturing groups all accept either integers that refer to the group by number
+or strings that contain the desired group's name. Named groups are still
+given numbers, so you can retrieve information about a group in two ways::
>>> p = re.compile(r'(?P<word>\b\w+\b)')
>>> m = p.search( '(((( Lots of punctuation )))' )
@@ -1180,16 +1181,16 @@ three variations of the replacement string. ::
*replacement* can also be a function, which gives you even more control. If
*replacement* is a function, the function is called for every non-overlapping
-occurrence of *pattern*. On each call, the function is passed a
-:class:`MatchObject` argument for the match and can use this information to
-compute the desired replacement string and return it.
+occurrence of *pattern*. On each call, the function is passed a
+:ref:`match object <match-objects>` argument for the match and can use this
+information to compute the desired replacement string and return it.
-In the following example, the replacement function translates decimals into
+In the following example, the replacement function translates decimals into
hexadecimal::
- >>> def hexrepl( match ):
+ >>> def hexrepl(match):
... "Return the hex string for a decimal number"
- ... value = int( match.group() )
+ ... value = int(match.group())
... return hex(value)
...
>>> p = re.compile(r'\d+')
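
For reference, the replacement-function form of ``sub()`` touched in the hunk above can be exercised as a short interactive session (a sketch; the sample string is assumed)::

    >>> import re
    >>> def hexrepl(match):
    ...     "Return the hex string for a decimal number"
    ...     return hex(int(match.group()))
    ...
    >>> p = re.compile(r'\d+')
    >>> p.sub(hexrepl, 'Call 65490 for printing, 49152 for user code.')
    'Call 0xffd2 for printing, 0xc000 for user code.'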
diff --git a/Doc/howto/sockets.rst b/Doc/howto/sockets.rst
index f15d659..f8ac348 100644
--- a/Doc/howto/sockets.rst
+++ b/Doc/howto/sockets.rst
@@ -19,12 +19,6 @@
Sockets
=======
-Sockets are used nearly everywhere, but are one of the most severely
-misunderstood technologies around. This is a 10,000 foot overview of sockets.
-It's not really a tutorial - you'll still have work to do in getting things
-working. It doesn't cover the fine points (and there are a lot of them), but I
-hope it will give you enough background to begin using them decently.
-
I'm only going to talk about INET sockets, but they account for at least 99% of
the sockets in use. And I'll only talk about STREAM sockets - unless you really
know what you're doing (in which case this HOWTO isn't for you!), you'll get
@@ -88,9 +82,11 @@ creates a "server socket"::
serversocket.listen(5)
A couple things to notice: we used ``socket.gethostname()`` so that the socket
-would be visible to the outside world. If we had used ``s.bind(('', 80))`` or
-``s.bind(('localhost', 80))`` or ``s.bind(('127.0.0.1', 80))`` we would still
-have a "server" socket, but one that was only visible within the same machine.
+would be visible to the outside world. If we had used ``s.bind(('localhost',
+80))`` or ``s.bind(('127.0.0.1', 80))`` we would still have a "server" socket,
+but one that was only visible within the same machine. ``s.bind(('', 80))``
+specifies that the socket is reachable by any address the machine happens to
+have.
A second thing to note: low number ports are usually reserved for "well known"
services (HTTP, SNMP etc). If you're playing around, use a nice high number (4
@@ -156,7 +152,7 @@ I'm not going to talk about it here, except to warn you that you need to use
there, you may wait forever for the reply, because the request may still be in
your output buffer.
-Now we come the major stumbling block of sockets - ``send`` and ``recv`` operate
+Now we come to the major stumbling block of sockets - ``send`` and ``recv`` operate
on the network buffers. They do not necessarily handle all the bytes you hand
them (or expect from them), because their major focus is handling the network
buffers. In general, they return when the associated network buffers have been
@@ -167,7 +163,7 @@ been completely dealt with.
When a ``recv`` returns 0 bytes, it means the other side has closed (or is in
the process of closing) the connection. You will not receive any more data on
this connection. Ever. You may be able to send data successfully; I'll talk
-about that some on the next page.
+more about this later.
A protocol like HTTP uses a socket for only one transfer. The client sends a
request, then reads a reply. That's it. The socket is discarded. This means that
@@ -211,13 +207,15 @@ length message::
totalsent = totalsent + sent
def myreceive(self):
- msg = ''
- while len(msg) < MSGLEN:
- chunk = self.sock.recv(MSGLEN-len(msg))
+ chunks = []
+ bytes_recd = 0
+ while bytes_recd < MSGLEN:
+ chunk = self.sock.recv(min(MSGLEN - bytes_recd, 2048))
if chunk == '':
raise RuntimeError("socket connection broken")
- msg = msg + chunk
- return msg
+ chunks.append(chunk)
+ bytes_recd = bytes_recd + len(chunk)
+ return ''.join(chunks)
The sending code here is usable for almost any messaging scheme - in Python you
send strings, and you can use ``len()`` to determine its length (even if it has
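
The rewritten ``myreceive`` above collects chunks in a list and joins them once, capping each ``recv`` at 2048 bytes. A minimal standalone sketch of the same pattern (``MSGLEN`` and a connected ``sock`` are assumed)::

    MSGLEN = 2048

    def myreceive(sock):
        chunks = []
        bytes_recd = 0
        while bytes_recd < MSGLEN:
            # never request more than 2048 bytes per call
            chunk = sock.recv(min(MSGLEN - bytes_recd, 2048))
            if chunk == '':
                raise RuntimeError("socket connection broken")
            chunks.append(chunk)
            bytes_recd = bytes_recd + len(chunk)
        return ''.join(chunks)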
diff --git a/Doc/howto/sorting.rst b/Doc/howto/sorting.rst
index 9aa39f7..56b65b0 100644
--- a/Doc/howto/sorting.rst
+++ b/Doc/howto/sorting.rst
@@ -124,7 +124,7 @@ Ascending and Descending
========================
Both :meth:`list.sort` and :func:`sorted` accept a *reverse* parameter with a
-boolean value. This is using to flag descending sorts. For example, to get the
+boolean value. This is used to flag descending sorts. For example, to get the
student data in reverse *age* order:
>>> sorted(student_tuples, key=itemgetter(2), reverse=True)
@@ -210,11 +210,11 @@ there was no :func:`sorted` builtin and :meth:`list.sort` took no keyword
arguments. Instead, all of the Py2.x versions supported a *cmp* parameter to
handle user specified comparison functions.
-In Py3.0, the *cmp* parameter was removed entirely (as part of a larger effort to
+In Python 3, the *cmp* parameter was removed entirely (as part of a larger effort to
simplify and unify the language, eliminating the conflict between rich
comparisons and the :meth:`__cmp__` magic method).
-In Py2.x, sort allowed an optional function which can be called for doing the
+In Python 2, :meth:`~list.sort` allowed an optional function which can be called for doing the
comparisons. That function should take two arguments to be compared and then
return a negative value for less-than, return zero if they are equal, or return
a positive value for greater-than. For example, we can do:
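
A quick interactive sketch of the two behaviours discussed in this hunk, descending sorts via *reverse* and a Python 2 *cmp* function (sample data assumed)::

    >>> sorted([5, 2, 4, 1, 3], reverse=True)
    [5, 4, 3, 2, 1]
    >>> def numeric_compare(x, y):
    ...     return x - y
    ...
    >>> sorted([5, 2, 4, 1, 3], cmp=numeric_compare)
    [1, 2, 3, 4, 5]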
diff --git a/Doc/howto/unicode.rst b/Doc/howto/unicode.rst
index ff3c721..297e87e 100644
--- a/Doc/howto/unicode.rst
+++ b/Doc/howto/unicode.rst
@@ -6,8 +6,8 @@
This HOWTO discusses Python 2.x's support for Unicode, and explains
various problems that people commonly encounter when trying to work
-with Unicode. (This HOWTO has not yet been updated to cover the 3.x
-versions of Python.)
+with Unicode. For the Python 3 version, see
+<http://docs.python.org/py3k/howto/unicode.html>.
Introduction to Unicode
=======================
@@ -49,7 +49,7 @@ another and managed to catch on.
255 characters aren't very many. For example, you can't fit both the accented
characters used in Western Europe and the Cyrillic alphabet used for Russian
-into the 128-255 range because there are more than 127 such characters.
+into the 128-255 range because there are more than 128 such characters.
You could write files using different codes (all your Russian files in a coding
system called KOI8, all your French files in a different coding system called
@@ -253,11 +253,11 @@ characters greater than 127 will be treated as errors::
>>> s = unicode('abcdef')
>>> type(s)
<type 'unicode'>
- >>> unicode('abcdef' + chr(255))
+ >>> unicode('abcdef' + chr(255)) #doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
- File "<stdin>", line 1, in ?
+ ...
UnicodeDecodeError: 'ascii' codec can't decode byte 0xff in position 6:
- ordinal not in range(128)
+ ordinal not in range(128)
The ``errors`` argument specifies the response when the input string can't be
converted according to the encoding's rules. Legal values for this argument are
@@ -265,11 +265,11 @@ converted according to the encoding's rules. Legal values for this argument are
'REPLACEMENT CHARACTER'), or 'ignore' (just leave the character out of the
Unicode result). The following examples show the differences::
- >>> unicode('\x80abc', errors='strict')
+ >>> unicode('\x80abc', errors='strict') #doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
- File "<stdin>", line 1, in ?
+ ...
UnicodeDecodeError: 'ascii' codec can't decode byte 0x80 in position 0:
- ordinal not in range(128)
+ ordinal not in range(128)
>>> unicode('\x80abc', errors='replace')
u'\ufffdabc'
>>> unicode('\x80abc', errors='ignore')
@@ -312,10 +312,11 @@ strings. 8-bit strings will be converted to Unicode before carrying out the
operation; Python's default ASCII encoding will be used, so characters greater
than 127 will cause an exception::
- >>> s.find('Was\x9f')
+ >>> s.find('Was\x9f') #doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
- File "<stdin>", line 1, in ?
- UnicodeDecodeError: 'ascii' codec can't decode byte 0x9f in position 3: ordinal not in range(128)
+ ...
+ UnicodeDecodeError: 'ascii' codec can't decode byte 0x9f in position 3:
+ ordinal not in range(128)
>>> s.find(u'Was\x9f')
-1
@@ -333,10 +334,11 @@ character references. The following example shows the different results::
>>> u = unichr(40960) + u'abcd' + unichr(1972)
>>> u.encode('utf-8')
'\xea\x80\x80abcd\xde\xb4'
- >>> u.encode('ascii')
+ >>> u.encode('ascii') #doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
- File "<stdin>", line 1, in ?
- UnicodeEncodeError: 'ascii' codec can't encode character '\ua000' in position 0: ordinal not in range(128)
+ ...
+ UnicodeEncodeError: 'ascii' codec can't encode character u'\ua000' in
+ position 0: ordinal not in range(128)
>>> u.encode('ascii', 'ignore')
'abcd'
>>> u.encode('ascii', 'replace')
@@ -384,9 +386,9 @@ arbitrary code point. Octal escapes can go up to U+01ff, which is octal 777.
::
>>> s = u"a\xac\u1234\u20ac\U00008000"
- ^^^^ two-digit hex escape
- ^^^^^^ four-digit Unicode escape
- ^^^^^^^^^^ eight-digit Unicode escape
+ ... # ^^^^ two-digit hex escape
+ ... # ^^^^^^ four-digit Unicode escape
+ ... # ^^^^^^^^^^ eight-digit Unicode escape
>>> for c in s: print ord(c),
...
97 172 4660 8364 32768
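
The *errors* argument behaviour adjusted in the hunks above can be checked under Python 2 as follows (the non-ASCII input bytes are assumed)::

    >>> unicode('\x80abc', errors='replace')
    u'\ufffdabc'
    >>> unicode('\x80abc', errors='ignore')
    u'abc'
    >>> u'\ua000abcd'.encode('ascii', 'replace')
    '?abcd'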
diff --git a/Doc/howto/urllib2.rst b/Doc/howto/urllib2.rst
index 6c80c77..d13f174 100644
--- a/Doc/howto/urllib2.rst
+++ b/Doc/howto/urllib2.rst
@@ -18,7 +18,7 @@ Introduction
.. sidebar:: Related Articles
You may also find useful the following article on fetching web resources
- with Python :
+ with Python:
* `Basic Authentication <http://www.voidspace.org.uk/python/articles/authentication.shtml>`_
@@ -134,7 +134,7 @@ This is done as follows::
>>> data['location'] = 'Northampton'
>>> data['language'] = 'Python'
>>> url_values = urllib.urlencode(data)
- >>> print url_values
+ >>> print url_values # The order may differ. #doctest: +SKIP
name=Somebody+Here&language=Python&location=Northampton
>>> url = 'http://www.example.com/example.cgi'
>>> full_url = url + '?' + url_values
@@ -150,7 +150,7 @@ We'll discuss here one particular HTTP header, to illustrate how to add headers
to your HTTP request.
Some websites [#]_ dislike being browsed by programs, or send different versions
-to different browsers [#]_ . By default urllib2 identifies itself as
+to different browsers [#]_. By default urllib2 identifies itself as
``Python-urllib/x.y`` (where ``x`` and ``y`` are the major and minor version
numbers of the Python release,
e.g. ``Python-urllib/2.5``), which may confuse the site, or just plain
@@ -201,9 +201,9 @@ e.g. ::
>>> req = urllib2.Request('http://www.pretend_server.org')
>>> try: urllib2.urlopen(req)
- >>> except URLError, e:
- >>> print e.reason
- >>>
+ ... except URLError as e:
+ ... print e.reason #doctest: +SKIP
+ ...
(4, 'getaddrinfo failed')
@@ -309,18 +309,18 @@ geturl, and info, methods. ::
>>> req = urllib2.Request('http://www.python.org/fish.html')
>>> try:
- >>> urllib2.urlopen(req)
- >>> except HTTPError, e:
- >>> print e.code
- >>> print e.read()
- >>>
+ ... urllib2.urlopen(req)
+ ... except urllib2.HTTPError as e:
+ ... print e.code
+ ... print e.read() #doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
+ ...
404
- <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
- "http://www.w3.org/TR/html4/loose.dtd">
- <?xml-stylesheet href="./css/ht2html.css"
- type="text/css"?>
- <html><head><title>Error 404: File Not Found</title>
- ...... etc...
+ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+ ...
+ <title>Page Not Found</title>
+ ...
+
Wrapping it Up
--------------
@@ -338,10 +338,10 @@ Number 1
req = Request(someurl)
try:
response = urlopen(req)
- except HTTPError, e:
+ except HTTPError as e:
print 'The server couldn\'t fulfill the request.'
print 'Error code: ', e.code
- except URLError, e:
+ except URLError as e:
print 'We failed to reach a server.'
print 'Reason: ', e.reason
else:
@@ -362,7 +362,7 @@ Number 2
req = Request(someurl)
try:
response = urlopen(req)
- except URLError, e:
+ except URLError as e:
if hasattr(e, 'reason'):
print 'We failed to reach a server.'
print 'Reason: ', e.reason
@@ -439,12 +439,12 @@ Authentication Tutorial
When authentication is required, the server sends a header (as well as the 401
error code) requesting authentication. This specifies the authentication scheme
-and a 'realm'. The header looks like : ``Www-authenticate: SCHEME
+and a 'realm'. The header looks like: ``WWW-Authenticate: SCHEME
realm="REALM"``.
e.g. ::
- Www-authenticate: Basic realm="cPanel Users"
+ WWW-Authenticate: Basic realm="cPanel Users"
The client should then retry the request with the appropriate name and password
@@ -489,7 +489,8 @@ than the URL you pass to .add_password() will also match. ::
In the above example we only supplied our ``HTTPBasicAuthHandler`` to
``build_opener``. By default openers have the handlers for normal situations
- -- ``ProxyHandler``, ``UnknownHandler``, ``HTTPHandler``,
+ -- ``ProxyHandler`` (if a proxy setting such as an :envvar:`http_proxy`
+ environment variable is set), ``UnknownHandler``, ``HTTPHandler``,
``HTTPDefaultErrorHandler``, ``HTTPRedirectHandler``, ``FTPHandler``,
``FileHandler``, ``HTTPErrorProcessor``.
@@ -506,10 +507,11 @@ Proxies
=======
**urllib2** will auto-detect your proxy settings and use those. This is through
-the ``ProxyHandler`` which is part of the normal handler chain. Normally that's
-a good thing, but there are occasions when it may not be helpful [#]_. One way
-to do this is to setup our own ``ProxyHandler``, with no proxies defined. This
-is done using similar steps to setting up a `Basic Authentication`_ handler : ::
+the ``ProxyHandler``, which is part of the normal handler chain when a proxy
+setting is detected. Normally that's a good thing, but there are occasions
+when it may not be helpful [#]_. One way to do this is to set up our own
+``ProxyHandler``, with no proxies defined. This is done using similar steps to
+setting up a `Basic Authentication`_ handler: ::
>>> proxy_support = urllib2.ProxyHandler({})
>>> opener = urllib2.build_opener(proxy_support)
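
A self-contained rendering of the ``except ... as e`` style that the hunks above switch to (``someurl`` is a placeholder, not taken from the patch)::

    from urllib2 import Request, urlopen, URLError, HTTPError

    someurl = 'http://www.example.com/'
    req = Request(someurl)
    try:
        response = urlopen(req)
    except HTTPError as e:
        print 'The server couldn\'t fulfill the request.'
        print 'Error code: ', e.code
    except URLError as e:
        print 'We failed to reach a server.'
        print 'Reason: ', e.reason
    else:
        print response.read()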
diff --git a/Doc/howto/webservers.rst b/Doc/howto/webservers.rst
index fbc9fd9..c3b79e4 100644
--- a/Doc/howto/webservers.rst
+++ b/Doc/howto/webservers.rst
@@ -691,7 +691,7 @@ published, which is a good starting point.
The newest version of TurboGears, version 2.0, moves even further in direction
of WSGI support and a component-based architecture. TurboGears 2 is based on
the WSGI stack of another popular component-based web framework, `Pylons
-<http://pylonshq.com/>`_.
+<http://www.pylonsproject.org/>`_.
Zope
diff --git a/Doc/includes/email-unpack.py b/Doc/includes/email-unpack.py
index 8f99ded..a8f712d 100644
--- a/Doc/includes/email-unpack.py
+++ b/Doc/includes/email-unpack.py
@@ -35,7 +35,7 @@ Usage: %prog [options] msgfile
try:
os.mkdir(opts.directory)
- except OSError, e:
+ except OSError as e:
# Ignore directory exists error
if e.errno != errno.EEXIST:
raise
diff --git a/Doc/includes/sqlite3/complete_statement.py b/Doc/includes/sqlite3/complete_statement.py
index 22525e3..76ea7f6 100644
--- a/Doc/includes/sqlite3/complete_statement.py
+++ b/Doc/includes/sqlite3/complete_statement.py
@@ -23,7 +23,7 @@ while True:
if buffer.lstrip().upper().startswith("SELECT"):
print cur.fetchall()
- except sqlite3.Error, e:
+ except sqlite3.Error as e:
print "An error occurred:", e.args[0]
buffer = ""
diff --git a/Doc/includes/sqlite3/execute_1.py b/Doc/includes/sqlite3/execute_1.py
index fb3784f..763167c 100644
--- a/Doc/includes/sqlite3/execute_1.py
+++ b/Doc/includes/sqlite3/execute_1.py
@@ -1,11 +1,16 @@
import sqlite3
-con = sqlite3.connect("mydb")
-
+con = sqlite3.connect(":memory:")
cur = con.cursor()
+cur.execute("create table people (name_last, age)")
who = "Yeltsin"
age = 72
-cur.execute("select name_last, age from people where name_last=? and age=?", (who, age))
+# This is the qmark style:
+cur.execute("insert into people values (?, ?)", (who, age))
+
+# And this is the named style:
+cur.execute("select * from people where name_last=:who and age=:age", {"who": who, "age": age})
+
print cur.fetchone()
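
Running the rewritten :file:`execute_1.py` above should simply print the row inserted with the qmark-style statement and fetched back with the named-style query (expected output, assuming an unmodified script)::

    $ python execute_1.py
    (u'Yeltsin', 72)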
diff --git a/Doc/includes/sqlite3/execute_2.py b/Doc/includes/sqlite3/execute_2.py
deleted file mode 100644
index df6c894..0000000
--- a/Doc/includes/sqlite3/execute_2.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import sqlite3
-
-con = sqlite3.connect("mydb")
-
-cur = con.cursor()
-
-who = "Yeltsin"
-age = 72
-
-cur.execute("select name_last, age from people where name_last=:who and age=:age",
- {"who": who, "age": age})
-print cur.fetchone()
diff --git a/Doc/includes/sqlite3/executemany_2.py b/Doc/includes/sqlite3/executemany_2.py
index 05857c0..0b12688 100644
--- a/Doc/includes/sqlite3/executemany_2.py
+++ b/Doc/includes/sqlite3/executemany_2.py
@@ -1,8 +1,8 @@
import sqlite3
+import string
def char_generator():
- import string
- for c in string.letters[:26]:
+ for c in string.lowercase:
yield (c,)
con = sqlite3.connect(":memory:")
diff --git a/Doc/includes/sqlite3/rowclass.py b/Doc/includes/sqlite3/rowclass.py
index 3fa0b87..92b5ad6 100644
--- a/Doc/includes/sqlite3/rowclass.py
+++ b/Doc/includes/sqlite3/rowclass.py
@@ -1,12 +1,12 @@
import sqlite3
-con = sqlite3.connect("mydb")
+con = sqlite3.connect(":memory:")
con.row_factory = sqlite3.Row
cur = con.cursor()
-cur.execute("select name_last, age from people")
+cur.execute("select 'John' as name, 42 as age")
for row in cur:
- assert row[0] == row["name_last"]
- assert row["name_last"] == row["nAmE_lAsT"]
+ assert row[0] == row["name"]
+ assert row["name"] == row["nAmE"]
assert row[1] == row["age"]
assert row[1] == row["AgE"]
diff --git a/Doc/includes/sqlite3/text_factory.py b/Doc/includes/sqlite3/text_factory.py
index 1959498..577378f 100644
--- a/Doc/includes/sqlite3/text_factory.py
+++ b/Doc/includes/sqlite3/text_factory.py
@@ -3,9 +3,6 @@ import sqlite3
con = sqlite3.connect(":memory:")
cur = con.cursor()
-# Create the table
-con.execute("create table person(lastname, firstname)")
-
AUSTRIA = u"\xd6sterreich"
# by default, rows are returned as Unicode
@@ -17,7 +14,7 @@ assert row[0] == AUSTRIA
con.text_factory = str
cur.execute("select ?", (AUSTRIA,))
row = cur.fetchone()
-assert type(row[0]) == str
+assert type(row[0]) is str
# the bytestrings will be encoded in UTF-8, unless you stored garbage in the
# database ...
assert row[0] == AUSTRIA.encode("utf-8")
@@ -29,15 +26,15 @@ con.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cur.execute("select ?", ("this is latin1 and would normally create errors" +
u"\xe4\xf6\xfc".encode("latin1"),))
row = cur.fetchone()
-assert type(row[0]) == unicode
+assert type(row[0]) is unicode
# sqlite3 offers a built-in optimized text_factory that will return bytestring
# objects, if the data is in ASCII only, and otherwise return unicode objects
con.text_factory = sqlite3.OptimizedUnicode
cur.execute("select ?", (AUSTRIA,))
row = cur.fetchone()
-assert type(row[0]) == unicode
+assert type(row[0]) is unicode
cur.execute("select ?", ("Germany",))
row = cur.fetchone()
-assert type(row[0]) == str
+assert type(row[0]) is str
diff --git a/Doc/install/index.rst b/Doc/install/index.rst
index abce88d..ad939ab 100644
--- a/Doc/install/index.rst
+++ b/Doc/install/index.rst
@@ -7,8 +7,6 @@
*****************************
:Author: Greg Ward
-:Release: |version|
-:Date: |today|
.. TODO: Fill in XXX comments
@@ -22,12 +20,20 @@
Finally, it might be useful to include all the material from my "Care
and Feeding of a Python Installation" talk in here somewhere. Yow!
-.. topic:: Abstract
+This document describes the Python Distribution Utilities ("Distutils") from the
+end-user's point-of-view, describing how to extend the capabilities of a
+standard Python installation by building and installing third-party Python
+modules and extensions.
- This document describes the Python Distribution Utilities ("Distutils") from the
- end-user's point-of-view, describing how to extend the capabilities of a
- standard Python installation by building and installing third-party Python
- modules and extensions.
+
+.. note::
+
+ This guide only covers the basic tools for installing extensions that are
+ provided as part of this version of Python. Third party tools offer easier
+ to use and more secure alternatives. Refer to the
+ `quick recommendations section
+ <https://python-packaging-user-guide.readthedocs.org/en/latest/current.html>`__
+ in the Python Packaging User Guide for more information.
.. _inst-intro:
@@ -52,7 +58,9 @@ new goodies to their toolbox. You don't need to know Python to read this
document; there will be some brief forays into using Python's interactive mode
to explore your installation, but that's it. If you're looking for information
on how to distribute your own Python modules so that others may use them, see
-the :ref:`distutils-index` manual.
+the :ref:`distutils-index` manual. :ref:`debug-setup-script` may also be of
+interest.
+
.. _inst-trivial-install:
@@ -191,7 +199,7 @@ under the distribution root; if you're excessively concerned with speed, or want
to keep the source tree pristine, you can change the build directory with the
:option:`--build-base` option. For example::
- python setup.py build --build-base=/tmp/pybuild/foo-1.0
+ python setup.py build --build-base=/path/to/pybuild/foo-1.0
(Or you could do this permanently with a directive in your system or personal
Distutils configuration file; see section :ref:`inst-config-files`.) Normally, this
@@ -237,6 +245,8 @@ by how you built/installed Python itself. On Unix (and Mac OS X, which is also
Unix-based), it also depends on whether the module distribution being installed
is pure Python or contains extensions ("non-pure"):
+.. tabularcolumns:: |l|l|l|l|
+
+-----------------+-----------------------------------------------------+--------------------------------------------------+-------+
| Platform | Standard installation location | Default value | Notes |
+=================+=====================================================+==================================================+=======+
@@ -1042,7 +1052,7 @@ These compilers require some special libraries. This task is more complex than
for Borland's C++, because there is no program to convert the library. First
you have to create a list of symbols which the Python DLL exports. (You can find
a good program for this task at
-http://www.emmestech.com/software/pexports-0.43/download_pexports.html).
+http://sourceforge.net/projects/mingw/files/MinGW/Extension/pexports/).
.. I don't understand what the next line means. --amk
.. (inclusive the references on data structures.)
diff --git a/Doc/library/2to3.rst b/Doc/library/2to3.rst
index d08c853..a927510 100644
--- a/Doc/library/2to3.rst
+++ b/Doc/library/2to3.rst
@@ -23,7 +23,7 @@ Using 2to3
also located in the :file:`Tools/scripts` directory of the Python root.
2to3's basic arguments are a list of files or directories to transform. The
-directories are to recursively traversed for Python sources.
+directories are recursively traversed for Python sources.
Here is a sample Python 2.x source file, :file:`example.py`::
@@ -264,8 +264,7 @@ and off individually. They are described here in more detail.
.. 2to3fixer:: long
- Strips the ``L`` suffix on long literals and renames :class:`long` to
- :class:`int`.
+ Renames :class:`long` to :class:`int`.
.. 2to3fixer:: map
@@ -291,11 +290,11 @@ and off individually. They are described here in more detail.
Converts the use of iterator's :meth:`~iterator.next` methods to the
:func:`next` function. It also renames :meth:`next` methods to
- :meth:`~object.__next__`.
+ :meth:`~iterator.__next__`.
.. 2to3fixer:: nonzero
- Renames :meth:`~object.__nonzero__` to :meth:`~object.__bool__`.
+ Renames :meth:`__nonzero__` to :meth:`~object.__bool__`.
.. 2to3fixer:: numliterals
@@ -314,7 +313,7 @@ and off individually. They are described here in more detail.
Converts ``raise E, V`` to ``raise E(V)``, and ``raise E, V, T`` to ``raise
E(V).with_traceback(T)``. If ``E`` is a tuple, the translation will be
- incorrect because substituting tuples for exceptions has been removed in 3.0.
+ incorrect because substituting tuples for exceptions has been removed in Python 3.
.. 2to3fixer:: raw_input
@@ -337,7 +336,7 @@ and off individually. They are described here in more detail.
Replaces use of the :class:`set` constructor with set literals. This fixer
is optional.
-.. 2to3fixer:: standard_error
+.. 2to3fixer:: standarderror
Renames :exc:`StandardError` to :exc:`Exception`.
diff --git a/Doc/library/__future__.rst b/Doc/library/__future__.rst
index 87e1205..329e411 100644
--- a/Doc/library/__future__.rst
+++ b/Doc/library/__future__.rst
@@ -75,7 +75,7 @@ language using this mechanism:
| division | 2.2.0a2 | 3.0 | :pep:`238`: |
| | | | *Changing the Division Operator* |
+------------------+-------------+--------------+---------------------------------------------+
-| absolute_import | 2.5.0a1 | 2.7 | :pep:`328`: |
+| absolute_import | 2.5.0a1 | 3.0 | :pep:`328`: |
| | | | *Imports: Multi-Line and Absolute/Relative* |
+------------------+-------------+--------------+---------------------------------------------+
| with_statement | 2.5.0a1 | 2.6 | :pep:`343`: |
diff --git a/Doc/library/_winreg.rst b/Doc/library/_winreg.rst
index 825ce1f..f82d1c5 100644
--- a/Doc/library/_winreg.rst
+++ b/Doc/library/_winreg.rst
@@ -7,9 +7,9 @@
.. sectionauthor:: Mark Hammond <MarkH@ActiveState.com>
.. note::
- The :mod:`_winreg` module has been renamed to :mod:`winreg` in Python 3.0.
+ The :mod:`_winreg` module has been renamed to :mod:`winreg` in Python 3.
The :term:`2to3` tool will automatically adapt imports when converting your
- sources to 3.0.
+ sources to Python 3.
.. versionadded:: 2.0
diff --git a/Doc/library/abc.rst b/Doc/library/abc.rst
index ef8b08e..3a00a9c 100644
--- a/Doc/library/abc.rst
+++ b/Doc/library/abc.rst
@@ -110,19 +110,19 @@ This module provides the following class:
MyIterable.register(Foo)
The ABC ``MyIterable`` defines the standard iterable method,
- :meth:`__iter__`, as an abstract method. The implementation given here can
- still be called from subclasses. The :meth:`get_iterator` method is also
- part of the ``MyIterable`` abstract base class, but it does not have to be
- overridden in non-abstract derived classes.
+ :meth:`~iterator.__iter__`, as an abstract method. The implementation given
+ here can still be called from subclasses. The :meth:`get_iterator` method
+ is also part of the ``MyIterable`` abstract base class, but it does not have
+ to be overridden in non-abstract derived classes.
The :meth:`__subclasshook__` class method defined here says that any class
- that has an :meth:`__iter__` method in its :attr:`__dict__` (or in that of
- one of its base classes, accessed via the :attr:`__mro__` list) is
- considered a ``MyIterable`` too.
+ that has an :meth:`~iterator.__iter__` method in its
+ :attr:`~object.__dict__` (or in that of one of its base classes, accessed
+ via the :attr:`~class.__mro__` list) is considered a ``MyIterable`` too.
Finally, the last line makes ``Foo`` a virtual subclass of ``MyIterable``,
- even though it does not define an :meth:`__iter__` method (it uses the
- old-style iterable protocol, defined in terms of :meth:`__len__` and
+ even though it does not define an :meth:`~iterator.__iter__` method (it uses
+ the old-style iterable protocol, defined in terms of :meth:`__len__` and
:meth:`__getitem__`). Note that this will not make ``get_iterator``
available as a method of ``Foo``, so it is provided separately.
diff --git a/Doc/library/aifc.rst b/Doc/library/aifc.rst
index 1265423..de4144c 100644
--- a/Doc/library/aifc.rst
+++ b/Doc/library/aifc.rst
@@ -30,8 +30,8 @@ sampling rate or frame rate is the number of times per second the sound is
sampled. The number of channels indicate if the audio is mono, stereo, or
quadro. Each frame consists of one sample per channel. The sample size is the
size in bytes of each sample. Thus a frame consists of
-*nchannels*\**samplesize* bytes, and a second's worth of audio consists of
-*nchannels*\**samplesize*\**framerate* bytes.
+*nchannels*\*\ *samplesize* bytes, and a second's worth of audio consists of
+*nchannels*\*\ *samplesize*\*\ *framerate* bytes.
For example, CD quality audio has a sample size of two bytes (16 bits), uses two
channels (stereo) and has a frame rate of 44,100 frames/second. This gives a
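
Plugging the CD-quality figures from this paragraph into the frame-size formula gives a quick sanity check (not part of the patch)::

    >>> nchannels, samplesize, framerate = 2, 2, 44100
    >>> nchannels * samplesize              # bytes per frame
    4
    >>> nchannels * samplesize * framerate  # bytes per second of audio
    176400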
diff --git a/Doc/library/al.rst b/Doc/library/al.rst
index ad2eaea..f796c5c 100644
--- a/Doc/library/al.rst
+++ b/Doc/library/al.rst
@@ -8,7 +8,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`al` module has been deprecated for removal in Python 3.0.
+ The :mod:`al` module has been removed in Python 3.
This module provides access to the audio facilities of the SGI Indy and Indigo
@@ -201,7 +201,7 @@ Port objects, as returned by :func:`openport`, have the following methods:
:deprecated:
.. deprecated:: 2.6
- The :mod:`AL` module has been deprecated for removal in Python 3.0.
+ The :mod:`AL` module has been removed in Python 3.
This module defines symbolic constants needed to use the built-in module
diff --git a/Doc/library/anydbm.rst b/Doc/library/anydbm.rst
index 7c6f99f..38e01f3 100644
--- a/Doc/library/anydbm.rst
+++ b/Doc/library/anydbm.rst
@@ -6,9 +6,9 @@
.. note::
- The :mod:`anydbm` module has been renamed to :mod:`dbm` in Python 3.0. The
+ The :mod:`anydbm` module has been renamed to :mod:`dbm` in Python 3. The
:term:`2to3` tool will automatically adapt imports when converting your
- sources to 3.0.
+ sources to Python 3.
.. index::
module: dbhash
@@ -92,6 +92,14 @@ then prints out the contents of the database::
db.close()
+In addition to the dictionary-like methods, ``anydbm`` objects
+provide the following method:
+
+.. function:: close()
+
+ Close the ``anydbm`` database.
+
+
.. seealso::
Module :mod:`dbhash`
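
The new paragraph above documents :meth:`close` alongside the dictionary-like interface; a minimal usage sketch (the file name is arbitrary)::

    import anydbm

    db = anydbm.open('cache', 'c')        # 'c' creates the database if needed
    db['www.python.org'] = 'Python Website'
    print db['www.python.org']
    db.close()                            # close the underlying database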
diff --git a/Doc/library/archiving.rst b/Doc/library/archiving.rst
index 7d0df5f..472c617 100644
--- a/Doc/library/archiving.rst
+++ b/Doc/library/archiving.rst
@@ -7,6 +7,7 @@ Data Compression and Archiving
The modules described in this chapter support data compression with the zlib,
gzip, and bzip2 algorithms, and the creation of ZIP- and tar-format archives.
+See also :ref:`archiving-operations` provided by the :mod:`shutil` module.
.. toctree::
diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst
index c76fe60..4aa2918 100644
--- a/Doc/library/argparse.rst
+++ b/Doc/library/argparse.rst
@@ -12,6 +12,12 @@
--------------
+.. sidebar:: Tutorial
+
+ This page contains the API reference information. For a more gentle
+ introduction to Python command-line parsing, have a look at the
+ :ref:`argparse tutorial <argparse-tutorial>`.
+
The :mod:`argparse` module makes it easy to write user-friendly command-line
interfaces. The program defines what arguments it requires, and :mod:`argparse`
will figure out how to parse those out of :data:`sys.argv`. The :mod:`argparse`
@@ -40,7 +46,7 @@ produces either the sum or the max::
Assuming the Python code above is saved into a file called ``prog.py``, it can
be run at the command line and provides useful help messages::
- $ prog.py -h
+ $ python prog.py -h
usage: prog.py [-h] [--sum] N [N ...]
Process some integers.
@@ -55,15 +61,15 @@ be run at the command line and provides useful help messages::
When run with the appropriate arguments, it prints either the sum or the max of
the command-line integers::
- $ prog.py 1 2 3 4
+ $ python prog.py 1 2 3 4
4
- $ prog.py 1 2 3 4 --sum
+ $ python prog.py 1 2 3 4 --sum
10
If invalid arguments are passed in, it will issue an error::
- $ prog.py a b c
+ $ python prog.py a b c
usage: prog.py [-h] [--sum] N [N ...]
prog.py: error: argument N: invalid int value: 'a'
@@ -124,44 +130,143 @@ command-line arguments from :data:`sys.argv`.
ArgumentParser objects
----------------------
-.. class:: ArgumentParser([description], [epilog], [prog], [usage], [add_help], \
- [argument_default], [parents], [prefix_chars], \
- [conflict_handler], [formatter_class])
+.. class:: ArgumentParser(prog=None, usage=None, description=None, \
+ epilog=None, parents=[], \
+ formatter_class=argparse.HelpFormatter, \
+ prefix_chars='-', fromfile_prefix_chars=None, \
+ argument_default=None, conflict_handler='error', \
+ add_help=True)
- Create a new :class:`ArgumentParser` object. Each parameter has its own more
- detailed description below, but in short they are:
+ Create a new :class:`ArgumentParser` object. All parameters should be passed
+ as keyword arguments. Each parameter has its own more detailed description
+ below, but in short they are:
- * description_ - Text to display before the argument help.
+ * prog_ - The name of the program (default: ``sys.argv[0]``)
- * epilog_ - Text to display after the argument help.
+ * usage_ - The string describing the program usage (default: generated from
+ arguments added to parser)
- * add_help_ - Add a -h/--help option to the parser. (default: ``True``)
+ * description_ - Text to display before the argument help (default: none)
- * argument_default_ - Set the global default value for arguments.
- (default: ``None``)
+ * epilog_ - Text to display after the argument help (default: none)
* parents_ - A list of :class:`ArgumentParser` objects whose arguments should
- also be included.
+ also be included
+
+ * formatter_class_ - A class for customizing the help output
- * prefix_chars_ - The set of characters that prefix optional arguments.
+ * prefix_chars_ - The set of characters that prefix optional arguments
(default: '-')
* fromfile_prefix_chars_ - The set of characters that prefix files from
- which additional arguments should be read. (default: ``None``)
+ which additional arguments should be read (default: ``None``)
- * formatter_class_ - A class for customizing the help output.
-
- * conflict_handler_ - Usually unnecessary, defines strategy for resolving
- conflicting optionals.
+ * argument_default_ - The global default value for arguments
+ (default: ``None``)
- * prog_ - The name of the program (default:
- ``sys.argv[0]``)
+ * conflict_handler_ - The strategy for resolving conflicting optionals
+ (usually unnecessary)
- * usage_ - The string describing the program usage (default: generated)
+ * add_help_ - Add a -h/--help option to the parser (default: ``True``)
The following sections describe how each of these are used.
+prog
+^^^^
+
+By default, :class:`ArgumentParser` objects use ``sys.argv[0]`` to determine
+how to display the name of the program in help messages. This default is almost
+always desirable because it will make the help messages match how the program was
+invoked on the command line. For example, consider a file named
+``myprogram.py`` with the following code::
+
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--foo', help='foo help')
+ args = parser.parse_args()
+
+The help for this program will display ``myprogram.py`` as the program name
+(regardless of where the program was invoked from)::
+
+ $ python myprogram.py --help
+ usage: myprogram.py [-h] [--foo FOO]
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --foo FOO foo help
+ $ cd ..
+ $ python subdir\myprogram.py --help
+ usage: myprogram.py [-h] [--foo FOO]
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --foo FOO foo help
+
+To change this default behavior, another value can be supplied using the
+``prog=`` argument to :class:`ArgumentParser`::
+
+ >>> parser = argparse.ArgumentParser(prog='myprogram')
+ >>> parser.print_help()
+ usage: myprogram [-h]
+
+ optional arguments:
+ -h, --help show this help message and exit
+
+Note that the program name, whether determined from ``sys.argv[0]`` or from the
+``prog=`` argument, is available to help messages using the ``%(prog)s`` format
+specifier.
+
+::
+
+ >>> parser = argparse.ArgumentParser(prog='myprogram')
+ >>> parser.add_argument('--foo', help='foo of the %(prog)s program')
+ >>> parser.print_help()
+ usage: myprogram [-h] [--foo FOO]
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --foo FOO foo of the myprogram program
+
+
+usage
+^^^^^
+
+By default, :class:`ArgumentParser` calculates the usage message from the
+arguments it contains::
+
+ >>> parser = argparse.ArgumentParser(prog='PROG')
+ >>> parser.add_argument('--foo', nargs='?', help='foo help')
+ >>> parser.add_argument('bar', nargs='+', help='bar help')
+ >>> parser.print_help()
+ usage: PROG [-h] [--foo [FOO]] bar [bar ...]
+
+ positional arguments:
+ bar bar help
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --foo [FOO] foo help
+
+The default message can be overridden with the ``usage=`` keyword argument::
+
+ >>> parser = argparse.ArgumentParser(prog='PROG', usage='%(prog)s [options]')
+ >>> parser.add_argument('--foo', nargs='?', help='foo help')
+ >>> parser.add_argument('bar', nargs='+', help='bar help')
+ >>> parser.print_help()
+ usage: PROG [options]
+
+ positional arguments:
+ bar bar help
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --foo [FOO] foo help
+
+The ``%(prog)s`` format specifier is available to fill in the program name in
+your usage messages.
+
+
description
^^^^^^^^^^^
@@ -209,122 +314,6 @@ line-wrapped, but this behavior can be adjusted with the formatter_class_
argument to :class:`ArgumentParser`.
-add_help
-^^^^^^^^
-
-By default, ArgumentParser objects add an option which simply displays
-the parser's help message. For example, consider a file named
-``myprogram.py`` containing the following code::
-
- import argparse
- parser = argparse.ArgumentParser()
- parser.add_argument('--foo', help='foo help')
- args = parser.parse_args()
-
-If ``-h`` or ``--help`` is supplied at the command line, the ArgumentParser
-help will be printed::
-
- $ python myprogram.py --help
- usage: myprogram.py [-h] [--foo FOO]
-
- optional arguments:
- -h, --help show this help message and exit
- --foo FOO foo help
-
-Occasionally, it may be useful to disable the addition of this help option.
-This can be achieved by passing ``False`` as the ``add_help=`` argument to
-:class:`ArgumentParser`::
-
- >>> parser = argparse.ArgumentParser(prog='PROG', add_help=False)
- >>> parser.add_argument('--foo', help='foo help')
- >>> parser.print_help()
- usage: PROG [--foo FOO]
-
- optional arguments:
- --foo FOO foo help
-
-The help option is typically ``-h/--help``. The exception to this is
-if the ``prefix_chars=`` is specified and does not include ``-``, in
-which case ``-h`` and ``--help`` are not valid options. In
-this case, the first character in ``prefix_chars`` is used to prefix
-the help options::
-
- >>> parser = argparse.ArgumentParser(prog='PROG', prefix_chars='+/')
- >>> parser.print_help()
- usage: PROG [+h]
-
- optional arguments:
- +h, ++help show this help message and exit
-
-
-prefix_chars
-^^^^^^^^^^^^
-
-Most command-line options will use ``-`` as the prefix, e.g. ``-f/--foo``.
-Parsers that need to support different or additional prefix
-characters, e.g. for options
-like ``+f`` or ``/foo``, may specify them using the ``prefix_chars=`` argument
-to the ArgumentParser constructor::
-
- >>> parser = argparse.ArgumentParser(prog='PROG', prefix_chars='-+')
- >>> parser.add_argument('+f')
- >>> parser.add_argument('++bar')
- >>> parser.parse_args('+f X ++bar Y'.split())
- Namespace(bar='Y', f='X')
-
-The ``prefix_chars=`` argument defaults to ``'-'``. Supplying a set of
-characters that does not include ``-`` will cause ``-f/--foo`` options to be
-disallowed.
-
-
-fromfile_prefix_chars
-^^^^^^^^^^^^^^^^^^^^^
-
-Sometimes, for example when dealing with a particularly long argument lists, it
-may make sense to keep the list of arguments in a file rather than typing it out
-at the command line. If the ``fromfile_prefix_chars=`` argument is given to the
-:class:`ArgumentParser` constructor, then arguments that start with any of the
-specified characters will be treated as files, and will be replaced by the
-arguments they contain. For example::
-
- >>> with open('args.txt', 'w') as fp:
- ... fp.write('-f\nbar')
- >>> parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
- >>> parser.add_argument('-f')
- >>> parser.parse_args(['-f', 'foo', '@args.txt'])
- Namespace(f='bar')
-
-Arguments read from a file must by default be one per line (but see also
-:meth:`~ArgumentParser.convert_arg_line_to_args`) and are treated as if they
-were in the same place as the original file referencing argument on the command
-line. So in the example above, the expression ``['-f', 'foo', '@args.txt']``
-is considered equivalent to the expression ``['-f', 'foo', '-f', 'bar']``.
-
-The ``fromfile_prefix_chars=`` argument defaults to ``None``, meaning that
-arguments will never be treated as file references.
-
-
-argument_default
-^^^^^^^^^^^^^^^^
-
-Generally, argument defaults are specified either by passing a default to
-:meth:`~ArgumentParser.add_argument` or by calling the
-:meth:`~ArgumentParser.set_defaults` methods with a specific set of name-value
-pairs. Sometimes however, it may be useful to specify a single parser-wide
-default for arguments. This can be accomplished by passing the
-``argument_default=`` keyword argument to :class:`ArgumentParser`. For example,
-to globally suppress attribute creation on :meth:`~ArgumentParser.parse_args`
-calls, we supply ``argument_default=SUPPRESS``::
-
- >>> parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
- >>> parser.add_argument('--foo')
- >>> parser.add_argument('bar', nargs='?')
- >>> parser.parse_args(['--foo', '1', 'BAR'])
- Namespace(bar='BAR', foo='1')
- >>> parser.parse_args([])
- Namespace()
-
-
parents
^^^^^^^
@@ -443,6 +432,74 @@ will add information about the default value of each of the arguments::
--foo FOO FOO! (default: 42)
+prefix_chars
+^^^^^^^^^^^^
+
+Most command-line options will use ``-`` as the prefix, e.g. ``-f/--foo``.
+Parsers that need to support different or additional prefix
+characters, e.g. for options
+like ``+f`` or ``/foo``, may specify them using the ``prefix_chars=`` argument
+to the ArgumentParser constructor::
+
+ >>> parser = argparse.ArgumentParser(prog='PROG', prefix_chars='-+')
+ >>> parser.add_argument('+f')
+ >>> parser.add_argument('++bar')
+ >>> parser.parse_args('+f X ++bar Y'.split())
+ Namespace(bar='Y', f='X')
+
+The ``prefix_chars=`` argument defaults to ``'-'``. Supplying a set of
+characters that does not include ``-`` will cause ``-f/--foo`` options to be
+disallowed.
+
+
+fromfile_prefix_chars
+^^^^^^^^^^^^^^^^^^^^^
+
+Sometimes, for example when dealing with particularly long argument lists, it
+may make sense to keep the list of arguments in a file rather than typing it out
+at the command line. If the ``fromfile_prefix_chars=`` argument is given to the
+:class:`ArgumentParser` constructor, then arguments that start with any of the
+specified characters will be treated as files, and will be replaced by the
+arguments they contain. For example::
+
+ >>> with open('args.txt', 'w') as fp:
+ ... fp.write('-f\nbar')
+ >>> parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
+ >>> parser.add_argument('-f')
+ >>> parser.parse_args(['-f', 'foo', '@args.txt'])
+ Namespace(f='bar')
+
+Arguments read from a file must by default be one per line (but see also
+:meth:`~ArgumentParser.convert_arg_line_to_args`) and are treated as if they
+were in the same place as the original file referencing argument on the command
+line. So in the example above, the expression ``['-f', 'foo', '@args.txt']``
+is considered equivalent to the expression ``['-f', 'foo', '-f', 'bar']``.
+
+The ``fromfile_prefix_chars=`` argument defaults to ``None``, meaning that
+arguments will never be treated as file references.
+
+
+argument_default
+^^^^^^^^^^^^^^^^
+
+Generally, argument defaults are specified either by passing a default to
+:meth:`~ArgumentParser.add_argument` or by calling the
+:meth:`~ArgumentParser.set_defaults` method with a specific set of name-value
+pairs. Sometimes however, it may be useful to specify a single parser-wide
+default for arguments. This can be accomplished by passing the
+``argument_default=`` keyword argument to :class:`ArgumentParser`. For example,
+to globally suppress attribute creation on :meth:`~ArgumentParser.parse_args`
+calls, we supply ``argument_default=SUPPRESS``::
+
+ >>> parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
+ >>> parser.add_argument('--foo')
+ >>> parser.add_argument('bar', nargs='?')
+ >>> parser.parse_args(['--foo', '1', 'BAR'])
+ Namespace(bar='BAR', foo='1')
+ >>> parser.parse_args([])
+ Namespace()
+
+
conflict_handler
^^^^^^^^^^^^^^^^
@@ -480,22 +537,20 @@ action is retained as the ``-f`` action, because only the ``--foo`` option
string was overridden.
-prog
-^^^^
+add_help
+^^^^^^^^
-By default, :class:`ArgumentParser` objects uses ``sys.argv[0]`` to determine
-how to display the name of the program in help messages. This default is almost
-always desirable because it will make the help messages match how the program was
-invoked on the command line. For example, consider a file named
-``myprogram.py`` with the following code::
+By default, ArgumentParser objects add an option which simply displays
+the parser's help message. For example, consider a file named
+``myprogram.py`` containing the following code::
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--foo', help='foo help')
args = parser.parse_args()
-The help for this program will display ``myprogram.py`` as the program name
-(regardless of where the program was invoked from)::
+If ``-h`` or ``--help`` is supplied at the command line, the ArgumentParser
+help will be printed::
$ python myprogram.py --help
usage: myprogram.py [-h] [--foo FOO]
@@ -503,76 +558,31 @@ The help for this program will display ``myprogram.py`` as the program name
optional arguments:
-h, --help show this help message and exit
--foo FOO foo help
- $ cd ..
- $ python subdir\myprogram.py --help
- usage: myprogram.py [-h] [--foo FOO]
-
- optional arguments:
- -h, --help show this help message and exit
- --foo FOO foo help
-To change this default behavior, another value can be supplied using the
-``prog=`` argument to :class:`ArgumentParser`::
-
- >>> parser = argparse.ArgumentParser(prog='myprogram')
- >>> parser.print_help()
- usage: myprogram [-h]
-
- optional arguments:
- -h, --help show this help message and exit
-
-Note that the program name, whether determined from ``sys.argv[0]`` or from the
-``prog=`` argument, is available to help messages using the ``%(prog)s`` format
-specifier.
-
-::
-
- >>> parser = argparse.ArgumentParser(prog='myprogram')
- >>> parser.add_argument('--foo', help='foo of the %(prog)s program')
- >>> parser.print_help()
- usage: myprogram [-h] [--foo FOO]
-
- optional arguments:
- -h, --help show this help message and exit
- --foo FOO foo of the myprogram program
-
-
-usage
-^^^^^
-
-By default, :class:`ArgumentParser` calculates the usage message from the
-arguments it contains::
+Occasionally, it may be useful to disable the addition of this help option.
+This can be achieved by passing ``False`` as the ``add_help=`` argument to
+:class:`ArgumentParser`::
- >>> parser = argparse.ArgumentParser(prog='PROG')
- >>> parser.add_argument('--foo', nargs='?', help='foo help')
- >>> parser.add_argument('bar', nargs='+', help='bar help')
+ >>> parser = argparse.ArgumentParser(prog='PROG', add_help=False)
+ >>> parser.add_argument('--foo', help='foo help')
>>> parser.print_help()
- usage: PROG [-h] [--foo [FOO]] bar [bar ...]
-
- positional arguments:
- bar bar help
+ usage: PROG [--foo FOO]
optional arguments:
- -h, --help show this help message and exit
- --foo [FOO] foo help
+ --foo FOO foo help
-The default message can be overridden with the ``usage=`` keyword argument::
+The help option is typically ``-h/--help``. The exception to this is
+if the ``prefix_chars=`` is specified and does not include ``-``, in
+which case ``-h`` and ``--help`` are not valid options. In
+this case, the first character in ``prefix_chars`` is used to prefix
+the help options::
- >>> parser = argparse.ArgumentParser(prog='PROG', usage='%(prog)s [options]')
- >>> parser.add_argument('--foo', nargs='?', help='foo help')
- >>> parser.add_argument('bar', nargs='+', help='bar help')
+ >>> parser = argparse.ArgumentParser(prog='PROG', prefix_chars='+/')
>>> parser.print_help()
- usage: PROG [options]
-
- positional arguments:
- bar bar help
+ usage: PROG [+h]
optional arguments:
- -h, --help show this help message and exit
- --foo [FOO] foo help
-
-The ``%(prog)s`` format specifier is available to fill in the program name in
-your usage messages.
+ +h, ++help show this help message and exit
The add_argument() method
@@ -743,7 +753,7 @@ the Action API. The easiest way to do this is to extend
* ``values`` - The associated command-line arguments, with any type conversions
applied. (Type conversions are specified with the type_ keyword argument to
- :meth:`~ArgumentParser.add_argument`.
+ :meth:`~ArgumentParser.add_argument`.)
* ``option_string`` - The option string that was used to invoke this action.
The ``option_string`` argument is optional, and will be absent if the action
@@ -897,6 +907,17 @@ was not present at the command line::
>>> parser.parse_args(''.split())
Namespace(foo=42)
+If the ``default`` value is a string, the parser parses the value as if it
+were a command-line argument. In particular, the parser applies any type_
+conversion argument, if provided, before setting the attribute on the
+:class:`Namespace` return value. Otherwise, the parser uses the value as is::
+
+ >>> parser = argparse.ArgumentParser()
+ >>> parser.add_argument('--length', default='10', type=int)
+ >>> parser.add_argument('--width', default=10.5, type=int)
+ >>> parser.parse_args()
+ Namespace(length=10, width=10.5)
+
For positional arguments with nargs_ equal to ``?`` or ``*``, the ``default`` value
is used when no command-line argument was present::
@@ -935,6 +956,9 @@ types and functions can be used directly as the value of the ``type`` argument::
>>> parser.parse_args('2 temp.txt'.split())
Namespace(bar=<open file 'temp.txt', mode 'r' at 0x...>, foo=2)
+See the section on the default_ keyword argument for information on when the
+``type`` argument is applied to default arguments.
+
To ease the use of various types of files, the argparse module provides the
factory FileType which takes the ``mode=`` and ``bufsize=`` arguments of the
``file`` object. For example, ``FileType('w')`` can be used to create a
@@ -982,32 +1006,33 @@ choices
^^^^^^^
Some command-line arguments should be selected from a restricted set of values.
-These can be handled by passing a container object as the ``choices`` keyword
+These can be handled by passing a container object as the *choices* keyword
argument to :meth:`~ArgumentParser.add_argument`. When the command line is
-parsed, argument values will be checked, and an error message will be displayed if
-the argument was not one of the acceptable values::
-
- >>> parser = argparse.ArgumentParser(prog='PROG')
- >>> parser.add_argument('foo', choices='abc')
- >>> parser.parse_args('c'.split())
- Namespace(foo='c')
- >>> parser.parse_args('X'.split())
- usage: PROG [-h] {a,b,c}
- PROG: error: argument foo: invalid choice: 'X' (choose from 'a', 'b', 'c')
-
-Note that inclusion in the ``choices`` container is checked after any type_
-conversions have been performed, so the type of the objects in the ``choices``
+parsed, argument values will be checked, and an error message will be displayed
+if the argument was not one of the acceptable values::
+
+ >>> parser = argparse.ArgumentParser(prog='game.py')
+ >>> parser.add_argument('move', choices=['rock', 'paper', 'scissors'])
+ >>> parser.parse_args(['rock'])
+ Namespace(move='rock')
+ >>> parser.parse_args(['fire'])
+ usage: game.py [-h] {rock,paper,scissors}
+ game.py: error: argument move: invalid choice: 'fire' (choose from 'rock',
+ 'paper', 'scissors')
+
+Note that inclusion in the *choices* container is checked after any type_
+conversions have been performed, so the type of the objects in the *choices*
container should match the type_ specified::
- >>> parser = argparse.ArgumentParser(prog='PROG')
- >>> parser.add_argument('foo', type=complex, choices=[1, 1j])
- >>> parser.parse_args('1j'.split())
- Namespace(foo=1j)
- >>> parser.parse_args('-- -4'.split())
- usage: PROG [-h] {1,1j}
- PROG: error: argument foo: invalid choice: (-4+0j) (choose from 1, 1j)
-
-Any object that supports the ``in`` operator can be passed as the ``choices``
+ >>> parser = argparse.ArgumentParser(prog='doors.py')
+ >>> parser.add_argument('door', type=int, choices=range(1, 4))
+ >>> print(parser.parse_args(['3']))
+ Namespace(door=3)
+ >>> parser.parse_args(['4'])
+ usage: doors.py [-h] {1,2,3}
+ doors.py: error: argument door: invalid choice: 4 (choose from 1, 2, 3)
+
+Any object that supports the ``in`` operator can be passed as the *choices*
value, so :class:`dict` objects, :class:`set` objects, custom containers,
etc. are all supported.
@@ -1093,7 +1118,7 @@ setting the ``help`` value to ``argparse.SUPPRESS``::
metavar
^^^^^^^
-When :class:`ArgumentParser` generates help messages, it need some way to refer
+When :class:`ArgumentParser` generates help messages, it needs some way to refer
to each expected argument. By default, ArgumentParser objects use the dest_
value as the "name" of each object. By default, for positional argument
actions, the dest_ value is used directly, and for optional argument actions,
@@ -1326,12 +1351,14 @@ argument::
>>> parser.parse_args(['--', '-f'])
Namespace(foo='-f', one=None)
+.. _prefix-matching:
-Argument abbreviations
-^^^^^^^^^^^^^^^^^^^^^^
+Argument abbreviations (prefix matching)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The :meth:`~ArgumentParser.parse_args` method allows long options to be
-abbreviated if the abbreviation is unambiguous::
+abbreviated to a prefix, if the abbreviation is unambiguous (the prefix matches
+a unique option)::
>>> parser = argparse.ArgumentParser(prog='PROG')
>>> parser.add_argument('-bacon')
@@ -1407,7 +1434,10 @@ Other utilities
Sub-commands
^^^^^^^^^^^^
-.. method:: ArgumentParser.add_subparsers()
+.. method:: ArgumentParser.add_subparsers([title], [description], [prog], \
+ [parser_class], [action], \
+ [option_string], [dest], [help], \
+ [metavar])
Many programs split up their functionality into a number of sub-commands,
for example, the ``svn`` program can invoke sub-commands like ``svn
@@ -1416,11 +1446,35 @@ Sub-commands
different functions which require different kinds of command-line arguments.
:class:`ArgumentParser` supports the creation of such sub-commands with the
:meth:`add_subparsers` method. The :meth:`add_subparsers` method is normally
- called with no arguments and returns an special action object. This object
+ called with no arguments and returns a special action object. This object
has a single method, :meth:`~ArgumentParser.add_parser`, which takes a
command name and any :class:`ArgumentParser` constructor arguments, and
returns an :class:`ArgumentParser` object that can be modified as usual.
+ Description of parameters:
+
+ * title - title for the sub-parser group in help output; by default
+ "subcommands" if description is provided, otherwise uses title for
+ positional arguments
+
+ * description - description for the sub-parser group in help output, by
+ default None
+
+ * prog - usage information that will be displayed with sub-command help,
+ by default the name of the program and any positional arguments before the
+ subparser argument
+
+ * parser_class - class which will be used to create sub-parser instances, by
+ default the class of the current parser (e.g. ArgumentParser)
+
+ * dest - name of the attribute under which sub-command name will be
+ stored; by default None and no value is stored
+
+ * help - help for sub-parser group in help output, by default None
+
+ * metavar - string presenting available sub-commands in help; by default it
+ is None and presents sub-commands in form {cmd1, cmd2, ..}
+
Some example usage::
>>> # create the top-level parser
@@ -1462,8 +1516,8 @@ Sub-commands
positional arguments:
{a,b} sub-command help
- a a help
- b b help
+ a a help
+ b b help
optional arguments:
-h, --help show this help message and exit
@@ -1634,14 +1688,14 @@ Argument groups
--bar BAR bar help
- Note that any arguments not your user defined groups will end up back in the
- usual "positional arguments" and "optional arguments" sections.
+ Note that any arguments not in your user-defined groups will end up back
+ in the usual "positional arguments" and "optional arguments" sections.
Mutual exclusion
^^^^^^^^^^^^^^^^
-.. method:: add_mutually_exclusive_group(required=False)
+.. method:: ArgumentParser.add_mutually_exclusive_group(required=False)
Create a mutually exclusive group. :mod:`argparse` will make sure that only
one of the arguments in the mutually exclusive group was present on the
@@ -1770,6 +1824,12 @@ the populated namespace and the list of remaining argument strings.
>>> parser.parse_known_args(['--foo', '--badger', 'BAR', 'spam'])
(Namespace(bar='BAR', foo=True), ['--badger', 'spam'])
+.. warning::
+ :ref:`Prefix matching <prefix-matching>` rules apply to
+ :meth:`parse_known_args`. The parser may consume an option even if it's just
+ a prefix of one of its known options, instead of leaving it in the remaining
+ arguments list.
+
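   For example, a small sketch (the argument names are illustrative) of a prefix
   being consumed by :meth:`parse_known_args` rather than returned as a
   remaining argument::

      >>> parser = argparse.ArgumentParser()
      >>> parser.add_argument('--badger')
      >>> parser.parse_known_args(['--bad', 'BAR', 'spam'])
      (Namespace(badger='BAR'), ['spam'])
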
Customizing file parsing
^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1826,9 +1886,10 @@ A partial upgrade path from :mod:`optparse` to :mod:`argparse`:
* Replace all :meth:`optparse.OptionParser.add_option` calls with
:meth:`ArgumentParser.add_argument` calls.
-* Replace ``options, args = parser.parse_args()`` with ``args =
+* Replace ``(options, args) = parser.parse_args()`` with ``args =
parser.parse_args()`` and add additional :meth:`ArgumentParser.add_argument`
- calls for the positional arguments.
+ calls for the positional arguments. Keep in mind that what was previously
+ called ``options`` is now called ``args`` in the :mod:`argparse` context.
* Replace callback actions and the ``callback_*`` keyword arguments with
``type`` or ``action`` arguments.
diff --git a/Doc/library/array.rst b/Doc/library/array.rst
index d34cf38..1766d47 100644
--- a/Doc/library/array.rst
+++ b/Doc/library/array.rst
@@ -268,9 +268,7 @@ Examples::
Packing and unpacking of External Data Representation (XDR) data as used in some
remote procedure call systems.
- `The Numerical Python Manual <http://numpy.sourceforge.net/numdoc/HTML/numdoc.htm>`_
+ `The Numerical Python Documentation <http://docs.scipy.org/doc/>`_
The Numeric Python extension (NumPy) defines another array type; see
- http://numpy.sourceforge.net/ for further information about Numerical Python.
- (A PDF version of the NumPy manual is available at
- http://numpy.sourceforge.net/numdoc/numdoc.pdf).
+ http://www.numpy.org/ for further information about Numerical Python.
diff --git a/Doc/library/ast.rst b/Doc/library/ast.rst
index 5130d00..8adc88f 100644
--- a/Doc/library/ast.rst
+++ b/Doc/library/ast.rst
@@ -131,10 +131,10 @@ and classes for traversing abstract syntax trees:
.. function:: literal_eval(node_or_string)
- Safely evaluate an expression node or a string containing a Python
- expression. The string or node provided may only consist of the following
- Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
- and ``None``.
+ Safely evaluate an expression node or a Unicode or *Latin-1* encoded string
+ containing a Python expression. The string or node provided may only consist
+ of the following Python literal structures: strings, numbers, tuples, lists,
+ dicts, booleans, and ``None``.
This can be used for safely evaluating strings containing Python expressions
from untrusted sources without the need to parse the values oneself.
@@ -257,6 +257,6 @@ and classes for traversing abstract syntax trees:
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
- wanted *annotate_fields* must be set to False. Attributes such as line
+ wanted *annotate_fields* must be set to ``False``. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to ``True``.
diff --git a/Doc/library/asyncore.rst b/Doc/library/asyncore.rst
index c108450..70ca8e7 100644
--- a/Doc/library/asyncore.rst
+++ b/Doc/library/asyncore.rst
@@ -53,10 +53,10 @@ any that have been added to the map during asynchronous service) is closed.
channels have been closed. All arguments are optional. The *count*
parameter defaults to None, resulting in the loop terminating only when all
channels have been closed. The *timeout* argument sets the timeout
- parameter for the appropriate :func:`select` or :func:`poll` call, measured
- in seconds; the default is 30 seconds. The *use_poll* parameter, if true,
- indicates that :func:`poll` should be used in preference to :func:`select`
- (the default is ``False``).
+ parameter for the appropriate :func:`~select.select` or :func:`~select.poll`
+ call, measured in seconds; the default is 30 seconds. The *use_poll*
+ parameter, if true, indicates that :func:`~select.poll` should be used in
+ preference to :func:`~select.select` (the default is ``False``).
The *map* parameter is a dictionary whose items are the channels to watch.
As channels are closed they are deleted from their map. If *map* is
@@ -318,13 +318,10 @@ connections and dispatches the incoming connections to a handler::
def handle_accept(self):
pair = self.accept()
- if pair is None:
- pass
- else:
+ if pair is not None:
sock, addr = pair
print 'Incoming connection from %s' % repr(addr)
handler = EchoHandler(sock)
server = EchoServer('localhost', 8080)
asyncore.loop()
-
diff --git a/Doc/library/atexit.rst b/Doc/library/atexit.rst
index 6ac36b2..0b5e121 100644
--- a/Doc/library/atexit.rst
+++ b/Doc/library/atexit.rst
@@ -15,13 +15,14 @@
The :mod:`atexit` module defines a single function to register cleanup
functions. Functions thus registered are automatically executed upon normal
-interpreter termination. The order in which the functions are called is not
-defined; if you have cleanup operations that depend on each other, you should
-wrap them in a function and register that one. This keeps :mod:`atexit` simple.
+interpreter termination. :mod:`atexit` runs these functions in the *reverse*
+order in which they were registered; if you register ``A``, ``B``, and ``C``,
+at interpreter termination time they will be run in the order ``C``, ``B``,
+``A``.
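
A minimal sketch of this ordering (the function and names are illustrative)::

   import atexit

   def goodbye(name):
       print 'exiting:', name

   for name in ('A', 'B', 'C'):
       atexit.register(goodbye, name)

   # On normal interpreter termination this prints:
   #   exiting: C
   #   exiting: B
   #   exiting: A
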
-Note: the functions registered via this module are not called when the program
-is killed by a signal not handled by Python, when a Python fatal internal error
-is detected, or when :func:`os._exit` is called.
+**Note:** The functions registered via this module are not called when the
+program is killed by a signal not handled by Python, when a Python fatal
+internal error is detected, or when :func:`os._exit` is called.
.. index:: single: exitfunc (in sys)
@@ -76,7 +77,7 @@ automatically when the program terminates without relying on the application
making an explicit call into this module at termination. ::
try:
- _count = int(open("/tmp/counter").read())
+ _count = int(open("counter").read())
except IOError:
_count = 0
@@ -85,7 +86,7 @@ making an explicit call into this module at termination. ::
_count = _count + n
def savecounter():
- open("/tmp/counter", "w").write("%d" % _count)
+ open("counter", "w").write("%d" % _count)
import atexit
atexit.register(savecounter)
diff --git a/Doc/library/audioop.rst b/Doc/library/audioop.rst
index 29c3914..e747ba1 100644
--- a/Doc/library/audioop.rst
+++ b/Doc/library/audioop.rst
@@ -38,7 +38,7 @@ The module defines the following variables and functions:
Return a fragment which is the addition of the two samples passed as parameters.
*width* is the sample width in bytes, either ``1``, ``2`` or ``4``. Both
- fragments should have the same length.
+ fragments should have the same length. Samples are truncated in case of overflow.
.. function:: adpcm2lin(adpcmfragment, width, state)
@@ -71,7 +71,7 @@ The module defines the following variables and functions:
.. function:: bias(fragment, width, bias)
Return a fragment that is the original fragment with a bias added to each
- sample.
+ sample. Samples wrap around in case of overflow.
.. function:: cross(fragment, width)
@@ -162,12 +162,6 @@ The module defines the following variables and functions:
hardware, among others.
-.. function:: minmax(fragment, width)
-
- Return a tuple consisting of the minimum and maximum values of all samples in
- the sound fragment.
-
-
.. function:: max(fragment, width)
Return the maximum of the *absolute value* of all samples in a fragment.
@@ -178,10 +172,16 @@ The module defines the following variables and functions:
Return the maximum peak-peak value in the sound fragment.
+.. function:: minmax(fragment, width)
+
+ Return a tuple consisting of the minimum and maximum values of all samples in
+ the sound fragment.
+
+
.. function:: mul(fragment, width, factor)
Return a fragment that has all samples in the original fragment multiplied by
- the floating-point value *factor*. Overflow is silently ignored.
+ the floating-point value *factor*. Samples are truncated in case of overflow.
.. function:: ratecv(fragment, width, nchannels, inrate, outrate, state[, weightA[, weightB]])
@@ -247,7 +247,7 @@ to be stateless (i.e. to be able to tolerate packet loss) you should not only
transmit the data but also the state. Note that you should send the *initial*
state (the one you passed to :func:`lin2adpcm`) along to the decoder, not the
final state (as returned by the coder). If you want to use
-:func:`struct.struct` to store the state in binary you can code the first
+:class:`struct.Struct` to store the state in binary you can code the first
element (the predicted value) in 16 bits and the second (the delta index) in 8.
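
A possible sketch of that layout, assuming the ADPCM state is the usual
``(value, index)`` tuple::

   import struct

   state_struct = struct.Struct('>hB')   # 16-bit predicted value, 8-bit delta index

   def pack_state(state):
       value, index = state
       return state_struct.pack(value, index)

   def unpack_state(data):
       value, index = state_struct.unpack(data)
       return (value, index)
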
The ADPCM coders have never been tried against other ADPCM coders, only against
diff --git a/Doc/library/base64.rst b/Doc/library/base64.rst
index ab85436..c9ce28b 100644
--- a/Doc/library/base64.rst
+++ b/Doc/library/base64.rst
@@ -93,7 +93,7 @@ The modern interface, which was introduced in Python 2.4, provides:
digit 0 is always mapped to the letter O). For security purposes the default is
``None``, so that 0 and 1 are not allowed in the input.
- The decoded string is returned. A :exc:`TypeError` is raised if *s* were
+ The decoded string is returned. A :exc:`TypeError` is raised if *s* is
incorrectly padded or if there are non-alphabet characters present in the
string.
diff --git a/Doc/library/basehttpserver.rst b/Doc/library/basehttpserver.rst
index 98b5ce4..01776af 100644
--- a/Doc/library/basehttpserver.rst
+++ b/Doc/library/basehttpserver.rst
@@ -6,8 +6,8 @@
.. note::
The :mod:`BaseHTTPServer` module has been merged into :mod:`http.server` in
- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
- converting your sources to 3.0.
+ Python 3. The :term:`2to3` tool will automatically adapt imports when
+ converting your sources to Python 3.
.. index::
@@ -240,7 +240,7 @@ to a handler. Code to create and run the server looks like this::
to create custom error logging mechanisms. The *format* argument is a
standard printf-style format string, where the additional arguments to
:meth:`log_message` are applied as inputs to the formatting. The client
- address and current date and time are prefixed to every message logged.
+ IP address and current date and time are prefixed to every message logged.
.. method:: version_string()
diff --git a/Doc/library/bastion.rst b/Doc/library/bastion.rst
index 8f103e7..2e3efcd 100644
--- a/Doc/library/bastion.rst
+++ b/Doc/library/bastion.rst
@@ -7,7 +7,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`Bastion` module has been removed in Python 3.0.
+ The :mod:`Bastion` module has been removed in Python 3.
.. moduleauthor:: Barry Warsaw <bwarsaw@python.org>
diff --git a/Doc/library/bdb.rst b/Doc/library/bdb.rst
index bd2b16f..82ef37e 100644
--- a/Doc/library/bdb.rst
+++ b/Doc/library/bdb.rst
@@ -20,7 +20,7 @@ The following exception is defined:
The :mod:`bdb` module also defines two classes:
-.. class:: Breakpoint(self, file, line[, temporary=0[, cond=None [, funcname=None]]])
+.. class:: Breakpoint(self, file, line, temporary=0, cond=None, funcname=None)
This class implements temporary breakpoints, ignore counts, disabling and
(re-)enabling, and conditionals.
@@ -186,17 +186,17 @@ The :mod:`bdb` module also defines two classes:
.. method:: user_line(frame)
This method is called from :meth:`dispatch_line` when either
- :meth:`stop_here` or :meth:`break_here` yields True.
+ :meth:`stop_here` or :meth:`break_here` yields ``True``.
.. method:: user_return(frame, return_value)
This method is called from :meth:`dispatch_return` when :meth:`stop_here`
- yields True.
+ yields ``True``.
.. method:: user_exception(frame, exc_info)
This method is called from :meth:`dispatch_exception` when
- :meth:`stop_here` yields True.
+ :meth:`stop_here` yields ``True``.
.. method:: do_clear(arg)
@@ -237,7 +237,7 @@ The :mod:`bdb` module also defines two classes:
.. method:: set_quit()
- Set the :attr:`quitting` attribute to True. This raises :exc:`BdbQuit` in
+ Set the :attr:`quitting` attribute to ``True``. This raises :exc:`BdbQuit` in
the next call to one of the :meth:`dispatch_\*` methods.
@@ -245,7 +245,7 @@ The :mod:`bdb` module also defines two classes:
breakpoints. These methods return a string containing an error message if
something went wrong, or ``None`` if all is well.
- .. method:: set_break(filename, lineno[, temporary=0[, cond[, funcname]]])
+ .. method:: set_break(filename, lineno, temporary=0, cond=None, funcname=None)
Set a new breakpoint. If the *lineno* line doesn't exist for the
*filename* passed as argument, return an error message. The *filename*
diff --git a/Doc/library/binascii.rst b/Doc/library/binascii.rst
index 23939de..0f8a3de 100644
--- a/Doc/library/binascii.rst
+++ b/Doc/library/binascii.rst
@@ -127,7 +127,7 @@ The :mod:`binascii` module defines the following functions:
The return value is in the range [-2**31, 2**31-1]
regardless of platform. In the past the value would be signed on
some platforms and unsigned on others. Use & 0xffffffff on the
- value if you want it to match 3.0 behavior.
+ value if you want it to match Python 3 behavior.
.. versionchanged:: 3.0
The return value is unsigned and in the range [0, 2**32-1]
diff --git a/Doc/library/bisect.rst b/Doc/library/bisect.rst
index 27ab526..64a362e 100644
--- a/Doc/library/bisect.rst
+++ b/Doc/library/bisect.rst
@@ -123,9 +123,9 @@ based on a set of ordered numeric breakpoints: 90 and up is an 'A', 80 to 89 is
a 'B', and so on::
>>> def grade(score, breakpoints=[60, 70, 80, 90], grades='FDCBA'):
- ... i = bisect(breakpoints, score)
- ... return grades[i]
- ...
+ i = bisect(breakpoints, score)
+ return grades[i]
+
>>> [grade(score) for score in [33, 99, 77, 70, 89, 90, 100]]
['F', 'A', 'C', 'C', 'B', 'A', 'A']
diff --git a/Doc/library/bsddb.rst b/Doc/library/bsddb.rst
index cdd380a..7551f10 100644
--- a/Doc/library/bsddb.rst
+++ b/Doc/library/bsddb.rst
@@ -7,7 +7,7 @@
.. sectionauthor:: Skip Montanaro <skip@pobox.com>
.. deprecated:: 2.6
- The :mod:`bsddb` module has been deprecated for removal in Python 3.0.
+ The :mod:`bsddb` module has been removed in Python 3.
The :mod:`bsddb` module provides an interface to the Berkeley DB library. Users
@@ -52,7 +52,7 @@ arguments should be used in most instances.
Open the hash format file named *filename*. Files never intended to be
preserved on disk may be created by passing ``None`` as the *filename*. The
optional *flag* identifies the mode used to open the file. It may be ``'r'``
- (read only), ``'w'`` (read-write) , ``'c'`` (read-write - create if necessary;
+ (read only), ``'w'`` (read-write), ``'c'`` (read-write - create if necessary;
the default) or ``'n'`` (read-write - truncate to zero length). The other
arguments are rarely used and are just passed to the low-level :c:func:`dbopen`
function. Consult the Berkeley DB documentation for their use and
@@ -86,7 +86,7 @@ arguments should be used in most instances.
This is present *only* to allow backwards compatibility with systems which ship
with the old Berkeley DB 1.85 database library. The :mod:`bsddb185` module
should never be used directly in new code. The module has been removed in
- Python 3.0. If you find you still need it look in PyPI.
+ Python 3. If you find you still need it, look on PyPI.
.. seealso::
@@ -170,7 +170,7 @@ dictionaries. In addition, they support the methods listed below.
Example::
>>> import bsddb
- >>> db = bsddb.btopen('/tmp/spam.db', 'c')
+ >>> db = bsddb.btopen('spam.db', 'c')
>>> for i in range(10): db['%d'%i] = '%d'% (i*i)
...
>>> db['3']
diff --git a/Doc/library/bz2.rst b/Doc/library/bz2.rst
index 20dc765..6793c44 100644
--- a/Doc/library/bz2.rst
+++ b/Doc/library/bz2.rst
@@ -14,9 +14,6 @@ This module provides a comprehensive interface for the bz2 compression library.
It implements a complete file interface, one-shot (de)compression functions, and
types for sequential (de)compression.
-For other archive formats, see the :mod:`gzip`, :mod:`zipfile`, and
-:mod:`tarfile` modules.
-
Here is a summary of the features offered by the bz2 module:
* :class:`BZ2File` class implements a complete file interface, including
@@ -45,6 +42,9 @@ Here is a summary of the features offered by the bz2 module:
Handling of compressed files is offered by the :class:`BZ2File` class.
+.. index::
+ single: universal newlines; bz2.BZ2File class
+
.. class:: BZ2File(filename[, mode[, buffering[, compresslevel]]])
Open a bz2 file. Mode can be either ``'r'`` or ``'w'``, for reading (default)
@@ -53,7 +53,7 @@ Handling of compressed files is offered by the :class:`BZ2File` class.
unbuffered, and larger numbers specify the buffer size; the default is
``0``. If *compresslevel* is given, it must be a number between ``1`` and
``9``; the default is ``9``. Add a ``'U'`` to mode to open the file for input
- with universal newline support. Any line ending in the input file will be
+ in :term:`universal newlines` mode. Any line ending in the input file will be
seen as a ``'\n'`` in Python. Also, a file so opened gains the attribute
:attr:`newlines`; the value for this attribute is one of ``None`` (no newline
read yet), ``'\r'``, ``'\n'``, ``'\r\n'`` or a tuple containing all the
diff --git a/Doc/library/calendar.rst b/Doc/library/calendar.rst
index f4f4693..a04505f 100644
--- a/Doc/library/calendar.rst
+++ b/Doc/library/calendar.rst
@@ -294,10 +294,11 @@ For simple text calendars this module provides the following functions.
.. function:: timegm(tuple)
- An unrelated but handy function that takes a time tuple such as returned by the
- :func:`gmtime` function in the :mod:`time` module, and returns the corresponding
- Unix timestamp value, assuming an epoch of 1970, and the POSIX encoding. In
- fact, :func:`time.gmtime` and :func:`timegm` are each others' inverse.
+ An unrelated but handy function that takes a time tuple such as returned by
+ the :func:`~time.gmtime` function in the :mod:`time` module, and returns the
+ corresponding Unix timestamp value, assuming an epoch of 1970, and the POSIX
+ encoding. In fact, :func:`time.gmtime` and :func:`timegm` are each other's
+ inverse.
.. versionadded:: 2.0
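
   A quick round trip illustrating the inverse relationship::

      >>> import calendar, time
      >>> calendar.timegm(time.gmtime(1234567890))
      1234567890
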
diff --git a/Doc/library/carbon.rst b/Doc/library/carbon.rst
index 4abb495..40b76ef 100644
--- a/Doc/library/carbon.rst
+++ b/Doc/library/carbon.rst
@@ -5,7 +5,7 @@
Mac OS Toolbox Modules
**********************
-There are a set of modules that provide interfaces to various Mac OS toolboxes.
+These modules provide interfaces to various legacy Mac OS toolboxes.
If applicable the module will define a number of Python objects for the various
structures declared by the toolbox, and operations will be implemented as
methods of the object. Other operations will be implemented as functions in the
@@ -24,7 +24,10 @@ framework and Qt is in the QuickTime framework. The normal use pattern is ::
.. note::
- The Carbon modules have been removed in Python 3.0.
+ Most of the OS X APIs that these modules use are deprecated or removed
+ in recent versions of OS X. Many are not available when Python is
+ executing in 64-bit mode. The Carbon modules have been removed in
+ Python 3. You should avoid using them in Python 2.
:mod:`Carbon.AE` --- Apple Events
diff --git a/Doc/library/cd.rst b/Doc/library/cd.rst
index f1d9763..40b8ce6 100644
--- a/Doc/library/cd.rst
+++ b/Doc/library/cd.rst
@@ -9,7 +9,7 @@
.. deprecated:: 2.6
- The :mod:`cd` module has been deprecated for removal in Python 3.0.
+ The :mod:`cd` module has been removed in Python 3.
This module provides an interface to the Silicon Graphics CD library. It is
diff --git a/Doc/library/cgi.rst b/Doc/library/cgi.rst
index b95f131..6f7c265 100644
--- a/Doc/library/cgi.rst
+++ b/Doc/library/cgi.rst
@@ -81,7 +81,7 @@ program to users of your script, you can have the reports saved to files
instead, with code like this::
import cgitb
- cgitb.enable(display=0, logdir="/tmp")
+ cgitb.enable(display=0, logdir="/path/to/logdir")
It's very helpful to use this feature during script development. The reports
produced by :mod:`cgitb` provide information that can save you a lot of time in
@@ -97,7 +97,7 @@ consume standard input, it should be instantiated only once.
The :class:`FieldStorage` instance can be indexed like a Python dictionary.
It allows membership testing with the :keyword:`in` operator, and also supports
-the standard dictionary method :meth:`keys` and the built-in function
+the standard dictionary method :meth:`~dict.keys` and the built-in function
:func:`len`. Form fields containing empty strings are ignored and do not appear
in the dictionary; to keep such values, provide a true value for the optional
*keep_blank_values* keyword parameter when creating the :class:`FieldStorage`
@@ -119,28 +119,29 @@ string::
Here the fields, accessed through ``form[key]``, are themselves instances of
:class:`FieldStorage` (or :class:`MiniFieldStorage`, depending on the form
-encoding). The :attr:`value` attribute of the instance yields the string value
-of the field. The :meth:`getvalue` method returns this string value directly;
-it also accepts an optional second argument as a default to return if the
-requested key is not present.
+encoding). The :attr:`~FieldStorage.value` attribute of the instance yields
+the string value of the field. The :meth:`~FieldStorage.getvalue` method
+returns this string value directly; it also accepts an optional second argument
+as a default to return if the requested key is not present.
If the submitted form data contains more than one field with the same name, the
object retrieved by ``form[key]`` is not a :class:`FieldStorage` or
:class:`MiniFieldStorage` instance but a list of such instances. Similarly, in
this situation, ``form.getvalue(key)`` would return a list of strings. If you
expect this possibility (when your HTML form contains multiple fields with the
-same name), use the :func:`getlist` function, which always returns a list of
-values (so that you do not need to special-case the single item case). For
-example, this code concatenates any number of username fields, separated by
-commas::
+same name), use the :meth:`~FieldStorage.getlist` method, which always returns
+a list of values (so that you do not need to special-case the single item
+case). For example, this code concatenates any number of username fields,
+separated by commas::
value = form.getlist("username")
usernames = ",".join(value)
If a field represents an uploaded file, accessing the value via the
-:attr:`value` attribute or the :func:`getvalue` method reads the entire file in
-memory as a string. This may not be what you want. You can test for an uploaded
-file by testing either the :attr:`filename` attribute or the :attr:`!file`
+:attr:`~FieldStorage.value` attribute or the :func:`~FieldStorage.getvalue`
+method reads the entire file in memory as a string. This may not be what you
+want. You can test for an uploaded file by testing either the
+:attr:`~FieldStorage.filename` attribute or the :attr:`~FieldStorage.file`
attribute. You can then read the data at leisure from the :attr:`!file`
attribute::
@@ -155,8 +156,8 @@ attribute::
If an error is encountered when obtaining the contents of an uploaded file
(for example, when the user interrupts the form submission by clicking on
-a Back or Cancel button) the :attr:`done` attribute of the object for the
-field will be set to the value -1.
+a Back or Cancel button) the :attr:`~FieldStorage.done` attribute of the
+object for the field will be set to the value -1.
The file upload draft standard entertains the possibility of uploading multiple
files from one field (using a recursive :mimetype:`multipart/\*` encoding).
@@ -225,8 +226,8 @@ Therefore, the appropriate way to read form data values was to always use the
code which checks whether the obtained value is a single value or a list of
values. That's annoying and leads to less readable scripts.
-A more convenient approach is to use the methods :meth:`getfirst` and
-:meth:`getlist` provided by this higher level interface.
+A more convenient approach is to use the methods :meth:`~FieldStorage.getfirst`
+and :meth:`~FieldStorage.getlist` provided by this higher level interface.
.. method:: FieldStorage.getfirst(name[, default])
@@ -284,10 +285,10 @@ These are useful if you want more control, or if you want to employ some of the
algorithms implemented in this module in other circumstances.
-.. function:: parse(fp[, keep_blank_values[, strict_parsing]])
+.. function:: parse(fp[, environ[, keep_blank_values[, strict_parsing]]])
Parse a query in the environment or from a file (the file defaults to
- ``sys.stdin``). The *keep_blank_values* and *strict_parsing* parameters are
+ ``sys.stdin`` and the environment defaults to ``os.environ``). The
+ *keep_blank_values* and *strict_parsing* parameters are
passed to :func:`urlparse.parse_qs` unchanged.
diff --git a/Doc/library/cgihttpserver.rst b/Doc/library/cgihttpserver.rst
index 390b2a6..013ee82 100644
--- a/Doc/library/cgihttpserver.rst
+++ b/Doc/library/cgihttpserver.rst
@@ -8,8 +8,8 @@
.. note::
The :mod:`CGIHTTPServer` module has been merged into :mod:`http.server` in
- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
- converting your sources to 3.0.
+ Python 3. The :term:`2to3` tool will automatically adapt imports when
+ converting your sources to Python 3.
The :mod:`CGIHTTPServer` module defines a request-handler class, interface
diff --git a/Doc/library/chunk.rst b/Doc/library/chunk.rst
index 64ce4e2..04c7e27 100644
--- a/Doc/library/chunk.rst
+++ b/Doc/library/chunk.rst
@@ -55,8 +55,9 @@ instance will fail with a :exc:`EOFError` exception.
Class which represents a chunk. The *file* argument is expected to be a
file-like object. An instance of this class is specifically allowed. The
- only method that is needed is :meth:`read`. If the methods :meth:`seek` and
- :meth:`tell` are present and don't raise an exception, they are also used.
+ only method that is needed is :meth:`~file.read`. If the methods
+ :meth:`~file.seek` and :meth:`~file.tell` are present and don't
+ raise an exception, they are also used.
If these methods are present and raise an exception, they are expected to not
have altered the object. If the optional argument *align* is true, chunks
are assumed to be aligned on 2-byte boundaries. If *align* is false, no
diff --git a/Doc/library/code.rst b/Doc/library/code.rst
index 38e26bc..698fb11 100644
--- a/Doc/library/code.rst
+++ b/Doc/library/code.rst
@@ -33,11 +33,11 @@ build applications which provide an interactive interpreter prompt.
Convenience function to run a read-eval-print loop. This creates a new instance
of :class:`InteractiveConsole` and sets *readfunc* to be used as the
- :meth:`raw_input` method, if provided. If *local* is provided, it is passed to
- the :class:`InteractiveConsole` constructor for use as the default namespace for
- the interpreter loop. The :meth:`interact` method of the instance is then run
- with *banner* passed as the banner to use, if provided. The console object is
- discarded after use.
+ :meth:`InteractiveConsole.raw_input` method, if provided. If *local* is
+ provided, it is passed to the :class:`InteractiveConsole` constructor for
+ use as the default namespace for the interpreter loop. The :meth:`interact`
+ method of the instance is then run with *banner* passed as the banner to
+ use, if provided. The console object is discarded after use.
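
   For instance, a small sketch (the banner text and namespace contents are
   illustrative)::

      import code

      namespace = {'answer': 42}
      code.interact(banner='demo console', local=namespace)
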
.. function:: compile_command(source[, filename[, symbol]])
diff --git a/Doc/library/codecs.rst b/Doc/library/codecs.rst
index 62bb504..05c7156 100644
--- a/Doc/library/codecs.rst
+++ b/Doc/library/codecs.rst
@@ -23,6 +23,31 @@ manages the codec and error handling lookup process.
It defines the following functions:
+.. function:: encode(obj[, encoding[, errors]])
+
+ Encodes *obj* using the codec registered for *encoding*. The default
+ encoding is ``'ascii'``.
+
+ *Errors* may be given to set the desired error handling scheme. The
+ default error handler is ``'strict'`` meaning that encoding errors raise
+ :exc:`ValueError` (or a more codec specific subclass, such as
+ :exc:`UnicodeEncodeError`). Refer to :ref:`codec-base-classes` for more
+ information on codec error handling.
+
+ .. versionadded:: 2.4
+
+.. function:: decode(obj[, encoding[, errors]])
+
+ Decodes *obj* using the codec registered for *encoding*. The default
+ encoding is ``'ascii'``.
+
+ *Errors* may be given to set the desired error handling scheme. The
+ default error handler is ``'strict'`` meaning that decoding errors raise
+ :exc:`ValueError` (or a more codec specific subclass, such as
+ :exc:`UnicodeDecodeError`). Refer to :ref:`codec-base-classes` for more
+ information on codec error handling.
+
+ .. versionadded:: 2.4
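
   A short sketch of both directions (UTF-8 is used here purely for
   illustration)::

      >>> import codecs
      >>> codecs.encode(u'caf\xe9', 'utf-8')
      'caf\xc3\xa9'
      >>> codecs.decode('caf\xc3\xa9', 'utf-8')
      u'caf\xe9'
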
.. function:: register(search_function)
@@ -47,9 +72,9 @@ It defines the following functions:
The various functions or classes take the following arguments:
*encode* and *decode*: These must be functions or methods which have the same
- interface as the :meth:`encode`/:meth:`decode` methods of Codec instances (see
- Codec Interface). The functions/methods are expected to work in a stateless
- mode.
+ interface as the :meth:`~Codec.encode`/:meth:`~Codec.decode` methods of Codec
+ instances (see :ref:`Codec Interface <codec-objects>`). The functions/methods
+ are expected to work in a stateless mode.
*incrementalencoder* and *incrementaldecoder*: These have to be factory
functions providing the following interface:
@@ -66,7 +91,7 @@ It defines the following functions:
``factory(stream, errors='strict')``
The factory functions must return objects providing the interfaces defined by
- the base classes :class:`StreamWriter` and :class:`StreamReader`, respectively.
+ the base classes :class:`StreamReader` and :class:`StreamWriter`, respectively.
Stream codecs can maintain state.
Possible values for errors are
@@ -315,11 +340,13 @@ implement the file protocols.
The :class:`Codec` class defines the interface for stateless encoders/decoders.
-To simplify and standardize error handling, the :meth:`encode` and
-:meth:`decode` methods may implement different error handling schemes by
+To simplify and standardize error handling, the :meth:`~Codec.encode` and
+:meth:`~Codec.decode` methods may implement different error handling schemes by
providing the *errors* string argument. The following string values are defined
and implemented by all standard Python codecs:
+.. tabularcolumns:: |l|L|
+
+-------------------------+-----------------------------------------------+
| Value | Meaning |
+=========================+===============================================+
@@ -395,12 +422,14 @@ interfaces of the stateless encoder and decoder:
The :class:`IncrementalEncoder` and :class:`IncrementalDecoder` classes provide
the basic interface for incremental encoding and decoding. Encoding/decoding the
input isn't done with one call to the stateless encoder/decoder function, but
-with multiple calls to the :meth:`encode`/:meth:`decode` method of the
-incremental encoder/decoder. The incremental encoder/decoder keeps track of the
-encoding/decoding process during method calls.
-
-The joined output of calls to the :meth:`encode`/:meth:`decode` method is the
-same as if all the single inputs were joined into one, and this input was
+with multiple calls to the
+:meth:`~IncrementalEncoder.encode`/:meth:`~IncrementalDecoder.decode` method of
+the incremental encoder/decoder. The incremental encoder/decoder keeps track of
+the encoding/decoding process during method calls.
+
+The joined output of calls to the
+:meth:`~IncrementalEncoder.encode`/:meth:`~IncrementalDecoder.decode` method is
+the same as if all the single inputs were joined into one, and this input was
encoded/decoded with the stateless encoder/decoder.
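
A minimal sketch of this equivalence (UTF-8 chosen for illustration)::

   >>> import codecs
   >>> enc = codecs.getincrementalencoder('utf-8')()
   >>> enc.encode(u'caf') + enc.encode(u'\xe9', final=True)
   'caf\xc3\xa9'
   >>> u'caf\xe9'.encode('utf-8')
   'caf\xc3\xa9'
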
@@ -651,7 +680,7 @@ compatible with the Python codec registry.
Read one line from the input stream and return the decoded data.
*size*, if given, is passed as size argument to the stream's
- :meth:`readline` method.
+ :meth:`read` method.
If *keepends* is false line-endings will be stripped from the lines
returned.
@@ -887,6 +916,8 @@ particular, the following variants typically exist:
* an IBM PC code page, which is ASCII compatible
+.. tabularcolumns:: |l|p{0.3\linewidth}|p{0.3\linewidth}|
+
+-----------------+--------------------------------+--------------------------------+
| Codec | Aliases | Languages |
+=================+================================+================================+
@@ -1094,84 +1125,112 @@ particular, the following variants typically exist:
| utf_8_sig | | all languages |
+-----------------+--------------------------------+--------------------------------+
-A number of codecs are specific to Python, so their codec names have no meaning
-outside Python. Some of them don't convert from Unicode strings to byte strings,
-but instead use the property of the Python codecs machinery that any bijective
-function with one argument can be considered as an encoding.
-
-For the codecs listed below, the result in the "encoding" direction is always a
-byte string. The result of the "decoding" direction is listed as operand type in
-the table.
-
-+--------------------+---------------------------+----------------+---------------------------+
-| Codec | Aliases | Operand type | Purpose |
-+====================+===========================+================+===========================+
-| base64_codec | base64, base-64 | byte string | Convert operand to MIME |
-| | | | base64 |
-+--------------------+---------------------------+----------------+---------------------------+
-| bz2_codec | bz2 | byte string | Compress the operand |
-| | | | using bz2 |
-+--------------------+---------------------------+----------------+---------------------------+
-| hex_codec | hex | byte string | Convert operand to |
-| | | | hexadecimal |
-| | | | representation, with two |
-| | | | digits per byte |
-+--------------------+---------------------------+----------------+---------------------------+
-| idna | | Unicode string | Implements :rfc:`3490`, |
-| | | | see also |
-| | | | :mod:`encodings.idna` |
-+--------------------+---------------------------+----------------+---------------------------+
-| mbcs | dbcs | Unicode string | Windows only: Encode |
-| | | | operand according to the |
-| | | | ANSI codepage (CP_ACP) |
-+--------------------+---------------------------+----------------+---------------------------+
-| palmos | | Unicode string | Encoding of PalmOS 3.5 |
-+--------------------+---------------------------+----------------+---------------------------+
-| punycode | | Unicode string | Implements :rfc:`3492` |
-+--------------------+---------------------------+----------------+---------------------------+
-| quopri_codec | quopri, quoted-printable, | byte string | Convert operand to MIME |
-| | quotedprintable | | quoted printable |
-+--------------------+---------------------------+----------------+---------------------------+
-| raw_unicode_escape | | Unicode string | Produce a string that is |
-| | | | suitable as raw Unicode |
-| | | | literal in Python source |
-| | | | code |
-+--------------------+---------------------------+----------------+---------------------------+
-| rot_13 | rot13 | Unicode string | Returns the Caesar-cypher |
-| | | | encryption of the operand |
-+--------------------+---------------------------+----------------+---------------------------+
-| string_escape | | byte string | Produce a string that is |
-| | | | suitable as string |
-| | | | literal in Python source |
-| | | | code |
-+--------------------+---------------------------+----------------+---------------------------+
-| undefined | | any | Raise an exception for |
-| | | | all conversions. Can be |
-| | | | used as the system |
-| | | | encoding if no automatic |
-| | | | :term:`coercion` between |
-| | | | byte and Unicode strings |
-| | | | is desired. |
-+--------------------+---------------------------+----------------+---------------------------+
-| unicode_escape | | Unicode string | Produce a string that is |
-| | | | suitable as Unicode |
-| | | | literal in Python source |
-| | | | code |
-+--------------------+---------------------------+----------------+---------------------------+
-| unicode_internal | | Unicode string | Return the internal |
-| | | | representation of the |
-| | | | operand |
-+--------------------+---------------------------+----------------+---------------------------+
-| uu_codec | uu | byte string | Convert the operand using |
-| | | | uuencode |
-+--------------------+---------------------------+----------------+---------------------------+
-| zlib_codec | zip, zlib | byte string | Compress the operand |
-| | | | using gzip |
-+--------------------+---------------------------+----------------+---------------------------+
+Python Specific Encodings
+-------------------------
+
+A number of predefined codecs are specific to Python, so their codec names have
+no meaning outside Python. These are listed in the tables below based on the
+expected input and output types (note that while text encodings are the most
+common use case for codecs, the underlying codec infrastructure supports
+arbitrary data transforms rather than just text encodings). For asymmetric
+codecs, the stated purpose describes the encoding direction.
+
+The following codecs provide unicode-to-str encoding [#encoding-note]_ and
+str-to-unicode decoding [#decoding-note]_, similar to the Unicode text
+encodings.
+
+.. tabularcolumns:: |l|L|L|
+
++--------------------+---------------------------+---------------------------+
+| Codec | Aliases | Purpose |
++====================+===========================+===========================+
+| idna | | Implements :rfc:`3490`, |
+| | | see also |
+| | | :mod:`encodings.idna` |
++--------------------+---------------------------+---------------------------+
+| mbcs | dbcs | Windows only: Encode |
+| | | operand according to the |
+| | | ANSI codepage (CP_ACP) |
++--------------------+---------------------------+---------------------------+
+| palmos | | Encoding of PalmOS 3.5 |
++--------------------+---------------------------+---------------------------+
+| punycode | | Implements :rfc:`3492` |
++--------------------+---------------------------+---------------------------+
+| raw_unicode_escape | | Produce a string that is |
+| | | suitable as raw Unicode |
+| | | literal in Python source |
+| | | code |
++--------------------+---------------------------+---------------------------+
+| rot_13 | rot13 | Returns the Caesar-cypher |
+| | | encryption of the operand |
++--------------------+---------------------------+---------------------------+
+| undefined | | Raise an exception for |
+| | | all conversions. Can be |
+| | | used as the system |
+| | | encoding if no automatic |
+| | | :term:`coercion` between |
+| | | byte and Unicode strings |
+| | | is desired. |
++--------------------+---------------------------+---------------------------+
+| unicode_escape | | Produce a string that is |
+| | | suitable as Unicode |
+| | | literal in Python source |
+| | | code |
++--------------------+---------------------------+---------------------------+
+| unicode_internal | | Return the internal |
+| | | representation of the |
+| | | operand |
++--------------------+---------------------------+---------------------------+
.. versionadded:: 2.3
The ``idna`` and ``punycode`` encodings.
+The following codecs provide str-to-str encoding and decoding
+[#decoding-note]_.
+
+.. tabularcolumns:: |l|L|L|L|
+
++--------------------+---------------------------+---------------------------+------------------------------+
+| Codec | Aliases | Purpose | Encoder/decoder |
++====================+===========================+===========================+==============================+
+| base64_codec | base64, base-64 | Convert operand to MIME | :meth:`base64.b64encode`, |
+| | | base64 (the result always | :meth:`base64.b64decode` |
+| | | includes a trailing | |
+| | | ``'\n'``) | |
++--------------------+---------------------------+---------------------------+------------------------------+
+| bz2_codec | bz2 | Compress the operand | :meth:`bz2.compress`, |
+| | | using bz2 | :meth:`bz2.decompress` |
++--------------------+---------------------------+---------------------------+------------------------------+
+| hex_codec | hex | Convert operand to | :meth:`base64.b16encode`, |
+| | | hexadecimal | :meth:`base64.b16decode` |
+| | | representation, with two | |
+| | | digits per byte | |
++--------------------+---------------------------+---------------------------+------------------------------+
+| quopri_codec | quopri, quoted-printable, | Convert operand to MIME | :meth:`quopri.encodestring`, |
+| | quotedprintable | quoted printable | :meth:`quopri.decodestring` |
++--------------------+---------------------------+---------------------------+------------------------------+
+| string_escape | | Produce a string that is | |
+| | | suitable as string | |
+| | | literal in Python source | |
+| | | code | |
++--------------------+---------------------------+---------------------------+------------------------------+
+| uu_codec | uu | Convert the operand using | :meth:`uu.encode`, |
+| | | uuencode | :meth:`uu.decode` |
++--------------------+---------------------------+---------------------------+------------------------------+
+| zlib_codec | zip, zlib | Compress the operand | :meth:`zlib.compress`, |
+| | | using gzip | :meth:`zlib.decompress` |
++--------------------+---------------------------+---------------------------+------------------------------+
+
+.. [#encoding-note] str objects are also accepted as input in place of unicode
+ objects. They are implicitly converted to unicode by decoding them using
+ the default encoding. If this conversion fails, it may lead to encoding
+ operations raising :exc:`UnicodeDecodeError`.
+
+.. [#decoding-note] unicode objects are also accepted as input in place of str
+ objects. They are implicitly converted to str by encoding them using the
+ default encoding. If this conversion fails, it may lead to decoding
+ operations raising :exc:`UnicodeEncodeError`.
+
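As a quick illustration of the str-to-str codecs (output shown for the
``hex_codec`` and ``base64_codec`` entries)::

   >>> 'hello'.encode('hex_codec')
   '68656c6c6f'
   >>> '68656c6c6f'.decode('hex_codec')
   'hello'
   >>> 'hello'.encode('base64_codec')
   'aGVsbG8=\n'
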
:mod:`encodings.idna` --- Internationalized Domain Names in Applications
------------------------------------------------------------------------
diff --git a/Doc/library/collections.rst b/Doc/library/collections.rst
index abb36db..5bb3569 100644
--- a/Doc/library/collections.rst
+++ b/Doc/library/collections.rst
@@ -51,7 +51,7 @@ For example::
>>> # Find the ten most common words in Hamlet
>>> import re
- >>> words = re.findall('\w+', open('hamlet.txt').read().lower())
+ >>> words = re.findall(r'\w+', open('hamlet.txt').read().lower())
>>> Counter(words).most_common(10)
[('the', 1143), ('and', 966), ('to', 762), ('of', 669), ('i', 631),
('you', 554), ('a', 546), ('my', 514), ('hamlet', 471), ('in', 451)]
@@ -120,6 +120,7 @@ For example::
>>> c = Counter(a=4, b=2, c=0, d=-2)
>>> d = Counter(a=1, b=2, c=3, d=4)
>>> c.subtract(d)
+ >>> c
Counter({'a': 3, 'b': 0, 'c': -3, 'd': -6})
The usual dictionary methods are available for :class:`Counter` objects
@@ -145,7 +146,7 @@ Common patterns for working with :class:`Counter` objects::
dict(c) # convert to a regular dictionary
c.items() # convert to a list of (elem, cnt) pairs
Counter(dict(list_of_pairs)) # convert from a list of (elem, cnt) pairs
- c.most_common()[:-n:-1] # n least common elements
+ c.most_common()[:-n-1:-1] # n least common elements
c += Counter() # remove zero and negative counts
Several mathematical operations are provided for combining :class:`Counter`
@@ -601,47 +602,53 @@ Example:
>>> Point = namedtuple('Point', ['x', 'y'], verbose=True)
class Point(tuple):
- 'Point(x, y)'
+ 'Point(x, y)'
<BLANKLINE>
- __slots__ = ()
+ __slots__ = ()
<BLANKLINE>
- _fields = ('x', 'y')
+ _fields = ('x', 'y')
<BLANKLINE>
- def __new__(_cls, x, y):
- 'Create a new instance of Point(x, y)'
- return _tuple.__new__(_cls, (x, y))
+ def __new__(_cls, x, y):
+ 'Create a new instance of Point(x, y)'
+ return _tuple.__new__(_cls, (x, y))
<BLANKLINE>
- @classmethod
- def _make(cls, iterable, new=tuple.__new__, len=len):
- 'Make a new Point object from a sequence or iterable'
- result = new(cls, iterable)
- if len(result) != 2:
- raise TypeError('Expected 2 arguments, got %d' % len(result))
- return result
+ @classmethod
+ def _make(cls, iterable, new=tuple.__new__, len=len):
+ 'Make a new Point object from a sequence or iterable'
+ result = new(cls, iterable)
+ if len(result) != 2:
+ raise TypeError('Expected 2 arguments, got %d' % len(result))
+ return result
<BLANKLINE>
- def __repr__(self):
- 'Return a nicely formatted representation string'
- return 'Point(x=%r, y=%r)' % self
+ def __repr__(self):
+ 'Return a nicely formatted representation string'
+ return 'Point(x=%r, y=%r)' % self
<BLANKLINE>
- def _asdict(self):
- 'Return a new OrderedDict which maps field names to their values'
- return OrderedDict(zip(self._fields, self))
+ def _asdict(self):
+ 'Return a new OrderedDict which maps field names to their values'
+ return OrderedDict(zip(self._fields, self))
<BLANKLINE>
- __dict__ = property(_asdict)
+ def _replace(_self, **kwds):
+ 'Return a new Point object replacing specified fields with new values'
+ result = _self._make(map(kwds.pop, ('x', 'y'), _self))
+ if kwds:
+ raise ValueError('Got unexpected field names: %r' % kwds.keys())
+ return result
<BLANKLINE>
- def _replace(_self, **kwds):
- 'Return a new Point object replacing specified fields with new values'
- result = _self._make(map(kwds.pop, ('x', 'y'), _self))
- if kwds:
- raise ValueError('Got unexpected field names: %r' % kwds.keys())
- return result
+ def __getnewargs__(self):
+ 'Return self as a plain tuple. Used by copy and pickle.'
+ return tuple(self)
<BLANKLINE>
- def __getnewargs__(self):
- 'Return self as a plain tuple. Used by copy and pickle.'
- return tuple(self)
+ __dict__ = _property(_asdict)
+ <BLANKLINE>
+ def __getstate__(self):
+ 'Exclude the OrderedDict from pickling'
+ pass
+ <BLANKLINE>
+ x = _property(_itemgetter(0), doc='Alias for field number 0')
+ <BLANKLINE>
+ y = _property(_itemgetter(1), doc='Alias for field number 1')
<BLANKLINE>
- x = _property(_itemgetter(0), doc='Alias for field number 0')
- y = _property(_itemgetter(1), doc='Alias for field number 1')
>>> p = Point(11, y=22) # instantiate with positional or keyword arguments
>>> p[0] + p[1] # indexable like the plain tuple (11, 22)
@@ -811,9 +818,9 @@ reverse iteration using :func:`reversed`.
Equality tests between :class:`OrderedDict` objects are order-sensitive
and are implemented as ``list(od1.items())==list(od2.items())``.
Equality tests between :class:`OrderedDict` objects and other
-:class:`Mapping` objects are order-insensitive like regular dictionaries.
-This allows :class:`OrderedDict` objects to be substituted anywhere a
-regular dictionary is used.
+:class:`Mapping` objects are order-insensitive like regular
+dictionaries. This allows :class:`OrderedDict` objects to be substituted
+anywhere a regular dictionary is used.
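
A short sketch of the difference::

   >>> from collections import OrderedDict
   >>> od1 = OrderedDict([('a', 1), ('b', 2)])
   >>> od2 = OrderedDict([('b', 2), ('a', 1)])
   >>> od1 == od2
   False
   >>> od1 == dict(od2)
   True
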
The :class:`OrderedDict` constructor and :meth:`update` method both accept
keyword arguments, but their order is lost because Python's function call
@@ -850,7 +857,7 @@ are deleted. But when new keys are added, the keys are appended
to the end and the sort is not maintained.
It is also straight-forward to create an ordered dictionary variant
-that the remembers the order the keys were *last* inserted.
+that remembers the order the keys were *last* inserted.
If a new entry overwrites an existing entry, the
original insertion position is changed and moved to the end::
@@ -892,29 +899,35 @@ ABC Inherits from Abstract Methods Mixin
:class:`Sized` ``__len__``
:class:`Callable` ``__call__``
-:class:`Sequence` :class:`Sized`, ``__getitem__`` ``__contains__``, ``__iter__``, ``__reversed__``,
- :class:`Iterable`, ``index``, and ``count``
- :class:`Container`
-
-:class:`MutableSequence` :class:`Sequence` ``__setitem__``, Inherited :class:`Sequence` methods and
- ``__delitem__``, ``append``, ``reverse``, ``extend``, ``pop``,
- ``insert`` ``remove``, and ``__iadd__``
-
-:class:`Set` :class:`Sized`, ``__le__``, ``__lt__``, ``__eq__``, ``__ne__``,
- :class:`Iterable`, ``__gt__``, ``__ge__``, ``__and__``, ``__or__``,
- :class:`Container` ``__sub__``, ``__xor__``, and ``isdisjoint``
-
-:class:`MutableSet` :class:`Set` ``add``, Inherited :class:`Set` methods and
- ``discard`` ``clear``, ``pop``, ``remove``, ``__ior__``,
- ``__iand__``, ``__ixor__``, and ``__isub__``
-
-:class:`Mapping` :class:`Sized`, ``__getitem__`` ``__contains__``, ``keys``, ``items``, ``values``,
- :class:`Iterable`, ``get``, ``__eq__``, and ``__ne__``
+:class:`Sequence` :class:`Sized`, ``__getitem__``, ``__contains__``, ``__iter__``, ``__reversed__``,
+ :class:`Iterable`, ``__len__`` ``index``, and ``count``
:class:`Container`
-:class:`MutableMapping` :class:`Mapping` ``__setitem__``, Inherited :class:`Mapping` methods and
- ``__delitem__`` ``pop``, ``popitem``, ``clear``, ``update``,
- and ``setdefault``
+:class:`MutableSequence` :class:`Sequence` ``__getitem__``, Inherited :class:`Sequence` methods and
+ ``__setitem__``, ``append``, ``reverse``, ``extend``, ``pop``,
+ ``__delitem__``, ``remove``, and ``__iadd__``
+ ``__len__``,
+ ``insert``
+
+:class:`Set` :class:`Sized`, ``__contains__``, ``__le__``, ``__lt__``, ``__eq__``, ``__ne__``,
+ :class:`Iterable`, ``__iter__``, ``__gt__``, ``__ge__``, ``__and__``, ``__or__``,
+ :class:`Container` ``__len__`` ``__sub__``, ``__xor__``, and ``isdisjoint``
+
+:class:`MutableSet` :class:`Set` ``__contains__``, Inherited :class:`Set` methods and
+ ``__iter__``, ``clear``, ``pop``, ``remove``, ``__ior__``,
+ ``__len__``, ``__iand__``, ``__ixor__``, and ``__isub__``
+ ``add``,
+ ``discard``
+
+:class:`Mapping` :class:`Sized`, ``__getitem__``, ``__contains__``, ``keys``, ``items``, ``values``,
+ :class:`Iterable`, ``__iter__``, ``get``, ``__eq__``, and ``__ne__``
+ :class:`Container` ``__len__``
+
+:class:`MutableMapping` :class:`Mapping` ``__getitem__``, Inherited :class:`Mapping` methods and
+ ``__setitem__``, ``pop``, ``popitem``, ``clear``, ``update``,
+ ``__delitem__``, and ``setdefault``
+ ``__iter__``,
+ ``__len__``
:class:`MappingView` :class:`Sized` ``__len__``
diff --git a/Doc/library/colorsys.rst b/Doc/library/colorsys.rst
index dbab706..225306c 100644
--- a/Doc/library/colorsys.rst
+++ b/Doc/library/colorsys.rst
@@ -58,7 +58,7 @@ The :mod:`colorsys` module defines the following functions:
Example::
>>> import colorsys
- >>> colorsys.rgb_to_hsv(.3, .4, .2)
- (0.25, 0.5, 0.4)
- >>> colorsys.hsv_to_rgb(0.25, 0.5, 0.4)
- (0.3, 0.4, 0.2)
+ >>> colorsys.rgb_to_hsv(0.2, 0.4, 0.4)
+ (0.5, 0.5, 0.4)
+ >>> colorsys.hsv_to_rgb(0.5, 0.5, 0.4)
+ (0.2, 0.4, 0.4)
diff --git a/Doc/library/commands.rst b/Doc/library/commands.rst
index 46ff823..0b73e42 100644
--- a/Doc/library/commands.rst
+++ b/Doc/library/commands.rst
@@ -8,7 +8,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`commands` module has been removed in Python 3.0. Use the
+ The :mod:`commands` module has been removed in Python 3. Use the
:mod:`subprocess` module instead.
.. sectionauthor:: Sue Williams <sbw@provis.com>
diff --git a/Doc/library/compileall.rst b/Doc/library/compileall.rst
index cf0d5f8..0e79a5a 100644
--- a/Doc/library/compileall.rst
+++ b/Doc/library/compileall.rst
@@ -127,7 +127,7 @@ subdirectory and all its subdirectories::
# Perform same compilation, excluding files in .svn directories.
import re
- compileall.compile_dir('Lib/', rx=re.compile('/[.]svn'), force=True)
+ compileall.compile_dir('Lib/', rx=re.compile(r'[/\\][.]svn'), force=True)
.. seealso::
diff --git a/Doc/library/compiler.rst b/Doc/library/compiler.rst
index 458e653..494902e 100644
--- a/Doc/library/compiler.rst
+++ b/Doc/library/compiler.rst
@@ -6,7 +6,7 @@ Python compiler package
***********************
.. deprecated:: 2.6
- The :mod:`compiler` package has been removed in Python 3.0.
+ The :mod:`compiler` package has been removed in Python 3.
.. sectionauthor:: Jeremy Hylton <jeremy@zope.com>
@@ -540,7 +540,7 @@ examples demonstrate how to use the :func:`parse` function, what the repr of an
AST looks like, and how to access attributes of an AST node.
The first module defines a single function. Assume it is stored in
-:file:`/tmp/doublelib.py`. ::
+:file:`doublelib.py`. ::
"""This is an example module.
@@ -557,7 +557,7 @@ to create an instance from a repr, you must import the class names from the
:mod:`compiler.ast` module. ::
>>> import compiler
- >>> mod = compiler.parseFile("/tmp/doublelib.py")
+ >>> mod = compiler.parseFile("doublelib.py")
>>> mod
Module('This is an example module.\n\nThis is the docstring.\n',
Stmt([Function(None, 'double', ['x'], [], 0,
diff --git a/Doc/library/configparser.rst b/Doc/library/configparser.rst
index 3536f3e..515074a 100644
--- a/Doc/library/configparser.rst
+++ b/Doc/library/configparser.rst
@@ -12,8 +12,8 @@
.. note::
The :mod:`ConfigParser` module has been renamed to :mod:`configparser` in
- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
- converting your sources to 3.0.
+ Python 3. The :term:`2to3` tool will automatically adapt imports when
+ converting your sources to Python 3.
.. index::
pair: .ini; file
@@ -451,9 +451,9 @@ An example of writing to a configuration file::
# when attempting to write to a file or when you get it in non-raw
# mode. SafeConfigParser does not allow such assignments to take place.
config.add_section('Section1')
- config.set('Section1', 'int', '15')
- config.set('Section1', 'bool', 'true')
- config.set('Section1', 'float', '3.1415')
+ config.set('Section1', 'an_int', '15')
+ config.set('Section1', 'a_bool', 'true')
+ config.set('Section1', 'a_float', '3.1415')
config.set('Section1', 'baz', 'fun')
config.set('Section1', 'bar', 'Python')
config.set('Section1', 'foo', '%(bar)s is %(baz)s!')
@@ -471,13 +471,13 @@ An example of reading the configuration file again::
# getfloat() raises an exception if the value is not a float
# getint() and getboolean() also do this for their respective types
- float = config.getfloat('Section1', 'float')
- int = config.getint('Section1', 'int')
- print float + int
+ a_float = config.getfloat('Section1', 'a_float')
+ an_int = config.getint('Section1', 'an_int')
+ print a_float + an_int
# Notice that the next output does not interpolate '%(bar)s' or '%(baz)s'.
# This is because we are using a RawConfigParser().
- if config.getboolean('Section1', 'bool'):
+ if config.getboolean('Section1', 'a_bool'):
print config.get('Section1', 'foo')
To get interpolation, you will need to use a :class:`ConfigParser` or
diff --git a/Doc/library/cookie.rst b/Doc/library/cookie.rst
index 480dffa..19786f7 100644
--- a/Doc/library/cookie.rst
+++ b/Doc/library/cookie.rst
@@ -8,8 +8,8 @@
.. note::
The :mod:`Cookie` module has been renamed to :mod:`http.cookies` in Python
- 3.0. The :term:`2to3` tool will automatically adapt imports when converting
- your sources to 3.0.
+ 3. The :term:`2to3` tool will automatically adapt imports when converting
+ your sources to Python 3.
**Source code:** :source:`Lib/Cookie.py`
@@ -22,8 +22,14 @@ cookie value.
The module formerly strictly applied the parsing rules described in the
:rfc:`2109` and :rfc:`2068` specifications. It has since been discovered that
-MSIE 3.0x doesn't follow the character rules outlined in those specs. As a
-result, the parsing rules used are a bit less strict.
+MSIE 3.0x doesn't follow the character rules outlined in those specs and also
+that many present-day browsers and servers have relaxed parsing rules when it
+comes to Cookie handling. As a result, the parsing rules used are a bit less
+strict.
+
+The characters in :data:`string.ascii_letters`, :data:`string.digits` and
+``!#$%&'*+-.^_`|~`` make up the set of valid characters allowed by this
+module in a Cookie name (as :attr:`~Morsel.key`).
+
.. note::
diff --git a/Doc/library/cookielib.rst b/Doc/library/cookielib.rst
index 77d6624..b1baef1 100644
--- a/Doc/library/cookielib.rst
+++ b/Doc/library/cookielib.rst
@@ -8,8 +8,8 @@
.. note::
The :mod:`cookielib` module has been renamed to :mod:`http.cookiejar` in
- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
- converting your sources to 3.0.
+ Python 3. The :term:`2to3` tool will automatically adapt imports when
+ converting your sources to Python 3.
.. versionadded:: 2.4
@@ -98,7 +98,7 @@ The following classes are provided:
Netscape and RFC 2965 cookies. By default, RFC 2109 cookies (ie. cookies
received in a :mailheader:`Set-Cookie` header with a version cookie-attribute of
1) are treated according to the RFC 2965 rules. However, if RFC 2965 handling
- is turned off or :attr:`rfc2109_as_netscape` is True, RFC 2109 cookies are
+ is turned off or :attr:`rfc2109_as_netscape` is ``True``, RFC 2109 cookies are
'downgraded' by the :class:`CookieJar` instance to Netscape cookies, by
setting the :attr:`version` attribute of the :class:`Cookie` instance to 0.
:class:`DefaultCookiePolicy` also provides some parameters to allow some
@@ -308,7 +308,7 @@ FileCookieJar subclasses and co-operation with web browsers
-----------------------------------------------------------
The following :class:`CookieJar` subclasses are provided for reading and
-writing .
+writing.
.. class:: MozillaCookieJar(filename, delayload=None, policy=None)
@@ -652,7 +652,7 @@ internal consistency, so you should know what you're doing if you do that.
.. attribute:: Cookie.secure
- True if cookie should only be returned over a secure connection.
+ ``True`` if cookie should only be returned over a secure connection.
.. attribute:: Cookie.expires
@@ -663,7 +663,7 @@ internal consistency, so you should know what you're doing if you do that.
.. attribute:: Cookie.discard
- True if this is a session cookie.
+ ``True`` if this is a session cookie.
.. attribute:: Cookie.comment
@@ -680,7 +680,7 @@ internal consistency, so you should know what you're doing if you do that.
.. attribute:: Cookie.rfc2109
- True if this cookie was received as an RFC 2109 cookie (ie. the cookie
+ ``True`` if this cookie was received as an RFC 2109 cookie (ie. the cookie
arrived in a :mailheader:`Set-Cookie` header, and the value of the Version
cookie-attribute in that header was 1). This attribute is provided because
:mod:`cookielib` may 'downgrade' RFC 2109 cookies to Netscape cookies, in
@@ -691,18 +691,18 @@ internal consistency, so you should know what you're doing if you do that.
.. attribute:: Cookie.port_specified
- True if a port or set of ports was explicitly specified by the server (in the
+ ``True`` if a port or set of ports was explicitly specified by the server (in the
:mailheader:`Set-Cookie` / :mailheader:`Set-Cookie2` header).
.. attribute:: Cookie.domain_specified
- True if a domain was explicitly specified by the server.
+ ``True`` if a domain was explicitly specified by the server.
.. attribute:: Cookie.domain_initial_dot
- True if the domain explicitly specified by the server began with a dot
+ ``True`` if the domain explicitly specified by the server began with a dot
(``'.'``).
Cookies may have additional non-standard cookie-attributes. These may be
@@ -729,7 +729,7 @@ The :class:`Cookie` class also defines the following method:
.. method:: Cookie.is_expired([now=None])
- True if cookie has passed the time at which the server requested it should
+ ``True`` if cookie has passed the time at which the server requested it should
expire. If *now* is given (in seconds since the epoch), return whether the
cookie has expired at the specified time.
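For orientation, a rough sketch of typical :mod:`cookielib` use (the URL is a
placeholder)::

    import cookielib
    import urllib2

    jar = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
    opener.open('http://www.example.com/')   # cookies from the response land in the jar

    for cookie in jar:
        print cookie.name, cookie.value, cookie.is_expired()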
diff --git a/Doc/library/copy_reg.rst b/Doc/library/copy_reg.rst
index 609ded0..3d8ef77 100644
--- a/Doc/library/copy_reg.rst
+++ b/Doc/library/copy_reg.rst
@@ -5,20 +5,20 @@
:synopsis: Register pickle support functions.
.. note::
- The :mod:`copy_reg` module has been renamed to :mod:`copyreg` in Python 3.0.
+ The :mod:`copy_reg` module has been renamed to :mod:`copyreg` in Python 3.
The :term:`2to3` tool will automatically adapt imports when converting your
- sources to 3.0.
+ sources to Python 3.
.. index::
module: pickle
module: cPickle
module: copy
-The :mod:`copy_reg` module provides support for the :mod:`pickle` and
-:mod:`cPickle` modules. The :mod:`copy` module is likely to use this in the
-future as well. It provides configuration information about object constructors
-which are not classes. Such constructors may be factory functions or class
-instances.
+The :mod:`copy_reg` module offers a way to define functions used while pickling
+specific objects. The :mod:`pickle`, :mod:`cPickle`, and :mod:`copy` modules
+use those functions when pickling/copying those objects. The module provides
+configuration information about object constructors which are not classes.
+Such constructors may be factory functions or class instances.
.. function:: constructor(object)
@@ -43,3 +43,24 @@ instances.
See the :mod:`pickle` module for more details on the interface expected of
*function* and *constructor*.
+Example
+-------
+
+The example below shows how to register a pickle function and how it will be
+used:
+
+ >>> import copy_reg, copy, pickle
+ >>> class C(object):
+ ... def __init__(self, a):
+ ... self.a = a
+ ...
+ >>> def pickle_c(c):
+ ... print("pickling a C instance...")
+ ... return C, (c.a,)
+ ...
+ >>> copy_reg.pickle(C, pickle_c)
+ >>> c = C(1)
+ >>> d = copy.copy(c)
+ pickling a C instance...
+ >>> p = pickle.dumps(c)
+ pickling a C instance...
diff --git a/Doc/library/csv.rst b/Doc/library/csv.rst
index f4e9d7c..2054ce6 100644
--- a/Doc/library/csv.rst
+++ b/Doc/library/csv.rst
@@ -40,7 +40,7 @@ using the :class:`DictReader` and :class:`DictWriter` classes.
This version of the :mod:`csv` module doesn't support Unicode input. Also,
there are currently some issues regarding ASCII NUL characters. Accordingly,
all input should be UTF-8 or printable ASCII to be safe; see the examples in
- section :ref:`csv-examples`. These restrictions will be removed in the future.
+ section :ref:`csv-examples`.
.. seealso::
@@ -57,7 +57,7 @@ Module Contents
The :mod:`csv` module defines the following functions:
-.. function:: reader(csvfile[, dialect='excel'][, fmtparam])
+.. function:: reader(csvfile, dialect='excel', **fmtparams)
Return a reader object which will iterate over lines in the given *csvfile*.
*csvfile* can be any object which supports the :term:`iterator` protocol and returns a
@@ -67,7 +67,7 @@ The :mod:`csv` module defines the following functions:
*dialect* parameter can be given which is used to define a set of parameters
specific to a particular CSV dialect. It may be an instance of a subclass of
the :class:`Dialect` class or one of the strings returned by the
- :func:`list_dialects` function. The other optional *fmtparam* keyword arguments
+ :func:`list_dialects` function. The other optional *fmtparams* keyword arguments
can be given to override individual formatting parameters in the current
dialect. For full details about the dialect and formatting parameters, see
section :ref:`csv-fmt-params`.
@@ -78,9 +78,10 @@ The :mod:`csv` module defines the following functions:
A short usage example::
>>> import csv
- >>> spamReader = csv.reader(open('eggs.csv', 'rb'), delimiter=' ', quotechar='|')
- >>> for row in spamReader:
- ... print ', '.join(row)
+ >>> with open('eggs.csv', 'rb') as csvfile:
+ ... spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
+ ... for row in spamreader:
+ ... print ', '.join(row)
Spam, Spam, Spam, Spam, Spam, Baked Beans
Spam, Lovely Spam, Wonderful Spam
@@ -94,7 +95,7 @@ The :mod:`csv` module defines the following functions:
be split into lines in a manner which preserves the newline characters.
-.. function:: writer(csvfile[, dialect='excel'][, fmtparam])
+.. function:: writer(csvfile, dialect='excel', **fmtparams)
Return a writer object responsible for converting the user's data into delimited
strings on the given file-like object. *csvfile* can be any object with a
@@ -103,7 +104,7 @@ The :mod:`csv` module defines the following functions:
parameter can be given which is used to define a set of parameters specific to a
particular CSV dialect. It may be an instance of a subclass of the
:class:`Dialect` class or one of the strings returned by the
- :func:`list_dialects` function. The other optional *fmtparam* keyword arguments
+ :func:`list_dialects` function. The other optional *fmtparams* keyword arguments
can be given to override individual formatting parameters in the current
dialect. For full details about the dialect and formatting parameters, see
section :ref:`csv-fmt-params`. To make it
@@ -115,18 +116,19 @@ The :mod:`csv` module defines the following functions:
A short usage example::
- >>> import csv
- >>> spamWriter = csv.writer(open('eggs.csv', 'wb'), delimiter=' ',
- ... quotechar='|', quoting=csv.QUOTE_MINIMAL)
- >>> spamWriter.writerow(['Spam'] * 5 + ['Baked Beans'])
- >>> spamWriter.writerow(['Spam', 'Lovely Spam', 'Wonderful Spam'])
+ import csv
+ with open('eggs.csv', 'wb') as csvfile:
+ spamwriter = csv.writer(csvfile, delimiter=' ',
+ quotechar='|', quoting=csv.QUOTE_MINIMAL)
+ spamwriter.writerow(['Spam'] * 5 + ['Baked Beans'])
+ spamwriter.writerow(['Spam', 'Lovely Spam', 'Wonderful Spam'])
-.. function:: register_dialect(name[, dialect][, fmtparam])
+.. function:: register_dialect(name[, dialect], **fmtparams)
Associate *dialect* with *name*. *name* must be a string or Unicode object. The
dialect can be specified either by passing a sub-class of :class:`Dialect`, or
- by *fmtparam* keyword arguments, or both, with keyword arguments overriding
+ by *fmtparams* keyword arguments, or both, with keyword arguments overriding
parameters of the dialect. For full details about the dialect and formatting
parameters, see section :ref:`csv-fmt-params`.
@@ -162,36 +164,43 @@ The :mod:`csv` module defines the following functions:
The :mod:`csv` module defines the following classes:
-.. class:: DictReader(csvfile[, fieldnames=None[, restkey=None[, restval=None[, dialect='excel'[, *args, **kwds]]]]])
-
- Create an object which operates like a regular reader but maps the information
- read into a dict whose keys are given by the optional *fieldnames* parameter.
- If the *fieldnames* parameter is omitted, the values in the first row of the
- *csvfile* will be used as the fieldnames. If the row read has more fields
- than the fieldnames sequence, the remaining data is added as a sequence
- keyed by the value of *restkey*. If the row read has fewer fields than the
- fieldnames sequence, the remaining keys take the value of the optional
- *restval* parameter. Any other optional or keyword arguments are passed to
- the underlying :class:`reader` instance.
-
-
-.. class:: DictWriter(csvfile, fieldnames[, restval=''[, extrasaction='raise'[, dialect='excel'[, *args, **kwds]]]])
-
- Create an object which operates like a regular writer but maps dictionaries onto
- output rows. The *fieldnames* parameter identifies the order in which values in
- the dictionary passed to the :meth:`writerow` method are written to the
- *csvfile*. The optional *restval* parameter specifies the value to be written
- if the dictionary is missing a key in *fieldnames*. If the dictionary passed to
- the :meth:`writerow` method contains a key not found in *fieldnames*, the
- optional *extrasaction* parameter indicates what action to take. If it is set
- to ``'raise'`` a :exc:`ValueError` is raised. If it is set to ``'ignore'``,
- extra values in the dictionary are ignored. Any other optional or keyword
- arguments are passed to the underlying :class:`writer` instance.
-
- Note that unlike the :class:`DictReader` class, the *fieldnames* parameter of
- the :class:`DictWriter` is not optional. Since Python's :class:`dict` objects
- are not ordered, there is not enough information available to deduce the order
- in which the row should be written to the *csvfile*.
+.. class:: DictReader(csvfile, fieldnames=None, restkey=None, restval=None, \
+ dialect='excel', *args, **kwds)
+
+ Create an object which operates like a regular reader but maps the
+ information read into a dict whose keys are given by the optional
+ *fieldnames* parameter. The *fieldnames* parameter is a :ref:`sequence
+ <collections-abstract-base-classes>` whose elements are associated with the
+ fields of the input data in order. These elements become the keys of the
+ resulting dictionary. If the *fieldnames* parameter is omitted, the values
+ in the first row of the *csvfile* will be used as the fieldnames. If the
+ row read has more fields than the fieldnames sequence, the remaining data is
+ added as a sequence keyed by the value of *restkey*. If the row read has
+ fewer fields than the fieldnames sequence, the remaining keys take the value
+ of the optional *restval* parameter. Any other optional or keyword
+ arguments are passed to the underlying :class:`reader` instance.
+
+
+.. class:: DictWriter(csvfile, fieldnames, restval='', extrasaction='raise', \
+ dialect='excel', *args, **kwds)
+
+ Create an object which operates like a regular writer but maps dictionaries
+ onto output rows. The *fieldnames* parameter is a :ref:`sequence
+ <collections-abstract-base-classes>` of keys that identify the order in
+ which values in the dictionary passed to the :meth:`writerow` method are
+ written to the *csvfile*. The optional *restval* parameter specifies the
+ value to be written if the dictionary is missing a key in *fieldnames*. If
+ the dictionary passed to the :meth:`writerow` method contains a key not
+ found in *fieldnames*, the optional *extrasaction* parameter indicates what
+ action to take. If it is set to ``'raise'`` a :exc:`ValueError` is raised.
+ If it is set to ``'ignore'``, extra values in the dictionary are ignored.
+ Any other optional or keyword arguments are passed to the underlying
+ :class:`writer` instance.
+
+ Note that unlike the :class:`DictReader` class, the *fieldnames* parameter
+ of the :class:`DictWriter` is not optional. Since Python's :class:`dict`
+ objects are not ordered, there is not enough information available to deduce
+ the order in which the row should be written to the *csvfile*.
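A short sketch of the two classes working together (the file name and field
names are illustrative)::

    import csv

    fieldnames = ['first_name', 'last_name']

    with open('names.csv', 'wb') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerow({'first_name': 'Lovely', 'last_name': 'Spam'})
        writer.writerow({'first_name': 'Wonderful', 'last_name': 'Spam'})

    with open('names.csv', 'rb') as csvfile:
        reader = csv.DictReader(csvfile)   # fieldnames taken from the header row
        for row in reader:
            print row['first_name'], row['last_name']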
.. class:: Dialect
@@ -219,7 +228,7 @@ The :mod:`csv` module defines the following classes:
The :class:`Sniffer` class provides two methods:
- .. method:: sniff(sample[, delimiters=None])
+ .. method:: sniff(sample, delimiters=None)
Analyze the given *sample* and return a :class:`Dialect` subclass
reflecting the parameters found. If the optional *delimiters* parameter
@@ -234,11 +243,11 @@ The :mod:`csv` module defines the following classes:
An example for :class:`Sniffer` use::
- csvfile = open("example.csv", "rb")
- dialect = csv.Sniffer().sniff(csvfile.read(1024))
- csvfile.seek(0)
- reader = csv.reader(csvfile, dialect)
- # ... process CSV file contents here ...
+ with open('example.csv', 'rb') as csvfile:
+ dialect = csv.Sniffer().sniff(csvfile.read(1024))
+ csvfile.seek(0)
+ reader = csv.reader(csvfile, dialect)
+ # ... process CSV file contents here ...
The :mod:`csv` module defines the following constants:
@@ -353,6 +362,11 @@ Dialects support the following attributes:
The default is :const:`False`.
+.. attribute:: Dialect.strict
+
+ When ``True``, raise exception :exc:`Error` on bad CSV input.
+ The default is ``False``.
+
Reader Objects
--------------
@@ -478,7 +492,7 @@ A slightly more advanced use of the reader --- catching and reporting errors::
try:
for row in reader:
print row
- except csv.Error, e:
+ except csv.Error as e:
sys.exit('file %s, line %d: %s' % (filename, reader.line_num, e))
And while the module doesn't directly support parsing strings, it can easily be
diff --git a/Doc/library/ctypes.rst b/Doc/library/ctypes.rst
index 4913b82..31d5b31 100644
--- a/Doc/library/ctypes.rst
+++ b/Doc/library/ctypes.rst
@@ -215,7 +215,7 @@ more about :mod:`ctypes` data types.
Fundamental data types
^^^^^^^^^^^^^^^^^^^^^^
-:mod:`ctypes` defines a number of primitive C compatible data types :
+:mod:`ctypes` defines a number of primitive C compatible data types:
+----------------------+------------------------------------------+----------------------------+
| ctypes type | C type | Python type |
@@ -568,8 +568,8 @@ Here is a simple example of a POINT structure, which contains two integers named
ValueError: too many initializers
>>>
-You can, however, build much more complicated structures. Structures can itself
-contain other structures by using a structure as a field type.
+You can, however, build much more complicated structures. A structure can
+itself contain other structures by using a structure as a field type.
Here is a RECT structure which contains two POINTs named *upperleft* and
*lowerright*::
@@ -602,6 +602,13 @@ for debugging because they can provide useful information::
.. _ctypes-structureunion-alignment-byte-order:
+.. warning::
+
+ :mod:`ctypes` does not support passing unions or structures with bit-fields
+ to functions by value. While this may work on 32-bit x86, it's not
+ guaranteed by the library to work in the general case. Unions and
+ structures with bit-fields should always be passed to functions by pointer.
+
Structure/union alignment and byte order
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -816,6 +823,11 @@ pointer types. So, for ``POINTER(c_int)``, ctypes accepts an array of c_int::
3
>>>
+In addition, if a function argument is explicitly declared to be a pointer type
+(such as ``POINTER(c_int)``) in :attr:`argtypes`, an object of the pointed
+type (``c_int`` in this case) can be passed to the function. ctypes will apply
+the required :func:`byref` conversion in this case automatically.
+
To set a POINTER type field to ``NULL``, you can assign ``None``::
>>> bar.values = None
@@ -1068,12 +1080,18 @@ As we can easily check, our array is sorted now::
1 5 7 33 99
>>>
-**Important note for callback functions:**
+.. note::
-Make sure you keep references to CFUNCTYPE objects as long as they are used from
-C code. :mod:`ctypes` doesn't, and if you don't, they may be garbage collected,
-crashing your program when a callback is made.
+ Make sure you keep references to :func:`CFUNCTYPE` objects as long as they
+ are used from C code. :mod:`ctypes` doesn't, and if you don't, they may be
+ garbage collected, crashing your program when a callback is made.
+ Also, note that if the callback function is called in a thread created
+ outside of Python's control (e.g. by the foreign code that calls the
+ callback), ctypes creates a new dummy Python thread on every invocation. This
+ behavior is correct for most purposes, but it means that values stored with
+   ``threading.local`` will *not* survive across different callbacks, even when
+ those calls are made from the same C thread.
.. _ctypes-accessing-values-exported-from-dlls:
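A minimal sketch of the reference-keeping pattern described in the note above
(the C-level registration call is hypothetical)::

    from ctypes import CFUNCTYPE, c_int

    CALLBACK = CFUNCTYPE(c_int, c_int)

    def py_callback(value):
        return value * 2

    # Keep the CFUNCTYPE object alive for as long as C code may call it,
    # e.g. as a module-level name -- not as a temporary passed inline.
    _callback = CALLBACK(py_callback)

    # some_library.register_callback(_callback)   # hypothetical registration call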
@@ -1153,8 +1171,8 @@ testing. Try it out with ``import __hello__`` for example.
Surprises
^^^^^^^^^
-There are some edges in :mod:`ctypes` where you may be expect something else than
-what actually happens.
+There are some edge cases in :mod:`ctypes` where you might expect something
+other than what actually happens.
Consider the following example::
@@ -1321,7 +1339,7 @@ returns the full pathname, but since there is no predefined naming scheme a call
like ``find_library("c")`` will fail and return ``None``.
If wrapping a shared library with :mod:`ctypes`, it *may* be better to determine
-the shared library name at development type, and hardcode that into the wrapper
+the shared library name at development time, and hardcode that into the wrapper
module instead of using :func:`find_library` to locate the library at runtime.
@@ -1702,7 +1720,7 @@ the windows header file is this::
WINUSERAPI int WINAPI
MessageBoxA(
- HWND hWnd ,
+ HWND hWnd,
LPCSTR lpText,
LPCSTR lpCaption,
UINT uType);
@@ -1994,8 +2012,8 @@ Utility functions
.. function:: sizeof(obj_or_type)
- Returns the size in bytes of a ctypes type or instance memory buffer. Does the
- same as the C ``sizeof()`` function.
+ Returns the size in bytes of a ctypes type or instance memory buffer.
+ Does the same as the C ``sizeof`` operator.
.. function:: string_at(address[, size])
@@ -2339,7 +2357,7 @@ These are the fundamental ctypes data types:
.. class:: c_bool
Represent the C :c:type:`bool` datatype (more accurately, :c:type:`_Bool` from
- C99). Its value can be True or False, and the constructor accepts any object
+ C99). Its value can be ``True`` or ``False``, and the constructor accepts any object
that has a truth value.
.. versionadded:: 2.6
diff --git a/Doc/library/curses.rst b/Doc/library/curses.rst
index ecdac9a..642d25b 100644
--- a/Doc/library/curses.rst
+++ b/Doc/library/curses.rst
@@ -48,7 +48,7 @@ Linux and the BSD variants of Unix.
Tutorial material on using curses with Python, by Andrew Kuchling and Eric
Raymond.
- The :file:`Demo/curses/` directory in the Python source distribution contains
+ The :source:`Demo/curses/` directory in the Python source distribution contains
some example programs using the curses bindings provided by this module.
@@ -380,7 +380,8 @@ The module :mod:`curses` defines the following functions:
is to be displayed.
-.. function:: newwin([nlines, ncols,] begin_y, begin_x)
+.. function:: newwin(nlines, ncols)
+ newwin(nlines, ncols, begin_y, begin_x)
Return a new window, whose left-upper corner is at ``(begin_y, begin_x)``, and
whose height/width is *nlines*/*ncols*.
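A small sketch of the four-argument form (coordinates chosen arbitrarily)::

    import curses

    def main(stdscr):
        # a 5-line, 40-column window with its upper-left corner at row 10, column 10
        win = curses.newwin(5, 40, 10, 10)
        win.border()
        win.addstr(1, 1, "Hello from a sub-window")
        win.refresh()
        stdscr.getch()

    curses.wrapper(main)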
@@ -648,7 +649,8 @@ Window objects, as returned by :func:`initscr` and :func:`newwin` above, have
the following methods:
-.. method:: window.addch([y, x,] ch[, attr])
+.. method:: window.addch(ch[, attr])
+ window.addch(y, x, ch[, attr])
.. note::
@@ -662,13 +664,15 @@ the following methods:
position and attributes are the current settings for the window object.
-.. method:: window.addnstr([y, x,] str, n[, attr])
+.. method:: window.addnstr(str, n[, attr])
+ window.addnstr(y, x, str, n[, attr])
Paint at most *n* characters of the string *str* at ``(y, x)`` with attributes
*attr*, overwriting anything previously on the display.
-.. method:: window.addstr([y, x,] str[, attr])
+.. method:: window.addstr(str[, attr])
+ window.addstr(y, x, str[, attr])
Paint the string *str* at ``(y, x)`` with attributes *attr*, overwriting
anything previously on the display.
@@ -755,7 +759,10 @@ the following methods:
*bs* are *horch*. The default corner characters are always used by this function.
-.. method:: window.chgat([y, x, ] [num,] attr)
+.. method:: window.chgat(attr)
+ window.chgat(num, attr)
+ window.chgat(y, x, attr)
+ window.chgat(y, x, num, attr)
Set the attributes of *num* characters at the current cursor position, or at
position ``(y, x)`` if supplied. If no value of *num* is given or *num* = -1,
@@ -804,7 +811,8 @@ the following methods:
Delete the line under the cursor. All following lines are moved up by one line.
-.. method:: window.derwin([nlines, ncols,] begin_y, begin_x)
+.. method:: window.derwin(begin_y, begin_x)
+ window.derwin(nlines, ncols, begin_y, begin_x)
An abbreviation for "derive window", :meth:`derwin` is the same as calling
:meth:`subwin`, except that *begin_y* and *begin_x* are relative to the origin
@@ -879,7 +887,8 @@ the following methods:
upper-left corner.
-.. method:: window.hline([y, x,] ch, n)
+.. method:: window.hline(ch, n)
+ window.hline(y, x, ch, n)
Display a horizontal line starting at ``(y, x)`` with length *n* consisting of
the character *ch*.
@@ -913,7 +922,8 @@ the following methods:
the character proper, and upper bits are the attributes.
-.. method:: window.insch([y, x,] ch[, attr])
+.. method:: window.insch(ch[, attr])
+ window.insch(y, x, ch[, attr])
Paint character *ch* at ``(y, x)`` with attributes *attr*, moving the line from
position *x* right by one character.
@@ -934,7 +944,8 @@ the following methods:
line.
-.. method:: window.insnstr([y, x,] str, n [, attr])
+.. method:: window.insnstr(str, n[, attr])
+ window.insnstr(y, x, str, n[, attr])
Insert a character string (as many characters as will fit on the line) before
the character under the cursor, up to *n* characters. If *n* is zero or
@@ -943,7 +954,8 @@ the following methods:
The cursor position does not change (after moving to *y*, *x*, if specified).
-.. method:: window.insstr([y, x, ] str [, attr])
+.. method:: window.insstr(str[, attr])
+ window.insstr(y, x, str[, attr])
Insert a character string (as many characters as will fit on the line) before
the character under the cursor. All characters to the right of the cursor are
@@ -951,7 +963,8 @@ the following methods:
position does not change (after moving to *y*, *x*, if specified).
-.. method:: window.instr([y, x] [, n])
+.. method:: window.instr([n])
+ window.instr(y, x[, n])
Return a string of characters, extracted from the window starting at the
current cursor position, or at *y*, *x* if specified. Attributes are stripped
@@ -1126,13 +1139,15 @@ the following methods:
Turn on attribute *A_STANDOUT*.
-.. method:: window.subpad([nlines, ncols,] begin_y, begin_x)
+.. method:: window.subpad(begin_y, begin_x)
+ window.subpad(nlines, ncols, begin_y, begin_x)
Return a sub-window, whose upper-left corner is at ``(begin_y, begin_x)``, and
whose width/height is *ncols*/*nlines*.
-.. method:: window.subwin([nlines, ncols,] begin_y, begin_x)
+.. method:: window.subwin(begin_y, begin_x)
+ window.subwin(nlines, ncols, begin_y, begin_x)
Return a sub-window, whose upper-left corner is at ``(begin_y, begin_x)``, and
whose width/height is *ncols*/*nlines*.
@@ -1189,7 +1204,8 @@ the following methods:
:meth:`refresh`.
-.. method:: window.vline([y, x,] ch, n)
+.. method:: window.vline(ch, n)
+ window.vline(y, x, ch, n)
Display a vertical line starting at ``(y, x)`` with length *n* consisting of the
character *ch*.
diff --git a/Doc/library/datetime.rst b/Doc/library/datetime.rst
index f3b0870..db95269 100644
--- a/Doc/library/datetime.rst
+++ b/Doc/library/datetime.rst
@@ -14,27 +14,34 @@
The :mod:`datetime` module supplies classes for manipulating dates and times in
both simple and complex ways. While date and time arithmetic is supported, the
focus of the implementation is on efficient attribute extraction for output
-formatting and manipulation. For related
-functionality, see also the :mod:`time` and :mod:`calendar` modules.
-
-There are two kinds of date and time objects: "naive" and "aware". This
-distinction refers to whether the object has any notion of time zone, daylight
-saving time, or other kind of algorithmic or political time adjustment. Whether
-a naive :class:`.datetime` object represents Coordinated Universal Time (UTC),
-local time, or time in some other timezone is purely up to the program, just
-like it's up to the program whether a particular number represents metres,
-miles, or mass. Naive :class:`.datetime` objects are easy to understand and to
-work with, at the cost of ignoring some aspects of reality.
-
-For applications requiring more, :class:`.datetime` and :class:`.time` objects
-have an optional time zone information attribute, :attr:`tzinfo`, that can be
-set to an instance of a subclass of the abstract :class:`tzinfo` class. These
-:class:`tzinfo` objects capture information about the offset from UTC time, the
-time zone name, and whether Daylight Saving Time is in effect. Note that no
-concrete :class:`tzinfo` classes are supplied by the :mod:`datetime` module.
-Supporting timezones at whatever level of detail is required is up to the
-application. The rules for time adjustment across the world are more political
-than rational, and there is no standard suitable for every application.
+formatting and manipulation. For related functionality, see also the
+:mod:`time` and :mod:`calendar` modules.
+
+There are two kinds of date and time objects: "naive" and "aware".
+
+An aware object has sufficient knowledge of applicable algorithmic and
+political time adjustments, such as time zone and daylight saving time
+information, to locate itself relative to other aware objects. An aware object
+is used to represent a specific moment in time that is not open to
+interpretation [#]_.
+
+A naive object does not contain enough information to unambiguously locate
+itself relative to other date/time objects. Whether a naive object represents
+Coordinated Universal Time (UTC), local time, or time in some other timezone is
+purely up to the program, just like it's up to the program whether a particular
+number represents metres, miles, or mass. Naive objects are easy to understand
+and to work with, at the cost of ignoring some aspects of reality.
+
+For applications requiring aware objects, :class:`.datetime` and :class:`.time`
+objects have an optional time zone information attribute, :attr:`tzinfo`, that
+can be set to an instance of a subclass of the abstract :class:`tzinfo` class.
+These :class:`tzinfo` objects capture information about the offset from UTC
+time, the time zone name, and whether Daylight Saving Time is in effect. Note
+that no concrete :class:`tzinfo` classes are supplied by the :mod:`datetime`
+module. Supporting timezones at whatever level of detail is required is up to
+the application. The rules for time adjustment across the world are more
+political than rational, and there is no standard suitable for every
+application.
The :mod:`datetime` module exports the following constants:
@@ -105,10 +112,13 @@ Objects of these types are immutable.
Objects of the :class:`date` type are always naive.
-An object *d* of type :class:`.time` or :class:`.datetime` may be naive or aware.
-*d* is aware if ``d.tzinfo`` is not ``None`` and ``d.tzinfo.utcoffset(d)`` does
-not return ``None``. If ``d.tzinfo`` is ``None``, or if ``d.tzinfo`` is not
-``None`` but ``d.tzinfo.utcoffset(d)`` returns ``None``, *d* is naive.
+An object of type :class:`.time` or :class:`.datetime` may be naive or aware.
+A :class:`.datetime` object *d* is aware if ``d.tzinfo`` is not ``None`` and
+``d.tzinfo.utcoffset(d)`` does not return ``None``. If ``d.tzinfo`` is
+``None``, or if ``d.tzinfo`` is not ``None`` but ``d.tzinfo.utcoffset(d)``
+returns ``None``, *d* is naive. A :class:`.time` object *t* is aware
+if ``t.tzinfo`` is not ``None`` and ``t.tzinfo.utcoffset(None)`` does not return
+``None``. Otherwise, *t* is naive.
The distinction between naive and aware doesn't apply to :class:`timedelta`
objects.
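A minimal sketch of that rule for :class:`.datetime` objects (the ``UTC`` class
below is a stand-in, since the module ships no concrete :class:`tzinfo`)::

    from datetime import datetime, timedelta, tzinfo

    class UTC(tzinfo):
        def utcoffset(self, dt):
            return timedelta(0)
        def dst(self, dt):
            return timedelta(0)
        def tzname(self, dt):
            return "UTC"

    def is_aware(d):
        return d.tzinfo is not None and d.tzinfo.utcoffset(d) is not None

    print is_aware(datetime.now())          # False -- naive
    print is_aware(datetime.now(UTC()))     # True  -- aware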
@@ -541,8 +551,16 @@ Instance methods:
.. method:: date.strftime(format)
Return a string representing the date, controlled by an explicit format string.
- Format codes referring to hours, minutes or seconds will see 0 values. See
- section :ref:`strftime-strptime-behavior`.
+ Format codes referring to hours, minutes or seconds will see 0 values. For a
+ complete list of formatting directives, see section
+ :ref:`strftime-strptime-behavior`.
+
+
+.. method:: date.__format__(format)
+
+   Same as :meth:`.date.strftime`. This makes it possible to specify a format
+ string for a :class:`.date` object when using :meth:`str.format`.
+ See section :ref:`strftime-strptime-behavior`.
Example of counting days to an event::
@@ -595,6 +613,8 @@ Example of working with :class:`date`:
'11/03/02'
>>> d.strftime("%A %d. %B %Y")
'Monday 11. March 2002'
+ >>> 'The {1} is {0:%d}, the {2} is {0:%B}.'.format(d, "day", "month")
+ 'The day is 11, the month is March.'
.. _datetime-datetime:
@@ -711,7 +731,8 @@ Other constructors, all class methods:
*format*. This is equivalent to ``datetime(*(time.strptime(date_string,
format)[0:6]))``. :exc:`ValueError` is raised if the date_string and format
can't be parsed by :func:`time.strptime` or if it returns a value which isn't a
- time tuple. See section :ref:`strftime-strptime-behavior`.
+ time tuple. For a complete list of formatting directives, see section
+ :ref:`strftime-strptime-behavior`.
.. versionadded:: 2.5
@@ -1031,7 +1052,15 @@ Instance methods:
.. method:: datetime.strftime(format)
Return a string representing the date and time, controlled by an explicit format
- string. See section :ref:`strftime-strptime-behavior`.
+ string. For a complete list of formatting directives, see section
+ :ref:`strftime-strptime-behavior`.
+
+
+.. method:: datetime.__format__(format)
+
+   Same as :meth:`.datetime.strftime`. This makes it possible to specify a format
+ string for a :class:`.datetime` object when using :meth:`str.format`.
+ See section :ref:`strftime-strptime-behavior`.
Examples of working with datetime objects:
@@ -1078,19 +1107,21 @@ Examples of working with datetime objects:
>>> # Formatting datetime
>>> dt.strftime("%A, %d. %B %Y %I:%M%p")
'Tuesday, 21. November 2006 04:30PM'
+ >>> 'The {1} is {0:%d}, the {2} is {0:%B}, the {3} is {0:%I:%M%p}.'.format(dt, "day", "month", "time")
+ 'The day is 21, the month is November, the time is 04:30PM.'
Using datetime with tzinfo:
>>> from datetime import timedelta, datetime, tzinfo
>>> class GMT1(tzinfo):
- ... def __init__(self): # DST starts last Sunday in March
+ ... def utcoffset(self, dt):
+ ... return timedelta(hours=1) + self.dst(dt)
+ ... def dst(self, dt):
+ ... # DST starts last Sunday in March
... d = datetime(dt.year, 4, 1) # ends last Sunday in October
... self.dston = d - timedelta(days=d.weekday() + 1)
... d = datetime(dt.year, 11, 1)
... self.dstoff = d - timedelta(days=d.weekday() + 1)
- ... def utcoffset(self, dt):
- ... return timedelta(hours=1) + self.dst(dt)
- ... def dst(self, dt):
... if self.dston <= dt.replace(tzinfo=None) < self.dstoff:
... return timedelta(hours=1)
... else:
@@ -1099,16 +1130,15 @@ Using datetime with tzinfo:
... return "GMT +1"
...
>>> class GMT2(tzinfo):
- ... def __init__(self):
+ ... def utcoffset(self, dt):
+ ... return timedelta(hours=2) + self.dst(dt)
+ ... def dst(self, dt):
... d = datetime(dt.year, 4, 1)
... self.dston = d - timedelta(days=d.weekday() + 1)
... d = datetime(dt.year, 11, 1)
... self.dstoff = d - timedelta(days=d.weekday() + 1)
- ... def utcoffset(self, dt):
- ... return timedelta(hours=1) + self.dst(dt)
- ... def dst(self, dt):
... if self.dston <= dt.replace(tzinfo=None) < self.dstoff:
- ... return timedelta(hours=2)
+ ... return timedelta(hours=1)
... else:
... return timedelta(0)
... def tzname(self,dt):
@@ -1145,7 +1175,7 @@ Using datetime with tzinfo:
A time object represents a (local) time of day, independent of any particular
day, and subject to adjustment via a :class:`tzinfo` object.
-.. class:: time(hour[, minute[, second[, microsecond[, tzinfo]]]])
+.. class:: time([hour[, minute[, second[, microsecond[, tzinfo]]]]])
All arguments are optional. *tzinfo* may be ``None``, or an instance of a
:class:`tzinfo` subclass. The remaining arguments may be ints or longs, in the
@@ -1256,6 +1286,14 @@ Instance methods:
.. method:: time.strftime(format)
Return a string representing the time, controlled by an explicit format string.
+ For a complete list of formatting directives, see section
+ :ref:`strftime-strptime-behavior`.
+
+
+.. method:: time.__format__(format)
+
+   Same as :meth:`.time.strftime`. This makes it possible to specify a format string
+ for a :class:`.time` object when using :meth:`str.format`.
See section :ref:`strftime-strptime-behavior`.
@@ -1305,6 +1343,8 @@ Example:
'Europe/Prague'
>>> t.strftime("%H:%M:%S %Z")
'12:10:30 Europe/Prague'
+ >>> 'The {} is {:%H:%M}.'.format("time", t)
+ 'The time is 12:10.'
.. _datetime-tzinfo:
@@ -1521,6 +1561,21 @@ Applications that can't bear such ambiguities should avoid using hybrid
other fixed-offset :class:`tzinfo` subclass (such as a class representing only
EST (fixed offset -5 hours), or only EDT (fixed offset -4 hours)).
+.. seealso::
+
+ `pytz <http://pypi.python.org/pypi/pytz/>`_
+ The standard library has no :class:`tzinfo` instances, but
+ there exists a third-party library which brings the *IANA timezone
+ database* (also known as the Olson database) to Python: *pytz*.
+
+ *pytz* contains up-to-date information and its usage is recommended.
+
+ `IANA timezone database <http://www.iana.org/time-zones>`_
+ The Time Zone Database (often called tz or zoneinfo) contains code and
+ data that represent the history of local time for many representative
+ locations around the globe. It is updated periodically to reflect changes
+ made by political bodies to time zone boundaries, UTC offsets, and
+ daylight-saving rules.
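A rough sketch of *pytz* use, assuming the package is installed (it is not part
of the standard library)::

    from datetime import datetime
    import pytz

    eastern = pytz.timezone('US/Eastern')
    aware = eastern.localize(datetime(2002, 10, 27, 6, 0, 0))
    print aware.strftime('%Y-%m-%d %H:%M:%S %Z%z')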
.. _strftime-strptime-behavior:
@@ -1546,30 +1601,10 @@ For :class:`date` objects, the format codes for hours, minutes, seconds, and
microseconds should not be used, as :class:`date` objects have no such
values. If they're used anyway, ``0`` is substituted for them.
-.. versionadded:: 2.6
- :class:`.time` and :class:`.datetime` objects support a ``%f`` format code
- which expands to the number of microseconds in the object, zero-padded on
- the left to six places.
-
-For a naive object, the ``%z`` and ``%Z`` format codes are replaced by empty
-strings.
-
-For an aware object:
-
-``%z``
- :meth:`utcoffset` is transformed into a 5-character string of the form +HHMM or
- -HHMM, where HH is a 2-digit string giving the number of UTC offset hours, and
- MM is a 2-digit string giving the number of UTC offset minutes. For example, if
- :meth:`utcoffset` returns ``timedelta(hours=-3, minutes=-30)``, ``%z`` is
- replaced with the string ``'-0330'``.
-
-``%Z``
- If :meth:`tzname` returns ``None``, ``%Z`` is replaced by an empty string.
- Otherwise ``%Z`` is replaced by the returned value, which must be a string.
-
The full set of format codes supported varies across platforms, because Python
calls the platform C library's :func:`strftime` function, and platform
-variations are common.
+variations are common. To see the full set of format codes supported on your
+platform, consult the :manpage:`strftime(3)` documentation.
The following is a list of all the format codes that the C standard (1989
version) requires, and these work on all platforms with a standard C
@@ -1579,116 +1614,156 @@ format codes.
The exact range of years for which :meth:`strftime` works also varies across
platforms. Regardless of platform, years before 1900 cannot be used.
-+-----------+--------------------------------+-------+
-| Directive | Meaning | Notes |
-+===========+================================+=======+
-| ``%a`` | Locale's abbreviated weekday | |
-| | name. | |
-+-----------+--------------------------------+-------+
-| ``%A`` | Locale's full weekday name. | |
-+-----------+--------------------------------+-------+
-| ``%b`` | Locale's abbreviated month | |
-| | name. | |
-+-----------+--------------------------------+-------+
-| ``%B`` | Locale's full month name. | |
-+-----------+--------------------------------+-------+
-| ``%c`` | Locale's appropriate date and | |
-| | time representation. | |
-+-----------+--------------------------------+-------+
-| ``%d`` | Day of the month as a decimal | |
-| | number [01,31]. | |
-+-----------+--------------------------------+-------+
-| ``%f`` | Microsecond as a decimal | \(1) |
-| | number [0,999999], zero-padded | |
-| | on the left | |
-+-----------+--------------------------------+-------+
-| ``%H`` | Hour (24-hour clock) as a | |
-| | decimal number [00,23]. | |
-+-----------+--------------------------------+-------+
-| ``%I`` | Hour (12-hour clock) as a | |
-| | decimal number [01,12]. | |
-+-----------+--------------------------------+-------+
-| ``%j`` | Day of the year as a decimal | |
-| | number [001,366]. | |
-+-----------+--------------------------------+-------+
-| ``%m`` | Month as a decimal number | |
-| | [01,12]. | |
-+-----------+--------------------------------+-------+
-| ``%M`` | Minute as a decimal number | |
-| | [00,59]. | |
-+-----------+--------------------------------+-------+
-| ``%p`` | Locale's equivalent of either | \(2) |
-| | AM or PM. | |
-+-----------+--------------------------------+-------+
-| ``%S`` | Second as a decimal number | \(3) |
-| | [00,61]. | |
-+-----------+--------------------------------+-------+
-| ``%U`` | Week number of the year | \(4) |
-| | (Sunday as the first day of | |
-| | the week) as a decimal number | |
-| | [00,53]. All days in a new | |
-| | year preceding the first | |
-| | Sunday are considered to be in | |
-| | week 0. | |
-+-----------+--------------------------------+-------+
-| ``%w`` | Weekday as a decimal number | |
-| | [0(Sunday),6]. | |
-+-----------+--------------------------------+-------+
-| ``%W`` | Week number of the year | \(4) |
-| | (Monday as the first day of | |
-| | the week) as a decimal number | |
-| | [00,53]. All days in a new | |
-| | year preceding the first | |
-| | Monday are considered to be in | |
-| | week 0. | |
-+-----------+--------------------------------+-------+
-| ``%x`` | Locale's appropriate date | |
-| | representation. | |
-+-----------+--------------------------------+-------+
-| ``%X`` | Locale's appropriate time | |
-| | representation. | |
-+-----------+--------------------------------+-------+
-| ``%y`` | Year without century as a | |
-| | decimal number [00,99]. | |
-+-----------+--------------------------------+-------+
-| ``%Y`` | Year with century as a decimal | |
-| | number. | |
-+-----------+--------------------------------+-------+
-| ``%z`` | UTC offset in the form +HHMM | \(5) |
-| | or -HHMM (empty string if the | |
-| | the object is naive). | |
-+-----------+--------------------------------+-------+
-| ``%Z`` | Time zone name (empty string | |
-| | if the object is naive). | |
-+-----------+--------------------------------+-------+
-| ``%%`` | A literal ``'%'`` character. | |
-+-----------+--------------------------------+-------+
++-----------+--------------------------------+------------------------+-------+
+| Directive | Meaning | Example | Notes |
++===========+================================+========================+=======+
+| ``%a`` | Weekday as locale's || Sun, Mon, ..., Sat | \(1) |
+| | abbreviated name. | (en_US); | |
+| | || So, Mo, ..., Sa | |
+| | | (de_DE) | |
++-----------+--------------------------------+------------------------+-------+
+| ``%A`` | Weekday as locale's full name. || Sunday, Monday, ..., | \(1) |
+| | | Saturday (en_US); | |
+| | || Sonntag, Montag, ..., | |
+| | | Samstag (de_DE) | |
++-----------+--------------------------------+------------------------+-------+
+| ``%w`` | Weekday as a decimal number, | 0, 1, ..., 6 | |
+| | where 0 is Sunday and 6 is | | |
+| | Saturday. | | |
++-----------+--------------------------------+------------------------+-------+
+| ``%d`` | Day of the month as a | 01, 02, ..., 31 | |
+| | zero-padded decimal number. | | |
++-----------+--------------------------------+------------------------+-------+
+| ``%b`` | Month as locale's abbreviated || Jan, Feb, ..., Dec | \(1) |
+| | name. | (en_US); | |
+| | || Jan, Feb, ..., Dez | |
+| | | (de_DE) | |
++-----------+--------------------------------+------------------------+-------+
+| ``%B`` | Month as locale's full name. || January, February, | \(1) |
+| | | ..., December (en_US);| |
+| | || Januar, Februar, ..., | |
+| | | Dezember (de_DE) | |
++-----------+--------------------------------+------------------------+-------+
+| ``%m`` | Month as a zero-padded | 01, 02, ..., 12 | |
+| | decimal number. | | |
++-----------+--------------------------------+------------------------+-------+
+| ``%y`` | Year without century as a | 00, 01, ..., 99 | |
+| | zero-padded decimal number. | | |
++-----------+--------------------------------+------------------------+-------+
+| ``%Y`` | Year with century as a decimal | 1970, 1988, 2001, 2013 | |
+| | number. | | |
++-----------+--------------------------------+------------------------+-------+
+| ``%H`` | Hour (24-hour clock) as a | 00, 01, ..., 23 | |
+| | zero-padded decimal number. | | |
++-----------+--------------------------------+------------------------+-------+
+| ``%I`` | Hour (12-hour clock) as a | 01, 02, ..., 12 | |
+| | zero-padded decimal number. | | |
++-----------+--------------------------------+------------------------+-------+
+| ``%p`` | Locale's equivalent of either || AM, PM (en_US); | \(1), |
+| | AM or PM. || am, pm (de_DE) | \(2) |
++-----------+--------------------------------+------------------------+-------+
+| ``%M`` | Minute as a zero-padded | 00, 01, ..., 59 | |
+| | decimal number. | | |
++-----------+--------------------------------+------------------------+-------+
+| ``%S`` | Second as a zero-padded | 00, 01, ..., 59 | \(3) |
+| | decimal number. | | |
++-----------+--------------------------------+------------------------+-------+
+| ``%f`` | Microsecond as a decimal | 000000, 000001, ..., | \(4) |
+| | number, zero-padded on the | 999999 | |
+| | left. | | |
++-----------+--------------------------------+------------------------+-------+
+| ``%z`` | UTC offset in the form +HHMM | (empty), +0000, -0400, | \(5) |
+|           | or -HHMM (empty string if      | +1030                  |       |
+| | the object is naive). | | |
++-----------+--------------------------------+------------------------+-------+
+| ``%Z`` | Time zone name (empty string | (empty), UTC, EST, CST | |
+| | if the object is naive). | | |
++-----------+--------------------------------+------------------------+-------+
+| ``%j`` | Day of the year as a | 001, 002, ..., 366 | |
+| | zero-padded decimal number. | | |
++-----------+--------------------------------+------------------------+-------+
+| ``%U`` | Week number of the year | 00, 01, ..., 53 | \(6) |
+| | (Sunday as the first day of | | |
+| | the week) as a zero padded | | |
+| | decimal number. All days in a | | |
+| | new year preceding the first | | |
+| | Sunday are considered to be in | | |
+| | week 0. | | |
++-----------+--------------------------------+------------------------+-------+
+| ``%W`` | Week number of the year | 00, 01, ..., 53 | \(6) |
+| | (Monday as the first day of | | |
+| | the week) as a decimal number. | | |
+| | All days in a new year | | |
+| | preceding the first Monday | | |
+| | are considered to be in | | |
+| | week 0. | | |
++-----------+--------------------------------+------------------------+-------+
+| ``%c`` | Locale's appropriate date and || Tue Aug 16 21:30:00 | \(1) |
+| | time representation. | 1988 (en_US); | |
+| | || Di 16 Aug 21:30:00 | |
+| | | 1988 (de_DE) | |
++-----------+--------------------------------+------------------------+-------+
+| ``%x`` | Locale's appropriate date || 08/16/88 (None); | \(1) |
+| | representation. || 08/16/1988 (en_US); | |
+| | || 16.08.1988 (de_DE) | |
++-----------+--------------------------------+------------------------+-------+
+| ``%X`` | Locale's appropriate time || 21:30:00 (en_US); | \(1) |
+| | representation. || 21:30:00 (de_DE) | |
++-----------+--------------------------------+------------------------+-------+
+| ``%%`` | A literal ``'%'`` character. | % | |
++-----------+--------------------------------+------------------------+-------+
Notes:
(1)
- When used with the :meth:`strptime` method, the ``%f`` directive
- accepts from one to six digits and zero pads on the right. ``%f`` is
- an extension to the set of format characters in the C standard (but
- implemented separately in datetime objects, and therefore always
- available).
+ Because the format depends on the current locale, care should be taken when
+ making assumptions about the output value. Field orderings will vary (for
+ example, "month/day/year" versus "day/month/year"), and the output may
+ contain Unicode characters encoded using the locale's default encoding (for
+ example, if the current locale is ``ja_JP``, the default encoding could be
+ any one of ``eucJP``, ``SJIS``, or ``utf-8``; use :meth:`locale.getlocale`
+ to determine the current locale's encoding).
(2)
When used with the :meth:`strptime` method, the ``%p`` directive only affects
the output hour field if the ``%I`` directive is used to parse the hour.
(3)
- The range really is ``0`` to ``61``; according to the Posix standard this
- accounts for leap seconds and the (very rare) double leap seconds.
- The :mod:`time` module may produce and does accept leap seconds since
- it is based on the Posix standard, but the :mod:`datetime` module
- does not accept leap seconds in :meth:`strptime` input nor will it
- produce them in :func:`strftime` output.
+ Unlike the :mod:`time` module, the :mod:`datetime` module does not support
+ leap seconds.
(4)
- When used with the :meth:`strptime` method, ``%U`` and ``%W`` are only used in
- calculations when the day of the week and the year are specified.
+ ``%f`` is an extension to the set of format characters in the C standard
+ (but implemented separately in datetime objects, and therefore always
+ available). When used with the :meth:`strptime` method, the ``%f``
+ directive accepts from one to six digits and zero pads on the right.
+
+ .. versionadded:: 2.6
(5)
- For example, if :meth:`utcoffset` returns ``timedelta(hours=-3, minutes=-30)``,
- ``%z`` is replaced with the string ``'-0330'``.
+ For a naive object, the ``%z`` and ``%Z`` format codes are replaced by empty
+ strings.
+
+ For an aware object:
+
+ ``%z``
+ :meth:`utcoffset` is transformed into a 5-character string of the form
+ +HHMM or -HHMM, where HH is a 2-digit string giving the number of UTC
+ offset hours, and MM is a 2-digit string giving the number of UTC offset
+ minutes. For example, if :meth:`utcoffset` returns
+ ``timedelta(hours=-3, minutes=-30)``, ``%z`` is replaced with the string
+ ``'-0330'``.
+
+ ``%Z``
+ If :meth:`tzname` returns ``None``, ``%Z`` is replaced by an empty
+ string. Otherwise ``%Z`` is replaced by the returned value, which must
+ be a string.
+
+(6)
+ When used with the :meth:`strptime` method, ``%U`` and ``%W`` are only used
+ in calculations when the day of the week and the year are specified.
+
+
+.. rubric:: Footnotes
+
+.. [#] If, that is, we ignore the effects of Relativity.
diff --git a/Doc/library/dbhash.rst b/Doc/library/dbhash.rst
index 0b440ab..ed965e1 100644
--- a/Doc/library/dbhash.rst
+++ b/Doc/library/dbhash.rst
@@ -6,7 +6,7 @@
.. sectionauthor:: Fred L. Drake, Jr. <fdrake@acm.org>
.. deprecated:: 2.6
- The :mod:`dbhash` module has been deprecated for removal in Python 3.0.
+ The :mod:`dbhash` module has been removed in Python 3.
.. index:: module: bsddb
diff --git a/Doc/library/dbm.rst b/Doc/library/dbm.rst
index 8747789..a03c7c5 100644
--- a/Doc/library/dbm.rst
+++ b/Doc/library/dbm.rst
@@ -6,9 +6,9 @@
:synopsis: The standard "database" interface, based on ndbm.
.. note::
- The :mod:`dbm` module has been renamed to :mod:`dbm.ndbm` in Python 3.0. The
+ The :mod:`dbm` module has been renamed to :mod:`dbm.ndbm` in Python 3. The
:term:`2to3` tool will automatically adapt imports when converting your
- sources to 3.0.
+ sources to Python 3.
The :mod:`dbm` module provides an interface to the Unix "(n)dbm" library. Dbm
@@ -64,6 +64,14 @@ The module defines the following:
database has to be created. It defaults to octal ``0666`` (and will be
modified by the prevailing umask).
+ In addition to the dictionary-like methods, ``dbm`` objects
+ provide the following method:
+
+
+ .. function:: close()
+
+ Close the ``dbm`` database.
+
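A small sketch of opening, using, and closing a ``dbm`` database (the file name
is illustrative)::

    import dbm

    db = dbm.open('cache', 'c')     # 'c' creates the database if it does not exist
    db['www.python.org'] = 'Python Website'
    print db['www.python.org']
    db.close()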
.. seealso::
diff --git a/Doc/library/decimal.rst b/Doc/library/decimal.rst
index f9e70de..d5ba4f0 100644
--- a/Doc/library/decimal.rst
+++ b/Doc/library/decimal.rst
@@ -375,6 +375,29 @@ Decimal objects
compared, sorted, and coerced to another type (such as :class:`float` or
:class:`long`).
+ There are some small differences between arithmetic on Decimal objects and
+ arithmetic on integers and floats. When the remainder operator ``%`` is
+ applied to Decimal objects, the sign of the result is the sign of the
+ *dividend* rather than the sign of the divisor::
+
+ >>> (-7) % 4
+ 1
+ >>> Decimal(-7) % Decimal(4)
+ Decimal('-3')
+
+ The integer division operator ``//`` behaves analogously, returning the
+ integer part of the true quotient (truncating towards zero) rather than its
+ floor, so as to preserve the usual identity ``x == (x // y) * y + x % y``::
+
+ >>> -7 // 4
+ -2
+ >>> Decimal(-7) // Decimal(4)
+ Decimal('-1')
+
+ The ``%`` and ``//`` operators implement the ``remainder`` and
+ ``divide-integer`` operations (respectively) as described in the
+ specification.
+
Decimal objects cannot generally be combined with floats in
arithmetic operations: an attempt to add a :class:`Decimal` to a
:class:`float`, for example, will raise a :exc:`TypeError`.
@@ -802,12 +825,21 @@ Decimal objects
.. method:: remainder_near(other[, context])
- Compute the modulo as either a positive or negative value depending on
- which is closest to zero. For instance, ``Decimal(10).remainder_near(6)``
- returns ``Decimal('-2')`` which is closer to zero than ``Decimal('4')``.
+ Return the remainder from dividing *self* by *other*. This differs from
+ ``self % other`` in that the sign of the remainder is chosen so as to
+ minimize its absolute value. More precisely, the return value is
+ ``self - n * other`` where ``n`` is the integer nearest to the exact
+ value of ``self / other``, and if two integers are equally near then the
+ even one is chosen.
+
+ If the result is zero then its sign will be the sign of *self*.
- If both are equally close, the one chosen will have the same sign as
- *self*.
+ >>> Decimal(18).remainder_near(Decimal(10))
+ Decimal('-2')
+ >>> Decimal(25).remainder_near(Decimal(10))
+ Decimal('5')
+ >>> Decimal(35).remainder_near(Decimal(10))
+ Decimal('-5')
.. method:: rotate(other[, context])
@@ -944,6 +976,10 @@ the :func:`localcontext` function to temporarily change the active context.
s = calculate_something()
s = +s # Round the final result back to the default precision
+ with localcontext(BasicContext): # temporarily use the BasicContext
+ print Decimal(1) / Decimal(7)
+ print Decimal(355) / Decimal(113)
+
New contexts can also be created using the :class:`Context` constructor
described below. In addition, the module provides three pre-made contexts:
@@ -1190,52 +1226,52 @@ In addition to the three supplied contexts, new contexts can be created with the
.. method:: is_canonical(x)
- Returns True if *x* is canonical; otherwise returns False.
+ Returns ``True`` if *x* is canonical; otherwise returns ``False``.
.. method:: is_finite(x)
- Returns True if *x* is finite; otherwise returns False.
+ Returns ``True`` if *x* is finite; otherwise returns ``False``.
.. method:: is_infinite(x)
- Returns True if *x* is infinite; otherwise returns False.
+ Returns ``True`` if *x* is infinite; otherwise returns ``False``.
.. method:: is_nan(x)
- Returns True if *x* is a qNaN or sNaN; otherwise returns False.
+ Returns ``True`` if *x* is a qNaN or sNaN; otherwise returns ``False``.
.. method:: is_normal(x)
- Returns True if *x* is a normal number; otherwise returns False.
+ Returns ``True`` if *x* is a normal number; otherwise returns ``False``.
.. method:: is_qnan(x)
- Returns True if *x* is a quiet NaN; otherwise returns False.
+ Returns ``True`` if *x* is a quiet NaN; otherwise returns ``False``.
.. method:: is_signed(x)
- Returns True if *x* is negative; otherwise returns False.
+ Returns ``True`` if *x* is negative; otherwise returns ``False``.
.. method:: is_snan(x)
- Returns True if *x* is a signaling NaN; otherwise returns False.
+ Returns ``True`` if *x* is a signaling NaN; otherwise returns ``False``.
.. method:: is_subnormal(x)
- Returns True if *x* is subnormal; otherwise returns False.
+ Returns ``True`` if *x* is subnormal; otherwise returns ``False``.
.. method:: is_zero(x)
- Returns True if *x* is a zero; otherwise returns False.
+ Returns ``True`` if *x* is a zero; otherwise returns ``False``.
.. method:: ln(x)
@@ -1395,7 +1431,7 @@ In addition to the three supplied contexts, new contexts can be created with the
.. method:: same_quantum(x, y)
- Returns True if the two operands have the same exponent.
+ Returns ``True`` if the two operands have the same exponent.
.. method:: scaleb (x, y)
diff --git a/Doc/library/difflib.rst b/Doc/library/difflib.rst
index 225b486..878d8e6 100644
--- a/Doc/library/difflib.rst
+++ b/Doc/library/difflib.rst
@@ -84,7 +84,7 @@ diffs. For comparing directories and files, see also, the :mod:`filecmp` module.
The constructor for this class is:
- .. function:: __init__([tabsize][, wrapcolumn][, linejunk][, charjunk])
+ .. function:: __init__(tabsize=8, wrapcolumn=None, linejunk=None, charjunk=IS_CHARACTER_JUNK)
Initializes instance of :class:`HtmlDiff`.
@@ -344,7 +344,7 @@ SequenceMatcher Objects
The :class:`SequenceMatcher` class has this constructor:
-.. class:: SequenceMatcher([isjunk[, a[, b[, autojunk=True]]]])
+.. class:: SequenceMatcher(isjunk=None, a='', b='', autojunk=True)
Optional argument *isjunk* must be ``None`` (the default) or a one-argument
function that takes a sequence element and returns true if and only if the
@@ -632,10 +632,12 @@ The :class:`Differ` class has this constructor:
Compare two sequences of lines, and generate the delta (a sequence of lines).
- Each sequence must contain individual single-line strings ending with newlines.
- Such sequences can be obtained from the :meth:`readlines` method of file-like
- objects. The delta generated also consists of newline-terminated strings, ready
- to be printed as-is via the :meth:`writelines` method of a file-like object.
+ Each sequence must contain individual single-line strings ending with
+ newlines. Such sequences can be obtained from the
+ :meth:`~file.readlines` method of file-like objects. The delta
+ generated also consists of newline-terminated strings, ready to be
+ printed as-is via the :meth:`~file.writelines` method of a
+ file-like object.
.. _differ-examples:
@@ -645,7 +647,7 @@ Differ Example
This example compares two texts. First we set up the texts, sequences of
individual single-line strings ending with newlines (such sequences can also be
-obtained from the :meth:`readlines` method of file-like objects):
+obtained from the :meth:`~file.readlines` method of file-like objects):
>>> text1 = ''' 1. Beautiful is better than ugly.
... 2. Explicit is better than implicit.
diff --git a/Doc/library/dircache.rst b/Doc/library/dircache.rst
index 71a8abe..632ddd5 100644
--- a/Doc/library/dircache.rst
+++ b/Doc/library/dircache.rst
@@ -7,7 +7,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`dircache` module has been removed in Python 3.0.
+ The :mod:`dircache` module has been removed in Python 3.
.. sectionauthor:: Moshe Zadka <moshez@zadka.site.co.il>
diff --git a/Doc/library/dl.rst b/Doc/library/dl.rst
index 13510c5..40556cc 100644
--- a/Doc/library/dl.rst
+++ b/Doc/library/dl.rst
@@ -8,7 +8,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`dl` module has been removed in Python 3.0. Use the :mod:`ctypes`
+ The :mod:`dl` module has been removed in Python 3. Use the :mod:`ctypes`
module instead.
.. sectionauthor:: Moshe Zadka <moshez@zadka.site.co.il>
diff --git a/Doc/library/doctest.rst b/Doc/library/doctest.rst
index b6d1756..a1e270d 100644
--- a/Doc/library/doctest.rst
+++ b/Doc/library/doctest.rst
@@ -1,3 +1,5 @@
+:keepdoctest:
+
:mod:`doctest` --- Test interactive Python examples
===================================================
@@ -339,7 +341,8 @@ The fine print:
Tabs in output generated by the tested code are not modified. Because any
hard tabs in the sample output *are* expanded, this means that if the code
output includes hard tabs, the only way the doctest can pass is if the
- :const:`NORMALIZE_WHITESPACE` option or directive is in effect.
+ :const:`NORMALIZE_WHITESPACE` option or :ref:`directive <doctest-directives>`
+ is in effect.
Alternatively, the test can be rewritten to capture the output and compare it
to an expected value as part of the test. This handling of tabs in the
source was arrived at through trial and error, and has proven to be the least
@@ -363,7 +366,7 @@ The fine print:
Backslashes in a raw docstring: m\n
Otherwise, the backslash will be interpreted as part of the string. For example,
- the "\\" above would be interpreted as a newline character. Alternatively, you
+ the ``\n`` above would be interpreted as a newline character. Alternatively, you
can double each backslash in the doctest version (and not use a raw string)::
>>> def f(x):
@@ -511,15 +514,16 @@ Some details you should read once, but won't need to remember:
SyntaxError: invalid syntax
+.. _option-flags-and-directives:
.. _doctest-options:
-Option Flags and Directives
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Option Flags
+^^^^^^^^^^^^
A number of option flags control various aspects of doctest's behavior.
Symbolic names for the flags are supplied as module constants, which can be
or'ed together and passed to various functions. The names can also be used in
-doctest directives (see below).
+:ref:`doctest directives <doctest-directives>`.
The first group of options define test semantics, controlling aspects of how
doctest decides whether actual output matches an example's expected output:
@@ -573,14 +577,14 @@ doctest decides whether actual output matches an example's expected output:
:exc:`TypeError` is raised.
It will also ignore the module name used in Python 3 doctest reports. Hence
- both these variations will work regardless of whether the test is run under
- Python 2.7 or Python 3.2 (or later versions):
+ both of these variations will work with the flag specified, regardless of
+ whether the test is run under Python 2.7 or Python 3.2 (or later versions)::
- >>> raise CustomError('message') #doctest: +IGNORE_EXCEPTION_DETAIL
+ >>> raise CustomError('message')
Traceback (most recent call last):
CustomError: message
- >>> raise CustomError('message') #doctest: +IGNORE_EXCEPTION_DETAIL
+ >>> raise CustomError('message')
Traceback (most recent call last):
my_module.CustomError: message
@@ -590,15 +594,16 @@ doctest decides whether actual output matches an example's expected output:
exception name. Using :const:`IGNORE_EXCEPTION_DETAIL` and the details
from Python 2.3 is also the only clear way to write a doctest that doesn't
care about the exception detail yet continues to pass under Python 2.3 or
- earlier (those releases do not support doctest directives and ignore them
- as irrelevant comments). For example, ::
+ earlier (those releases do not support :ref:`doctest directives
+ <doctest-directives>` and ignore them as irrelevant comments). For example::
- >>> (1, 2)[3] = 'moo' #doctest: +IGNORE_EXCEPTION_DETAIL
+ >>> (1, 2)[3] = 'moo'
Traceback (most recent call last):
File "<stdin>", line 1, in ?
TypeError: object doesn't support item assignment
- passes under Python 2.3 and later Python versions, even though the detail
+ passes under Python 2.3 and later Python versions with the flag specified,
+ even though the detail
changed in Python 2.4 to say "does not" instead of "doesn't".
.. versionchanged:: 2.7
@@ -662,9 +667,40 @@ The second group of options controls how test failures are reported:
A bitmask or'ing together all the reporting flags above.
-"Doctest directives" may be used to modify the option flags for individual
-examples. Doctest directives are expressed as a special Python comment
-following an example's source code:
+
+.. versionadded:: 2.4
+ The constants
+ :const:`DONT_ACCEPT_BLANKLINE`, :const:`NORMALIZE_WHITESPACE`,
+ :const:`ELLIPSIS`, :const:`IGNORE_EXCEPTION_DETAIL`, :const:`REPORT_UDIFF`,
+ :const:`REPORT_CDIFF`, :const:`REPORT_NDIFF`,
+ :const:`REPORT_ONLY_FIRST_FAILURE`, :const:`COMPARISON_FLAGS` and
+ :const:`REPORTING_FLAGS` were added.
+
+There's also a way to register new option flag names, although this isn't useful
+unless you intend to extend :mod:`doctest` internals via subclassing:
+
+
+.. function:: register_optionflag(name)
+
+ Create a new option flag with a given name, and return the new flag's integer
+ value. :func:`register_optionflag` can be used when subclassing
+ :class:`OutputChecker` or :class:`DocTestRunner` to create new options that are
+ supported by your subclasses. :func:`register_optionflag` should always be
+ called using the following idiom::
+
+ MY_FLAG = register_optionflag('MY_FLAG')
+
+ .. versionadded:: 2.4
+
+
+.. _doctest-directives:
+
+Directives
+^^^^^^^^^^
+
+Doctest directives may be used to modify the :ref:`option flags
+<doctest-options>` for an individual example. Doctest directives are
+special Python comments following an example's source code:
.. productionlist:: doctest
directive: "#" "doctest:" `directive_options`
@@ -682,7 +718,7 @@ example. Use ``+`` to enable the named behavior, or ``-`` to disable it.
For example, this test passes::
- >>> print range(20) #doctest: +NORMALIZE_WHITESPACE
+ >>> print range(20) # doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
@@ -691,10 +727,11 @@ two blanks before the single-digit list elements, and because the actual output
is on a single line. This test also passes, and also requires a directive to do
so::
- >>> print range(20) # doctest:+ELLIPSIS
+ >>> print range(20) # doctest: +ELLIPSIS
[0, 1, ..., 18, 19]
-Multiple directives can be used on a single physical line, separated by commas::
+Multiple directives can be used on a single physical line, separated by
+commas::
>>> print range(20) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[0, 1, ..., 18, 19]
@@ -721,28 +758,7 @@ functions that run doctests, establishing different defaults. In such cases,
disabling an option via ``-`` in a directive can be useful.
.. versionadded:: 2.4
- Doctest directives and the associated constants
- :const:`DONT_ACCEPT_BLANKLINE`, :const:`NORMALIZE_WHITESPACE`,
- :const:`ELLIPSIS`, :const:`IGNORE_EXCEPTION_DETAIL`, :const:`REPORT_UDIFF`,
- :const:`REPORT_CDIFF`, :const:`REPORT_NDIFF`,
- :const:`REPORT_ONLY_FIRST_FAILURE`, :const:`COMPARISON_FLAGS` and
- :const:`REPORTING_FLAGS` were added.
-
-There's also a way to register new option flag names, although this isn't useful
-unless you intend to extend :mod:`doctest` internals via subclassing:
-
-
-.. function:: register_optionflag(name)
-
- Create a new option flag with a given name, and return the new flag's integer
- value. :func:`register_optionflag` can be used when subclassing
- :class:`OutputChecker` or :class:`DocTestRunner` to create new options that are
- supported by your subclasses. :func:`register_optionflag` should always be
- called using the following idiom::
-
- MY_FLAG = register_optionflag('MY_FLAG')
-
- .. versionadded:: 2.4
+ Support for doctest directives was added.
.. _doctest-warnings:
@@ -1060,6 +1076,16 @@ from text files and modules with doctests:
.. versionchanged:: 2.5
The parameter *encoding* was added.
+ .. note::
+ Unlike :func:`testmod` and :class:`DocTestFinder`, this function raises
+ a :exc:`ValueError` if *module* contains no docstrings. You can prevent
+ this error by passing a :class:`DocTestFinder` instance as the
+ *test_finder* argument with its *exclude_empty* keyword argument set
+ to ``False``::
+
+ >>> finder = doctest.DocTestFinder(exclude_empty=False)
+ >>> suite = doctest.DocTestSuite(test_finder=finder)
+
.. function:: DocTestSuite([module][, globs][, extraglobs][, test_finder][, setUp][, tearDown][, checker])
diff --git a/Doc/library/docxmlrpcserver.rst b/Doc/library/docxmlrpcserver.rst
index 67cb3b9..08e4e4b 100644
--- a/Doc/library/docxmlrpcserver.rst
+++ b/Doc/library/docxmlrpcserver.rst
@@ -8,8 +8,8 @@
.. note::
The :mod:`DocXMLRPCServer` module has been merged into :mod:`xmlrpc.server`
- in Python 3.0. The :term:`2to3` tool will automatically adapt imports when
- converting your sources to 3.0.
+ in Python 3. The :term:`2to3` tool will automatically adapt imports when
+ converting your sources to Python 3.
.. versionadded:: 2.3
diff --git a/Doc/library/dumbdbm.rst b/Doc/library/dumbdbm.rst
index a511855..1a9a647 100644
--- a/Doc/library/dumbdbm.rst
+++ b/Doc/library/dumbdbm.rst
@@ -5,9 +5,9 @@
:synopsis: Portable implementation of the simple DBM interface.
.. note::
- The :mod:`dumbdbm` module has been renamed to :mod:`dbm.dumb` in Python 3.0.
+ The :mod:`dumbdbm` module has been renamed to :mod:`dbm.dumb` in Python 3.
The :term:`2to3` tool will automatically adapt imports when converting your
- sources to 3.0.
+ sources to Python 3.
.. index:: single: databases
@@ -49,6 +49,14 @@ The module defines the following:
.. versionchanged:: 2.2
The *mode* argument was ignored in earlier versions.
+In addition to the dictionary-like methods, ``dumbdbm`` objects
+provide the following method:
+
+
+.. function:: close()
+
+ Close the ``dumbdbm`` database.
+
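A minimal usage sketch (the database name ``'example'`` is arbitrary)::

   import dumbdbm

   db = dumbdbm.open('example', 'c')   # creates example.dir/.dat/.bak files as needed
   db['key'] = 'value'                 # dictionary-like assignment
   db.close()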
.. seealso::
diff --git a/Doc/library/dummy_thread.rst b/Doc/library/dummy_thread.rst
index a4dba86..a1d977d 100644
--- a/Doc/library/dummy_thread.rst
+++ b/Doc/library/dummy_thread.rst
@@ -6,8 +6,8 @@
.. note::
The :mod:`dummy_thread` module has been renamed to :mod:`_dummy_thread` in
- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
- converting your sources to 3.0; however, you should consider using the
+ Python 3. The :term:`2to3` tool will automatically adapt imports when
+ converting your sources to Python 3; however, you should consider using the
 high-level :mod:`dummy_threading` module instead.
**Source code:** :source:`Lib/dummy_thread.py`
diff --git a/Doc/library/email.charset.rst b/Doc/library/email.charset.rst
index b008386..0ed6f0a 100644
--- a/Doc/library/email.charset.rst
+++ b/Doc/library/email.charset.rst
@@ -1,5 +1,5 @@
-:mod:`email`: Representing character sets
------------------------------------------
+:mod:`email.charset`: Representing character sets
+-------------------------------------------------
.. module:: email.charset
:synopsis: Character Sets
@@ -249,5 +249,5 @@ new entries to the global character set, alias, and codec registries:
*charset* is the canonical name of a character set. *codecname* is the name of a
Python codec, as appropriate for the second argument to the :func:`unicode`
- built-in, or to the :meth:`encode` method of a Unicode string.
+ built-in, or to the :meth:`~unicode.encode` method of a Unicode string.
diff --git a/Doc/library/email.encoders.rst b/Doc/library/email.encoders.rst
index 5421b9f..916ba5d 100644
--- a/Doc/library/email.encoders.rst
+++ b/Doc/library/email.encoders.rst
@@ -1,5 +1,5 @@
-:mod:`email`: Encoders
-----------------------
+:mod:`email.encoders`: Encoders
+-------------------------------
.. module:: email.encoders
:synopsis: Encoders for email message payloads.
@@ -18,6 +18,10 @@ exactly one argument, the message object to encode. They usually extract the
payload, encode it, and reset the payload to this newly encoded value. They
should also set the :mailheader:`Content-Transfer-Encoding` header as appropriate.
+Note that these functions are not meaningful for a multipart message. They
+must be applied to individual subparts instead, and will raise a
+:exc:`TypeError` if passed a message whose type is multipart.
+
Here are the encoding functions provided:
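As an illustration of the typical pattern with one of these encoders, :func:`~email.encoders.encode_base64` (a sketch; the attachment filename is hypothetical)::

   from email import encoders
   from email.mime.base import MIMEBase

   part = MIMEBase('application', 'octet-stream')
   part.set_payload(open('archive.zip', 'rb').read())
   # base64-encodes the payload and sets the Content-Transfer-Encoding header
   encoders.encode_base64(part)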
diff --git a/Doc/library/email.errors.rst b/Doc/library/email.errors.rst
index 06598d5..499d754 100644
--- a/Doc/library/email.errors.rst
+++ b/Doc/library/email.errors.rst
@@ -1,5 +1,5 @@
-:mod:`email`: Exception and Defect classes
-------------------------------------------
+:mod:`email.errors`: Exception and Defect classes
+-------------------------------------------------
.. module:: email.errors
:synopsis: The exception classes used by the email package.
@@ -25,7 +25,8 @@ The following exception classes are defined in the :mod:`email.errors` module:
Raised under some error conditions when parsing the :rfc:`2822` headers of a
message, this class is derived from :exc:`MessageParseError`. It can be raised
- from the :meth:`Parser.parse` or :meth:`Parser.parsestr` methods.
+ from the :meth:`Parser.parse <email.parser.Parser.parse>` or
+ :meth:`Parser.parsestr <email.parser.Parser.parsestr>` methods.
Situations where it can be raised include finding an envelope header after the
first :rfc:`2822` header of the message, finding a continuation line before the
@@ -37,7 +38,8 @@ The following exception classes are defined in the :mod:`email.errors` module:
Raised under some error conditions when parsing the :rfc:`2822` headers of a
message, this class is derived from :exc:`MessageParseError`. It can be raised
- from the :meth:`Parser.parse` or :meth:`Parser.parsestr` methods.
+ from the :meth:`Parser.parse <email.parser.Parser.parse>` or
+ :meth:`Parser.parsestr <email.parser.Parser.parsestr>` methods.
Situations where it can be raised include not being able to find the starting or
terminating boundary in a :mimetype:`multipart/\*` message when strict parsing
@@ -46,19 +48,20 @@ The following exception classes are defined in the :mod:`email.errors` module:
.. exception:: MultipartConversionError()
- Raised when a payload is added to a :class:`Message` object using
- :meth:`add_payload`, but the payload is already a scalar and the message's
- :mailheader:`Content-Type` main type is not either :mimetype:`multipart` or
- missing. :exc:`MultipartConversionError` multiply inherits from
- :exc:`MessageError` and the built-in :exc:`TypeError`.
+ Raised when a payload is added to a :class:`~email.message.Message` object
+ using :meth:`add_payload`, but the payload is already a scalar and the
+ message's :mailheader:`Content-Type` main type is not either
+ :mimetype:`multipart` or missing. :exc:`MultipartConversionError` multiply
+ inherits from :exc:`MessageError` and the built-in :exc:`TypeError`.
- Since :meth:`Message.add_payload` is deprecated, this exception is rarely raised
- in practice. However the exception may also be raised if the :meth:`attach`
+ Since :meth:`Message.add_payload` is deprecated, this exception is rarely
+ raised in practice. However the exception may also be raised if the
+ :meth:`~email.message.Message.attach`
method is called on an instance of a class derived from
:class:`~email.mime.nonmultipart.MIMENonMultipart` (e.g.
:class:`~email.mime.image.MIMEImage`).
-Here's the list of the defects that the :class:`~email.mime.parser.FeedParser`
+Here's the list of the defects that the :class:`~email.parser.FeedParser`
can find while parsing messages. Note that the defects are added to the message
where the problem was found, so for example, if a message nested inside a
:mimetype:`multipart/alternative` had a malformed header, that nested message
@@ -86,7 +89,7 @@ this class is *not* an exception!
or was otherwise malformed.
* :class:`MultipartInvariantViolationDefect` -- A message claimed to be a
- :mimetype:`multipart`, but no subparts were found. Note that when a message has
- this defect, its :meth:`is_multipart` method may return false even though its
- content type claims to be :mimetype:`multipart`.
+ :mimetype:`multipart`, but no subparts were found. Note that when a message
+ has this defect, its :meth:`~email.message.Message.is_multipart` method may
+ return false even though its content type claims to be :mimetype:`multipart`.
diff --git a/Doc/library/email.generator.rst b/Doc/library/email.generator.rst
index f02e7d8..4ea7e6a 100644
--- a/Doc/library/email.generator.rst
+++ b/Doc/library/email.generator.rst
@@ -1,5 +1,5 @@
-:mod:`email`: Generating MIME documents
----------------------------------------
+:mod:`email.generator`: Generating MIME documents
+-------------------------------------------------
.. module:: email.generator
:synopsis: Generate flat text email messages from a message structure.
@@ -17,10 +17,10 @@ yourself. However the bundled generator knows how to generate most email in a
standards-compliant way, should handle MIME and non-MIME email messages just
fine, and is designed so that the transformation from flat text, to a message
structure via the :class:`~email.parser.Parser` class, and back to flat text,
-is idempotent (the input is identical to the output). On the other hand, using
-the Generator on a :class:`~email.message.Message` constructed by program may
-result in changes to the :class:`~email.message.Message` object as defaults are
-filled in.
+is idempotent (the input is identical to the output) [#]_. On the other hand,
+using the Generator on a :class:`~email.message.Message` constructed by program
+may result in changes to the :class:`~email.message.Message` object as defaults
+are filled in.
Here are the public methods of the :class:`Generator` class, imported from the
:mod:`email.generator` module:
@@ -125,3 +125,11 @@ representing the part.
.. versionchanged:: 2.5
The previously deprecated method :meth:`__call__` was removed.
+
+.. rubric:: Footnotes
+
+.. [#] This statement assumes that you use the appropriate setting for the
+ ``unixfrom`` argument, and that you set maxheaderlen=0 (which will
+ preserve whatever the input line lengths were). It is also not strictly
+ true, since in many cases runs of whitespace in headers are collapsed
+ into single blanks. The latter is a bug that will eventually be fixed.
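A sketch of the round trip described above (``raw_text`` is assumed to hold the flat text of an :rfc:`2822` message)::

   from cStringIO import StringIO
   from email.parser import Parser
   from email.generator import Generator

   msg = Parser().parsestr(raw_text)
   out = StringIO()
   Generator(out, mangle_from_=False, maxheaderlen=0).flatten(msg, unixfrom=False)
   # for messages parsed from flat text, out.getvalue() should match raw_text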
diff --git a/Doc/library/email.header.rst b/Doc/library/email.header.rst
index fe09de5..4e585fc 100644
--- a/Doc/library/email.header.rst
+++ b/Doc/library/email.header.rst
@@ -1,5 +1,5 @@
-:mod:`email`: Internationalized headers
----------------------------------------
+:mod:`email.header`: Internationalized headers
+----------------------------------------------
.. module:: email.header
:synopsis: Representing non-ASCII headers
@@ -103,7 +103,7 @@ Here is the :class:`Header` class description:
not provoke a :exc:`UnicodeError` is used.
Optional *errors* is passed through to any :func:`unicode` or
- :func:`ustr.encode` call, and defaults to "strict".
+ :meth:`unicode.encode` call, and defaults to "strict".
.. method:: encode([splitchars])
diff --git a/Doc/library/email.iterators.rst b/Doc/library/email.iterators.rst
index aa70141..6621d51 100644
--- a/Doc/library/email.iterators.rst
+++ b/Doc/library/email.iterators.rst
@@ -1,13 +1,14 @@
-:mod:`email`: Iterators
------------------------
+:mod:`email.iterators`: Iterators
+---------------------------------
.. module:: email.iterators
:synopsis: Iterate over a message object tree.
Iterating over a message object tree is fairly easy with the
-:meth:`Message.walk` method. The :mod:`email.iterators` module provides some
-useful higher level iterations over message object trees.
+:meth:`Message.walk <email.message.Message.walk>` method. The
+:mod:`email.iterators` module provides some useful higher level iterations over
+message object trees.
.. function:: body_line_iterator(msg[, decode])
@@ -16,9 +17,11 @@ useful higher level iterations over message object trees.
string payloads line-by-line. It skips over all the subpart headers, and it
skips over any subpart with a payload that isn't a Python string. This is
somewhat equivalent to reading the flat text representation of the message from
- a file using :meth:`readline`, skipping over all the intervening headers.
+ a file using :meth:`~io.TextIOBase.readline`, skipping over all the
+ intervening headers.
- Optional *decode* is passed through to :meth:`Message.get_payload`.
+ Optional *decode* is passed through to :meth:`Message.get_payload
+ <email.message.Message.get_payload>`.
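A short sketch of iterating over a message's textual payload (``raw_text`` is assumed to hold a flat message)::

   from email import message_from_string
   from email.iterators import body_line_iterator

   msg = message_from_string(raw_text)
   for line in body_line_iterator(msg, decode=True):
       print line,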
.. function:: typed_subpart_iterator(msg[, maintype[, subtype]])
diff --git a/Doc/library/email.message.rst b/Doc/library/email.message.rst
index 77ce99e..838bcf5 100644
--- a/Doc/library/email.message.rst
+++ b/Doc/library/email.message.rst
@@ -1,5 +1,5 @@
-:mod:`email`: Representing an email message
--------------------------------------------
+:mod:`email.message`: Representing an email message
+---------------------------------------------------
.. module:: email.message
:synopsis: The base class representing email messages.
@@ -48,8 +48,8 @@ Here are the methods of the :class:`Message` class:
Note that this method is provided as a convenience and may not always
format the message the way you want. For example, by default it mangles
lines that begin with ``From``. For more flexibility, instantiate a
- :class:`~email.generator.Generator` instance and use its :meth:`flatten`
- method directly. For example::
+ :class:`~email.generator.Generator` instance and use its
+ :meth:`~email.generator.Generator.flatten` method directly. For example::
from cStringIO import StringIO
from email.generator import Generator
@@ -68,7 +68,7 @@ Here are the methods of the :class:`Message` class:
Return ``True`` if the message's payload is a list of sub-\
:class:`Message` objects, otherwise return ``False``. When
- :meth:`is_multipart` returns False, the payload should be a string object.
+ :meth:`is_multipart` returns ``False``, the payload should be a string object.
.. method:: set_unixfrom(unixfrom)
@@ -494,8 +494,8 @@ Here are the methods of the :class:`Message` class:
Set the ``boundary`` parameter of the :mailheader:`Content-Type` header to
*boundary*. :meth:`set_boundary` will always quote *boundary* if
- necessary. A :exc:`HeaderParseError` is raised if the message object has
- no :mailheader:`Content-Type` header.
+ necessary. A :exc:`~email.errors.HeaderParseError` is raised if the
+ message object has no :mailheader:`Content-Type` header.
Note that using this method is subtly different than deleting the old
:mailheader:`Content-Type` header and adding a new one with the new
@@ -589,7 +589,8 @@ Here are the methods of the :class:`Message` class:
.. versionchanged:: 2.5
You do not need to set the epilogue to the empty string in order for the
- :class:`Generator` to print a newline at the end of the file.
+ :class:`~email.generator.Generator` to print a newline at the end of the
+ file.
.. attribute:: defects
diff --git a/Doc/library/email.mime.rst b/Doc/library/email.mime.rst
index 78fdc76..dcf7b59 100644
--- a/Doc/library/email.mime.rst
+++ b/Doc/library/email.mime.rst
@@ -1,5 +1,5 @@
-:mod:`email`: Creating email and MIME objects from scratch
-----------------------------------------------------------
+:mod:`email.mime`: Creating email and MIME objects from scratch
+---------------------------------------------------------------
.. module:: email.mime
:synopsis: Build MIME messages.
@@ -35,7 +35,8 @@ Here are the classes:
*_maintype* is the :mailheader:`Content-Type` major type (e.g. :mimetype:`text`
or :mimetype:`image`), and *_subtype* is the :mailheader:`Content-Type` minor
type (e.g. :mimetype:`plain` or :mimetype:`gif`). *_params* is a parameter
- key/value dictionary and is passed directly to :meth:`Message.add_header`.
+ key/value dictionary and is passed directly to :meth:`Message.add_header
+ <email.message.Message.add_header>`.
The :class:`MIMEBase` class always adds a :mailheader:`Content-Type` header
(based on *_maintype*, *_subtype*, and *_params*), and a
@@ -50,8 +51,9 @@ Here are the classes:
A subclass of :class:`~email.mime.base.MIMEBase`, this is an intermediate base
class for MIME messages that are not :mimetype:`multipart`. The primary
- purpose of this class is to prevent the use of the :meth:`attach` method,
- which only makes sense for :mimetype:`multipart` messages. If :meth:`attach`
+ purpose of this class is to prevent the use of the
+ :meth:`~email.message.Message.attach` method, which only makes sense for
+ :mimetype:`multipart` messages. If :meth:`~email.message.Message.attach`
is called, a :exc:`~email.errors.MultipartConversionError` exception is raised.
.. versionadded:: 2.2.2
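To illustrate the distinction, a minimal sketch (the body text is made up)::

   from email.mime.multipart import MIMEMultipart
   from email.mime.text import MIMEText

   outer = MIMEMultipart()
   outer.attach(MIMEText('hello'))     # multipart containers accept subparts

   plain = MIMEText('hello')
   plain.attach(MIMEText('world'))     # raises MultipartConversionError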
@@ -76,7 +78,8 @@ Here are the classes:
*_subparts* is a sequence of initial subparts for the payload. It must be
possible to convert this sequence to a list. You can always attach new subparts
- to the message by using the :meth:`Message.attach` method.
+ to the message by using the :meth:`Message.attach
+ <email.message.Message.attach>` method.
Additional parameters for the :mailheader:`Content-Type` header are taken from
the keyword arguments, or passed into the *_params* argument, which is a keyword
@@ -99,8 +102,10 @@ Here are the classes:
Optional *_encoder* is a callable (i.e. function) which will perform the actual
encoding of the data for transport. This callable takes one argument, which is
- the :class:`MIMEApplication` instance. It should use :meth:`get_payload` and
- :meth:`set_payload` to change the payload to encoded form. It should also add
+ the :class:`MIMEApplication` instance. It should use
+ :meth:`~email.message.Message.get_payload` and
+ :meth:`~email.message.Message.set_payload` to change the payload to encoded
+ form. It should also add
any :mailheader:`Content-Transfer-Encoding` or other headers to the message
object as necessary. The default encoding is base64. See the
:mod:`email.encoders` module for a list of the built-in encoders.
@@ -127,8 +132,10 @@ Here are the classes:
Optional *_encoder* is a callable (i.e. function) which will perform the actual
encoding of the audio data for transport. This callable takes one argument,
- which is the :class:`MIMEAudio` instance. It should use :meth:`get_payload` and
- :meth:`set_payload` to change the payload to encoded form. It should also add
+ which is the :class:`MIMEAudio` instance. It should use
+ :meth:`~email.message.Message.get_payload` and
+ :meth:`~email.message.Message.set_payload` to change the payload to encoded
+ form. It should also add
any :mailheader:`Content-Transfer-Encoding` or other headers to the message
object as necessary. The default encoding is base64. See the
:mod:`email.encoders` module for a list of the built-in encoders.
@@ -153,8 +160,10 @@ Here are the classes:
Optional *_encoder* is a callable (i.e. function) which will perform the actual
encoding of the image data for transport. This callable takes one argument,
- which is the :class:`MIMEImage` instance. It should use :meth:`get_payload` and
- :meth:`set_payload` to change the payload to encoded form. It should also add
+ which is the :class:`MIMEImage` instance. It should use
+ :meth:`~email.message.Message.get_payload` and
+ :meth:`~email.message.Message.set_payload` to change the payload to encoded
+ form. It should also add
any :mailheader:`Content-Transfer-Encoding` or other headers to the message
object as necessary. The default encoding is base64. See the
:mod:`email.encoders` module for a list of the built-in encoders.
@@ -199,3 +208,12 @@ Here are the classes:
Transfer Encoding now happens implicitly based on the *_charset*
argument.
+ Unless the ``_charset`` parameter is explicitly set to ``None``, the
+ MIMEText object created will have both a :mailheader:`Content-Type` header
+ with a ``charset`` parameter, and a :mailheader:`Content-Transfer-Encoding`
+ header. This means that a subsequent ``set_payload`` call will not result
+ in an encoded payload, even if a charset is passed in the ``set_payload``
+ command. You can "reset" this behavior by deleting the
+ ``Content-Transfer-Encoding`` header, after which a ``set_payload`` call
+ will automatically encode the new payload (and add a new
+ :mailheader:`Content-Transfer-Encoding` header).
diff --git a/Doc/library/email.parser.rst b/Doc/library/email.parser.rst
index b2f6b05..0f99a2f 100644
--- a/Doc/library/email.parser.rst
+++ b/Doc/library/email.parser.rst
@@ -1,5 +1,5 @@
-:mod:`email`: Parsing email messages
-------------------------------------
+:mod:`email.parser`: Parsing email messages
+-------------------------------------------
.. module:: email.parser
:synopsis: Parse flat text email messages to produce a message object structure.
@@ -7,7 +7,8 @@
Message object structures can be created in one of two ways: they can be created
from whole cloth by instantiating :class:`~email.message.Message` objects and
-stringing them together via :meth:`attach` and :meth:`set_payload` calls, or they
+stringing them together via :meth:`~email.message.Message.attach` and
+:meth:`~email.message.Message.set_payload` calls, or they
can be created by parsing a flat text representation of the email message.
The :mod:`email` package provides a standard parser that understands most email
@@ -16,8 +17,9 @@ or a file object, and the parser will return to you the root
:class:`~email.message.Message` instance of the object structure. For simple,
non-MIME messages the payload of this root object will likely be a string
containing the text of the message. For MIME messages, the root object will
-return ``True`` from its :meth:`is_multipart` method, and the subparts can be
-accessed via the :meth:`get_payload` and :meth:`walk` methods.
+return ``True`` from its :meth:`~email.message.Message.is_multipart` method, and
+the subparts can be accessed via the :meth:`~email.message.Message.get_payload`
+and :meth:`~email.message.Message.walk` methods.
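For instance (a sketch; ``raw_text`` is assumed to contain the flat text of a MIME message)::

   from email import message_from_string

   msg = message_from_string(raw_text)
   if msg.is_multipart():
       for part in msg.walk():
           print part.get_content_type()
   else:
       print msg.get_payload()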
There are actually two parser interfaces available for use, the classic
:class:`Parser` API and the incremental :class:`FeedParser` API. The classic
@@ -127,7 +129,8 @@ class.
Read all the data from the file-like object *fp*, parse the resulting
text, and return the root message object. *fp* must support both the
- :meth:`readline` and the :meth:`read` methods on file-like objects.
+ :meth:`~io.TextIOBase.readline` and the :meth:`~io.TextIOBase.read`
+ methods on file-like objects.
The text contained in *fp* must be formatted as a block of :rfc:`2822`
style headers and header continuation lines, optionally preceded by a
@@ -147,7 +150,7 @@ class.
Similar to the :meth:`parse` method, except it takes a string object
instead of a file-like object. Calling this method on a string is exactly
- equivalent to wrapping *text* in a :class:`StringIO` instance first and
+ equivalent to wrapping *text* in a :class:`~StringIO.StringIO` instance first and
calling :meth:`parse`.
Optional *headersonly* is as with the :meth:`parse` method.
@@ -165,7 +168,7 @@ in the top-level :mod:`email` package namespace.
Return a message object structure from a string. This is exactly equivalent to
``Parser().parsestr(s)``. Optional *_class* and *strict* are interpreted as
- with the :class:`Parser` class constructor.
+ with the :class:`~email.parser.Parser` class constructor.
.. versionchanged:: 2.2.2
The *strict* flag was added.
@@ -175,7 +178,7 @@ in the top-level :mod:`email` package namespace.
Return a message object structure tree from an open file object. This is
exactly equivalent to ``Parser().parse(fp)``. Optional *_class* and *strict*
- are interpreted as with the :class:`Parser` class constructor.
+ are interpreted as with the :class:`~email.parser.Parser` class constructor.
.. versionchanged:: 2.2.2
The *strict* flag was added.
@@ -193,32 +196,35 @@ Here are some notes on the parsing semantics:
* Most non-\ :mimetype:`multipart` type messages are parsed as a single message
object with a string payload. These objects will return ``False`` for
- :meth:`is_multipart`. Their :meth:`get_payload` method will return a string
- object.
+ :meth:`~email.message.Message.is_multipart`. Their
+ :meth:`~email.message.Message.get_payload` method will return a string object.
* All :mimetype:`multipart` type messages will be parsed as a container message
object with a list of sub-message objects for their payload. The outer
- container message will return ``True`` for :meth:`is_multipart` and their
- :meth:`get_payload` method will return the list of :class:`~email.message.Message`
- subparts.
+ container message will return ``True`` for
+ :meth:`~email.message.Message.is_multipart` and their
+ :meth:`~email.message.Message.get_payload` method will return the list of
+ :class:`~email.message.Message` subparts.
* Most messages with a content type of :mimetype:`message/\*` (e.g.
:mimetype:`message/delivery-status` and :mimetype:`message/rfc822`) will also be
parsed as container object containing a list payload of length 1. Their
- :meth:`is_multipart` method will return ``True``. The single element in the
- list payload will be a sub-message object.
+ :meth:`~email.message.Message.is_multipart` method will return ``True``.
+ The single element in the list payload will be a sub-message object.
* Some non-standards compliant messages may not be internally consistent about
their :mimetype:`multipart`\ -edness. Such messages may have a
:mailheader:`Content-Type` header of type :mimetype:`multipart`, but their
- :meth:`is_multipart` method may return ``False``. If such messages were parsed
- with the :class:`FeedParser`, they will have an instance of the
- :class:`MultipartInvariantViolationDefect` class in their *defects* attribute
- list. See :mod:`email.errors` for details.
+ :meth:`~email.message.Message.is_multipart` method may return ``False``.
+ If such messages were parsed with the :class:`~email.parser.FeedParser`,
+ they will have an instance of the
+ :class:`~email.errors.MultipartInvariantViolationDefect` class in their
+ *defects* attribute list. See :mod:`email.errors` for details.
.. rubric:: Footnotes
.. [#] As of email package version 3.0, introduced in Python 2.4, the classic
- :class:`Parser` was re-implemented in terms of the :class:`FeedParser`, so the
- semantics and results are identical between the two parsers.
+ :class:`~email.parser.Parser` was re-implemented in terms of the
+ :class:`~email.parser.FeedParser`, so the semantics and results are
+ identical between the two parsers.
diff --git a/Doc/library/email.rst b/Doc/library/email.rst
index aaff153..4e90ce4 100644
--- a/Doc/library/email.rst
+++ b/Doc/library/email.rst
@@ -112,14 +112,15 @@ Here are the major differences between :mod:`email` version 4 and version 3:
*Note that the version 3 names will continue to work until Python 2.6*.
* The :mod:`email.mime.application` module was added, which contains the
- :class:`MIMEApplication` class.
+ :class:`~email.mime.application.MIMEApplication` class.
* Methods that were deprecated in version 3 have been removed. These include
:meth:`Generator.__call__`, :meth:`Message.get_type`,
:meth:`Message.get_main_type`, :meth:`Message.get_subtype`.
* Fixes have been added for :rfc:`2231` support which can change some of the
- return types for :func:`Message.get_param` and friends. Under some
+ return types for :func:`Message.get_param <email.message.Message.get_param>`
+ and friends. Under some
circumstances, values which used to return a 3-tuple now return simple strings
(specifically, if all extended parameter segments were unencoded, there is no
language and charset designation expected, so the return type is now a simple
@@ -128,23 +129,24 @@ Here are the major differences between :mod:`email` version 4 and version 3:
Here are the major differences between :mod:`email` version 3 and version 2:
-* The :class:`FeedParser` class was introduced, and the :class:`Parser` class
- was implemented in terms of the :class:`FeedParser`. All parsing therefore is
+* The :class:`~email.parser.FeedParser` class was introduced, and the
+ :class:`~email.parser.Parser` class was implemented in terms of the
+ :class:`~email.parser.FeedParser`. All parsing therefore is
non-strict, and parsing will make a best effort never to raise an exception.
Problems found while parsing messages are stored in the message's *defect*
attribute.
* All aspects of the API which raised :exc:`DeprecationWarning`\ s in version 2
have been removed. These include the *_encoder* argument to the
- :class:`MIMEText` constructor, the :meth:`Message.add_payload` method, the
- :func:`Utils.dump_address_pair` function, and the functions :func:`Utils.decode`
- and :func:`Utils.encode`.
+ :class:`~email.mime.text.MIMEText` constructor, the
+ :meth:`Message.add_payload` method, the :func:`Utils.dump_address_pair`
+ function, and the functions :func:`Utils.decode` and :func:`Utils.encode`.
* New :exc:`DeprecationWarning`\ s have been added to:
:meth:`Generator.__call__`, :meth:`Message.get_type`,
:meth:`Message.get_main_type`, :meth:`Message.get_subtype`, and the *strict*
- argument to the :class:`Parser` class. These are expected to be removed in
- future versions.
+ argument to the :class:`~email.parser.Parser` class. These are expected to
+ be removed in future versions.
* Support for Pythons earlier than 2.3 has been removed.
@@ -152,53 +154,61 @@ Here are the differences between :mod:`email` version 2 and version 1:
* The :mod:`email.Header` and :mod:`email.Charset` modules have been added.
-* The pickle format for :class:`Message` instances has changed. Since this was
- never (and still isn't) formally defined, this isn't considered a backward
- incompatibility. However if your application pickles and unpickles
- :class:`Message` instances, be aware that in :mod:`email` version 2,
- :class:`Message` instances now have private variables *_charset* and
- *_default_type*.
+* The pickle format for :class:`~email.message.Message` instances has changed.
+ Since this was never (and still isn't) formally defined, this isn't
+ considered a backward incompatibility. However if your application pickles
+ and unpickles :class:`~email.message.Message` instances, be aware that in
+ :mod:`email` version 2, :class:`~email.message.Message` instances now have
+ private variables *_charset* and *_default_type*.
-* Several methods in the :class:`Message` class have been deprecated, or their
- signatures changed. Also, many new methods have been added. See the
- documentation for the :class:`Message` class for details. The changes should be
- completely backward compatible.
+* Several methods in the :class:`~email.message.Message` class have been
+ deprecated, or their signatures changed. Also, many new methods have been
+ added. See the documentation for the :class:`~email.message.Message` class
+ for details. The changes should be completely backward compatible.
* The object structure has changed in the face of :mimetype:`message/rfc822`
- content types. In :mod:`email` version 1, such a type would be represented by a
- scalar payload, i.e. the container message's :meth:`is_multipart` returned
- false, :meth:`get_payload` was not a list object, but a single :class:`Message`
- instance.
+ content types. In :mod:`email` version 1, such a type would be represented
+ by a scalar payload, i.e. the container message's
+ :meth:`~email.message.Message.is_multipart` returned false,
+ :meth:`~email.message.Message.get_payload` was not a list object, but a
+ single :class:`~email.message.Message` instance.
This structure was inconsistent with the rest of the package, so the object
representation for :mimetype:`message/rfc822` content types was changed. In
:mod:`email` version 2, the container *does* return ``True`` from
- :meth:`is_multipart`, and :meth:`get_payload` returns a list containing a single
- :class:`Message` item.
-
- Note that this is one place that backward compatibility could not be completely
- maintained. However, if you're already testing the return type of
- :meth:`get_payload`, you should be fine. You just need to make sure your code
- doesn't do a :meth:`set_payload` with a :class:`Message` instance on a container
- with a content type of :mimetype:`message/rfc822`.
-
-* The :class:`Parser` constructor's *strict* argument was added, and its
- :meth:`parse` and :meth:`parsestr` methods grew a *headersonly* argument. The
- *strict* flag was also added to functions :func:`email.message_from_file` and
- :func:`email.message_from_string`.
-
-* :meth:`Generator.__call__` is deprecated; use :meth:`Generator.flatten`
- instead. The :class:`Generator` class has also grown the :meth:`clone` method.
-
-* The :class:`DecodedGenerator` class in the :mod:`email.Generator` module was
- added.
-
-* The intermediate base classes :class:`MIMENonMultipart` and
- :class:`MIMEMultipart` have been added, and interposed in the class hierarchy
- for most of the other MIME-related derived classes.
-
-* The *_encoder* argument to the :class:`MIMEText` constructor has been
- deprecated. Encoding now happens implicitly based on the *_charset* argument.
+ :meth:`~email.message.Message.is_multipart`, and
+ :meth:`~email.message.Message.get_payload` returns a list containing a single
+ :class:`~email.message.Message` item.
+
+ Note that this is one place that backward compatibility could not be
+ completely maintained. However, if you're already testing the return type of
+ :meth:`~email.message.Message.get_payload`, you should be fine. You just need
+ to make sure your code doesn't do a :meth:`~email.message.Message.set_payload`
+ with a :class:`~email.message.Message` instance on a container with a content
+ type of :mimetype:`message/rfc822`.
+
+* The :class:`~email.parser.Parser` constructor's *strict* argument was added,
+ and its :meth:`~email.parser.Parser.parse` and
+ :meth:`~email.parser.Parser.parsestr` methods grew a *headersonly* argument.
+ The *strict* flag was also added to functions :func:`email.message_from_file`
+ and :func:`email.message_from_string`.
+
+* :meth:`Generator.__call__` is deprecated; use :meth:`Generator.flatten
+ <email.generator.Generator.flatten>` instead. The
+ :class:`~email.generator.Generator` class has also grown the
+ :meth:`~email.generator.Generator.clone` method.
+
+* The :class:`~email.generator.DecodedGenerator` class in the
+ :mod:`email.generator` module was added.
+
+* The intermediate base classes
+ :class:`~email.mime.nonmultipart.MIMENonMultipart` and
+ :class:`~email.mime.multipart.MIMEMultipart` have been added, and interposed
+ in the class hierarchy for most of the other MIME-related derived classes.
+
+* The *_encoder* argument to the :class:`~email.mime.text.MIMEText` constructor
+ has been deprecated. Encoding now happens implicitly based on the
+ *_charset* argument.
* The following functions in the :mod:`email.Utils` module have been deprecated:
:func:`dump_address_pairs`, :func:`decode`, and :func:`encode`. The following
@@ -231,17 +241,22 @@ package has the following differences:
* :func:`messageFromFile` has been renamed to :func:`message_from_file`.
-The :class:`Message` class has the following differences:
+The :class:`~email.message.Message` class has the following differences:
-* The method :meth:`asString` was renamed to :meth:`as_string`.
+* The method :meth:`asString` was renamed to
+ :meth:`~email.message.Message.as_string`.
-* The method :meth:`ismultipart` was renamed to :meth:`is_multipart`.
+* The method :meth:`ismultipart` was renamed to
+ :meth:`~email.message.Message.is_multipart`.
-* The :meth:`get_payload` method has grown a *decode* optional argument.
+* The :meth:`~email.message.Message.get_payload` method has grown a *decode*
+ optional argument.
-* The method :meth:`getall` was renamed to :meth:`get_all`.
+* The method :meth:`getall` was renamed to
+ :meth:`~email.message.Message.get_all`.
-* The method :meth:`addheader` was renamed to :meth:`add_header`.
+* The method :meth:`addheader` was renamed to
+ :meth:`~email.message.Message.add_header`.
* The method :meth:`gettype` was renamed to :meth:`get_type`.
@@ -249,48 +264,57 @@ The :class:`Message` class has the following differences:
* The method :meth:`getsubtype` was renamed to :meth:`get_subtype`.
-* The method :meth:`getparams` was renamed to :meth:`get_params`. Also, whereas
- :meth:`getparams` returned a list of strings, :meth:`get_params` returns a list
- of 2-tuples, effectively the key/value pairs of the parameters, split on the
- ``'='`` sign.
+* The method :meth:`getparams` was renamed to
+ :meth:`~email.message.Message.get_params`. Also, whereas :meth:`getparams`
+ returned a list of strings, :meth:`~email.message.Message.get_params` returns
+ a list of 2-tuples, effectively the key/value pairs of the parameters, split
+ on the ``'='`` sign.
-* The method :meth:`getparam` was renamed to :meth:`get_param`.
+* The method :meth:`getparam` was renamed to
+ :meth:`~email.message.Message.get_param`.
-* The method :meth:`getcharsets` was renamed to :meth:`get_charsets`.
+* The method :meth:`getcharsets` was renamed to
+ :meth:`~email.message.Message.get_charsets`.
-* The method :meth:`getfilename` was renamed to :meth:`get_filename`.
+* The method :meth:`getfilename` was renamed to
+ :meth:`~email.message.Message.get_filename`.
-* The method :meth:`getboundary` was renamed to :meth:`get_boundary`.
+* The method :meth:`getboundary` was renamed to
+ :meth:`~email.message.Message.get_boundary`.
-* The method :meth:`setboundary` was renamed to :meth:`set_boundary`.
+* The method :meth:`setboundary` was renamed to
+ :meth:`~email.message.Message.set_boundary`.
* The method :meth:`getdecodedpayload` was removed. To get similar
- functionality, pass the value 1 to the *decode* flag of the get_payload()
- method.
+ functionality, pass the value 1 to the *decode* flag of the
+ :meth:`~email.message.Message.get_payload` method.
* The method :meth:`getpayloadastext` was removed. Similar functionality is
- supported by the :class:`DecodedGenerator` class in the :mod:`email.generator`
- module.
+ supported by the :class:`~email.generator.DecodedGenerator` class in the
+ :mod:`email.generator` module.
* The method :meth:`getbodyastext` was removed. You can get similar
- functionality by creating an iterator with :func:`typed_subpart_iterator` in the
- :mod:`email.iterators` module.
+ functionality by creating an iterator with
+ :func:`~email.iterators.typed_subpart_iterator` in the :mod:`email.iterators`
+ module.
-The :class:`Parser` class has no differences in its public interface. It does
-have some additional smarts to recognize :mimetype:`message/delivery-status`
-type messages, which it represents as a :class:`Message` instance containing
-separate :class:`Message` subparts for each header block in the delivery status
-notification [#]_.
+The :class:`~email.parser.Parser` class has no differences in its public
+interface. It does have some additional smarts to recognize
+:mimetype:`message/delivery-status` type messages, which it represents as a
+:class:`~email.message.Message` instance containing separate
+:class:`~email.message.Message` subparts for each header block in the delivery
+status notification [#]_.
-The :class:`Generator` class has no differences in its public interface. There
-is a new class in the :mod:`email.generator` module though, called
-:class:`DecodedGenerator` which provides most of the functionality previously
-available in the :meth:`Message.getpayloadastext` method.
+The :class:`~email.generator.Generator` class has no differences in its public
+interface. There is a new class in the :mod:`email.generator` module though,
+called :class:`~email.generator.DecodedGenerator` which provides most of the
+functionality previously available in the :meth:`Message.getpayloadastext`
+method.
The following modules and classes have been changed:
-* The :class:`MIMEBase` class constructor arguments *_major* and *_minor* have
- changed to *_maintype* and *_subtype* respectively.
+* The :class:`~email.mime.base.MIMEBase` class constructor arguments *_major*
+ and *_minor* have changed to *_maintype* and *_subtype* respectively.
* The ``Image`` class/module has been renamed to ``MIMEImage``. The *_minor*
argument has been renamed to *_subtype*.
@@ -303,7 +327,8 @@ The following modules and classes have been changed:
but that clashed with the Python standard library module :mod:`rfc822` on some
case-insensitive file systems.
- Also, the :class:`MIMEMessage` class now represents any kind of MIME message
+ Also, the :class:`~email.mime.message.MIMEMessage` class now represents any
+ kind of MIME message
with main type :mimetype:`message`. It takes an optional argument *_subtype*
which is used to set the MIME subtype. *_subtype* defaults to
:mimetype:`rfc822`.
@@ -313,8 +338,8 @@ The following modules and classes have been changed:
:mod:`email.utils` module.
The ``MsgReader`` class/module has been removed. Its functionality is most
-closely supported in the :func:`body_line_iterator` function in the
-:mod:`email.iterators` module.
+closely supported in the :func:`~email.iterators.body_line_iterator` function
+in the :mod:`email.iterators` module.
.. rubric:: Footnotes
diff --git a/Doc/library/email.util.rst b/Doc/library/email.util.rst
index 153ba78..9b583c0 100644
--- a/Doc/library/email.util.rst
+++ b/Doc/library/email.util.rst
@@ -1,5 +1,5 @@
-:mod:`email`: Miscellaneous utilities
--------------------------------------
+:mod:`email.utils`: Miscellaneous utilities
+-------------------------------------------
.. module:: email.utils
:synopsis: Miscellaneous email package utilities.
@@ -41,8 +41,8 @@ There are several useful utilities provided in the :mod:`email.utils` module:
This method returns a list of 2-tuples of the form returned by ``parseaddr()``.
*fieldvalues* is a sequence of header field values as might be returned by
- :meth:`Message.get_all`. Here's a simple example that gets all the recipients
- of a message::
+ :meth:`Message.get_all <email.message.Message.get_all>`. Here's a simple
+ example that gets all the recipients of a message::
from email.utils import getaddresses
@@ -76,12 +76,9 @@ There are several useful utilities provided in the :mod:`email.utils` module:
.. function:: mktime_tz(tuple)
- Turn a 10-tuple as returned by :func:`parsedate_tz` into a UTC timestamp. It
- the timezone item in the tuple is ``None``, assume local time. Minor
- deficiency: :func:`mktime_tz` interprets the first 8 elements of *tuple* as a
- local time and then compensates for the timezone difference. This may yield a
- slight error around changes in daylight savings time, though not worth worrying
- about for common use.
+ Turn a 10-tuple as returned by :func:`parsedate_tz` into a UTC
+ timestamp (seconds since the Epoch). If the timezone item in the
+ tuple is ``None``, assume local time.
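For example, converting a :mailheader:`Date` header value to a timestamp (the header value shown is only an illustration)::

   from email.utils import parsedate_tz, mktime_tz

   timestamp = mktime_tz(parsedate_tz('Fri, 09 Nov 2001 01:08:47 -0000'))
   # timestamp == 1005268127, seconds since the Epoch (UTC)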
.. function:: formatdate([timeval[, localtime][, usegmt]])
@@ -130,7 +127,8 @@ There are several useful utilities provided in the :mod:`email.utils` module:
.. function:: collapse_rfc2231_value(value[, errors[, fallback_charset]])
When a header parameter is encoded in :rfc:`2231` format,
- :meth:`Message.get_param` may return a 3-tuple containing the character set,
+ :meth:`Message.get_param <email.message.Message.get_param>` may return a
+ 3-tuple containing the character set,
language, and value. :func:`collapse_rfc2231_value` turns this into a unicode
string. Optional *errors* is passed to the *errors* argument of the built-in
:func:`unicode` function; it defaults to ``replace``. Optional
@@ -152,15 +150,15 @@ There are several useful utilities provided in the :mod:`email.utils` module:
.. versionchanged:: 2.4
The :func:`decode` function has been removed; use the
- :meth:`Header.decode_header` method instead.
+ :meth:`Header.decode_header <email.header.Header.decode_header>` method
+ instead.
.. versionchanged:: 2.4
- The :func:`encode` function has been removed; use the :meth:`Header.encode`
- method instead.
+ The :func:`encode` function has been removed; use the :meth:`Header.encode
+ <email.header.Header.encode>` method instead.
.. rubric:: Footnotes
.. [#] Note that the sign of the timezone offset is the opposite of the sign of the
``time.timezone`` variable for the same timezone; the latter variable follows
the POSIX standard while this module follows :rfc:`2822`.
-
diff --git a/Doc/library/exceptions.rst b/Doc/library/exceptions.rst
index 6c80df7..9291c18 100644
--- a/Doc/library/exceptions.rst
+++ b/Doc/library/exceptions.rst
@@ -38,10 +38,10 @@ handler or to report an error condition "just like" the situation in which the
interpreter raises the same exception; but beware that there is nothing to
prevent user code from raising an inappropriate error.
-The built-in exception classes can be sub-classed to define new exceptions;
-programmers are encouraged to at least derive new exceptions from the
-:exc:`Exception` class and not :exc:`BaseException`. More information on
-defining exceptions is available in the Python Tutorial under
+The built-in exception classes can be subclassed to define new exceptions;
+programmers are encouraged to derive new exceptions from the :exc:`Exception`
+class or one of its subclasses, and not from :exc:`BaseException`. More
+information on defining exceptions is available in the Python Tutorial under
:ref:`tut-userexceptions`.
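A minimal sketch of such a user-defined exception (the class name and check are made up)::

   class ConfigError(Exception):
       """Raised when a required configuration value is missing."""

   def load(settings):
       if 'hostname' not in settings:
           raise ConfigError('no hostname configured')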
The following exceptions are only used as base classes for other exceptions.
@@ -158,9 +158,9 @@ The following exceptions are the exceptions that are actually raised.
.. exception:: GeneratorExit
- Raise when a :term:`generator`\'s :meth:`close` method is called. It
- directly inherits from :exc:`BaseException` instead of :exc:`StandardError` since
- it is technically not an error.
+ Raised when a :term:`generator`\'s :meth:`close` method is called. It
+ directly inherits from :exc:`BaseException` instead of :exc:`StandardError`
+ since it is technically not an error.
.. versionadded:: 2.5
@@ -286,8 +286,7 @@ The following exceptions are the exceptions that are actually raised.
Raised when an error is detected that doesn't fall in any of the other
categories. The associated value is a string indicating what precisely went
- wrong. (This exception is mostly a relic from a previous version of the
- interpreter; it is not used very much any more.)
+ wrong.
.. exception:: StopIteration
@@ -346,7 +345,7 @@ The following exceptions are the exceptions that are actually raised.
it has another type (such as a string), the object's value is printed and the
exit status is one.
- Instances have an attribute :attr:`code` which is set to the proposed exit
+ Instances have an attribute :attr:`!code` which is set to the proposed exit
status or error message (defaulting to ``None``). Also, this exception derives
directly from :exc:`BaseException` and not :exc:`StandardError`, since it is not
technically an error.
@@ -356,7 +355,7 @@ The following exceptions are the exceptions that are actually raised.
executed, and so that a debugger can execute a script without running the risk
of losing control. The :func:`os._exit` function can be used if it is
absolutely positively necessary to exit immediately (for example, in the child
- process after a call to :func:`fork`).
+ process after a call to :func:`os.fork`).
The exception inherits from :exc:`BaseException` instead of :exc:`StandardError`
or :exc:`Exception` so that it is not accidentally caught by code that catches
@@ -387,6 +386,30 @@ The following exceptions are the exceptions that are actually raised.
Raised when a Unicode-related encoding or decoding error occurs. It is a
subclass of :exc:`ValueError`.
+ :exc:`UnicodeError` has attributes that describe the encoding or decoding
+ error. For example, ``err.object[err.start:err.end]`` gives the particular
+ invalid input that the codec failed on.
+
+ .. attribute:: encoding
+
+ The name of the encoding that raised the error.
+
+ .. attribute:: reason
+
+ A string describing the specific codec error.
+
+ .. attribute:: object
+
+ The object the codec was attempting to encode or decode.
+
+ .. attribute:: start
+
+ The first index of invalid data in :attr:`object`.
+
+ .. attribute:: end
+
+ The index after the last invalid data in :attr:`object`.
+
.. versionadded:: 2.0
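
For illustration, a minimal interactive sketch of inspecting the attributes
described above when a codec fails (CPython 2.7 behaviour)::

   >>> try:
   ...     '\xff'.decode('ascii')
   ... except UnicodeDecodeError as err:
   ...     print err.encoding, err.start, err.end, repr(err.object[err.start:err.end])
   ...
   ascii 0 1 '\xff'
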
diff --git a/Doc/library/fcntl.rst b/Doc/library/fcntl.rst
index 40ae08b..562e78f 100644
--- a/Doc/library/fcntl.rst
+++ b/Doc/library/fcntl.rst
@@ -1,6 +1,5 @@
-
-:mod:`fcntl` --- The :func:`fcntl` and :func:`ioctl` system calls
-=================================================================
+:mod:`fcntl` --- The ``fcntl`` and ``ioctl`` system calls
+=========================================================
.. module:: fcntl
:platform: Unix
@@ -18,17 +17,18 @@ interface to the :c:func:`fcntl` and :c:func:`ioctl` Unix routines.
All functions in this module take a file descriptor *fd* as their first
argument. This can be an integer file descriptor, such as returned by
``sys.stdin.fileno()``, or a file object, such as ``sys.stdin`` itself, which
-provides a :meth:`fileno` which returns a genuine file descriptor.
+provides a :meth:`~io.IOBase.fileno` which returns a genuine file descriptor.
The module defines the following functions:
.. function:: fcntl(fd, op[, arg])
- Perform the requested operation on file descriptor *fd* (file objects providing
- a :meth:`fileno` method are accepted as well). The operation is defined by *op*
- and is operating system dependent. These codes are also found in the
- :mod:`fcntl` module. The argument *arg* is optional, and defaults to the integer
+ Perform the operation *op* on file descriptor *fd* (file objects providing
+ a :meth:`~io.IOBase.fileno` method are accepted as well). The values used
+ for *op* are operating system dependent, and are available as constants
+ in the :mod:`fcntl` module, using the same names as used in the relevant C
+ header files. The argument *arg* is optional, and defaults to the integer
value ``0``. When present, it can either be an integer value, or a string.
With the argument missing or an integer value, the return value of this function
is the integer return value of the C :c:func:`fcntl` call. When the argument is
@@ -46,17 +46,21 @@ The module defines the following functions:
.. function:: ioctl(fd, op[, arg[, mutate_flag]])
- This function is identical to the :func:`fcntl` function, except that the
+ This function is identical to the :func:`~fcntl.fcntl` function, except that the
operations are typically defined in the library module :mod:`termios` and the
argument handling is even more complicated.
The op parameter is limited to values that can fit in 32-bits.
+ Additional constants of interest for use as the *op* argument can be
+ found in the :mod:`termios` module, under the same names as used in
+ the relevant C header files.
The parameter *arg* can be one of an integer, absent (treated identically to the
integer ``0``), an object supporting the read-only buffer interface (most likely
a plain Python string) or an object supporting the read-write buffer interface.
- In all but the last case, behaviour is as for the :func:`fcntl` function.
+ In all but the last case, behaviour is as for the :func:`~fcntl.fcntl`
+ function.
If a mutable buffer is passed, then the behaviour is determined by the value of
the *mutate_flag* parameter.
@@ -95,16 +99,16 @@ The module defines the following functions:
.. function:: flock(fd, op)
Perform the lock operation *op* on file descriptor *fd* (file objects providing
- a :meth:`fileno` method are accepted as well). See the Unix manual
+ a :meth:`~io.IOBase.fileno` method are accepted as well). See the Unix manual
:manpage:`flock(2)` for details. (On some systems, this function is emulated
using :c:func:`fcntl`.)
.. function:: lockf(fd, operation, [length, [start, [whence]]])
- This is essentially a wrapper around the :func:`fcntl` locking calls. *fd* is
- the file descriptor of the file to lock or unlock, and *operation* is one of the
- following values:
+ This is essentially a wrapper around the :func:`~fcntl.fcntl` locking calls.
+ *fd* is the file descriptor of the file to lock or unlock, and *operation*
+ is one of the following values:
* :const:`LOCK_UN` -- unlock
* :const:`LOCK_SH` -- acquire a shared lock
@@ -119,13 +123,13 @@ The module defines the following functions:
systems, :const:`LOCK_EX` can only be used if the file descriptor refers to a
file opened for writing.
- *length* is the number of bytes to lock, *start* is the byte offset at which the
- lock starts, relative to *whence*, and *whence* is as with :func:`fileobj.seek`,
- specifically:
+ *length* is the number of bytes to lock, *start* is the byte offset at
+ which the lock starts, relative to *whence*, and *whence* is as with
+ :func:`io.IOBase.seek`, specifically:
- * :const:`0` -- relative to the start of the file (:const:`SEEK_SET`)
- * :const:`1` -- relative to the current buffer position (:const:`SEEK_CUR`)
- * :const:`2` -- relative to the end of the file (:const:`SEEK_END`)
+ * :const:`0` -- relative to the start of the file (:data:`os.SEEK_SET`)
+ * :const:`1` -- relative to the current buffer position (:data:`os.SEEK_CUR`)
+ * :const:`2` -- relative to the end of the file (:data:`os.SEEK_END`)
The default for *start* is 0, which means to start at the beginning of the file.
The default for *length* is 0 which means to lock to the end of the file. The
@@ -150,7 +154,8 @@ lay-out for the *lockdata* variable is system dependent --- therefore using the
.. seealso::
Module :mod:`os`
- If the locking flags :const:`O_SHLOCK` and :const:`O_EXLOCK` are present
- in the :mod:`os` module (on BSD only), the :func:`os.open` function
- provides an alternative to the :func:`lockf` and :func:`flock` functions.
+ If the locking flags :data:`~os.O_SHLOCK` and :data:`~os.O_EXLOCK` are
+ present in the :mod:`os` module (on BSD only), the :func:`os.open`
+ function provides an alternative to the :func:`lockf` and :func:`flock`
+ functions.
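
For illustration, a minimal sketch of advisory locking with :func:`lockf` on
Unix; the file name is hypothetical::

   import fcntl

   with open('counter.txt', 'a') as f:        # hypothetical scratch file
       fcntl.lockf(f, fcntl.LOCK_EX)          # exclusive lock; blocks until acquired
       f.write('one more line\n')
       fcntl.lockf(f, fcntl.LOCK_UN)          # release the lock before closing
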
diff --git a/Doc/library/filecmp.rst b/Doc/library/filecmp.rst
index 25d0701..f9c5fb1 100644
--- a/Doc/library/filecmp.rst
+++ b/Doc/library/filecmp.rst
@@ -54,9 +54,9 @@ The :mod:`filecmp` module defines the following functions:
Example::
>>> import filecmp
- >>> filecmp.cmp('undoc.rst', 'undoc.rst')
+ >>> filecmp.cmp('undoc.rst', 'undoc.rst') # doctest: +SKIP
True
- >>> filecmp.cmp('undoc.rst', 'index.rst')
+ >>> filecmp.cmp('undoc.rst', 'index.rst') # doctest: +SKIP
False
@@ -75,6 +75,9 @@ The :class:`dircmp` class
'tags']``. *hide* is a list of names to hide, and defaults to ``[os.curdir,
os.pardir]``.
+ The :class:`dircmp` class compares files by doing *shallow* comparisons
+ as described for :func:`filecmp.cmp`.
+
The :class:`dircmp` class provides the following methods:
@@ -94,7 +97,7 @@ The :class:`dircmp` class
Print a comparison between *a* and *b* and common subdirectories
(recursively).
- The :class:`dircmp` offers a number of interesting attributes that may be
+ The :class:`dircmp` class offers a number of interesting attributes that may be
used to get various bits of information about the directory trees being
compared.
@@ -103,6 +106,16 @@ The :class:`dircmp` class
to compute are used.
+ .. attribute:: left
+
+ The directory *a*.
+
+
+ .. attribute:: right
+
+ The directory *b*.
+
+
.. attribute:: left_list
Files and subdirectories in *a*, filtered by *hide* and *ignore*.
@@ -146,12 +159,14 @@ The :class:`dircmp` class
.. attribute:: same_files
- Files which are identical in both *a* and *b*.
+ Files which are identical in both *a* and *b*, using the class's
+ file comparison operator.
.. attribute:: diff_files
- Files which are in both *a* and *b*, whose contents differ.
+ Files which are in both *a* and *b*, whose contents differ according
+ to the class's file comparison operator.
.. attribute:: funny_files
@@ -163,3 +178,18 @@ The :class:`dircmp` class
A dictionary mapping names in :attr:`common_dirs` to :class:`dircmp` objects.
+
+Here is a simplified example of using the ``subdirs`` attribute to search
+recursively through two directories to show common different files::
+
+ >>> from filecmp import dircmp
+ >>> def print_diff_files(dcmp):
+ ... for name in dcmp.diff_files:
+ ... print "diff_file %s found in %s and %s" % (name, dcmp.left,
+ ... dcmp.right)
+ ... for sub_dcmp in dcmp.subdirs.values():
+ ... print_diff_files(sub_dcmp)
+ ...
+ >>> dcmp = dircmp('dir1', 'dir2') # doctest: +SKIP
+ >>> print_diff_files(dcmp) # doctest: +SKIP
+
diff --git a/Doc/library/fileinput.rst b/Doc/library/fileinput.rst
index 172a643..710bef3 100644
--- a/Doc/library/fileinput.rst
+++ b/Doc/library/fileinput.rst
@@ -50,7 +50,7 @@ provided by this module.
The following function is the primary interface of this module:
-.. function:: input([files[, inplace[, backup[, mode[, openhook]]]]])
+.. function:: input([files[, inplace[, backup[, bufsize[, mode[, openhook]]]]]])
Create an instance of the :class:`FileInput` class. The instance will be used
as global state for the functions of this module, and is also returned to use
@@ -122,15 +122,16 @@ The class which implements the sequence behavior provided by the module is
available for subclassing as well:
-.. class:: FileInput([files[, inplace[, backup[, mode[, openhook]]]]])
+.. class:: FileInput([files[, inplace[, backup[,bufsize[, mode[, openhook]]]]]])
Class :class:`FileInput` is the implementation; its methods :meth:`filename`,
:meth:`fileno`, :meth:`lineno`, :meth:`filelineno`, :meth:`isfirstline`,
- :meth:`isstdin`, :meth:`nextfile` and :meth:`close` correspond to the functions
- of the same name in the module. In addition it has a :meth:`readline` method
- which returns the next input line, and a :meth:`__getitem__` method which
- implements the sequence behavior. The sequence must be accessed in strictly
- sequential order; random access and :meth:`readline` cannot be mixed.
+ :meth:`isstdin`, :meth:`nextfile` and :meth:`close` correspond to the
+ functions of the same name in the module. In addition it has a
+ :meth:`~file.readline` method which returns the next input line,
+ and a :meth:`__getitem__` method which implements the sequence behavior.
+ The sequence must be accessed in strictly sequential order; random access
+ and :meth:`~file.readline` cannot be mixed.
With *mode* you can specify which file mode will be passed to :func:`open`. It
must be one of ``'r'``, ``'rU'``, ``'U'`` and ``'rb'``.
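
For illustration, a minimal sketch of the module-level interface; the input
file names are hypothetical::

   import fileinput

   # Echo each line prefixed with the file it came from and its line number.
   for line in fileinput.input(['a.txt', 'b.txt']):
       print "%s:%d: %s" % (fileinput.filename(), fileinput.filelineno(), line),
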
diff --git a/Doc/library/fl.rst b/Doc/library/fl.rst
index 540cac9..c689372 100644
--- a/Doc/library/fl.rst
+++ b/Doc/library/fl.rst
@@ -9,7 +9,7 @@
.. deprecated:: 2.6
- The :mod:`fl` module has been deprecated for removal in Python 3.0.
+ The :mod:`fl` module has been removed in Python 3.
.. index::
@@ -487,7 +487,7 @@ FORMS objects have these data attributes; see the FORMS documentation:
.. deprecated:: 2.6
- The :mod:`FL` module has been deprecated for removal in Python 3.0.
+ The :mod:`FL` module has been removed in Python 3.
This module defines symbolic constants needed to use the built-in module
@@ -509,7 +509,7 @@ source for a complete list of the defined names. Suggested use::
.. deprecated:: 2.6
- The :mod:`flp` module has been deprecated for removal in Python 3.0.
+ The :mod:`flp` module has been removed in Python 3.
This module defines functions that can read form definitions created by the
diff --git a/Doc/library/fm.rst b/Doc/library/fm.rst
index 6bf6808..c7eb4f3 100644
--- a/Doc/library/fm.rst
+++ b/Doc/library/fm.rst
@@ -8,7 +8,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`fm` module has been deprecated for removal in Python 3.0.
+ The :mod:`fm` module has been removed in Python 3.
diff --git a/Doc/library/fnmatch.rst b/Doc/library/fnmatch.rst
index 4911980..b14c551 100644
--- a/Doc/library/fnmatch.rst
+++ b/Doc/library/fnmatch.rst
@@ -29,6 +29,9 @@ special characters used in shell-style wildcards are:
| ``[!seq]`` | matches any character not in *seq* |
+------------+------------------------------------+
+For a literal match, wrap the meta-characters in brackets.
+For example, ``'[?]'`` matches the character ``'?'``.
+
.. index:: module: glob
Note that the filename separator (``'/'`` on Unix) is *not* special to this
@@ -76,8 +79,6 @@ patterns.
Return the shell-style *pattern* converted to a regular expression.
- Be aware there is no way to quote meta-characters.
-
Example:
>>> import fnmatch, re
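
For illustration, the bracket trick mentioned above for matching a literal
meta-character::

   >>> import fnmatch
   >>> fnmatch.fnmatch('backup?.tar', 'backup[?].tar')
   True
   >>> fnmatch.fnmatch('backup1.tar', 'backup[?].tar')
   False
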
diff --git a/Doc/library/formatter.rst b/Doc/library/formatter.rst
index ba09b8e..e696fec 100644
--- a/Doc/library/formatter.rst
+++ b/Doc/library/formatter.rst
@@ -341,10 +341,10 @@ this module. Most applications will need to derive new writer classes from the
output.
-.. class:: DumbWriter([file[, maxcol=72]])
+.. class:: DumbWriter(file=None, maxcol=72)
Simple writer class which writes output on the file object passed in as *file*
- or, if *file* is omitted, on standard output. The output is simply word-wrapped
+ or, if *file* is None, on standard output. The output is simply word-wrapped
to the number of columns specified by *maxcol*. This class is suitable for
reflowing a sequence of paragraphs.
diff --git a/Doc/library/fpectl.rst b/Doc/library/fpectl.rst
index ef030f0..8ca671b 100644
--- a/Doc/library/fpectl.rst
+++ b/Doc/library/fpectl.rst
@@ -113,8 +113,8 @@ The :mod:`fpectl` module is not thread-safe.
.. seealso::
Some files in the source distribution may be interesting in learning more about
- how this module operates. The include file :file:`Include/pyfpe.h` discusses the
- implementation of this module at some length. :file:`Modules/fpetestmodule.c`
+ how this module operates. The include file :source:`Include/pyfpe.h` discusses the
+ implementation of this module at some length. :source:`Modules/fpetestmodule.c`
gives several examples of use. Many additional examples can be found in
- :file:`Objects/floatobject.c`.
+ :source:`Objects/floatobject.c`.
diff --git a/Doc/library/fpformat.rst b/Doc/library/fpformat.rst
index 3448585..1713301 100644
--- a/Doc/library/fpformat.rst
+++ b/Doc/library/fpformat.rst
@@ -7,7 +7,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`fpformat` module has been removed in Python 3.0.
+ The :mod:`fpformat` module has been removed in Python 3.
.. sectionauthor:: Moshe Zadka <moshez@zadka.site.co.il>
diff --git a/Doc/library/fractions.rst b/Doc/library/fractions.rst
index 4d10cbd..81b419e 100644
--- a/Doc/library/fractions.rst
+++ b/Doc/library/fractions.rst
@@ -57,7 +57,6 @@ another rational number, or from a string.
Fraction(0, 1)
>>> Fraction('3/7')
Fraction(3, 7)
- [40794 refs]
>>> Fraction(' -3/7 ')
Fraction(-3, 7)
>>> Fraction('1.414213 \t\n')
diff --git a/Doc/library/ftplib.rst b/Doc/library/ftplib.rst
index b8f5b4e..b42cf64 100644
--- a/Doc/library/ftplib.rst
+++ b/Doc/library/ftplib.rst
@@ -23,16 +23,17 @@ see Internet :rfc:`959`.
Here's a sample session using the :mod:`ftplib` module::
>>> from ftplib import FTP
- >>> ftp = FTP('ftp.cwi.nl') # connect to host, default port
- >>> ftp.login() # user anonymous, passwd anonymous@
- >>> ftp.retrlines('LIST') # list directory contents
- total 24418
- drwxrwsr-x 5 ftp-usr pdmaint 1536 Mar 20 09:48 .
- dr-xr-srwt 105 ftp-usr pdmaint 1536 Mar 21 14:32 ..
- -rw-r--r-- 1 ftp-usr pdmaint 5305 Mar 20 09:48 INDEX
- .
- .
- .
+ >>> ftp = FTP('ftp.debian.org') # connect to host, default port
+ >>> ftp.login() # user anonymous, passwd anonymous@
+ '230 Login successful.'
+ >>> ftp.cwd('debian') # change into "debian" directory
+ >>> ftp.retrlines('LIST') # list directory contents
+ -rw-rw-r-- 1 1176 1176 1063 Jun 15 10:18 README
+ ...
+ drwxr-sr-x 5 1176 1176 4096 Dec 19 2000 pool
+ drwxr-sr-x 4 1176 1176 4096 Nov 17 2008 project
+ drwxr-xr-x 3 1176 1176 4096 Oct 10 2012 tools
+ '226 Directory send OK.'
>>> ftp.retrbinary('RETR README', open('README', 'wb').write)
'226 Transfer complete.'
>>> ftp.quit()
@@ -264,8 +265,8 @@ followed by ``lines`` for the text version or ``binary`` for the binary version.
Store a file in ASCII transfer mode. *command* should be an appropriate
``STOR`` command (see :meth:`storbinary`). Lines are read until EOF from the
- open file object *file* using its :meth:`readline` method to provide the data to
- be stored. *callback* is an optional single parameter callable
+ open file object *file* using its :meth:`~file.readline` method to provide
+ the data to be stored. *callback* is an optional single parameter callable
that is called on each line after it is sent.
.. versionchanged:: 2.6
@@ -370,10 +371,10 @@ followed by ``lines`` for the text version or ``binary`` for the binary version.
.. method:: FTP.close()
Close the connection unilaterally. This should not be applied to an already
- closed connection such as after a successful call to :meth:`quit`. After this
- call the :class:`FTP` instance should not be used any more (after a call to
- :meth:`close` or :meth:`quit` you cannot reopen the connection by issuing
- another :meth:`login` method).
+ closed connection such as after a successful call to :meth:`~FTP.quit`.
+ After this call the :class:`FTP` instance should not be used any more (after
+ a call to :meth:`close` or :meth:`~FTP.quit` you cannot reopen the
+ connection by issuing another :meth:`login` method).
FTP_TLS Objects
@@ -387,7 +388,8 @@ FTP_TLS Objects
.. method:: FTP_TLS.auth()
- Set up secure control connection by using TLS or SSL, depending on what specified in :meth:`ssl_version` attribute.
+ Set up a secure control connection by using TLS or SSL, depending on what is
+ specified in the :attr:`ssl_version` attribute.
.. method:: FTP_TLS.prot_p()
@@ -396,5 +398,3 @@ FTP_TLS Objects
.. method:: FTP_TLS.prot_c()
Set up clear text data connection.
-
-
diff --git a/Doc/library/functions.rst b/Doc/library/functions.rst
index 5f7bf4d..80ce681 100644
--- a/Doc/library/functions.rst
+++ b/Doc/library/functions.rst
@@ -18,16 +18,26 @@ available. They are listed here in alphabetical order.
:func:`bool` :func:`filter` :func:`len` :func:`range` :func:`type`
:func:`bytearray` :func:`float` :func:`list` :func:`raw_input` :func:`unichr`
:func:`callable` :func:`format` :func:`locals` :func:`reduce` :func:`unicode`
-:func:`chr` :func:`frozenset` :func:`long` :func:`reload` :func:`vars`
-:func:`classmethod` :func:`getattr` :func:`map` :func:`repr` :func:`xrange`
+:func:`chr` |func-frozenset|_ :func:`long` :func:`reload` :func:`vars`
+:func:`classmethod` :func:`getattr` :func:`map` |func-repr|_ :func:`xrange`
:func:`cmp` :func:`globals` :func:`max` :func:`reversed` :func:`zip`
-:func:`compile` :func:`hasattr` :func:`memoryview` :func:`round` :func:`__import__`
-:func:`complex` :func:`hash` :func:`min` :func:`set` :func:`apply`
+:func:`compile` :func:`hasattr` |func-memoryview|_ :func:`round` :func:`__import__`
+:func:`complex` :func:`hash` :func:`min` |func-set|_ :func:`apply`
:func:`delattr` :func:`help` :func:`next` :func:`setattr` :func:`buffer`
-:func:`dict` :func:`hex` :func:`object` :func:`slice` :func:`coerce`
+|func-dict|_ :func:`hex` :func:`object` :func:`slice` :func:`coerce`
:func:`dir` :func:`id` :func:`oct` :func:`sorted` :func:`intern`
=================== ================= ================== ================= ====================
+.. using :func:`dict` would create a link to another page, so local targets are
+ used, with replacement texts to make the output in the table consistent
+
+.. |func-dict| replace:: ``dict()``
+.. |func-frozenset| replace:: ``frozenset()``
+.. |func-memoryview| replace:: ``memoryview()``
+.. |func-repr| replace:: ``repr()``
+.. |func-set| replace:: ``set()``
+
+
.. function:: abs(x)
Return the absolute value of a number. The argument may be a plain or long
@@ -37,7 +47,7 @@ available. They are listed here in alphabetical order.
.. function:: all(iterable)
- Return True if all elements of the *iterable* are true (or if the iterable
+ Return ``True`` if all elements of the *iterable* are true (or if the iterable
is empty). Equivalent to::
def all(iterable):
@@ -51,8 +61,8 @@ available. They are listed here in alphabetical order.
.. function:: any(iterable)
- Return True if any element of the *iterable* is true. If the iterable
- is empty, return False. Equivalent to::
+ Return ``True`` if any element of the *iterable* is true. If the iterable
+ is empty, return ``False``. Equivalent to::
def any(iterable):
for element in iterable:
@@ -153,9 +163,10 @@ available. They are listed here in alphabetical order.
instance method receives the instance. To declare a class method, use this
idiom::
- class C:
+ class C(object):
@classmethod
- def f(cls, arg1, arg2, ...): ...
+ def f(cls, arg1, arg2, ...):
+ ...
The ``@classmethod`` form is a function :term:`decorator` -- see the description
of function definitions in :ref:`function` for details.
@@ -188,8 +199,10 @@ available. They are listed here in alphabetical order.
Compile the *source* into a code or AST object. Code objects can be executed
by an :keyword:`exec` statement or evaluated by a call to :func:`eval`.
- *source* can either be a string or an AST object. Refer to the :mod:`ast`
- module documentation for information on how to work with AST objects.
+ *source* can either be a Unicode string, a *Latin-1* encoded string or an
+ AST object.
+ Refer to the :mod:`ast` module documentation for information on how to work
+ with AST objects.
The *filename* argument should give the file from which the code was read;
pass some recognizable value if it wasn't read from a file (``'<string>'`` is
@@ -213,8 +226,8 @@ available. They are listed here in alphabetical order.
Future statements are specified by bits which can be bitwise ORed together to
specify multiple statements. The bitfield required to specify a given feature
- can be found as the :attr:`compiler_flag` attribute on the :class:`_Feature`
- instance in the :mod:`__future__` module.
+ can be found as the :attr:`~__future__._Feature.compiler_flag` attribute on
+ the :class:`~__future__._Feature` instance in the :mod:`__future__` module.
This function raises :exc:`SyntaxError` if the compiled source is invalid,
and :exc:`TypeError` if the source contains null bytes.
@@ -247,6 +260,13 @@ available. They are listed here in alphabetical order.
the function serves as a numeric conversion function like :func:`int`,
:func:`long` and :func:`float`. If both arguments are omitted, returns ``0j``.
+ .. note::
+
+ When converting from a string, the string must not contain whitespace
+ around the central ``+`` or ``-`` operator. For example,
+ ``complex('1+2j')`` is fine, but ``complex('1 + 2j')`` raises
+ :exc:`ValueError`.
+
The complex type is described in :ref:`typesnumeric`.
@@ -258,14 +278,18 @@ available. They are listed here in alphabetical order.
example, ``delattr(x, 'foobar')`` is equivalent to ``del x.foobar``.
-.. function:: dict([arg])
+.. _func-dict:
+.. function:: dict(**kwarg)
+ dict(mapping, **kwarg)
+ dict(iterable, **kwarg)
:noindex:
- Create a new data dictionary, optionally with items taken from *arg*.
- The dictionary type is described in :ref:`typesmapping`.
+ Create a new dictionary. The :class:`dict` object is the dictionary class.
+ See :class:`dict` and :ref:`typesmapping` for documentation about this
+ class.
- For other containers see the built in :class:`list`, :class:`set`, and
- :class:`tuple` classes, and the :mod:`collections` module.
+ For other containers see the built-in :class:`list`, :class:`set`, and
+ :class:`tuple` classes, as well as the :mod:`collections` module.
.. function:: dir([object])
@@ -337,7 +361,7 @@ available. They are listed here in alphabetical order.
Using :func:`divmod` with complex numbers is deprecated.
-.. function:: enumerate(sequence[, start=0])
+.. function:: enumerate(sequence, start=0)
Return an enumerate object. *sequence* must be a sequence, an
:term:`iterator`, or some other object which supports iteration. The
@@ -366,9 +390,9 @@ available. They are listed here in alphabetical order.
.. function:: eval(expression[, globals[, locals]])
- The arguments are a string and optional globals and locals. If provided,
- *globals* must be a dictionary. If provided, *locals* can be any mapping
- object.
+ The arguments are a Unicode or *Latin-1* encoded string and optional
+ globals and locals. If provided, *globals* must be a dictionary.
+ If provided, *locals* can be any mapping object.
.. versionchanged:: 2.4
formerly *locals* was required to be a dictionary.
@@ -413,7 +437,10 @@ available. They are listed here in alphabetical order.
The arguments are a file name and two optional dictionaries. The file is parsed
and evaluated as a sequence of Python statements (similarly to a module) using
the *globals* and *locals* dictionaries as global and local namespace. If
- provided, *locals* can be any mapping object.
+ provided, *locals* can be any mapping object. Remember that at module level,
+ globals and locals are the same dictionary. If two separate objects are
+ passed as *globals* and *locals*, the code will be executed as if it were
+ embedded in a class definition.
.. versionchanged:: 2.4
formerly *locals* was required to be a dictionary.
@@ -431,7 +458,7 @@ available. They are listed here in alphabetical order.
used reliably to modify a function's locals.
-.. function:: file(filename[, mode[, bufsize]])
+.. function:: file(name[, mode[, buffering]])
Constructor function for the :class:`file` type, described further in section
:ref:`bltin-file-objects`. The constructor's arguments are the same as those
@@ -506,14 +533,17 @@ available. They are listed here in alphabetical order.
.. versionadded:: 2.6
+.. _func-frozenset:
.. function:: frozenset([iterable])
:noindex:
- Return a frozenset object, optionally with elements taken from *iterable*.
- The frozenset type is described in :ref:`types-set`.
+ Return a new :class:`frozenset` object, optionally with elements taken from
+ *iterable*. ``frozenset`` is a built-in class. See :class:`frozenset` and
+ :ref:`types-set` for documentation about this class.
- For other containers see the built in :class:`dict`, :class:`list`, and
- :class:`tuple` classes, and the :mod:`collections` module.
+ For other containers see the built-in :class:`set`, :class:`list`,
+ :class:`tuple`, and :class:`dict` classes, as well as the :mod:`collections`
+ module.
.. versionadded:: 2.4
@@ -566,8 +596,21 @@ available. They are listed here in alphabetical order.
.. function:: hex(x)
- Convert an integer number (of any size) to a hexadecimal string. The result is a
- valid Python expression.
+ Convert an integer number (of any size) to a lowercase hexadecimal string
+ prefixed with "0x", for example:
+
+ >>> hex(255)
+ '0xff'
+ >>> hex(-42)
+ '-0x2a'
+ >>> hex(1L)
+ '0x1L'
+
+ If *x* is not a Python :class:`int` or :class:`long` object, it has to
+ define an :meth:`__index__` method that returns an integer.
+
+ See also :func:`int` for converting a hexadecimal string to an
+ integer using a base of 16.
.. note::
@@ -602,20 +645,26 @@ available. They are listed here in alphabetical order.
Consider using the :func:`raw_input` function for general input from users.
-.. function:: int([x[, base]])
+.. function:: int(x=0)
+ int(x, base=10)
+
+ Convert a number or string *x* to an integer, or return ``0`` if no
+ arguments are given. If *x* is a number, it can be a plain integer, a long
+ integer, or a floating point number. If *x* is floating point, the conversion
+ truncates towards zero. If the argument is outside the integer range, the
+ function returns a long object instead.
- Convert a string or number to a plain integer. If the argument is a string,
- it must contain a possibly signed decimal number representable as a Python
- integer, possibly embedded in whitespace. The *base* parameter gives the
- base for the conversion (which is 10 by default) and may be any integer in
- the range [2, 36], or zero. If *base* is zero, the proper radix is
- determined based on the contents of string; the interpretation is the same as
- for integer literals. (See :ref:`numbers`.) If *base* is specified and *x*
- is not a string, :exc:`TypeError` is raised. Otherwise, the argument may be a
- plain or long integer or a floating point number. Conversion of floating
- point numbers to integers truncates (towards zero). If the argument is
- outside the integer range a long object will be returned instead. If no
- arguments are given, returns ``0``.
+ If *x* is not a number or if *base* is given, then *x* must be a string or
+ Unicode object representing an :ref:`integer literal <integers>` in radix
+ *base*. Optionally, the literal can be
+ preceded by ``+`` or ``-`` (with no space in between) and surrounded by
+ whitespace. A base-n literal consists of the digits 0 to n-1, with ``a``
+ to ``z`` (or ``A`` to ``Z``) having
+ values 10 to 35. The default *base* is 10. The allowed values are 0 and 2-36.
+ Base-2, -8, and -16 literals can be optionally prefixed with ``0b``/``0B``,
+ ``0o``/``0O``/``0``, or ``0x``/``0X``, as with integer literals in code.
+ Base 0 means to interpret the string exactly as an integer literal, so that
+ the actual base is 2, 8, 10, or 16.
The integer type is described in :ref:`typesnumeric`.
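
For illustration, a few interactive examples of the string form of :func:`int`
described above::

   >>> int('ff', 16)
   255
   >>> int('0x1f', 0)        # base 0: radix taken from the literal's prefix
   31
   >>> int('  -42  ')
   -42
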
@@ -665,7 +714,7 @@ available. They are listed here in alphabetical order.
One useful application of the second form of :func:`iter` is to read lines of
a file until a certain line is reached. The following example reads a file
- until the :meth:`readline` method returns an empty string::
+ until the :meth:`~io.TextIOBase.readline` method returns an empty string::
with open('mydata.txt') as fp:
for line in iter(fp.readline, ''):
@@ -677,7 +726,8 @@ available. They are listed here in alphabetical order.
.. function:: len(s)
Return the length (the number of items) of an object. The argument may be a
- sequence (string, tuple or list) or a mapping (dictionary).
+ sequence (such as a string, bytes, tuple, list, or range) or a collection
+ (such as a dictionary, set, or frozen set).
.. function:: list([iterable])
@@ -706,7 +756,8 @@ available. They are listed here in alphabetical order.
affect the values of local and free variables used by the interpreter.
-.. function:: long([x[, base]])
+.. function:: long(x=0)
+ long(x, base=10)
Convert a string or number to a long integer. If the argument is a string, it
must contain a possibly signed number of arbitrary size, possibly embedded in
@@ -732,11 +783,16 @@ available. They are listed here in alphabetical order.
the result is always a list.
-.. function:: max(iterable[, args...][key])
+.. function:: max(iterable[, key])
+ max(arg1, arg2, *args[, key])
- With a single argument *iterable*, return the largest item of a non-empty
- iterable (such as a string, tuple or list). With more than one argument, return
- the largest of the arguments.
+ Return the largest item in an iterable or the largest of two or more
+ arguments.
+
+ If one positional argument is provided, *iterable* must be a non-empty
+ iterable (such as a non-empty string, tuple or list). The largest item
+ in the iterable is returned. If two or more positional arguments are
+ provided, the largest of the positional arguments is returned.
The optional *key* argument specifies a one-argument ordering function like that
used for :meth:`list.sort`. The *key* argument, if supplied, must be in keyword
@@ -745,7 +801,7 @@ available. They are listed here in alphabetical order.
.. versionchanged:: 2.5
Added support for the optional *key* argument.
-
+.. _func-memoryview:
.. function:: memoryview(obj)
:noindex:
@@ -753,11 +809,16 @@ available. They are listed here in alphabetical order.
:ref:`typememoryview` for more information.
-.. function:: min(iterable[, args...][key])
+.. function:: min(iterable[, key])
+ min(arg1, arg2, *args[, key])
+
+ Return the smallest item in an iterable or the smallest of two or more
+ arguments.
- With a single argument *iterable*, return the smallest item of a non-empty
- iterable (such as a string, tuple or list). With more than one argument, return
- the smallest of the arguments.
+ If one positional argument is provided, *iterable* must be a non-empty
+ iterable (such as a non-empty string, tuple or list). The smallest item
+ in the iterable is returned. If two or more positional arguments are
+ provided, the smallest of the positional arguments is returned.
The optional *key* argument specifies a one-argument ordering function like that
used for :meth:`list.sort`. The *key* argument, if supplied, must be in keyword
@@ -829,26 +890,29 @@ available. They are listed here in alphabetical order.
The optional *buffering* argument specifies the file's desired buffer size: 0
means unbuffered, 1 means line buffered, any other positive value means use a
- buffer of (approximately) that size. A negative *buffering* means to use the
- system default, which is usually line buffered for tty devices and fully
- buffered for other files. If omitted, the system default is used. [#]_
+ buffer of (approximately) that size (in bytes). A negative *buffering* means
+ to use the system default, which is usually line buffered for tty devices and
+ fully buffered for other files. If omitted, the system default is used. [#]_
- Modes ``'r+'``, ``'w+'`` and ``'a+'`` open the file for updating (note that
- ``'w+'`` truncates the file). Append ``'b'`` to the mode to open the file in
+ Modes ``'r+'``, ``'w+'`` and ``'a+'`` open the file for updating (reading and writing);
+ note that ``'w+'`` truncates the file. Append ``'b'`` to the mode to open the file in
binary mode, on systems that differentiate between binary and text files; on
systems that don't have this distinction, adding the ``'b'`` has no effect.
+ .. index::
+ single: universal newlines; open() built-in function
+
In addition to the standard :c:func:`fopen` values *mode* may be ``'U'`` or
- ``'rU'``. Python is usually built with universal newline support; supplying
- ``'U'`` opens the file as a text file, but lines may be terminated by any of the
- following: the Unix end-of-line convention ``'\n'``, the Macintosh convention
- ``'\r'``, or the Windows convention ``'\r\n'``. All of these external
- representations are seen as ``'\n'`` by the Python program. If Python is built
- without universal newline support a *mode* with ``'U'`` is the same as normal
- text mode. Note that file objects so opened also have an attribute called
- :attr:`newlines` which has a value of ``None`` (if no newlines have yet been
- seen), ``'\n'``, ``'\r'``, ``'\r\n'``, or a tuple containing all the newline
- types seen.
+ ``'rU'``. Python is usually built with :term:`universal newlines` support;
+ supplying ``'U'`` opens the file as a text file, but lines may be terminated
+ by any of the following: the Unix end-of-line convention ``'\n'``, the
+ Macintosh convention ``'\r'``, or the Windows convention ``'\r\n'``. All of
+ these external representations are seen as ``'\n'`` by the Python program.
+ If Python is built without universal newlines support a *mode* with ``'U'``
+ is the same as normal text mode. Note that file objects so opened also have
+ an attribute called :attr:`newlines` which has a value of ``None`` (if no
+ newlines have yet been seen), ``'\n'``, ``'\r'``, ``'\r\n'``, or a tuple
+ containing all the newline types seen.
Python enforces that the mode, after stripping ``'U'``, begins with ``'r'``,
``'w'`` or ``'a'``.
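
For illustration, a minimal sketch of universal-newline reading; the file name
and the per-line handler are hypothetical::

   # '\r', '\n' and '\r\n' line endings all appear as '\n' to the program.
   f = open('notes.txt', 'rU')
   for line in f:
       handle(line)          # hypothetical per-line handler
   print f.newlines          # newline style(s) seen so far, or None
   f.close()
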
@@ -894,16 +958,16 @@ available. They are listed here in alphabetical order.
accidents.)
-.. function:: print([object, ...][, sep=' '][, end='\\n'][, file=sys.stdout])
+.. function:: print(*objects, sep=' ', end='\\n', file=sys.stdout)
- Print *object*\(s) to the stream *file*, separated by *sep* and followed by
+ Print *objects* to the stream *file*, separated by *sep* and followed by
*end*. *sep*, *end* and *file*, if present, must be given as keyword
arguments.
All non-keyword arguments are converted to strings like :func:`str` does and
written to the stream, separated by *sep* and followed by *end*. Both *sep*
and *end* must be strings; they can also be ``None``, which means to use the
- default values. If no *object* is given, :func:`print` will just write
+ default values. If no *objects* are given, :func:`print` will just write
*end*.
The *file* argument must be an object with a ``write(string)`` method; if it
@@ -963,10 +1027,10 @@ available. They are listed here in alphabetical order.
turns the :meth:`voltage` method into a "getter" for a read-only attribute
with the same name.
- A property object has :attr:`getter`, :attr:`setter`, and :attr:`deleter`
- methods usable as decorators that create a copy of the property with the
- corresponding accessor function set to the decorated function. This is
- best explained with an example::
+ A property object has :attr:`~property.getter`, :attr:`~property.setter`,
+ and :attr:`~property.deleter` methods usable as decorators that create a
+ copy of the property with the corresponding accessor function set to the
+ decorated function. This is best explained with an example::
class C(object):
def __init__(self):
@@ -1001,7 +1065,8 @@ available. They are listed here in alphabetical order.
The ``getter``, ``setter``, and ``deleter`` attributes were added.
-.. function:: range([start,] stop[, step])
+.. function:: range(stop)
+ range(start, stop[, step])
This is a versatile function to create lists containing arithmetic progressions.
It is most often used in :keyword:`for` loops. The arguments must be plain
@@ -1065,7 +1130,7 @@ available. They are listed here in alphabetical order.
except StopIteration:
raise TypeError('reduce() of empty sequence with no initial value')
accum_value = initializer
- for x in iterable:
+ for x in it:
accum_value = function(accum_value, x)
return accum_value
@@ -1131,6 +1196,7 @@ available. They are listed here in alphabetical order.
continue to use the old class definition. The same is true for derived classes.
+.. _func-repr:
.. function:: repr(object)
Return a string containing a printable representation of an object. This is
@@ -1157,13 +1223,14 @@ available. They are listed here in alphabetical order.
Added the possibility to write a custom :meth:`__reversed__` method.
-.. function:: round(x[, n])
+.. function:: round(number[, ndigits])
- Return the floating point value *x* rounded to *n* digits after the decimal
- point. If *n* is omitted, it defaults to zero. The result is a floating point
- number. Values are rounded to the closest multiple of 10 to the power minus
- *n*; if two multiples are equally close, rounding is done away from 0 (so. for
- example, ``round(0.5)`` is ``1.0`` and ``round(-0.5)`` is ``-1.0``).
+ Return the floating point value *number* rounded to *ndigits* digits after
+ the decimal point. If *ndigits* is omitted, it defaults to zero. The result
+ is a floating point number. Values are rounded to the closest multiple of
+ 10 to the power minus *ndigits*; if two multiples are equally close,
+ rounding is done away from 0 (so, for example, ``round(0.5)`` is ``1.0`` and
+ ``round(-0.5)`` is ``-1.0``).
.. note::
@@ -1174,14 +1241,18 @@ available. They are listed here in alphabetical order.
can't be represented exactly as a float. See :ref:`tut-fp-issues` for
more information.
+
+.. _func-set:
.. function:: set([iterable])
:noindex:
- Return a new set, optionally with elements taken from *iterable*.
- The set type is described in :ref:`types-set`.
+ Return a new :class:`set` object, optionally with elements taken from
+ *iterable*. ``set`` is a built-in class. See :class:`set` and
+ :ref:`types-set` for documentation about this class.
- For other containers see the built in :class:`dict`, :class:`list`, and
- :class:`tuple` classes, and the :mod:`collections` module.
+ For other containers see the built-in :class:`frozenset`, :class:`list`,
+ :class:`tuple`, and :class:`dict` classes, as well as the :mod:`collections`
+ module.
.. versionadded:: 2.4
@@ -1195,19 +1266,20 @@ available. They are listed here in alphabetical order.
``x.foobar = 123``.
-.. function:: slice([start,] stop[, step])
+.. function:: slice(stop)
+ slice(start, stop[, step])
.. index:: single: Numerical Python
Return a :term:`slice` object representing the set of indices specified by
``range(start, stop, step)``. The *start* and *step* arguments default to
- ``None``. Slice objects have read-only data attributes :attr:`start`,
- :attr:`stop` and :attr:`step` which merely return the argument values (or their
- default). They have no other explicit functionality; however they are used by
- Numerical Python and other third party extensions. Slice objects are also
- generated when extended indexing syntax is used. For example:
- ``a[start:stop:step]`` or ``a[start:stop, i]``. See :func:`itertools.islice`
- for an alternate version that returns an iterator.
+ ``None``. Slice objects have read-only data attributes :attr:`~slice.start`,
+ :attr:`~slice.stop` and :attr:`~slice.step` which merely return the argument
+ values (or their default). They have no other explicit functionality;
+ however they are used by Numerical Python and other third party extensions.
+ Slice objects are also generated when extended indexing syntax is used. For
+ example: ``a[start:stop:step]`` or ``a[start:stop, i]``. See
+ :func:`itertools.islice` for an alternate version that returns an iterator.
.. function:: sorted(iterable[, cmp[, key[, reverse]]])
@@ -1250,9 +1322,10 @@ available. They are listed here in alphabetical order.
A static method does not receive an implicit first argument. To declare a static
method, use this idiom::
- class C:
+ class C(object):
@staticmethod
- def f(arg1, arg2, ...): ...
+ def f(arg1, arg2, ...):
+ ...
The ``@staticmethod`` form is a function :term:`decorator` -- see the
description of function definitions in :ref:`function` for details.
@@ -1273,7 +1346,7 @@ available. They are listed here in alphabetical order.
Function decorator syntax added.
-.. function:: str([object])
+.. function:: str(object='')
Return a string containing a nicely printable representation of an object. For
strings, this returns the string itself. The difference with ``repr(object)``
@@ -1311,9 +1384,10 @@ available. They are listed here in alphabetical order.
been overridden in a class. The search order is same as that used by
:func:`getattr` except that the *type* itself is skipped.
- The :attr:`__mro__` attribute of the *type* lists the method resolution
- search order used by both :func:`getattr` and :func:`super`. The attribute
- is dynamic and can change whenever the inheritance hierarchy is updated.
+ The :attr:`~class.__mro__` attribute of the *type* lists the method
+ resolution search order used by both :func:`getattr` and :func:`super`. The
+ attribute is dynamic and can change whenever the inheritance hierarchy is
+ updated.
If the second argument is omitted, the super object returned is unbound. If
the second argument is an object, ``isinstance(obj, type)`` must be true. If
@@ -1377,26 +1451,21 @@ available. They are listed here in alphabetical order.
.. function:: type(object)
+ type(name, bases, dict)
.. index:: object: type
- Return the type of an *object*. The return value is a type object. The
- :func:`isinstance` built-in function is recommended for testing the type of an
- object.
-
- With three arguments, :func:`type` functions as a constructor as detailed below.
-
-
-.. function:: type(name, bases, dict)
- :noindex:
+ With one argument, return the type of an *object*. The return value is a
+ type object. The :func:`isinstance` built-in function is recommended for
+ testing the type of an object.
- Return a new type object. This is essentially a dynamic form of the
- :keyword:`class` statement. The *name* string is the class name and becomes the
- :attr:`__name__` attribute; the *bases* tuple itemizes the base classes and
- becomes the :attr:`__bases__` attribute; and the *dict* dictionary is the
- namespace containing definitions for class body and becomes the :attr:`__dict__`
- attribute. For example, the following two statements create identical
- :class:`type` objects:
+ With three arguments, return a new type object. This is essentially a
+ dynamic form of the :keyword:`class` statement. The *name* string is the
+ class name and becomes the :attr:`~class.__name__` attribute; the *bases* tuple
+ itemizes the base classes and becomes the :attr:`~class.__bases__` attribute;
+ and the *dict* dictionary is the namespace containing definitions for class
+ body and becomes the :attr:`~object.__dict__` attribute. For example, the
+ following two statements create identical :class:`type` objects:
>>> class X(object):
... a = 1
@@ -1418,7 +1487,8 @@ available. They are listed here in alphabetical order.
.. versionadded:: 2.0
-.. function:: unicode([object[, encoding [, errors]]])
+.. function:: unicode(object='')
+ unicode(object[, encoding [, errors]])
Return the Unicode string version of *object* using one of the following modes:
@@ -1458,7 +1528,7 @@ available. They are listed here in alphabetical order.
.. function:: vars([object])
- Return the :attr:`__dict__` attribute for a module, class, instance,
+ Return the :attr:`~object.__dict__` attribute for a module, class, instance,
or any other object with a :attr:`__dict__` attribute.
Objects such as modules and instances have an updateable :attr:`__dict__`
@@ -1471,16 +1541,19 @@ available. They are listed here in alphabetical order.
dictionary are ignored.
-.. function:: xrange([start,] stop[, step])
+.. function:: xrange(stop)
+ xrange(start, stop[, step])
- This function is very similar to :func:`range`, but returns an "xrange object"
+ This function is very similar to :func:`range`, but returns an :ref:`xrange
+ object <typesseq-xrange>`
instead of a list. This is an opaque sequence type which yields the same values
as the corresponding list, without actually storing them all simultaneously.
The advantage of :func:`xrange` over :func:`range` is minimal (since
:func:`xrange` still has to create the values when asked for them) except when a
very large range is used on a memory-starved machine or when all of the range's
elements are never used (such as when the loop is usually terminated with
- :keyword:`break`).
+ :keyword:`break`). For more information on xrange objects, see
+ :ref:`typesseq-xrange` and :ref:`typesseq`.
.. impl-detail::
@@ -1535,7 +1608,7 @@ available. They are listed here in alphabetical order.
.. note::
This is an advanced function that is not needed in everyday Python
- programming.
+ programming, unlike :func:`importlib.import_module`.
This function is invoked by the :keyword:`import` statement. It can be
replaced (by importing the :mod:`__builtin__` module and assigning to
@@ -1586,15 +1659,8 @@ available. They are listed here in alphabetical order.
names.
If you simply want to import a module (potentially within a package) by name,
- you can call :func:`__import__` and then look it up in :data:`sys.modules`::
+ use :func:`importlib.import_module`.
- >>> import sys
- >>> name = 'foo.bar.baz'
- >>> __import__(name)
- <module 'foo' from ...>
- >>> baz = sys.modules[name]
- >>> baz
- <module 'foo.bar.baz' from ...>
.. versionchanged:: 2.5
The level parameter was added.
@@ -1631,7 +1697,8 @@ bypass these functions without concerns about missing something important.
``function(*args, **keywords)``.
.. deprecated:: 2.3
- Use the extended call syntax with ``*args`` and ``**keywords`` instead.
+ Use ``function(*args, **keywords)`` instead of
+ ``apply(function, args, keywords)`` (see :ref:`tut-unpacking-arguments`).
.. function:: buffer(object[, offset[, size]])
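
For illustration, the extended call syntax recommended above in place of
:func:`apply`::

   >>> def draw(shape, colour='black', width=1):
   ...     return (shape, colour, width)
   ...
   >>> args = ('circle',)
   >>> keywords = {'colour': 'red', 'width': 3}
   >>> draw(*args, **keywords)       # preferred over apply(draw, args, keywords)
   ('circle', 'red', 3)
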
diff --git a/Doc/library/future_builtins.rst b/Doc/library/future_builtins.rst
index 04f2052..62392eb 100644
--- a/Doc/library/future_builtins.rst
+++ b/Doc/library/future_builtins.rst
@@ -50,6 +50,11 @@ Available builtins are:
Works like :func:`itertools.imap`.
+ .. note::
+
+ In Python 3, :func:`map` does not accept ``None`` for the
+ function argument.
+
.. function:: oct(object)
Works like the built-in :func:`oct`, but instead of :meth:`__oct__` it will
diff --git a/Doc/library/gc.rst b/Doc/library/gc.rst
index 80a2d92..be0063d 100644
--- a/Doc/library/gc.rst
+++ b/Doc/library/gc.rst
@@ -132,8 +132,8 @@ The :mod:`gc` module provides the following functions:
Return a list of objects directly referred to by any of the arguments. The
referents returned are those objects visited by the arguments' C-level
- :attr:`tp_traverse` methods (if any), and may not be all objects actually
- directly reachable. :attr:`tp_traverse` methods are supported only by objects
+ :c:member:`~PyTypeObject.tp_traverse` methods (if any), and may not be all objects actually
+ directly reachable. :c:member:`~PyTypeObject.tp_traverse` methods are supported only by objects
that support garbage collection, and are only required to visit objects that may
be involved in a cycle. So, for example, if an integer is directly reachable
from an argument, that integer object may or may not appear in the result list.
@@ -142,8 +142,8 @@ The :mod:`gc` module provides the following functions:
.. function:: is_tracked(obj)
- Returns True if the object is currently tracked by the garbage collector,
- False otherwise. As a general rule, instances of atomic types aren't
+ Returns ``True`` if the object is currently tracked by the garbage collector,
+ ``False`` otherwise. As a general rule, instances of atomic types aren't
tracked and instances of non-atomic types (containers, user-defined
objects...) are. However, some type-specific optimizations can be present
in order to suppress the garbage collector footprint of simple instances
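
For illustration, typical CPython behaviour of :func:`is_tracked`; the exact
results for containers may vary with the optimisations mentioned above::

   >>> import gc
   >>> gc.is_tracked(42)             # atomic objects are never tracked
   False
   >>> gc.is_tracked([1, 2, 3])      # containers are
   True
   >>> gc.is_tracked({})             # empty dicts start out untracked in CPython
   False
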
diff --git a/Doc/library/gdbm.rst b/Doc/library/gdbm.rst
index aec23e6..f36bb28 100644
--- a/Doc/library/gdbm.rst
+++ b/Doc/library/gdbm.rst
@@ -6,9 +6,9 @@
:synopsis: GNU's reinterpretation of dbm.
.. note::
- The :mod:`gdbm` module has been renamed to :mod:`dbm.gnu` in Python 3.0. The
+ The :mod:`gdbm` module has been renamed to :mod:`dbm.gnu` in Python 3. The
:term:`2to3` tool will automatically adapt imports when converting your
- sources to 3.0.
+ sources to Python 3.
.. index:: module: dbm
@@ -116,6 +116,11 @@ methods:
unwritten data to be written to the disk.
+.. function:: close()
+
+ Close the ``gdbm`` database.
+
+
.. seealso::
Module :mod:`anydbm`
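
For illustration, a minimal sketch of the open/sync/close cycle; the database
file name is hypothetical::

   import gdbm

   db = gdbm.open('cache.gdbm', 'c')     # 'c' creates the file if it does not exist
   db['key'] = 'value'                   # keys and values must be strings
   db.sync()                             # force unwritten data to disk
   db.close()
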
diff --git a/Doc/library/getopt.rst b/Doc/library/getopt.rst
index b3ba614..2dfb102 100644
--- a/Doc/library/getopt.rst
+++ b/Doc/library/getopt.rst
@@ -10,6 +10,7 @@
--------------
.. note::
+
The :mod:`getopt` module is a parser for command line options whose API is
designed to be familiar to users of the C :c:func:`getopt` function. Users who
are unfamiliar with the C :c:func:`getopt` function or who would like to write
@@ -126,7 +127,7 @@ In a script, typical usage is something like this::
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:v", ["help", "output="])
- except getopt.GetoptError, err:
+ except getopt.GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
diff --git a/Doc/library/gl.rst b/Doc/library/gl.rst
index 0d189dc..7ea9cf3 100644
--- a/Doc/library/gl.rst
+++ b/Doc/library/gl.rst
@@ -8,7 +8,7 @@
.. deprecated:: 2.6
- The :mod:`gl` module has been deprecated for removal in Python 3.0.
+ The :mod:`gl` module has been removed in Python 3.
This module provides access to the Silicon Graphics *Graphics Library*. It is
@@ -168,7 +168,7 @@ Here is a tiny but complete example GL program in Python::
.. deprecated:: 2.6
- The :mod:`DEVICE` module has been deprecated for removal in Python 3.0.
+ The :mod:`DEVICE` module has been removed in Python 3.
This module defines the constants used by the Silicon Graphics *Graphics
@@ -186,7 +186,7 @@ module source file for details.
.. deprecated:: 2.6
- The :mod:`GL` module has been deprecated for removal in Python 3.0.
+ The :mod:`GL` module has been removed in Python 3.
This module contains constants used by the Silicon Graphics *Graphics Library*
from the C header file ``<gl/gl.h>``. Read the module source file for details.
diff --git a/Doc/library/glob.rst b/Doc/library/glob.rst
index 68cc9f0..75f67b9 100644
--- a/Doc/library/glob.rst
+++ b/Doc/library/glob.rst
@@ -16,8 +16,13 @@ according to the rules used by the Unix shell. No tilde expansion is done, but
``*``, ``?``, and character ranges expressed with ``[]`` will be correctly
matched. This is done by using the :func:`os.listdir` and
:func:`fnmatch.fnmatch` functions in concert, and not by actually invoking a
-subshell. (For tilde and shell variable expansion, use
-:func:`os.path.expanduser` and :func:`os.path.expandvars`.)
+subshell. Note that unlike :func:`fnmatch.fnmatch`, :mod:`glob` treats
+filenames beginning with a dot (``.``) as special cases. (For tilde and shell
+variable expansion, use :func:`os.path.expanduser` and
+:func:`os.path.expandvars`.)
+
+For a literal match, wrap the meta-characters in brackets.
+For example, ``'[?]'`` matches the character ``'?'``.
.. function:: glob(pathname)
@@ -49,6 +54,15 @@ preserved. ::
>>> glob.glob('?.gif')
['1.gif']
+If the directory contains files starting with ``.`` they won't be matched by
+default. For example, consider a directory containing :file:`card.gif` and
+:file:`.card.gif`::
+
+ >>> import glob
+ >>> glob.glob('*.gif')
+ ['card.gif']
+ >>> glob.glob('.c*')
+ ['.card.gif']
.. seealso::
diff --git a/Doc/library/gzip.rst b/Doc/library/gzip.rst
index e074bfc..e26fe28 100644
--- a/Doc/library/gzip.rst
+++ b/Doc/library/gzip.rst
@@ -22,9 +22,6 @@ Note that additional file formats which can be decompressed by the
:program:`gzip` and :program:`gunzip` programs, such as those produced by
:program:`compress` and :program:`pack`, are not supported by this module.
-For other archive formats, see the :mod:`bz2`, :mod:`zipfile`, and
-:mod:`tarfile` modules.
-
The module defines the following items:
@@ -36,12 +33,12 @@ The module defines the following items:
given a non-trivial value.
The new class instance is based on *fileobj*, which can be a regular file, a
- :class:`StringIO` object, or any other object which simulates a file. It
+ :class:`~StringIO.StringIO` object, or any other object which simulates a file. It
defaults to ``None``, in which case *filename* is opened to provide a file
object.
When *fileobj* is not ``None``, the *filename* argument is only used to be
- included in the :program:`gzip` file header, which may includes the original
+ included in the :program:`gzip` file header, which may include the original
filename of the uncompressed file. It defaults to the filename of *fileobj*, if
discernible; otherwise, it defaults to the empty string, and in this case the
original filename is not included in the header.
@@ -52,9 +49,10 @@ The module defines the following items:
not given, the 'b' flag will be added to the mode to ensure the file is opened
in binary mode for cross-platform portability.
- The *compresslevel* argument is an integer from ``1`` to ``9`` controlling the
- level of compression; ``1`` is fastest and produces the least compression, and
- ``9`` is slowest and produces the most compression. The default is ``9``.
+ The *compresslevel* argument is an integer from ``0`` to ``9`` controlling
+ the level of compression; ``1`` is fastest and produces the least
+ compression, and ``9`` is slowest and produces the most compression. ``0``
+ is no compression. The default is ``9``.
The *mtime* argument is an optional numeric timestamp to be written to
the stream when compressing. All :program:`gzip` compressed streams are
@@ -67,9 +65,9 @@ The module defines the following items:
Calling a :class:`GzipFile` object's :meth:`close` method does not close
*fileobj*, since you might wish to append more material after the compressed
- data. This also allows you to pass a :class:`StringIO` object opened for
+ data. This also allows you to pass a :class:`~StringIO.StringIO` object opened for
writing as *fileobj*, and retrieve the resulting memory buffer using the
- :class:`StringIO` object's :meth:`getvalue` method.
+ :class:`StringIO` object's :meth:`~StringIO.StringIO.getvalue` method.
:class:`GzipFile` supports iteration and the :keyword:`with` statement.
@@ -79,6 +77,9 @@ The module defines the following items:
.. versionchanged:: 2.7
Support for zero-padded files was added.
+ .. versionadded:: 2.7
+ The *mtime* argument.
+
.. function:: open(filename[, mode[, compresslevel]])
@@ -95,7 +96,7 @@ Examples of usage
Example of how to read a compressed file::
import gzip
- f = gzip.open('/home/joe/file.txt.gz', 'rb')
+ f = gzip.open('file.txt.gz', 'rb')
file_content = f.read()
f.close()
@@ -103,15 +104,15 @@ Example of how to create a compressed GZIP file::
import gzip
content = "Lots of content here"
- f = gzip.open('/home/joe/file.txt.gz', 'wb')
+ f = gzip.open('file.txt.gz', 'wb')
f.write(content)
f.close()
Example of how to GZIP compress an existing file::
import gzip
- f_in = open('/home/joe/file.txt', 'rb')
- f_out = gzip.open('/home/joe/file.txt.gz', 'wb')
+ f_in = open('file.txt', 'rb')
+ f_out = gzip.open('file.txt.gz', 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
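
Example of how to decompress an existing GZIP file back to a plain file (a
minimal sketch mirroring the examples above, using the same placeholder file
names)::

   import gzip
   f_in = gzip.open('file.txt.gz', 'rb')
   f_out = open('file.txt', 'wb')
   f_out.writelines(f_in)   # iterating a GzipFile yields decompressed lines
   f_out.close()
   f_in.close()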
diff --git a/Doc/library/hashlib.rst b/Doc/library/hashlib.rst
index 063ad59..e3b5ebb 100644
--- a/Doc/library/hashlib.rst
+++ b/Doc/library/hashlib.rst
@@ -25,12 +25,14 @@ digest are interchangeable. Older algorithms were called message digests. The
modern term is secure hash.
.. note::
- If you want the adler32 or crc32 hash functions they are available in
+
+ If you want the adler32 or crc32 hash functions, they are available in
the :mod:`zlib` module.
.. warning::
- Some algorithms have known hash collision weaknesses, see the FAQ at the end.
+ Some algorithms have known hash collision weaknesses; refer to the "See
+ also" section at the end.
There is one constructor method named for each type of :dfn:`hash`. All return
a hash object with the same simple interface. For example: use :func:`sha1` to
@@ -108,7 +110,6 @@ A hash object has the following methods:
m.update(b)`` is equivalent to ``m.update(a+b)``.
.. versionchanged:: 2.7
-
The Python GIL is released to allow other threads to run while
hash updates on data larger than 2048 bytes are taking place when
using hash algorithms supplied by OpenSSL.
@@ -134,6 +135,46 @@ A hash object has the following methods:
compute the digests of strings that share a common initial substring.
+Key Derivation Function
+-----------------------
+
+Key derivation and key stretching algorithms are designed for secure password
+hashing. Naive algorithms such as ``sha1(password)`` are not resistant against
+brute-force attacks. A good password hashing function must be tunable, slow, and
+include a `salt <https://en.wikipedia.org/wiki/Salt_%28cryptography%29>`_.
+
+
+.. function:: pbkdf2_hmac(name, password, salt, rounds, dklen=None)
+
+ The function provides PKCS#5 password-based key derivation function 2. It
+ uses HMAC as the pseudorandom function.
+
+ The string *name* is the desired name of the hash digest algorithm for
+ HMAC, e.g. 'sha1' or 'sha256'. *password* and *salt* are interpreted as
+ buffers of bytes. Applications and libraries should limit *password* to
+ a sensible length (e.g. 1024). *salt* should be about 16 or more bytes from
+ a proper source, e.g. :func:`os.urandom`.
+
+ The number of *rounds* should be chosen based on the hash algorithm and
+ computing power. As of 2013, at least 100,000 rounds of SHA-256 are suggested.
+
+ *dklen* is the length of the derived key. If *dklen* is ``None`` then the
+ digest size of the hash algorithm *name* is used, e.g. 64 for SHA-512.
+
+ >>> import hashlib, binascii
+ >>> dk = hashlib.pbkdf2_hmac('sha256', b'password', b'salt', 100000)
+ >>> binascii.hexlify(dk)
+ b'0394a2ede332c9a13eb82e9b24631604c31df978b4e2f0fbd2c549944f9d79a5'
+
+ .. versionadded:: 2.7.8
+
+ .. note::
+
+ A fast implementation of *pbkdf2_hmac* is available with OpenSSL. The
+ Python implementation uses an inline version of :mod:`hmac`. It is about
+ three times slower and doesn't release the GIL.
+
+
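   As a rough sketch of the recommended usage (the salt comes from
   :func:`os.urandom`, so only the length of the result is shown)::

      >>> import os, hashlib, binascii
      >>> salt = os.urandom(16)      # a fresh random salt for each password
      >>> dk = hashlib.pbkdf2_hmac('sha256', b'password', salt, 100000, dklen=32)
      >>> len(binascii.hexlify(dk))
      64
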
.. seealso::
Module :mod:`hmac`
diff --git a/Doc/library/heapq.rst b/Doc/library/heapq.rst
index f0723b7..e8acd6c 100644
--- a/Doc/library/heapq.rst
+++ b/Doc/library/heapq.rst
@@ -260,7 +260,7 @@ A nice feature of this sort is that you can efficiently insert new items while
the sort is going on, provided that the inserted items are not "better" than the
last 0'th element you extracted. This is especially useful in simulation
contexts, where the tree holds all incoming events, and the "win" condition
-means the smallest scheduled time. When an event schedule other events for
+means the smallest scheduled time. When an event schedules other events for
execution, they are scheduled into the future, so they can easily go into the
heap. So, a heap is a good structure for implementing schedulers (this is what
I used for my MIDI sequencer :-).
diff --git a/Doc/library/hmac.rst b/Doc/library/hmac.rst
index e962ff0..09e819a 100644
--- a/Doc/library/hmac.rst
+++ b/Doc/library/hmac.rst
@@ -20,7 +20,7 @@ This module implements the HMAC algorithm as described by :rfc:`2104`.
Return a new hmac object. If *msg* is present, the method call ``update(msg)``
is made. *digestmod* is the digest constructor or module for the HMAC object to
- use. It defaults to the :func:`hashlib.md5` constructor.
+ use. It defaults to the :data:`hashlib.md5` constructor.
An HMAC object has the following methods:
@@ -38,6 +38,13 @@ An HMAC object has the following methods:
This string will be the same length as the *digest_size* of the digest given to
the constructor. It may contain non-ASCII characters, including NUL bytes.
+ .. warning::
+
+ When comparing the output of :meth:`digest` to an externally-supplied
+ digest during a verification routine, it is recommended to use the
+ :func:`compare_digest` function instead of the ``==`` operator
+ to reduce the vulnerability to timing attacks.
+
.. method:: HMAC.hexdigest()
@@ -45,6 +52,13 @@ An HMAC object has the following methods:
containing only hexadecimal digits. This may be used to exchange the value
safely in email or other non-binary environments.
+ .. warning::
+
+ When comparing the output of :meth:`hexdigest` to an externally-supplied
+ digest during a verification routine, it is recommended to use the
+ :func:`compare_digest` function instead of the ``==`` operator
+ to reduce the vulnerability to timing attacks.
+
.. method:: HMAC.copy()
@@ -52,6 +66,25 @@ An HMAC object has the following methods:
compute the digests of strings that share a common initial substring.
+This module also provides the following helper function:
+
+.. function:: compare_digest(a, b)
+
+ Return ``a == b``. This function uses an approach designed to prevent
+ timing analysis by avoiding content-based short circuiting behaviour,
+ making it appropriate for cryptography. *a* and *b* must both be of the
+ same type: either :class:`unicode` or a :term:`bytes-like object`.
+
+ .. note::
+
+ If *a* and *b* are of different lengths, or if an error occurs,
+ a timing attack could theoretically reveal information about the
+ types and lengths of *a* and *b*--but not their values.
+
+
+ .. versionadded:: 2.7.7
+
+
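   A minimal verification sketch (the key and message are purely illustrative)::

      >>> import hmac, hashlib
      >>> key = b'secret-key'
      >>> expected = hmac.new(key, b'message', hashlib.sha256).hexdigest()
      >>> received = hmac.new(key, b'message', hashlib.sha256).hexdigest()
      >>> hmac.compare_digest(received, expected)
      True
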
.. seealso::
Module :mod:`hashlib`
diff --git a/Doc/library/htmllib.rst b/Doc/library/htmllib.rst
index f253d12..9e68f45 100644
--- a/Doc/library/htmllib.rst
+++ b/Doc/library/htmllib.rst
@@ -6,7 +6,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`htmllib` module has been removed in Python 3.0.
+ The :mod:`htmllib` module has been removed in Python 3.
.. index::
@@ -162,8 +162,8 @@ additional methods and instance variables for use within tag methods.
.. note::
The :mod:`htmlentitydefs` module has been renamed to :mod:`html.entities` in
- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
- converting your sources to 3.0.
+ Python 3. The :term:`2to3` tool will automatically adapt imports when
+ converting your sources to Python 3.
**Source code:** :source:`Lib/htmlentitydefs.py`
diff --git a/Doc/library/httplib.rst b/Doc/library/httplib.rst
index f26fba6..fcdfbc0 100644
--- a/Doc/library/httplib.rst
+++ b/Doc/library/httplib.rst
@@ -6,8 +6,8 @@
.. note::
The :mod:`httplib` module has been renamed to :mod:`http.client` in Python
- 3.0. The :term:`2to3` tool will automatically adapt imports when converting
- your sources to 3.0.
+ 3. The :term:`2to3` tool will automatically adapt imports when converting
+ your sources to Python 3.
.. index::
@@ -89,7 +89,7 @@ The module provides the following classes:
*source_address* was added.
-.. class:: HTTPResponse(sock[, debuglevel=0][, strict=0])
+.. class:: HTTPResponse(sock, debuglevel=0, strict=0)
Class whose instances are returned upon successful connection. Not instantiated
directly by user.
@@ -612,3 +612,20 @@ Here is an example session that shows how to ``POST`` requests::
'Redirecting to <a href="http://bugs.python.org/issue12524">http://bugs.python.org/issue12524</a>'
>>> conn.close()
+Client side ``HTTP PUT`` requests are very similar to ``POST`` requests. The
+difference lies only on the server side, where the HTTP server will allow
+resources to be created via a ``PUT`` request. Here is an example session that
+shows how to send a ``PUT`` request using httplib::
+
+ >>> # This creates an HTTP message
+ >>> # with the content of BODY as the enclosed representation
+ >>> # for the resource http://localhost:8080/foobar
+ ...
+ >>> import httplib
+ >>> BODY = "***filecontents***"
+ >>> conn = httplib.HTTPConnection("localhost", 8080)
+ >>> conn.request("PUT", "/file", BODY)
+ >>> response = conn.getresponse()
+ >>> print response.status, response.reason
+ 200 OK
+
diff --git a/Doc/library/idle.rst b/Doc/library/idle.rst
index 6bd1898..36d78b0 100644
--- a/Doc/library/idle.rst
+++ b/Doc/library/idle.rst
@@ -33,8 +33,8 @@ Menus
File menu
^^^^^^^^^
-New window
- create a new editing window
+New file
+ create a new file editing window
Open...
open an existing file
@@ -154,27 +154,77 @@ The rest of this menu lists the names of all open windows; select one to bring
it to the foreground (deiconifying it if necessary).
-Debug menu (in the Python Shell window only)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Debug menu
+^^^^^^^^^^
+
+* In the Python Shell window only
Go to file/line
- look around the insert point for a filename and linenumber, open the file, and
- show the line.
+ Look around the insert point for a filename and line number, open the file,
+ and show the line. Useful to view the source lines referenced in an
+ exception traceback.
-Open stack viewer
- show the stack traceback of the last exception
+Debugger
+ Run commands in the shell under the debugger.
-Debugger toggle
- Run commands in the shell under the debugger
+Stack viewer
+ Show the stack traceback of the last exception.
-JIT Stack viewer toggle
- Open stack viewer on traceback
+Auto-open Stack Viewer
+ Open stack viewer on traceback.
.. index::
single: stack viewer
single: debugger
+Edit context menu
+^^^^^^^^^^^^^^^^^
+
+* Right-click in Edit window (Control-click on OS X)
+
+Cut
+ Copy selection into system-wide clipboard; then delete selection
+
+Copy
+ Copy selection into system-wide clipboard
+
+Paste
+ Insert system-wide clipboard into window
+
+Set Breakpoint
+ Sets a breakpoint. Breakpoints are only enabled when the debugger is open.
+
+Clear Breakpoint
+ Clears the breakpoint on that line.
+
+.. index::
+ single: Cut
+ single: Copy
+ single: Paste
+ single: Set Breakpoint
+ single: Clear Breakpoint
+ single: breakpoints
+
+
+Shell context menu
+^^^^^^^^^^^^^^^^^^
+
+* Right-click in Python Shell window (Control-click on OS X)
+
+Cut
+ Copy selection into system-wide clipboard; then delete selection
+
+Copy
+ Copy selection into system-wide clipboard
+
+Paste
+ Insert system-wide clipboard into window
+
+Go to file/line
+ Same as in Debug menu.
+
+
Basic editing and navigation
----------------------------
diff --git a/Doc/library/imageop.rst b/Doc/library/imageop.rst
index ceef0c7..e6cb669 100644
--- a/Doc/library/imageop.rst
+++ b/Doc/library/imageop.rst
@@ -7,7 +7,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`imageop` module has been removed in Python 3.0.
+ The :mod:`imageop` module has been removed in Python 3.
The :mod:`imageop` module contains some useful operations on images. It operates
on images consisting of 8 or 32 bit pixels stored in Python strings. This is
diff --git a/Doc/library/imaplib.rst b/Doc/library/imaplib.rst
index 9fcbaaa..ca18a9c 100644
--- a/Doc/library/imaplib.rst
+++ b/Doc/library/imaplib.rst
@@ -313,8 +313,9 @@ An :class:`IMAP4` instance has the following methods:
Opens socket to *port* at *host*. This method is implicitly called by
the :class:`IMAP4` constructor. The connection objects established by this
- method will be used in the ``read``, ``readline``, ``send``, and ``shutdown``
- methods. You may override this method.
+ method will be used in the :meth:`IMAP4.read`, :meth:`IMAP4.readline`,
+ :meth:`IMAP4.send`, and :meth:`IMAP4.shutdown` methods. You may override
+ this method.
.. method:: IMAP4.partial(message_num, message_part, start, length)
diff --git a/Doc/library/imgfile.rst b/Doc/library/imgfile.rst
index 84ede95..f4c670f 100644
--- a/Doc/library/imgfile.rst
+++ b/Doc/library/imgfile.rst
@@ -8,7 +8,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`imgfile` module has been deprecated for removal in Python 3.0.
+ The :mod:`imgfile` module has been removed in Python 3.
diff --git a/Doc/library/imghdr.rst b/Doc/library/imghdr.rst
index 20f789f..24ab571 100644
--- a/Doc/library/imghdr.rst
+++ b/Doc/library/imghdr.rst
@@ -68,6 +68,6 @@ to this variable:
Example::
>>> import imghdr
- >>> imghdr.what('/tmp/bass.gif')
+ >>> imghdr.what('bass.gif')
'gif'
diff --git a/Doc/library/imp.rst b/Doc/library/imp.rst
index 607dd14..8f98d65 100644
--- a/Doc/library/imp.rst
+++ b/Doc/library/imp.rst
@@ -65,7 +65,7 @@ This module provides an interface to the mechanisms used to implement the
path and the last item in the *description* tuple is :const:`PKG_DIRECTORY`.
This function does not handle hierarchical module names (names containing
- dots). In order to find *P*.*M*, that is, submodule *M* of package *P*, use
+ dots). In order to find *P.M*, that is, submodule *M* of package *P*, use
:func:`find_module` and :func:`load_module` to find and load package *P*, and
then use :func:`find_module` with the *path* argument set to ``P.__path__``.
When *P* itself has a dotted name, apply this recipe recursively.
@@ -237,6 +237,17 @@ around for backward compatibility:
using shared libraries is highly system dependent, and not all systems support
it.)
+ .. impl-detail::
+
+ The import internals identify extension modules by filename, so doing
+ ``foo = load_dynamic("foo", "mod.so")`` and
+ ``bar = load_dynamic("bar", "mod.so")`` will result in both foo and bar
+ referring to the same module, regardless of whether or not
+ ``mod.so`` exports an ``initbar`` function. On systems which
+ support them, symlinks can be used to import multiple modules from
+ the same shared library, as each reference to the module will use
+ a different file name.
+
.. function:: load_source(name, pathname[, file])
diff --git a/Doc/library/imputil.rst b/Doc/library/imputil.rst
index 94194e2..14d7041 100644
--- a/Doc/library/imputil.rst
+++ b/Doc/library/imputil.rst
@@ -7,7 +7,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`imputil` module has been removed in Python 3.0.
+ The :mod:`imputil` module has been removed in Python 3.
.. index:: statement: import
diff --git a/Doc/library/index.rst b/Doc/library/index.rst
index 5d860f8..71ba916 100644
--- a/Doc/library/index.rst
+++ b/Doc/library/index.rst
@@ -4,9 +4,6 @@
The Python Standard Library
###############################
-:Release: |version|
-:Date: |today|
-
While :ref:`reference-index` describes the exact syntax and
semantics of the Python language, this library reference manual
describes the standard library that is distributed with Python. It also
diff --git a/Doc/library/io.rst b/Doc/library/io.rst
index 1d1f490..7b40085 100644
--- a/Doc/library/io.rst
+++ b/Doc/library/io.rst
@@ -92,7 +92,7 @@ Module Interface
``'b'`` binary mode
``'t'`` text mode (default)
``'+'`` open a disk file for updating (reading and writing)
- ``'U'`` universal newline mode (for backwards compatibility; should
+ ``'U'`` universal newlines mode (for backwards compatibility; should
not be used in new code)
========= ===============================================================
@@ -141,14 +141,17 @@ Module Interface
used. Any other error handling name that has been registered with
:func:`codecs.register_error` is also valid.
- *newline* controls how universal newlines works (it only applies to text
- mode). It can be ``None``, ``''``, ``'\n'``, ``'\r'``, and ``'\r\n'``. It
- works as follows:
+ .. index::
+ single: universal newlines; open() (in module io)
+
+ *newline* controls how :term:`universal newlines` works (it only applies to
+ text mode). It can be ``None``, ``''``, ``'\n'``, ``'\r'``, and ``'\r\n'``.
+ It works as follows:
* On input, if *newline* is ``None``, universal newlines mode is enabled.
Lines in the input can end in ``'\n'``, ``'\r'``, or ``'\r\n'``, and these
are translated into ``'\n'`` before being returned to the caller. If it is
- ``''``, universal newline mode is enabled, but line endings are returned to
+ ``''``, universal newlines mode is enabled, but line endings are returned to
the caller untranslated. If it has any of the other legal values, input
lines are only terminated by the given string, and the line ending is
returned to the caller untranslated.
@@ -275,7 +278,7 @@ I/O Base Classes
.. method:: readable()
- Return ``True`` if the stream can be read from. If False, :meth:`read`
+ Return ``True`` if the stream can be read from. If ``False``, :meth:`read`
will raise :exc:`IOError`.
.. method:: readline(limit=-1)
@@ -293,6 +296,9 @@ I/O Base Classes
to control the number of lines read: no more lines will be read if the
total size (in bytes/characters) of all lines so far exceeds *hint*.
+ Note that it's already possible to iterate on file objects using ``for
+ line in file: ...`` without calling ``file.readlines()``.
+
.. method:: seek(offset, whence=SEEK_SET)
Change the stream position to the given byte *offset*. *offset* is
@@ -340,6 +346,12 @@ I/O Base Classes
is usual for each of the lines provided to have a line separator at the
end.
+ .. method:: __del__()
+
+ Prepare for object destruction. :class:`IOBase` provides a default
+ implementation of this method that calls the instance's
+ :meth:`~IOBase.close` method.
+
.. class:: RawIOBase
@@ -638,6 +650,7 @@ than raw I/O does.
:exc:`UnsupportedOperation`.
.. warning::
+
:class:`BufferedRWPair` does not attempt to synchronize accesses to
its underlying raw streams. You should not pass it the same object
as reader and writer; use :class:`BufferedRandom` instead.
@@ -696,11 +709,13 @@ Text I/O
Read and return at most *n* characters from the stream as a single
:class:`unicode`. If *n* is negative or ``None``, reads until EOF.
- .. method:: readline()
+ .. method:: readline(limit=-1)
Read until newline or EOF and return a single ``unicode``. If the
stream is already at EOF, an empty string is returned.
+ If *limit* is specified, at most *limit* characters will be read.
+
.. method:: seek(offset, whence=SEEK_SET)
Change the stream position to the given *offset*. Behaviour depends
@@ -752,14 +767,25 @@ Text I/O
sequences) can be used. Any other error handling name that has been
registered with :func:`codecs.register_error` is also valid.
- *newline* can be ``None``, ``''``, ``'\n'``, ``'\r'``, or ``'\r\n'``. It
- controls the handling of line endings. If it is ``None``, universal newlines
- is enabled. With this enabled, on input, the lines endings ``'\n'``,
- ``'\r'``, or ``'\r\n'`` are translated to ``'\n'`` before being returned to
- the caller. Conversely, on output, ``'\n'`` is translated to the system
- default line separator, :data:`os.linesep`. If *newline* is any other of its
- legal values, that newline becomes the newline when the file is read and it
- is returned untranslated. On output, ``'\n'`` is converted to the *newline*.
+ .. index::
+ single: universal newlines; io.TextIOWrapper class
+
+ *newline* controls how line endings are handled. It can be ``None``,
+ ``''``, ``'\n'``, ``'\r'``, and ``'\r\n'``. It works as follows:
+
+ * On input, if *newline* is ``None``, :term:`universal newlines` mode is
+ enabled. Lines in the input can end in ``'\n'``, ``'\r'``, or ``'\r\n'``,
+ and these are translated into ``'\n'`` before being returned to the
+ caller. If it is ``''``, universal newlines mode is enabled, but line
+ endings are returned to the caller untranslated. If it has any of the
+ other legal values, input lines are only terminated by the given string,
+ and the line ending is returned to the caller untranslated.
+
+ * On output, if *newline* is ``None``, any ``'\n'`` characters written are
+ translated to the system default line separator, :data:`os.linesep`. If
+ *newline* is ``''``, no translation takes place. If *newline* is any of
+ the other legal values, any ``'\n'`` characters written are translated to
+ the given string.
If *line_buffering* is ``True``, :meth:`flush` is implied when a call to
write contains a newline character.
@@ -772,14 +798,14 @@ Text I/O
Whether line buffering is enabled.
-.. class:: StringIO(initial_value=u'', newline=None)
+.. class:: StringIO(initial_value=u'', newline=u'\\n')
An in-memory stream for unicode text. It inherits :class:`TextIOWrapper`.
The initial value of the buffer (an empty unicode string by default) can
be set by providing *initial_value*. The *newline* argument works like
- that of :class:`TextIOWrapper`. The default is to do no newline
- translation.
+ that of :class:`TextIOWrapper`. The default is to consider only ``\n``
+ characters as ends of lines and to do no newline translation.
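
   For illustration, a small sketch of the default behaviour::

      >>> import io
      >>> buf = io.StringIO(u'one\r\ntwo\r\n')
      >>> buf.readline()   # only '\n' ends a line; '\r' is kept untranslated
      u'one\r\n'
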
:class:`StringIO` provides this method in addition to those from
:class:`TextIOWrapper` and its parents:
@@ -807,10 +833,13 @@ Text I/O
output.close()
+.. index::
+ single: universal newlines; io.IncrementalNewlineDecoder class
+
.. class:: IncrementalNewlineDecoder
- A helper codec that decodes newlines for universal newlines mode. It
- inherits :class:`codecs.IncrementalDecoder`.
+ A helper codec that decodes newlines for :term:`universal newlines` mode.
+ It inherits :class:`codecs.IncrementalDecoder`.
Advanced topics
diff --git a/Doc/library/itertools.rst b/Doc/library/itertools.rst
index a553d09..ea279b0 100644
--- a/Doc/library/itertools.rst
+++ b/Doc/library/itertools.rst
@@ -52,12 +52,12 @@ Iterator Arguments Results
:func:`compress` data, selectors (d[0] if s[0]), (d[1] if s[1]), ... ``compress('ABCDEF', [1,0,1,0,1,1]) --> A C E F``
:func:`dropwhile` pred, seq seq[n], seq[n+1], starting when pred fails ``dropwhile(lambda x: x<5, [1,4,6,4,1]) --> 6 4 1``
:func:`groupby` iterable[, keyfunc] sub-iterators grouped by value of keyfunc(v)
-:func:`ifilter` pred, seq elements of seq where pred(elem) is True ``ifilter(lambda x: x%2, range(10)) --> 1 3 5 7 9``
-:func:`ifilterfalse` pred, seq elements of seq where pred(elem) is False ``ifilterfalse(lambda x: x%2, range(10)) --> 0 2 4 6 8``
+:func:`ifilter` pred, seq elements of seq where pred(elem) is true ``ifilter(lambda x: x%2, range(10)) --> 1 3 5 7 9``
+:func:`ifilterfalse` pred, seq elements of seq where pred(elem) is false ``ifilterfalse(lambda x: x%2, range(10)) --> 0 2 4 6 8``
:func:`islice` seq, [start,] stop [, step] elements from seq[start:stop:step] ``islice('ABCDEFG', 2, None) --> C D E F G``
:func:`imap` func, p, q, ... func(p0, q0), func(p1, q1), ... ``imap(pow, (2,3,10), (5,2,3)) --> 32 9 1000``
:func:`starmap` func, seq func(\*seq[0]), func(\*seq[1]), ... ``starmap(pow, [(2,5), (3,2), (10,3)]) --> 32 9 1000``
-:func:`tee` it, n it1, it2 , ... itn splits one iterator into n
+:func:`tee` it, n it1, it2, ... itn splits one iterator into n
:func:`takewhile` pred, seq seq[0], seq[1], until pred fails ``takewhile(lambda x: x<5, [1,4,6,4,1]) --> 1 4``
:func:`izip` p, q, ... (p[0], q[0]), (p[1], q[1]), ... ``izip('ABCD', 'xy') --> Ax By``
:func:`izip_longest` p, q, ... (p[0], q[0]), (p[1], q[1]), ... ``izip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-``
@@ -106,9 +106,8 @@ loops that truncate the stream.
.. classmethod:: chain.from_iterable(iterable)
Alternate constructor for :func:`chain`. Gets chained inputs from a
- single iterable argument that is evaluated lazily. Equivalent to::
+ single iterable argument that is evaluated lazily. Roughly equivalent to::
- @classmethod
def from_iterable(iterables):
# chain.from_iterable(['ABC', 'DEF']) --> A B C D E F
for it in iterables:
@@ -393,7 +392,8 @@ loops that truncate the stream.
yield function(*args)
-.. function:: islice(iterable, [start,] stop [, step])
+.. function:: islice(iterable, stop)
+ islice(iterable, start, stop[, step])
Make an iterator that returns selected elements from the iterable. If *start* is
non-zero, then elements from the iterable are skipped until start is reached.
@@ -732,8 +732,9 @@ which incur interpreter overhead.
next(b, None)
return izip(a, b)
- def grouper(n, iterable, fillvalue=None):
- "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
+ def grouper(iterable, n, fillvalue=None):
+ "Collect data into fixed-length chunks or blocks"
+ # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return izip_longest(fillvalue=fillvalue, *args)
@@ -827,6 +828,18 @@ which incur interpreter overhead.
indices = sorted(random.randrange(n) for i in xrange(r))
return tuple(pool[i] for i in indices)
+ def tee_lookahead(t, i):
+ """Inspect the i-th upcomping value from a tee object
+ while leaving the tee object at its current position.
+
+ Raise an IndexError if the underlying iterator doesn't
+ have enough values.
+
+ """
+ for value in islice(t.__copy__(), i, None):
+ return value
+ raise IndexError(i)
+
Note, many of the above recipes can be optimized by replacing global lookups
with local variables defined as default values. For example, the
*dotproduct* recipe can be written as::
diff --git a/Doc/library/jpeg.rst b/Doc/library/jpeg.rst
index 98497ad..2a8e4e8 100644
--- a/Doc/library/jpeg.rst
+++ b/Doc/library/jpeg.rst
@@ -8,7 +8,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`jpeg` module has been deprecated for removal in Python 3.0.
+ The :mod:`jpeg` module has been removed in Python 3.
diff --git a/Doc/library/json.rst b/Doc/library/json.rst
index 546a09d..caee953 100644
--- a/Doc/library/json.rst
+++ b/Doc/library/json.rst
@@ -7,8 +7,10 @@
.. sectionauthor:: Bob Ippolito <bob@redivi.com>
.. versionadded:: 2.6
-`JSON (JavaScript Object Notation) <http://json.org>`_ is a subset of JavaScript
-syntax (ECMA-262 3rd edition) used as a lightweight data interchange format.
+`JSON (JavaScript Object Notation) <http://json.org>`_, specified by
+:rfc:`4627`, is a lightweight data interchange format based on a subset of
+`JavaScript <http://en.wikipedia.org/wiki/JavaScript>`_ syntax (`ECMA-262 3rd
+edition <http://www.ecma-international.org/publications/files/ECMA-ST-ARCH/ECMA-262,%203rd%20edition,%20December%201999.pdf>`_).
:mod:`json` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules.
@@ -41,7 +43,8 @@ Compact encoding::
Pretty printing::
>>> import json
- >>> print json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
+ >>> print json.dumps({'4': 5, '6': 7}, sort_keys=True,
+ ... indent=4, separators=(',', ': '))
{
"4": 5,
"6": 7
@@ -81,6 +84,7 @@ Extending :class:`JSONEncoder`::
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
+ ... # Let the base class default method raise the TypeError
... return json.JSONEncoder.default(self, obj)
...
>>> dumps(2 + 1j, cls=ComplexEncoder)
@@ -99,35 +103,44 @@ Using json.tool from the shell to validate and pretty-print::
{
"json": "obj"
}
- $ echo '{ 1.2:3.4}' | python -mjson.tool
- Expecting property name: line 1 column 2 (char 2)
+ $ echo '{1.2:3.4}' | python -mjson.tool
+ Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
.. highlight:: python
.. note::
- The JSON produced by this module's default settings is a subset of
- YAML, so it may be used as a serializer for that as well.
+ JSON is a subset of `YAML <http://yaml.org/>`_ 1.2. The JSON produced by
+ this module's default settings (in particular, the default *separators*
+ value) is also a subset of YAML 1.0 and 1.1. This module can thus also be
+ used as a YAML serializer.
Basic Usage
-----------
-.. function:: dump(obj, fp[, skipkeys[, ensure_ascii[, check_circular[, allow_nan[, cls[, indent[, separators[, encoding[, default[, **kw]]]]]]]]]])
+.. function:: dump(obj, fp, skipkeys=False, ensure_ascii=True, \
+ check_circular=True, allow_nan=True, cls=None, \
+ indent=None, separators=None, encoding="utf-8", \
+ default=None, sort_keys=False, **kw)
Serialize *obj* as a JSON formatted stream to *fp* (a ``.write()``-supporting
- file-like object).
+ :term:`file-like object`) using this :ref:`conversion table
+ <py-to-json-table>`.
If *skipkeys* is ``True`` (default: ``False``), then dict keys that are not
of a basic type (:class:`str`, :class:`unicode`, :class:`int`, :class:`long`,
:class:`float`, :class:`bool`, ``None``) will be skipped instead of raising a
:exc:`TypeError`.
- If *ensure_ascii* is ``False`` (default: ``True``), then some chunks written
- to *fp* may be :class:`unicode` instances, subject to normal Python
- :class:`str` to :class:`unicode` coercion rules. Unless ``fp.write()``
- explicitly understands :class:`unicode` (as in :func:`codecs.getwriter`) this
- is likely to cause an error.
+ If *ensure_ascii* is ``True`` (the default), all non-ASCII characters in the
+ output are escaped with ``\uXXXX`` sequences, and the result is a
+ :class:`str` instance consisting of ASCII characters only. If
+ *ensure_ascii* is ``False``, some chunks written to *fp* may be
+ :class:`unicode` instances. This usually happens because the input contains
+ unicode strings or the *encoding* parameter is used. Unless ``fp.write()``
+ explicitly understands :class:`unicode` (as in :func:`codecs.getwriter`)
+ this is likely to cause an error.
If *check_circular* is ``False`` (default: ``True``), then the circular
reference check for container types will be skipped and a circular reference
@@ -143,6 +156,12 @@ Basic Usage
or negative, will only insert newlines. ``None`` (the default) selects the
most compact representation.
+ .. note::
+
+ Since the default item separator is ``', '``, the output might include
+ trailing whitespace when *indent* is specified. You can use
+ ``separators=(',', ': ')`` to avoid this.
+
If *separators* is an ``(item_separator, dict_separator)`` tuple, then it
will be used instead of the default ``(', ', ': ')`` separators. ``(',',
':')`` is the most compact JSON representation.
@@ -152,6 +171,9 @@ Basic Usage
*default(obj)* is a function that should return a serializable version of
*obj* or raise :exc:`TypeError`. The default simply raises :exc:`TypeError`.
+ If *sort_keys* is ``True`` (default: ``False``), then the output of
+ dictionaries will be sorted by key.
+
To use a custom :class:`JSONEncoder` subclass (e.g. one that overrides the
:meth:`default` method to serialize additional types), specify it with the
*cls* kwarg; otherwise :class:`JSONEncoder` is used.
@@ -162,19 +184,32 @@ Basic Usage
trying to serialize more objects with repeated calls to :func:`dump` and
the same *fp* will result in an invalid JSON file.
-.. function:: dumps(obj[, skipkeys[, ensure_ascii[, check_circular[, allow_nan[, cls[, indent[, separators[, encoding[, default[, **kw]]]]]]]]]])
+.. function:: dumps(obj, skipkeys=False, ensure_ascii=True, \
+ check_circular=True, allow_nan=True, cls=None, \
+ indent=None, separators=None, encoding="utf-8", \
+ default=None, sort_keys=False, **kw)
+
+ Serialize *obj* to a JSON formatted :class:`str` using this :ref:`conversion
+ table <py-to-json-table>`. If *ensure_ascii* is ``False``, the result may
+ contain non-ASCII characters and the return value may be a :class:`unicode`
+ instance.
- Serialize *obj* to a JSON formatted :class:`str`.
+ The arguments have the same meaning as in :func:`dump`.
- If *ensure_ascii* is ``False``, then the return value will be a
- :class:`unicode` instance. The other arguments have the same meaning as in
- :func:`dump`.
+ .. note::
+ Keys in key/value pairs of JSON are always of the type :class:`str`. When
+ a dictionary is converted into JSON, all the keys of the dictionary are
+ coerced to strings. As a result of this, if a dictionary is converted
+ into JSON and then back into a dictionary, the dictionary may not equal
+ the original one. That is, ``loads(dumps(x)) != x`` if x has non-string
+ keys.
.. function:: load(fp[, encoding[, cls[, object_hook[, parse_float[, parse_int[, parse_constant[, object_pairs_hook[, **kw]]]]]]]])
- Deserialize *fp* (a ``.read()``-supporting file-like object containing a JSON
- document) to a Python object.
+ Deserialize *fp* (a ``.read()``-supporting :term:`file-like object`
+ containing a JSON document) to a Python object using this :ref:`conversion
+ table <json-to-py-table>`.
If the contents of *fp* are encoded with an ASCII based encoding other than
UTF-8 (e.g. latin-1), then an appropriate *encoding* name must be specified.
@@ -185,7 +220,8 @@ Basic Usage
*object_hook* is an optional function that will be called with the result of
any object literal decoded (a :class:`dict`). The return value of
*object_hook* will be used instead of the :class:`dict`. This feature can be used
- to implement custom decoders (e.g. JSON-RPC class hinting).
+ to implement custom decoders (e.g. `JSON-RPC <http://www.jsonrpc.org>`_
+ class hinting).
*object_pairs_hook* is an optional function that will be called with the
result of any object literal decoded with an ordered list of pairs. The
@@ -209,10 +245,13 @@ Basic Usage
(e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the following
- strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``, ``'null'``, ``'true'``,
- ``'false'``. This can be used to raise an exception if invalid JSON numbers
+ strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``.
+ This can be used to raise an exception if invalid JSON numbers
are encountered.
+ .. versionchanged:: 2.7
+ *parse_constant* doesn't get called on ``'null'``, ``'true'``, ``'false'`` anymore.
+
To use a custom :class:`JSONDecoder` subclass, specify it with the ``cls``
kwarg; otherwise :class:`JSONDecoder` is used. Additional keyword arguments
will be passed to the constructor of the class.
@@ -221,7 +260,8 @@ Basic Usage
.. function:: loads(s[, encoding[, cls[, object_hook[, parse_float[, parse_int[, parse_constant[, object_pairs_hook[, **kw]]]]]]]])
Deserialize *s* (a :class:`str` or :class:`unicode` instance containing a JSON
- document) to a Python object.
+ document) to a Python object using this :ref:`conversion table
+ <json-to-py-table>`.
If *s* is a :class:`str` instance and is encoded with an ASCII based encoding
other than UTF-8 (e.g. latin-1), then an appropriate *encoding* name must be
@@ -231,7 +271,7 @@ Basic Usage
The other arguments have the same meaning as in :func:`load`.
-Encoders and decoders
+Encoders and Decoders
---------------------
.. class:: JSONDecoder([encoding[, object_hook[, parse_float[, parse_int[, parse_constant[, strict[, object_pairs_hook]]]]]]])
@@ -240,6 +280,8 @@ Encoders and decoders
Performs the following translations in decoding by default:
+ .. _json-to-py-table:
+
+---------------+-------------------+
| JSON | Python |
+===============+===================+
@@ -306,6 +348,8 @@ Encoders and decoders
those with character codes in the 0-31 range, including ``'\t'`` (tab),
``'\n'``, ``'\r'`` and ``'\0'``.
+ If the data being deserialized is not a valid JSON document, a
+ :exc:`ValueError` will be raised.
.. method:: decode(s)
@@ -328,6 +372,8 @@ Encoders and decoders
Supports the following objects and types by default:
+ .. _py-to-json-table:
+
+-------------------+---------------+
| Python | JSON |
+===================+===============+
@@ -355,9 +401,12 @@ Encoders and decoders
attempt encoding of keys that are not str, int, long, float or None. If
*skipkeys* is ``True``, such items are simply skipped.
- If *ensure_ascii* is ``True`` (the default), the output is guaranteed to be
- :class:`str` objects with all incoming unicode characters escaped. If
- *ensure_ascii* is ``False``, the output will be a unicode object.
+ If *ensure_ascii* is ``True`` (the default), all non-ASCII characters in the
+ output are escaped with ``\uXXXX`` sequences, and the results are
+ :class:`str` instances consisting of ASCII characters only. If
+ *ensure_ascii* is ``False``, a result may be a :class:`unicode`
+ instance. This usually happens if the input contains unicode strings or the
+ *encoding* parameter is used.
If *check_circular* is ``True`` (the default), then lists, dicts, and custom
encoded objects will be checked for circular references during encoding to
@@ -379,6 +428,12 @@ Encoders and decoders
level. An indent level of 0 will only insert newlines. ``None`` is the most
compact representation.
+ .. note::
+
+ Since the default item separator is ``', '``, the output might include
+ trailing whitespace when *indent* is specified. You can use
+ ``separators=(',', ': ')`` to avoid this.
+
If specified, *separators* should be an ``(item_separator, key_separator)``
tuple. The default is ``(', ', ': ')``. To get the most compact JSON
representation, you should specify ``(',', ':')`` to eliminate whitespace.
@@ -408,6 +463,7 @@ Encoders and decoders
pass
else:
return list(iterable)
+ # Let the base class default method raise the TypeError
return JSONEncoder.default(self, o)
@@ -427,3 +483,108 @@ Encoders and decoders
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
+
+
+Standard Compliance
+-------------------
+
+The JSON format is specified by :rfc:`4627`. This section details this
+module's level of compliance with the RFC. For simplicity,
+:class:`JSONEncoder` and :class:`JSONDecoder` subclasses, and parameters other
+than those explicitly mentioned, are not considered.
+
+This module does not comply with the RFC in a strict fashion, implementing some
+extensions that are valid JavaScript but not valid JSON. In particular:
+
+- Top-level non-object, non-array values are accepted and output;
+- Infinite and NaN number values are accepted and output;
+- Repeated names within an object are accepted, and only the value of the last
+ name-value pair is used.
+
+Since the RFC permits RFC-compliant parsers to accept input texts that are not
+RFC-compliant, this module's deserializer is technically RFC-compliant under
+default settings.
+
+Character Encodings
+^^^^^^^^^^^^^^^^^^^
+
+The RFC recommends that JSON be represented using either UTF-8, UTF-16, or
+UTF-32, with UTF-8 being the default. Accordingly, this module uses UTF-8 as
+the default for its *encoding* parameter.
+
+This module's deserializer only directly works with ASCII-compatible encodings;
+UTF-16, UTF-32, and other ASCII-incompatible encodings require the use of
+workarounds described in the documentation for the deserializer's *encoding*
+parameter.
+
+The RFC also non-normatively describes a limited encoding detection technique
+for JSON texts; this module's deserializer does not implement this or any other
+kind of encoding detection.
+
+As permitted, though not required, by the RFC, this module's serializer sets
+*ensure_ascii=True* by default, thus escaping the output so that the resulting
+strings only contain ASCII characters.
+
+
+Top-level Non-Object, Non-Array Values
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The RFC specifies that the top-level value of a JSON text must be either a
+JSON object or array (Python :class:`dict` or :class:`list`). This module's
+deserializer also accepts input texts consisting solely of a
+JSON null, boolean, number, or string value::
+
+ >>> just_a_json_string = '"spam and eggs"' # Not by itself a valid JSON text
+ >>> json.loads(just_a_json_string)
+ u'spam and eggs'
+
+This module itself does not include a way to request that such input texts be
+regarded as illegal. Likewise, this module's serializer also accepts single
+Python :data:`None`, :class:`bool`, numeric, and :class:`str`
+values as input and will generate output texts consisting solely of a top-level
+JSON null, boolean, number, or string value without raising an exception::
+
+ >>> neither_a_list_nor_a_dict = u"spam and eggs"
+ >>> json.dumps(neither_a_list_nor_a_dict) # The result is not a valid JSON text
+ '"spam and eggs"'
+
+This module's serializer does not itself include a way to enforce the
+aforementioned constraint.
+
+
+Infinite and NaN Number Values
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The RFC does not permit the representation of infinite or NaN number values.
+Despite that, by default, this module accepts and outputs ``Infinity``,
+``-Infinity``, and ``NaN`` as if they were valid JSON number literal values::
+
+ >>> # Neither of these calls raises an exception, but the results are not valid JSON
+ >>> json.dumps(float('-inf'))
+ '-Infinity'
+ >>> json.dumps(float('nan'))
+ 'NaN'
+ >>> # Same when deserializing
+ >>> json.loads('-Infinity')
+ -inf
+ >>> json.loads('NaN')
+ nan
+
+In the serializer, the *allow_nan* parameter can be used to alter this
+behavior; in the deserializer, the *parse_constant* parameter serves the same
+purpose.
+
+
+Repeated Names Within an Object
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The RFC specifies that the names within a JSON object should be unique, but
+does not specify how repeated names in JSON objects should be handled. By
+default, this module does not raise an exception; instead, it ignores all but
+the last name-value pair for a given name::
+
+ >>> weird_json = '{"x": 1, "x": 2, "x": 3}'
+ >>> json.loads(weird_json)
+ {u'x': 3}
+
+The *object_pairs_hook* parameter can be used to alter this behavior.
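
For example, a sketch of an *object_pairs_hook* that rejects repeated names
instead of silently dropping them (the hook name is hypothetical; ``weird_json``
is reused from the example above)::

   >>> def reject_duplicates(pairs):
   ...     seen = {}
   ...     for key, value in pairs:
   ...         if key in seen:
   ...             raise ValueError('duplicate key: %r' % (key,))
   ...         seen[key] = value
   ...     return seen
   ...
   >>> json.loads(weird_json, object_pairs_hook=reject_duplicates)
   Traceback (most recent call last):
     ...
   ValueError: duplicate key: u'x'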
diff --git a/Doc/library/locale.rst b/Doc/library/locale.rst
index 36fbde8..5590514 100644
--- a/Doc/library/locale.rst
+++ b/Doc/library/locale.rst
@@ -59,6 +59,8 @@ The :mod:`locale` module defines the following exception and functions:
Returns the database of the local conventions as a dictionary. This dictionary
has the following strings as keys:
+ .. tabularcolumns:: |l|l|L|
+
+----------------------+-------------------------------------+--------------------------------+
| Category | Key | Meaning |
+======================+=====================================+================================+
@@ -164,22 +166,22 @@ The :mod:`locale` module defines the following exception and functions:
.. data:: D_T_FMT
- Get a string that can be used as a format string for :func:`strftime` to
+ Get a string that can be used as a format string for :func:`time.strftime` to
represent date and time in a locale-specific way.
.. data:: D_FMT
- Get a string that can be used as a format string for :func:`strftime` to
+ Get a string that can be used as a format string for :func:`time.strftime` to
represent a date in a locale-specific way.
.. data:: T_FMT
- Get a string that can be used as a format string for :func:`strftime` to
+ Get a string that can be used as a format string for :func:`time.strftime` to
represent a time in a locale-specific way.
.. data:: T_FMT_AMPM
- Get a format string for :func:`strftime` to represent time in the am/pm
+ Get a format string for :func:`time.strftime` to represent time in the am/pm
format.
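
As a rough sketch of how these constants are used (they are read with
:func:`locale.nl_langinfo`, which is not available on all platforms)::

   import locale, time
   locale.setlocale(locale.LC_TIME, '')       # use the user's default locale
   fmt = locale.nl_langinfo(locale.D_T_FMT)   # locale-specific date/time format
   print time.strftime(fmt)
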
.. data:: DAY_1 ... DAY_7
@@ -243,24 +245,24 @@ The :mod:`locale` module defines the following exception and functions:
then-emperor's reign.
Normally it should not be necessary to use this value directly. Specifying
- the ``E`` modifier in their format strings causes the :func:`strftime`
+ the ``E`` modifier in their format strings causes the :func:`time.strftime`
function to use this information. The format of the returned string is not
specified, and therefore you should not assume knowledge of it on different
systems.
.. data:: ERA_D_T_FMT
- Get a format string for :func:`strftime` to represent date and time in a
+ Get a format string for :func:`time.strftime` to represent date and time in a
locale-specific era-based way.
.. data:: ERA_D_FMT
- Get a format string for :func:`strftime` to represent a date in a
+ Get a format string for :func:`time.strftime` to represent a date in a
locale-specific era-based way.
.. data:: ERA_T_FMT
- Get a format string for :func:`strftime` to represent a time in a
+ Get a format string for :func:`time.strftime` to represent a time in a
locale-specific era-based way.
.. data:: ALT_DIGITS
diff --git a/Doc/library/logging.config.rst b/Doc/library/logging.config.rst
index 8b39be4..c618aa8 100644
--- a/Doc/library/logging.config.rst
+++ b/Doc/library/logging.config.rst
@@ -17,6 +17,10 @@
* :ref:`Advanced Tutorial <logging-advanced-tutorial>`
* :ref:`Logging Cookbook <logging-cookbook>`
+**Source code:** :source:`Lib/logging/config.py`
+
+--------------
+
This section describes the API for configuring the logging module.
.. _logging-config-api:
@@ -77,8 +81,9 @@ in :mod:`logging` itself) and defining handlers which are declared either in
.. function:: fileConfig(fname, defaults=None, disable_existing_loggers=True)
Reads the logging configuration from a :mod:`configparser`\-format file
- named *fname*. This function can be called several times from an
- application, allowing an end user to select from various pre-canned
+ named *fname*. The format of the file should be as described in
+ :ref:`logging-config-fileformat`. This function can be called several times
+ from an application, allowing an end user to select from various pre-canned
configurations (if the developer provides a mechanism to present the choices
and load the chosen configuration).
@@ -104,14 +109,30 @@ in :mod:`logging` itself) and defining handlers which are declared either in
configurations. If no port is specified, the module's default
:const:`DEFAULT_LOGGING_CONFIG_PORT` is used. Logging configurations will be
sent as a file suitable for processing by :func:`fileConfig`. Returns a
- :class:`Thread` instance on which you can call :meth:`start` to start the
- server, and which you can :meth:`join` when appropriate. To stop the server,
+ :class:`~threading.Thread` instance on which you can call
+ :meth:`~threading.Thread.start` to start the server, and which you can
+ :meth:`~threading.Thread.join` when appropriate. To stop the server,
call :func:`stopListening`.
To send a configuration to the socket, read in the configuration file and
send it to the socket as a string of bytes preceded by a four-byte length
string packed in binary using ``struct.pack('>L', n)``.
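
   For illustration, a minimal client sketch (the configuration file name is
   hypothetical)::

      import socket, struct
      import logging.config

      with open('logging.conf', 'rb') as f:
          data = f.read()
      sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      sock.connect(('localhost', logging.config.DEFAULT_LOGGING_CONFIG_PORT))
      sock.sendall(struct.pack('>L', len(data)) + data)
      sock.close()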
+ .. note::
+
+ Because portions of the configuration are passed through
+ :func:`eval`, use of this function may open its users to a security risk.
+ While the function only binds to a socket on ``localhost``, and so does
+ not accept connections from remote machines, there are scenarios where
+ untrusted code could be run under the account of the process which calls
+ :func:`listen`. Specifically, if the process calling :func:`listen` runs
+ on a multi-user machine where users cannot trust each other, then a
+ malicious user could arrange to run essentially arbitrary code in a
+ victim user's process, simply by connecting to the victim's
+ :func:`listen` socket and sending a configuration which runs whatever
+ code the attacker wants to have executed in the victim's process. This is
+ especially easy to do if the default port is used, but not hard even if a
+ different port is used.
.. function:: stopListening()
@@ -156,11 +177,11 @@ otherwise, the context is used to determine what to instantiate.
* *formatters* - the corresponding value will be a dict in which each
key is a formatter id and each value is a dict describing how to
- configure the corresponding Formatter instance.
+ configure the corresponding :class:`~logging.Formatter` instance.
The configuring dict is searched for keys ``format`` and ``datefmt``
(with defaults of ``None``) and these are used to construct a
- :class:`logging.Formatter` instance.
+ :class:`~logging.Formatter` instance.
* *filters* - the corresponding value will be a dict in which each key
is a filter id and each value is a dict describing how to configure
@@ -698,8 +719,17 @@ format string, with a comma separator. An example time in ISO8601 format is
The ``class`` entry is optional. It indicates the name of the formatter's class
(as a dotted module and class name.) This option is useful for instantiating a
-:class:`Formatter` subclass. Subclasses of :class:`Formatter` can present
-exception tracebacks in an expanded or condensed format.
+:class:`~logging.Formatter` subclass. Subclasses of
+:class:`~logging.Formatter` can present exception tracebacks in an expanded or
+condensed format.
+
+.. note::
+
+ Due to the use of :func:`eval` as described above, there are
+ potential security risks which result from using :func:`listen` to send
+ and receive configurations via sockets. The risks are limited to where
+ multiple users with no mutual trust run code on the same machine; see the
+ :func:`listen` documentation for more information.
.. seealso::
diff --git a/Doc/library/logging.handlers.rst b/Doc/library/logging.handlers.rst
index c944454..d0f9be8 100644
--- a/Doc/library/logging.handlers.rst
+++ b/Doc/library/logging.handlers.rst
@@ -17,6 +17,10 @@
* :ref:`Advanced Tutorial <logging-advanced-tutorial>`
* :ref:`Logging Cookbook <logging-cookbook>`
+**Source code:** :source:`Lib/logging/handlers.py`
+
+--------------
+
.. currentmodule:: logging
The following useful handlers are provided in the package. Note that three of
@@ -53,8 +57,8 @@ and :meth:`flush` methods).
.. method:: flush()
Flushes the stream by calling its :meth:`flush` method. Note that the
- :meth:`close` method is inherited from :class:`Handler` and so does
- no output, so an explicit :meth:`flush` call may be needed at times.
+ :meth:`close` method is inherited from :class:`~logging.Handler` and so
+ does no output, so an explicit :meth:`flush` call may be needed at times.
.. _file-handler:
@@ -142,8 +146,8 @@ new stream.
This handler is not appropriate for use under Windows, because under Windows
open log files cannot be moved or renamed - logging opens the files with
exclusive locks - and so there is no need for such a handler. Furthermore,
-*ST_INO* is not supported under Windows; :func:`stat` always returns zero for
-this value.
+*ST_INO* is not supported under Windows; :func:`~os.stat` always returns zero
+for this value.
.. class:: WatchedFileHandler(filename[,mode[, encoding[, delay]]])
@@ -236,11 +240,15 @@ timed intervals.
+----------------+-----------------------+
| ``'D'`` | Days |
+----------------+-----------------------+
- | ``'W'`` | Week day (0=Monday) |
+ | ``'W0'-'W6'`` | Weekday (0=Monday) |
+----------------+-----------------------+
| ``'midnight'`` | Roll over at midnight |
+----------------+-----------------------+
+ When using weekday-based rotation, specify 'W0' for Monday, 'W1' for
+ Tuesday, and so on up to 'W6' for Sunday. In this case, the value passed for
+ *interval* isn't used.
+
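   As a rough sketch (the log file name is hypothetical), rolling the log over
   every Monday and keeping four old files::

      import logging.handlers

      handler = logging.handlers.TimedRotatingFileHandler(
          'app.log', when='W0', backupCount=4)
      logging.getLogger('app').addHandler(handler)
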
The system will save old log files by appending extensions to the filename.
The extensions are date-and-time based, using the strftime format
``%Y-%m-%d_%H-%M-%S`` or a leading portion thereof, depending on the
@@ -301,7 +309,8 @@ sends logging output to a network socket. The base class uses a TCP socket.
binary format. If there is an error with the socket, silently drops the
packet. If the connection was previously lost, re-establishes the
connection. To unpickle the record at the receiving end into a
- :class:`LogRecord`, use the :func:`makeLogRecord` function.
+ :class:`~logging.LogRecord`, use the :func:`~logging.makeLogRecord`
+ function.
.. method:: handleError()
@@ -379,7 +388,8 @@ over UDP sockets.
Pickles the record's attribute dictionary and writes it to the socket in
binary format. If there is an error with the socket, silently drops the
packet. To unpickle the record at the receiving end into a
- :class:`LogRecord`, use the :func:`makeLogRecord` function.
+ :class:`~logging.LogRecord`, use the :func:`~logging.makeLogRecord`
+ function.
.. method:: makeSocket()
@@ -650,7 +660,7 @@ event of a certain severity or greater is seen.
:class:`BufferingHandler`, which is an abstract class. This buffers logging
records in memory. Whenever each record is added to the buffer, a check is made
by calling :meth:`shouldFlush` to see if the buffer should be flushed. If it
-should, then :meth:`flush` is expected to do the needful.
+should, then :meth:`flush` is expected to do the flushing.
.. class:: BufferingHandler(capacity)
@@ -698,9 +708,6 @@ should, then :meth:`flush` is expected to do the needful.
.. method:: setTarget(target)
- .. versionchanged:: 2.6
- *credentials* was added.
-
Sets the target handler for this handler.
@@ -722,15 +729,29 @@ supports sending logging messages to a Web server, using either ``GET`` or
.. class:: HTTPHandler(host, url, method='GET')
- Returns a new instance of the :class:`HTTPHandler` class. The *host* can be
+ Returns a new instance of the :class:`HTTPHandler` class. The ``host`` can be
of the form ``host:port``, should you need to use a specific port number.
- If no *method* is specified, ``GET`` is used.
+ .. method:: mapLogRecord(record)
+
+ Provides a dictionary, based on ``record``, which is to be URL-encoded
+ and sent to the web server. The default implementation just returns
+ ``record.__dict__``. This method can be overridden if, for example, only a
+ subset of the :class:`~logging.LogRecord` attributes is to be sent to the
+ web server, or
+ if more specific customization of what's sent to the server is required.
.. method:: emit(record)
- Sends the record to the Web server as a percent-encoded dictionary.
+ Sends the record to the Web server as a URL-encoded dictionary. The
+ :meth:`mapLogRecord` method is used to convert the record to the
+ dictionary to be sent.
+ .. note:: Since preparing a record for sending it to a Web server is not
+ the same as a generic formatting operation, using :meth:`setFormatter`
+ to specify a :class:`Formatter` for an :class:`HTTPHandler` has no effect.
+ Instead of calling :meth:`format`, this handler calls :meth:`mapLogRecord`
+ and then :func:`urllib.urlencode` to encode the dictionary in a form
+ suitable for sending to a Web server.
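
   A minimal sketch of overriding :meth:`mapLogRecord` to send only a few
   record attributes (the subclass name and URL are hypothetical)::

      import logging.handlers

      class TrimmedHTTPHandler(logging.handlers.HTTPHandler):
          def mapLogRecord(self, record):
              # Start from the default mapping and keep a small subset.
              full = logging.handlers.HTTPHandler.mapLogRecord(self, record)
              return dict((k, full[k]) for k in ('name', 'levelname', 'msg'))

      handler = TrimmedHTTPHandler('localhost:8080', '/log', method='POST')
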
.. seealso::
diff --git a/Doc/library/logging.rst b/Doc/library/logging.rst
index ea69b9e..0c50b2f 100644
--- a/Doc/library/logging.rst
+++ b/Doc/library/logging.rst
@@ -20,6 +20,9 @@
* :ref:`Advanced Tutorial <logging-advanced-tutorial>`
* :ref:`Logging Cookbook <logging-cookbook>`
+**Source code:** :source:`Lib/logging/__init__.py`
+
+--------------
.. versionadded:: 2.3
@@ -51,24 +54,46 @@ listed below.
Logger Objects
--------------
-Loggers have the following attributes and methods. Note that Loggers are never
+Loggers have the following attributes and methods. Note that Loggers are never
instantiated directly, but always through the module-level function
-``logging.getLogger(name)``.
+``logging.getLogger(name)``. Multiple calls to :func:`getLogger` with the same
+name will always return a reference to the same Logger object.
+
+The ``name`` is potentially a period-separated hierarchical value, like
+``foo.bar.baz`` (though it could also be just plain ``foo``, for example).
+Loggers that are further down in the hierarchical list are children of loggers
+higher up in the list. For example, given a logger with a name of ``foo``,
+loggers with names of ``foo.bar``, ``foo.bar.baz``, and ``foo.bam`` are all
+descendants of ``foo``. The logger name hierarchy is analogous to the Python
+package hierarchy, and identical to it if you organise your loggers on a
+per-module basis using the recommended construction
+``logging.getLogger(__name__)``. That's because in a module, ``__name__``
+is the module's name in the Python package namespace.
+
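For illustration, a small sketch of the naming behaviour described above (the
logger names are arbitrary)::

   import logging

   foo = logging.getLogger('foo')
   bar = logging.getLogger('foo.bar')     # a descendant of the 'foo' logger

   # Repeated calls with the same name return the same object.
   assert logging.getLogger('foo') is foo
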
.. class:: Logger
.. attribute:: Logger.propagate
- If this evaluates to true, logging messages are passed by this logger and by
- its child loggers to the handlers of higher level (ancestor) loggers.
- Messages are passed directly to the ancestor loggers' handlers - neither the
- level nor filters of the ancestor loggers in question are considered.
+ If this evaluates to true, events logged to this logger will be passed to the
+ handlers of higher level (ancestor) loggers, in addition to any handlers
+ attached to this logger. Messages are passed directly to the ancestor
+ loggers' handlers - neither the level nor filters of the ancestor loggers in
+ question are considered.
If this evaluates to false, logging messages are not passed to the handlers
of ancestor loggers.
The constructor sets this attribute to ``True``.
+ .. note:: If you attach a handler to a logger *and* one or more of its
+ ancestors, it may emit the same record multiple times. In general, you
+ should not need to attach a handler to more than one logger - if you just
+ attach it to the appropriate logger which is highest in the logger
+ hierarchy, then it will see all events logged by all descendant loggers,
+ provided that their propagate setting is left set to ``True``. A common
+ scenario is to attach handlers only to the root logger, and to let
+ propagation take care of the rest.
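The common scenario from the note might look like this in practice (logger names are illustrative)::

   import logging

   root = logging.getLogger()                 # the root logger
   root.addHandler(logging.StreamHandler())
   root.setLevel(logging.INFO)

   # No handler is attached to 'myapp.db'; the event propagates up to the
   # root logger's handler and is emitted exactly once.
   logging.getLogger('myapp.db').info('connected')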
.. method:: Logger.setLevel(lvl)
@@ -89,6 +114,8 @@ instantiated directly, but always through the module-level function
If the root is reached, and it has a level of NOTSET, then all messages will be
processed. Otherwise, the root's level will be used as the effective level.
+ See :ref:`levels` for a list of levels.
+
.. method:: Logger.isEnabledFor(lvl)
@@ -138,7 +165,7 @@ instantiated directly, but always through the module-level function
FORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s'
logging.basicConfig(format=FORMAT)
- d = { 'clientip' : '192.168.0.1', 'user' : 'fbloggs' }
+ d = {'clientip': '192.168.0.1', 'user': 'fbloggs'}
logger = logging.getLogger('tcpserver')
logger.warning('Protocol problem: %s', 'connection reset', extra=d)
@@ -195,7 +222,7 @@ instantiated directly, but always through the module-level function
interpreted as for :meth:`debug`.
-.. method:: Logger.exception(msg, *args)
+.. method:: Logger.exception(msg, *args, **kwargs)
Logs a message with level :const:`ERROR` on this logger. The arguments are
interpreted as for :meth:`debug`. Exception info is added to the logging
@@ -215,7 +242,10 @@ instantiated directly, but always through the module-level function
.. method:: Logger.filter(record)
Applies this logger's filters to the record and returns a true value if the
- record is to be processed.
+ record is to be processed. The filters are consulted in turn, until one of
+ them returns a false value. If none of them return a false value, the record
+ will be processed (passed to handlers). If one returns a false value, no
+ further processing of the record occurs.
.. method:: Logger.addHandler(hdlr)
@@ -253,6 +283,35 @@ instantiated directly, but always through the module-level function
.. versionchanged:: 2.5
*func* and *extra* were added.
+
+.. _levels:
+
+Logging Levels
+--------------
+
+The numeric values of logging levels are given in the following table. These are
+primarily of interest if you want to define your own levels, and need them to
+have specific values relative to the predefined levels. If you define a level
+with the same numeric value, it overwrites the predefined value; the predefined
+name is lost.
+
++--------------+---------------+
+| Level | Numeric value |
++==============+===============+
+| ``CRITICAL`` | 50 |
++--------------+---------------+
+| ``ERROR`` | 40 |
++--------------+---------------+
+| ``WARNING`` | 30 |
++--------------+---------------+
+| ``INFO`` | 20 |
++--------------+---------------+
+| ``DEBUG`` | 10 |
++--------------+---------------+
+| ``NOTSET`` | 0 |
++--------------+---------------+
+
+
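As a rough sketch of defining a level relative to the values in this table (the name ``TRACE`` and the value 5 are arbitrary choices, not predefined levels)::

   import logging

   TRACE = 5                                  # below DEBUG (10)
   logging.addLevelName(TRACE, 'TRACE')

   logging.basicConfig(level=TRACE)
   logging.getLogger('example').log(TRACE, 'very detailed message')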
.. _handler:
Handler Objects
@@ -293,6 +352,7 @@ subclasses. However, the :meth:`__init__` method in subclasses needs to call
severe than *lvl* will be ignored. When a handler is created, the level is set
to :const:`NOTSET` (which causes all messages to be processed).
+ See :ref:`levels` for a list of levels.
.. method:: Handler.setFormatter(form)
@@ -312,7 +372,10 @@ subclasses. However, the :meth:`__init__` method in subclasses needs to call
.. method:: Handler.filter(record)
Applies this handler's filters to the record and returns a true value if the
- record is to be processed.
+ record is to be processed. The filters are consulted in turn, until one of
+ them returns a false value. If none of them return a false value, the record
+ will be emitted. If one returns a false value, the handler will not emit the
+ record.
.. method:: Handler.flush()
@@ -465,12 +528,12 @@ empty string, all events are passed.
yes. If deemed appropriate, the record may be modified in-place by this
method.
-Note that filters attached to handlers are consulted whenever an event is
+Note that filters attached to handlers are consulted before an event is
emitted by the handler, whereas filters attached to loggers are consulted
-whenever an event is logged to the handler (using :meth:`debug`, :meth:`info`,
-etc.) This means that events which have been generated by descendant loggers
-will not be filtered by a logger's filter setting, unless the filter has also
-been applied to those descendant loggers.
+whenever an event is logged (using :meth:`debug`, :meth:`info`,
+etc.), before sending an event to handlers. This means that events which have
+been generated by descendant loggers will not be filtered by a logger's filter
+setting, unless the filter has also been applied to those descendant loggers.
You don't actually need to subclass ``Filter``: you can pass any instance
which has a ``filter`` method with the same semantics.
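A small sketch of such a duck-typed filter (the prefix check is only an example of a policy)::

   import logging

   class AllowPrefix(object):
       # Any object with a filter(record) method will do; subclassing
       # logging.Filter is not required.
       def __init__(self, prefix):
           self.prefix = prefix

       def filter(self, record):
           return record.name.startswith(self.prefix)

   handler = logging.StreamHandler()
   handler.addFilter(AllowPrefix('myapp.'))   # drop records from other loggers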
@@ -504,7 +567,9 @@ wire).
record.
:param name: The name of the logger used to log the event represented by
- this LogRecord.
+ this LogRecord. Note that this name will always have this
+ value, even though it may be emitted by a handler attached to
+ a different (ancestor) logger.
:param level: The numeric level of the logging event (one of DEBUG, INFO etc.)
Note that this is converted to *two* attributes of the LogRecord:
``levelno`` for the numeric value and ``levelname`` for the
@@ -617,13 +682,16 @@ format string.
.. versionchanged:: 2.5
*funcName* was added.
+.. versionchanged:: 2.6
+ *processName* was added.
+
.. _logger-adapter:
LoggerAdapter Objects
---------------------
:class:`LoggerAdapter` instances are used to conveniently pass contextual
-information into logging calls. For a usage example , see the section on
+information into logging calls. For a usage example, see the section on
:ref:`adding contextual information to your logging output <context-info>`.
.. versionadded:: 2.6
@@ -643,16 +711,15 @@ information into logging calls. For a usage example , see the section on
(possibly modified) versions of the arguments passed in.
In addition to the above, :class:`LoggerAdapter` supports the following
-methods of :class:`Logger`, i.e. :meth:`debug`, :meth:`info`, :meth:`warning`,
-:meth:`error`, :meth:`exception`, :meth:`critical`, :meth:`log`,
-:meth:`isEnabledFor`, :meth:`getEffectiveLevel`, :meth:`setLevel`,
-:meth:`hasHandlers`. These methods have the same signatures as their
-counterparts in :class:`Logger`, so you can use the two types of instances
-interchangeably.
+methods of :class:`Logger`: :meth:`~Logger.debug`, :meth:`~Logger.info`,
+:meth:`~Logger.warning`, :meth:`~Logger.error`, :meth:`~Logger.exception`,
+:meth:`~Logger.critical`, :meth:`~Logger.log` and :meth:`~Logger.isEnabledFor`.
+These methods have the same signatures as their counterparts in :class:`Logger`,
+so you can use the two types of instances interchangeably for these calls.
.. versionchanged:: 2.7
- The :meth:`isEnabledFor` method was added to :class:`LoggerAdapter`. This
- method delegates to the underlying logger.
+ The :meth:`~Logger.isEnabledFor` method was added to :class:`LoggerAdapter`.
+ This method delegates to the underlying logger.
Thread Safety
@@ -692,8 +759,8 @@ functions.
Return either the standard :class:`Logger` class, or the last class passed to
:func:`setLoggerClass`. This function may be called from within a new class
- definition, to ensure that installing a customised :class:`Logger` class will
- not undo customisations already applied by other code. For example::
+ definition, to ensure that installing a customized :class:`Logger` class will
+ not undo customizations already applied by other code. For example::
class MyLogger(logging.getLoggerClass()):
# ... override behaviour here
@@ -773,7 +840,7 @@ functions.
are interpreted as for :func:`debug`.
-.. function:: exception(msg[, *args])
+.. function:: exception(msg[, *args[, **kwargs]])
Logs a message with level :const:`ERROR` on the root logger. The arguments are
interpreted as for :func:`debug`. Exception info is added to the logging
@@ -785,14 +852,15 @@ functions.
Logs a message with level *level* on the root logger. The other arguments are
interpreted as for :func:`debug`.
- PLEASE NOTE: The above module-level functions which delegate to the root
- logger should *not* be used in threads, in versions of Python earlier than
- 2.7.1 and 3.2, unless at least one handler has been added to the root
- logger *before* the threads are started. These convenience functions call
- :func:`basicConfig` to ensure that at least one handler is available; in
- earlier versions of Python, this can (under rare circumstances) lead to
- handlers being added multiple times to the root logger, which can in turn
- lead to multiple messages for the same event.
+ .. note:: The above module-level convenience functions, which delegate to the
+ root logger, call :func:`basicConfig` to ensure that at least one handler
+ is available. Because of this, they should *not* be used in threads,
+ in versions of Python earlier than 2.7.1 and 3.2, unless at least one
+ handler has been added to the root logger *before* the threads are
+ started. In earlier versions of Python, due to a thread safety shortcoming
+ in :func:`basicConfig`, this can (under rare circumstances) lead to
+ handlers being added multiple times to the root logger, which can in turn
+ lead to multiple messages for the same event.
.. function:: disable(lvl)
@@ -802,7 +870,10 @@ functions.
effect is to disable all logging calls of severity *lvl* and below, so that
if you call it with a value of INFO, then all INFO and DEBUG events would be
discarded, whereas those of severity WARNING and above would be processed
- according to the logger's effective level.
+ according to the logger's effective level. If
+ ``logging.disable(logging.NOTSET)`` is called, it effectively removes this
+ overriding level, so that logging output again depends on the effective
+ levels of individual loggers.
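Sketching that behaviour (the levels chosen are arbitrary)::

   import logging

   logging.basicConfig(level=logging.DEBUG)
   logging.disable(logging.INFO)       # INFO and DEBUG are now discarded
   logging.warning('still shown')
   logging.info('suppressed')

   logging.disable(logging.NOTSET)     # remove the overriding level
   logging.info('shown again')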
.. function:: addLevelName(lvl, levelName)
@@ -814,8 +885,8 @@ functions.
registered using this function, levels should be positive integers and they
should increase in increasing order of severity.
- NOTE: If you are thinking of defining your own levels, please see the section
- on :ref:`custom-levels`.
+ .. note:: If you are thinking of defining your own levels, please see the
+ section on :ref:`custom-levels`.
.. function:: getLevelName(lvl)
@@ -850,15 +921,17 @@ functions.
.. versionchanged:: 2.4
Formerly, :func:`basicConfig` did not take any keyword arguments.
- PLEASE NOTE: This function should be called from the main thread
- before other threads are started. In versions of Python prior to
- 2.7.1 and 3.2, if this function is called from multiple threads,
- it is possible (in rare circumstances) that a handler will be added
- to the root logger more than once, leading to unexpected results
- such as messages being duplicated in the log.
+ .. note:: This function should be called from the main thread before other
+ threads are started. In versions of Python prior to 2.7.1 and 3.2, if
+ this function is called from multiple threads, it is possible (in rare
+ circumstances) that a handler will be added to the root logger more than
+ once, leading to unexpected results such as messages being duplicated in
+ the log.
The following keyword arguments are supported.
+ .. tabularcolumns:: |l|L|
+
+--------------+---------------------------------------------+
| Format | Description |
+==============+=============================================+
@@ -915,12 +988,11 @@ with the :mod:`warnings` module.
If *capture* is ``True``, warnings issued by the :mod:`warnings` module will
be redirected to the logging system. Specifically, a warning will be
formatted using :func:`warnings.formatwarning` and the resulting string
- logged to a logger named 'py.warnings' with a severity of `WARNING`.
+ logged to a logger named ``'py.warnings'`` with a severity of :const:`WARNING`.
If *capture* is ``False``, the redirection of warnings to the logging system
will stop, and warnings will be redirected to their original destinations
- (i.e. those in effect before `captureWarnings(True)` was called).
-
+ (i.e. those in effect before ``captureWarnings(True)`` was called).
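A short sketch of the redirection described above::

   import logging
   import warnings

   logging.basicConfig()
   logging.captureWarnings(True)
   warnings.warn('deprecated setting')   # logged via the 'py.warnings' logger

   logging.captureWarnings(False)        # warnings revert to their old destination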
.. seealso::
diff --git a/Doc/library/mac.rst b/Doc/library/mac.rst
index 7ac1ca2..d66931c 100644
--- a/Doc/library/mac.rst
+++ b/Doc/library/mac.rst
@@ -12,7 +12,10 @@ Mac-specific Python programming.
.. note::
- These modules are deprecated and have been removed in Python 3.x.
+ Most of the OS X APIs that these modules use are deprecated or removed
+ in recent versions of OS X. Many are not available when Python is
+ executing in 64-bit mode. These modules have been removed in
+ Python 3. You should avoid using them in Python 2.
.. toctree::
diff --git a/Doc/library/macosa.rst b/Doc/library/macosa.rst
index 54e62f2..f6b3b48 100644
--- a/Doc/library/macosa.rst
+++ b/Doc/library/macosa.rst
@@ -9,8 +9,7 @@ This chapter describes the current implementation of the Open Scripting
Architecture (OSA, also commonly referred to as AppleScript) for Python,
allowing you to control scriptable applications from your Python program,
and with a fairly pythonic interface. Development on this set of modules has
-stopped. For more up-to-date implementation of AppleScript support for Python,
-see the third-party py-appscript project: <http://pypi.python.org/pypi/appscript/>.
+stopped.
For a description of the various components of AppleScript and OSA, and to get
an understanding of the architecture and terminology, you should read Apple's
diff --git a/Doc/library/macostools.rst b/Doc/library/macostools.rst
index f2a2643..7924669 100644
--- a/Doc/library/macostools.rst
+++ b/Doc/library/macostools.rst
@@ -15,7 +15,7 @@ files, so it should not be used on UFS partitions.
.. note::
- This module has been removed in Python 3.0.
+ This module has been removed in Python 3.
diff --git a/Doc/library/mailbox.rst b/Doc/library/mailbox.rst
index 7e6f44a..94fde14 100644
--- a/Doc/library/mailbox.rst
+++ b/Doc/library/mailbox.rst
@@ -11,8 +11,9 @@
This module defines two classes, :class:`Mailbox` and :class:`Message`, for
accessing and manipulating on-disk mailboxes and the messages they contain.
:class:`Mailbox` offers a dictionary-like mapping from keys to messages.
-:class:`Message` extends the :mod:`email.Message` module's :class:`Message`
-class with format-specific state and behavior. Supported mailbox formats are
+:class:`Message` extends the :mod:`email.message` module's
+:class:`~email.message.Message` class with format-specific state and behavior.
+Supported mailbox formats are
Maildir, mbox, MH, Babyl, and MMDF.
@@ -83,7 +84,7 @@ Maildir, mbox, MH, Babyl, and MMDF.
it.
Parameter *message* may be a :class:`Message` instance, an
- :class:`email.Message.Message` instance, a string, or a file-like object
+ :class:`email.message.Message` instance, a string, or a file-like object
(which should be open in text mode). If *message* is an instance of the
appropriate format-specific :class:`Message` subclass (e.g., if it's an
:class:`mboxMessage` instance and this is an :class:`mbox` instance), its
@@ -110,7 +111,7 @@ Maildir, mbox, MH, Babyl, and MMDF.
:exc:`KeyError` exception if no message already corresponds to *key*.
As with :meth:`add`, parameter *message* may be a :class:`Message`
- instance, an :class:`email.Message.Message` instance, a string, or a
+ instance, an :class:`email.message.Message` instance, a string, or a
file-like object (which should be open in text mode). If *message* is an
instance of the appropriate format-specific :class:`Message` subclass
(e.g., if it's an :class:`mboxMessage` instance and this is an
@@ -154,7 +155,7 @@ Maildir, mbox, MH, Babyl, and MMDF.
when the :class:`Mailbox` instance was initialized.
- .. method:: get(key[, default=None])
+ .. method:: get(key, default=None)
__getitem__(key)
Return a representation of the message corresponding to *key*. If no such
@@ -278,7 +279,7 @@ Maildir, mbox, MH, Babyl, and MMDF.
^^^^^^^^^^^^^^^^
-.. class:: Maildir(dirname[, factory=rfc822.Message[, create=True]])
+.. class:: Maildir(dirname, factory=rfc822.Message, create=True)
A subclass of :class:`Mailbox` for mailboxes in Maildir format. Parameter
*factory* is a callable object that accepts a file-like message representation
@@ -423,7 +424,7 @@ Maildir, mbox, MH, Babyl, and MMDF.
^^^^^^^^^^^^^
-.. class:: mbox(path[, factory=None[, create=True]])
+.. class:: mbox(path, factory=None, create=True)
A subclass of :class:`Mailbox` for mailboxes in mbox format. Parameter *factory*
is a callable object that accepts a file-like message representation (which
@@ -483,7 +484,7 @@ Maildir, mbox, MH, Babyl, and MMDF.
^^^^^^^^^^^
-.. class:: MH(path[, factory=None[, create=True]])
+.. class:: MH(path, factory=None, create=True)
A subclass of :class:`Mailbox` for mailboxes in MH format. Parameter *factory*
is a callable object that accepts a file-like message representation (which
@@ -613,7 +614,7 @@ Maildir, mbox, MH, Babyl, and MMDF.
^^^^^^^^^^^^^^
-.. class:: Babyl(path[, factory=None[, create=True]])
+.. class:: Babyl(path, factory=None, create=True)
A subclass of :class:`Mailbox` for mailboxes in Babyl format. Parameter
*factory* is a callable object that accepts a file-like message representation
@@ -660,7 +661,7 @@ Maildir, mbox, MH, Babyl, and MMDF.
In Babyl mailboxes, the headers of a message are not stored contiguously
with the body of the message. To generate a file-like representation, the
- headers and body are copied together into a :class:`StringIO` instance
+ headers and body are copied together into a :class:`~StringIO.StringIO` instance
(from the :mod:`StringIO` module), which has an API identical to that of a
file. As a result, the file-like object is truly independent of the
underlying mailbox but does not save memory compared to a string
@@ -689,7 +690,7 @@ Maildir, mbox, MH, Babyl, and MMDF.
^^^^^^^^^^^^^
-.. class:: MMDF(path[, factory=None[, create=True]])
+.. class:: MMDF(path, factory=None, create=True)
A subclass of :class:`Mailbox` for mailboxes in MMDF format. Parameter *factory*
is a callable object that accepts a file-like message representation (which
@@ -743,11 +744,12 @@ Maildir, mbox, MH, Babyl, and MMDF.
.. class:: Message([message])
- A subclass of the :mod:`email.Message` module's :class:`Message`. Subclasses of
- :class:`mailbox.Message` add mailbox-format-specific state and behavior.
+ A subclass of the :mod:`email.message` module's
+ :class:`~email.message.Message`. Subclasses of :class:`mailbox.Message` add
+ mailbox-format-specific state and behavior.
If *message* is omitted, the new instance is created in a default, empty state.
- If *message* is an :class:`email.Message.Message` instance, its contents are
+ If *message* is an :class:`email.message.Message` instance, its contents are
copied; furthermore, any format-specific information is converted insofar as
possible if *message* is a :class:`Message` instance. If *message* is a string
or a file, it should contain an :rfc:`2822`\ -compliant message, which is read
@@ -987,12 +989,12 @@ When a :class:`MaildirMessage` instance is created based upon a
are excluded.
- .. method:: set_from(from_[, time_=None])
+ .. method:: set_from(from_, time_=None)
Set the "From " line to *from_*, which should be specified without a
leading "From " or trailing newline. For convenience, *time_* may be
specified and will be formatted appropriately and appended to *from_*. If
- *time_* is specified, it should be a :class:`struct_time` instance, a
+ *time_* is specified, it should be a :class:`time.struct_time` instance, a
tuple suitable for passing to :meth:`time.strftime`, or ``True`` (to use
:meth:`time.gmtime`).
@@ -1251,7 +1253,7 @@ When an :class:`MHMessage` instance is created based upon a
Set the message's visible headers to be the same as the headers in
*message*. Parameter *visible* should be a :class:`Message` instance, an
- :class:`email.Message.Message` instance, a string, or a file-like object
+ :class:`email.message.Message` instance, a string, or a file-like object
(which should be open in text mode).
@@ -1358,12 +1360,12 @@ When a :class:`BabylMessage` instance is created based upon an
are excluded.
- .. method:: set_from(from_[, time_=None])
+ .. method:: set_from(from_, time_=None)
Set the "From " line to *from_*, which should be specified without a
leading "From " or trailing newline. For convenience, *time_* may be
specified and will be formatted appropriately and appended to *from_*. If
- *time_* is specified, it should be a :class:`struct_time` instance, a
+ *time_* is specified, it should be a :class:`time.struct_time` instance, a
tuple suitable for passing to :meth:`time.strftime`, or ``True`` (to use
:meth:`time.gmtime`).
@@ -1513,7 +1515,7 @@ Older versions of the :mod:`mailbox` module do not support modification of
mailboxes, such as adding or removing message, and do not provide classes to
represent format-specific message properties. For backward compatibility, the
older mailbox classes are still available, but the newer classes should be used
-in preference to them. The old classes will be removed in Python 3.0.
+in preference to them. The old classes have been removed in Python 3.
Older mailbox objects support only iteration and provide a single public method:
@@ -1662,7 +1664,7 @@ programs, mail loss due to interruption of the program, or premature termination
due to malformed messages in the mailbox::
import mailbox
- import email.Errors
+ import email.errors
list_names = ('python-list', 'python-dev', 'python-bugs')
@@ -1672,7 +1674,7 @@ due to malformed messages in the mailbox::
for key in inbox.iterkeys():
try:
message = inbox[key]
- except email.Errors.MessageParseError:
+ except email.errors.MessageParseError:
continue # The message is malformed. Just leave it.
for name in list_names:
diff --git a/Doc/library/mailcap.rst b/Doc/library/mailcap.rst
index 5507211..b359509 100644
--- a/Doc/library/mailcap.rst
+++ b/Doc/library/mailcap.rst
@@ -71,6 +71,6 @@ An example usage::
>>> import mailcap
>>> d=mailcap.getcaps()
- >>> mailcap.findmatch(d, 'video/mpeg', filename='/tmp/tmp1223')
- ('xmpeg /tmp/tmp1223', {'view': 'xmpeg %s'})
+ >>> mailcap.findmatch(d, 'video/mpeg', filename='tmp1223')
+ ('xmpeg tmp1223', {'view': 'xmpeg %s'})
diff --git a/Doc/library/markup.rst b/Doc/library/markup.rst
index 8508a1f..0d05ef1 100644
--- a/Doc/library/markup.rst
+++ b/Doc/library/markup.rst
@@ -1,4 +1,3 @@
-
.. _markup:
**********************************
@@ -26,7 +25,8 @@ definition of the Python bindings for the DOM and SAX interfaces.
htmlparser.rst
sgmllib.rst
htmllib.rst
- pyexpat.rst
+ xml.rst
+ xml.etree.elementtree.rst
xml.dom.rst
xml.dom.minidom.rst
xml.dom.pulldom.rst
@@ -34,4 +34,4 @@ definition of the Python bindings for the DOM and SAX interfaces.
xml.sax.handler.rst
xml.sax.utils.rst
xml.sax.reader.rst
- xml.etree.elementtree.rst
+ pyexpat.rst
diff --git a/Doc/library/marshal.rst b/Doc/library/marshal.rst
index f463a7a..f4dfc1f 100644
--- a/Doc/library/marshal.rst
+++ b/Doc/library/marshal.rst
@@ -66,8 +66,9 @@ The module defines these functions:
.. function:: dump(value, file[, version])
Write the value on the open file. The value must be a supported type. The
- file must be an open file object such as ``sys.stdout`` or returned by
- :func:`open` or :func:`os.popen`. It must be opened in binary mode (``'wb'``
+ file must be an open file object such as ``sys.stdout`` or returned by
+ :func:`open` or :func:`os.popen`. It may not be a wrapper such as
+ TemporaryFile on Windows. It must be opened in binary mode (``'wb'``
or ``'w+b'``).
If the value has (or contains an object that has) an unsupported type, a
diff --git a/Doc/library/math.rst b/Doc/library/math.rst
index e5ffba0..562388e 100644
--- a/Doc/library/math.rst
+++ b/Doc/library/math.rst
@@ -5,6 +5,9 @@
.. module:: math
:synopsis: Mathematical functions (sin() etc.).
+.. testsetup::
+
+ from math import fsum
This module is always available. It provides access to the mathematical
functions defined by the C standard.
@@ -133,8 +136,9 @@ Number-theoretic and representation functions
.. function:: trunc(x)
- Return the :class:`Real` value *x* truncated to an :class:`Integral` (usually
- a long integer). Uses the ``__trunc__`` method.
+ Return the :class:`~numbers.Real` value *x* truncated to an
+ :class:`~numbers.Integral` (usually a long integer). Uses the
+ ``__trunc__`` method.
.. versionadded:: 2.6
@@ -209,6 +213,10 @@ Power and logarithmic functions
``x`` is negative, and ``y`` is not an integer then ``pow(x, y)``
is undefined, and raises :exc:`ValueError`.
+ Unlike the built-in ``**`` operator, :func:`math.pow` converts both
+ its arguments to type :class:`float`. Use ``**`` or the built-in
+ :func:`pow` function for computing exact integer powers.
+
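A quick illustration of the difference (interpreter output shown for Python 2.7)::

   >>> import math
   >>> math.pow(2, 100)          # both arguments converted to float
   1.2676506002282294e+30
   >>> 2 ** 100                  # exact integer power
   1267650600228229401496703205376L
   >>> pow(2, 100) == 2 ** 100
   True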
.. versionchanged:: 2.6
The outcome of ``1**nan`` and ``nan**0`` was undefined.
diff --git a/Doc/library/mhlib.rst b/Doc/library/mhlib.rst
index 2aab1dc..939bdc7 100644
--- a/Doc/library/mhlib.rst
+++ b/Doc/library/mhlib.rst
@@ -6,7 +6,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`mhlib` module has been removed in Python 3.0. Use the
+ The :mod:`mhlib` module has been removed in Python 3. Use the
:mod:`mailbox` instead.
.. sectionauthor:: Skip Montanaro <skip@pobox.com>
diff --git a/Doc/library/mimetypes.rst b/Doc/library/mimetypes.rst
index ccda1e9..8891e7a 100644
--- a/Doc/library/mimetypes.rst
+++ b/Doc/library/mimetypes.rst
@@ -85,6 +85,9 @@ behavior of the module.
:const:`knownfiles` takes precedence over those named before it. Calling
:func:`init` repeatedly is allowed.
+ Specifying an empty list for *files* will prevent the system defaults from
+ being applied: only the well-known values from a built-in list will be present.
+
.. versionchanged:: 2.7
Previously, Windows registry settings were ignored.
diff --git a/Doc/library/mimewriter.rst b/Doc/library/mimewriter.rst
index 2070ff6..a30caef 100644
--- a/Doc/library/mimewriter.rst
+++ b/Doc/library/mimewriter.rst
@@ -24,7 +24,7 @@ to rearrange their order.
Return a new instance of the :class:`MimeWriter` class. The only argument
passed, *fp*, is a file object to be used for writing. Note that a
- :class:`StringIO` object could also be used.
+ :class:`~StringIO.StringIO` object could also be used.
.. _mimewriter-objects:
diff --git a/Doc/library/mmap.rst b/Doc/library/mmap.rst
index 55861f9..ac1963f 100644
--- a/Doc/library/mmap.rst
+++ b/Doc/library/mmap.rst
@@ -114,19 +114,19 @@ memory but does not update the underlying file.
with open("hello.txt", "r+b") as f:
# memory-map the file, size 0 means whole file
- map = mmap.mmap(f.fileno(), 0)
+ mm = mmap.mmap(f.fileno(), 0)
# read content via standard file methods
- print map.readline() # prints "Hello Python!"
+ print mm.readline() # prints "Hello Python!"
# read content via slice notation
- print map[:5] # prints "Hello"
+ print mm[:5] # prints "Hello"
# update content using slice notation;
# note that new content must have same size
- map[6:] = " world!\n"
+ mm[6:] = " world!\n"
# ... and read again using standard file methods
- map.seek(0)
- print map.readline() # prints "Hello world!"
+ mm.seek(0)
+ print mm.readline() # prints "Hello world!"
# close the map
- map.close()
+ mm.close()
The next example demonstrates how to create an anonymous map and exchange
@@ -135,16 +135,16 @@ memory but does not update the underlying file.
import mmap
import os
- map = mmap.mmap(-1, 13)
- map.write("Hello world!")
+ mm = mmap.mmap(-1, 13)
+ mm.write("Hello world!")
pid = os.fork()
if pid == 0: # In a child process
- map.seek(0)
- print map.readline()
+ mm.seek(0)
+ print mm.readline()
- map.close()
+ mm.close()
Memory-mapped file objects support the following methods:
@@ -152,8 +152,9 @@ memory but does not update the underlying file.
.. method:: close()
- Close the file. Subsequent calls to other methods of the object will
- result in an exception being raised.
+ Closes the mmap. Subsequent calls to other methods of the object will
+ result in a ValueError exception being raised. This will not close
+ the open file.
.. method:: find(string[, start[, end]])
diff --git a/Doc/library/msilib.rst b/Doc/library/msilib.rst
index 59e9cf9..17d71ab 100644
--- a/Doc/library/msilib.rst
+++ b/Doc/library/msilib.rst
@@ -432,8 +432,9 @@ GUI classes
-----------
:mod:`msilib` provides several classes that wrap the GUI tables in an MSI
-database. However, no standard user interface is provided; use :mod:`bdist_msi`
-to create MSI files with a user-interface for installing Python packages.
+database. However, no standard user interface is provided; use
+:mod:`~distutils.command.bdist_msi` to create MSI files with a user-interface
+for installing Python packages.
.. class:: Control(dlg, name)
diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst
index 7d9aaf3..886b173 100644
--- a/Doc/library/multiprocessing.rst
+++ b/Doc/library/multiprocessing.rst
@@ -81,7 +81,8 @@ To show the individual process IDs involved, here is an expanded example::
def info(title):
print title
print 'module name:', __name__
- print 'parent process:', os.getppid()
+ if hasattr(os, 'getppid'): # only available on Unix
+ print 'parent process:', os.getppid()
print 'process id:', os.getpid()
def f(name):
@@ -107,7 +108,7 @@ processes:
**Queues**
- The :class:`Queue` class is a near clone of :class:`Queue.Queue`. For
+ The :class:`~multiprocessing.Queue` class is a near clone of :class:`Queue.Queue`. For
example::
from multiprocessing import Process, Queue
@@ -231,7 +232,7 @@ However, if you really do need to use some shared data then
A manager returned by :func:`Manager` will support types :class:`list`,
:class:`dict`, :class:`Namespace`, :class:`Lock`, :class:`RLock`,
:class:`Semaphore`, :class:`BoundedSemaphore`, :class:`Condition`,
- :class:`Event`, :class:`Queue`, :class:`Value` and :class:`Array`. For
+ :class:`Event`, :class:`~multiprocessing.Queue`, :class:`Value` and :class:`Array`. For
example, ::
from multiprocessing import Process, Manager
@@ -286,6 +287,9 @@ For example::
print result.get(timeout=1) # prints "100" unless your computer is *very* slow
print pool.map(f, range(10)) # prints "[0, 1, 4,..., 81]"
+Note that the methods of a pool should only ever be used by the
+process which created it.
+
Reference
---------
@@ -297,7 +301,7 @@ The :mod:`multiprocessing` package mostly replicates the API of the
:class:`Process` and exceptions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. class:: Process([group[, target[, name[, args[, kwargs]]]]])
+.. class:: Process(group=None, target=None, name=None, args=(), kwargs={})
Process objects represent activity that is run in a separate process. The
:class:`Process` class has equivalents of all the methods of
@@ -378,7 +382,7 @@ The :mod:`multiprocessing` package mostly replicates the API of the
Unix daemons or services, they are normal processes that will be
terminated (and not joined) if non-daemonic processes have exited.
- In addition to the :class:`Threading.Thread` API, :class:`Process` objects
+ In addition to the :class:`threading.Thread` API, :class:`Process` objects
also support the following attributes and methods:
.. attribute:: pid
@@ -397,7 +401,7 @@ The :mod:`multiprocessing` package mostly replicates the API of the
The process's authentication key (a byte string).
When :mod:`multiprocessing` is initialized the main process is assigned a
- random string using :func:`os.random`.
+ random string using :func:`os.urandom`.
When a :class:`Process` object is created, it will inherit the
authentication key of its parent process, although this may be changed by
@@ -422,9 +426,9 @@ The :mod:`multiprocessing` package mostly replicates the API of the
acquired a lock or semaphore etc. then terminating it is liable to
cause other processes to deadlock.
- Note that the :meth:`start`, :meth:`join`, :meth:`is_alive` and
- :attr:`exit_code` methods should only be called by the process that created
- the process object.
+ Note that the :meth:`start`, :meth:`join`, :meth:`is_alive`,
+ :meth:`terminate` and :attr:`exitcode` methods should only be called by
+ the process that created the process object.
Example usage of some of the methods of :class:`Process`:
@@ -464,9 +468,9 @@ primitives like locks.
For passing messages one can use :func:`Pipe` (for a connection between two
processes) or a queue (which allows multiple producers and consumers).
-The :class:`Queue`, :class:`multiprocessing.queues.SimpleQueue` and :class:`JoinableQueue` types are multi-producer,
+The :class:`~multiprocessing.Queue`, :class:`multiprocessing.queues.SimpleQueue` and :class:`JoinableQueue` types are multi-producer,
multi-consumer FIFO queues modelled on the :class:`Queue.Queue` class in the
-standard library. They differ in that :class:`Queue` lacks the
+standard library. They differ in that :class:`~multiprocessing.Queue` lacks the
:meth:`~Queue.Queue.task_done` and :meth:`~Queue.Queue.join` methods introduced
into Python 2.5's :class:`Queue.Queue` class.
@@ -485,18 +489,37 @@ Note that one can also create a shared queue by using a manager object -- see
the :mod:`multiprocessing` namespace so you need to import them from
:mod:`Queue`.
+.. note::
+
+ When an object is put on a queue, the object is pickled and a
+ background thread later flushes the pickled data to an underlying
+ pipe. This has some consequences which are a little surprising,
+ but should not cause any practical difficulties -- if they really
+ bother you then you can instead use a queue created with a
+ :ref:`manager <multiprocessing-managers>`.
+
+ (1) After putting an object on an empty queue there may be an
+ infinitesimal delay before the queue's :meth:`~Queue.empty`
+ method returns :const:`False` and :meth:`~Queue.get_nowait` can
+ return without raising :exc:`Queue.Empty`.
+
+ (2) If multiple processes are enqueuing objects, it is possible for
+ the objects to be received at the other end out-of-order.
+ However, objects enqueued by the same process will always be in
+ the expected order with respect to each other.
.. warning::
If a process is killed using :meth:`Process.terminate` or :func:`os.kill`
- while it is trying to use a :class:`Queue`, then the data in the queue is
+ while it is trying to use a :class:`~multiprocessing.Queue`, then the data in the queue is
likely to become corrupted. This may cause any other process to get an
exception when it tries to use the queue later on.
.. warning::
As mentioned above, if a child process has put items on a queue (and it has
- not used :meth:`JoinableQueue.cancel_join_thread`), then that process will
+ not used :meth:`JoinableQueue.cancel_join_thread
+ <multiprocessing.Queue.cancel_join_thread>`), then that process will
not terminate until all buffered items have been flushed to the pipe.
This means that if you try joining that process you may get a deadlock unless
@@ -531,7 +554,7 @@ For an example of the usage of queues for interprocess communication see
The usual :exc:`Queue.Empty` and :exc:`Queue.Full` exceptions from the
standard library's :mod:`Queue` module are raised to signal timeouts.
- :class:`Queue` implements all the methods of :class:`Queue.Queue` except for
+ :class:`~multiprocessing.Queue` implements all the methods of :class:`Queue.Queue` except for
:meth:`~Queue.Queue.task_done` and :meth:`~Queue.Queue.join`.
.. method:: qsize()
@@ -578,11 +601,10 @@ For an example of the usage of queues for interprocess communication see
:exc:`Queue.Empty` exception (*timeout* is ignored in that case).
.. method:: get_nowait()
- get_no_wait()
Equivalent to ``get(False)``.
- :class:`multiprocessing.Queue` has a few additional methods not found in
+ :class:`~multiprocessing.Queue` has a few additional methods not found in
:class:`Queue.Queue`. These methods are usually unnecessary for most
code:
@@ -609,10 +631,17 @@ For an example of the usage of queues for interprocess communication see
the background thread from being joined automatically when the process
exits -- see :meth:`join_thread`.
+ A better name for this method might be
+ ``allow_exit_without_flush()``. It is likely to cause enqueued
+ data to be lost, and you almost certainly will not need to use it.
+ It is really only there if you need the current process to exit
+ immediately without waiting to flush enqueued data to the
+ underlying pipe, and you don't care about lost data.
+
.. class:: multiprocessing.queues.SimpleQueue()
- It is a simplified :class:`Queue` type, very close to a locked :class:`Pipe`.
+ It is a simplified :class:`~multiprocessing.Queue` type, very close to a locked :class:`Pipe`.
.. method:: empty()
@@ -629,7 +658,7 @@ For an example of the usage of queues for interprocess communication see
.. class:: JoinableQueue([maxsize])
- :class:`JoinableQueue`, a :class:`Queue` subclass, is a queue which
+ :class:`JoinableQueue`, a :class:`~multiprocessing.Queue` subclass, is a queue which
additionally has :meth:`task_done` and :meth:`join` methods.
.. method:: task_done()
@@ -639,7 +668,7 @@ For an example of the usage of queues for interprocess communication see
call to :meth:`task_done` tells the queue that the processing on the task
is complete.
- If a :meth:`~Queue.join` is currently blocking, it will resume when all
+ If a :meth:`~Queue.Queue.join` is currently blocking, it will resume when all
items have been processed (meaning that a :meth:`task_done` call was
received for every item that had been :meth:`~Queue.put` into the queue).
@@ -655,7 +684,7 @@ For an example of the usage of queues for interprocess communication see
queue. The count goes down whenever a consumer thread calls
:meth:`task_done` to indicate that the item was retrieved and all work on
it is complete. When the count of unfinished tasks drops to zero,
- :meth:`~Queue.join` unblocks.
+ :meth:`~Queue.Queue.join` unblocks.
Miscellaneous
@@ -931,12 +960,24 @@ inherited by child processes.
ctypes type or a one character typecode of the kind used by the :mod:`array`
module. *\*args* is passed on to the constructor for the type.
- If *lock* is ``True`` (the default) then a new lock object is created to
- synchronize access to the value. If *lock* is a :class:`Lock` or
- :class:`RLock` object then that will be used to synchronize access to the
- value. If *lock* is ``False`` then access to the returned object will not be
- automatically protected by a lock, so it will not necessarily be
- "process-safe".
+ If *lock* is ``True`` (the default) then a new recursive lock
+ object is created to synchronize access to the value. If *lock* is
+ a :class:`Lock` or :class:`RLock` object then that will be used to
+ synchronize access to the value. If *lock* is ``False`` then
+ access to the returned object will not be automatically protected
+ by a lock, so it will not necessarily be "process-safe".
+
+ Operations like ``+=`` which involve a read and write are not
+ atomic. So if, for instance, you want to atomically increment a
+ shared value it is insufficient to just do ::
+
+ counter.value += 1
+
+ Assuming the associated lock is recursive (which it is by default)
+ you can instead do ::
+
+ with counter.get_lock():
+ counter.value += 1
Note that *lock* is a keyword-only argument.
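A fuller sketch of that atomic-increment pattern (process and iteration counts are arbitrary)::

   from multiprocessing import Process, Value

   def worker(counter):
       for _ in range(1000):
           with counter.get_lock():      # the default lock is recursive
               counter.value += 1

   if __name__ == '__main__':
       counter = Value('i', 0)
       procs = [Process(target=worker, args=(counter,)) for _ in range(4)]
       for p in procs:
           p.start()
       for p in procs:
           p.join()
       print counter.value               # 4000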
@@ -1021,8 +1062,9 @@ processes.
array.
If *lock* is ``True`` (the default) then a new lock object is created to
- synchronize access to the value. If *lock* is a :class:`Lock` or
- :class:`RLock` object then that will be used to synchronize access to the
+ synchronize access to the value. If *lock* is a
+ :class:`~multiprocessing.Lock` or :class:`~multiprocessing.RLock` object
+ then that will be used to synchronize access to the
value. If *lock* is ``False`` then access to the returned object will not be
automatically protected by a lock, so it will not necessarily be
"process-safe".
@@ -1036,8 +1078,8 @@ processes.
object.
If *lock* is ``True`` (the default) then a new lock object is created to
- synchronize access to the value. If *lock* is a :class:`Lock` or
- :class:`RLock` object then that will be used to synchronize access to the
+ synchronize access to the value. If *lock* is a :class:`~multiprocessing.Lock` or
+ :class:`~multiprocessing.RLock` object then that will be used to synchronize access to the
value. If *lock* is ``False`` then access to the returned object will not be
automatically protected by a lock, so it will not necessarily be
"process-safe".
@@ -1215,12 +1257,12 @@ their parent process exits. The manager classes are defined in the
*exposed* is used to specify a sequence of method names which proxies for
this typeid should be allowed to access using
- :meth:`BaseProxy._callMethod`. (If *exposed* is ``None`` then
+ :meth:`BaseProxy._callmethod`. (If *exposed* is ``None`` then
:attr:`proxytype._exposed_` is used instead if it exists.) In the case
where no exposed list is specified, all "public methods" of the shared
object will be accessible. (Here a "public method" means any attribute
- which has a :meth:`__call__` method and whose name does not begin with
- ``'_'``.)
+ which has a :meth:`~object.__call__` method and whose name does not begin
+ with ``'_'``.)
*method_to_typeid* is a mapping used to specify the return type of those
exposed methods which should return a proxy. It maps method names to
@@ -1581,6 +1623,9 @@ with the :class:`Pool` class.
*initializer* is not ``None`` then each worker process will call
``initializer(*initargs)`` when it starts.
+ Note that the methods of the pool object should only be called by
+ the process which created the pool.
+
.. versionadded:: 2.7
*maxtasksperchild* is the number of tasks a worker process can complete
before it will exit and be replaced with a fresh worker process, to enable
@@ -1727,7 +1772,8 @@ Listeners and Clients
:synopsis: API for dealing with sockets.
Usually message passing between processes is done using queues or by using
-:class:`Connection` objects returned by :func:`Pipe`.
+:class:`~multiprocessing.Connection` objects returned by
+:func:`~multiprocessing.Pipe`.
However, the :mod:`multiprocessing.connection` module allows some extra
flexibility. It basically gives a high level message oriented API for dealing
@@ -1744,7 +1790,7 @@ authentication* using the :mod:`hmac` module.
then a welcome message is sent to the other end of the connection. Otherwise
:exc:`AuthenticationError` is raised.
-.. function:: answerChallenge(connection, authkey)
+.. function:: answer_challenge(connection, authkey)
Receive a message, calculate the digest of the message using *authkey* as the
key, and then send the digest back.
@@ -1793,7 +1839,8 @@ authentication* using the :mod:`hmac` module.
private temporary directory created using :func:`tempfile.mkstemp`.
If the listener object uses a socket then *backlog* (1 by default) is passed
- to the :meth:`listen` method of the socket once it has been bound.
+ to the :meth:`~socket.socket.listen` method of the socket once it has been
+ bound.
If *authenticate* is ``True`` (``False`` by default) or *authkey* is not
``None`` then digest authentication is used.
@@ -1810,8 +1857,9 @@ authentication* using the :mod:`hmac` module.
.. method:: accept()
Accept a connection on the bound socket or named pipe of the listener
- object and return a :class:`Connection` object. If authentication is
- attempted and fails, then :exc:`AuthenticationError` is raised.
+ object and return a :class:`~multiprocessing.Connection` object. If
+ authentication is attempted and fails, then
+ :exc:`~multiprocessing.AuthenticationError` is raised.
.. method:: close()
@@ -1907,7 +1955,8 @@ an ``'AF_PIPE'`` address rather than an ``'AF_UNIX'`` address.
Authentication keys
~~~~~~~~~~~~~~~~~~~
-When one uses :meth:`Connection.recv`, the data received is automatically
+When one uses :meth:`Connection.recv <multiprocessing.Connection.recv>`, the
+data received is automatically
unpickled. Unfortunately unpickling data from an untrusted source is a security
risk. Therefore :class:`Listener` and :func:`Client` use the :mod:`hmac` module
to provide digest authentication.
@@ -2056,9 +2105,10 @@ Joining zombie processes
On Unix when a process finishes but has not been joined it becomes a zombie.
There should never be very many because each time a new process starts (or
- :func:`active_children` is called) all completed processes which have not
- yet been joined will be joined. Also calling a finished process's
- :meth:`Process.is_alive` will join the process. Even so it is probably good
+ :func:`~multiprocessing.active_children` is called) all completed processes
+ which have not yet been joined will be joined. Also calling a finished
+ process's :meth:`Process.is_alive <multiprocessing.Process.is_alive>` will
+ join the process. Even so it is probably good
practice to explicitly join all the processes that you start.
Better to inherit than pickle/unpickle
@@ -2071,20 +2121,22 @@ Better to inherit than pickle/unpickle
Avoid terminating processes
- Using the :meth:`Process.terminate` method to stop a process is liable to
+ Using the :meth:`Process.terminate <multiprocessing.Process.terminate>`
+ method to stop a process is liable to
cause any shared resources (such as locks, semaphores, pipes and queues)
currently being used by the process to become broken or unavailable to other
processes.
Therefore it is probably best to only consider using
- :meth:`Process.terminate` on processes which never use any shared resources.
+ :meth:`Process.terminate <multiprocessing.Process.terminate>` on processes
+ which never use any shared resources.
Joining processes that use queues
Bear in mind that a process that has put items in a queue will wait before
terminating until all the buffered items are fed by the "feeder" thread to
the underlying pipe. (The child process can call the
- :meth:`Queue.cancel_join_thread` method of the queue to avoid this behaviour.)
+ :meth:`~multiprocessing.Queue.cancel_join_thread` method of the queue to avoid this behaviour.)
This means that whenever you use a queue you need to make sure that all
items which have been put on the queue will eventually be removed before the
@@ -2161,7 +2213,7 @@ Beware of replacing :data:`sys.stdin` with a "file like object"
resulting in a bad file descriptor error, but introduces a potential danger
to applications which replace :func:`sys.stdin` with a "file-like object"
with output buffering. This danger is that if multiple processes call
- :func:`close()` on this file-like object, it could result in the same
+ :meth:`~io.IOBase.close()` on this file-like object, it could result in the same
data being flushed to the object multiple times, resulting in corruption.
If you write a file-like object and implement your own caching, you can
@@ -2190,14 +2242,16 @@ More picklability
as the ``target`` argument on Windows --- just define a function and use
that instead.
- Also, if you subclass :class:`Process` then make sure that instances will be
- picklable when the :meth:`Process.start` method is called.
+ Also, if you subclass :class:`~multiprocessing.Process` then make sure that
+ instances will be picklable when the :meth:`Process.start
+ <multiprocessing.Process.start>` method is called.
Global variables
Bear in mind that if code run in a child process tries to access a global
variable, then the value it sees (if any) may not be the same as the value
- in the parent process at the time that :meth:`Process.start` was called.
+ in the parent process at the time that :meth:`Process.start
+ <multiprocessing.Process.start>` was called.
However, global variables which are just module level constants cause no
problems.
@@ -2252,7 +2306,7 @@ Demonstration of how to create and use customized managers and proxies:
.. literalinclude:: ../includes/mp_newtype.py
-Using :class:`Pool`:
+Using :class:`~multiprocessing.pool.Pool`:
.. literalinclude:: ../includes/mp_pool.py
diff --git a/Doc/library/mutex.rst b/Doc/library/mutex.rst
index 2d41350..57c3971 100644
--- a/Doc/library/mutex.rst
+++ b/Doc/library/mutex.rst
@@ -7,7 +7,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`mutex` module has been removed in Python 3.0.
+ The :mod:`mutex` module has been removed in Python 3.
.. sectionauthor:: Moshe Zadka <moshez@zadka.site.co.il>
diff --git a/Doc/library/netrc.rst b/Doc/library/netrc.rst
index 323fd69..713c8df 100644
--- a/Doc/library/netrc.rst
+++ b/Doc/library/netrc.rst
@@ -25,6 +25,14 @@ the Unix :program:`ftp` program and other FTP clients.
no argument is given, the file :file:`.netrc` in the user's home directory will
be read. Parse errors will raise :exc:`NetrcParseError` with diagnostic
information including the file name, line number, and terminating token.
+ If no argument is specified on a POSIX system, the presence of passwords in
+ the :file:`.netrc` file will raise a :exc:`NetrcParseError` if the file
+ ownership or permissions are insecure (owned by a user other than the user
+ running the process, or accessible for read or write by any other user).
+ This implements security behavior equivalent to that of ftp and other
+ programs that use :file:`.netrc`.
+
+ .. versionchanged:: 2.7.6 Added the POSIX permissions check.
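A sketch of coping with that check (the host name is an example; :class:`netrc.netrc` raises :exc:`IOError` if the file itself is missing)::

   import netrc

   try:
       auth = netrc.netrc()                            # reads ~/.netrc
   except netrc.NetrcParseError as err:
       print 'netrc problem:', err                     # e.g. insecure permissions
   else:
       creds = auth.authenticators('ftp.example.org')  # None if host not listed
       if creds is not None:
           login, account, password = creds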
.. exception:: NetrcParseError
diff --git a/Doc/library/new.rst b/Doc/library/new.rst
index 8dd965e..667e586 100644
--- a/Doc/library/new.rst
+++ b/Doc/library/new.rst
@@ -6,7 +6,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`new` module has been removed in Python 3.0. Use the :mod:`types`
+ The :mod:`new` module has been removed in Python 3. Use the :mod:`types`
module's classes instead.
.. sectionauthor:: Moshe Zadka <moshez@zadka.site.co.il>
diff --git a/Doc/library/nntplib.rst b/Doc/library/nntplib.rst
index acbb7a5..92180d6 100644
--- a/Doc/library/nntplib.rst
+++ b/Doc/library/nntplib.rst
@@ -46,7 +46,7 @@ To post an article from a file (this assumes that the article has valid
headers, and that you have right to post on the particular newsgroup)::
>>> s = NNTP('news.gmane.org')
- >>> f = open('/tmp/article')
+ >>> f = open('articlefile')
>>> s.post(f)
'240 Article posted successfully.'
>>> s.quit()
@@ -234,25 +234,25 @@ indicates an error, the method raises one of the above exceptions.
.. method:: NNTP.next()
- Send a ``NEXT`` command. Return as for :meth:`stat`.
+ Send a ``NEXT`` command. Return as for :meth:`.stat`.
.. method:: NNTP.last()
- Send a ``LAST`` command. Return as for :meth:`stat`.
+ Send a ``LAST`` command. Return as for :meth:`.stat`.
.. method:: NNTP.head(id)
- Send a ``HEAD`` command, where *id* has the same meaning as for :meth:`stat`.
+ Send a ``HEAD`` command, where *id* has the same meaning as for :meth:`.stat`.
Return a tuple ``(response, number, id, list)`` where the first three are the
- same as for :meth:`stat`, and *list* is a list of the article's headers (an
+ same as for :meth:`.stat`, and *list* is a list of the article's headers (an
uninterpreted list of lines, without trailing newlines).
.. method:: NNTP.body(id,[file])
- Send a ``BODY`` command, where *id* has the same meaning as for :meth:`stat`.
+ Send a ``BODY`` command, where *id* has the same meaning as for :meth:`.stat`.
If the *file* parameter is supplied, then the body is stored in a file. If
*file* is a string, then the method will open a file object with that name,
write to it then close it. If *file* is a file object, then it will start
@@ -263,7 +263,7 @@ indicates an error, the method raises one of the above exceptions.
.. method:: NNTP.article(id)
Send an ``ARTICLE`` command, where *id* has the same meaning as for
- :meth:`stat`. Return as for :meth:`head`.
+ :meth:`.stat`. Return as for :meth:`head`.
.. method:: NNTP.slave()
@@ -290,7 +290,7 @@ indicates an error, the method raises one of the above exceptions.
.. method:: NNTP.post(file)
Post an article using the ``POST`` command. The *file* argument is an open file
- object which is read until EOF using its :meth:`readline` method. It should be
+ object which is read until EOF using its :meth:`~file.readline` method. It should be
a well-formed news article, including the required headers. The :meth:`post`
method automatically escapes lines beginning with ``.``.
diff --git a/Doc/library/numbers.rst b/Doc/library/numbers.rst
index f46e8ac..8811b5d 100644
--- a/Doc/library/numbers.rst
+++ b/Doc/library/numbers.rst
@@ -73,10 +73,10 @@ The numeric tower
.. class:: Integral
- Subtypes :class:`Rational` and adds a conversion to :class:`int`.
- Provides defaults for :func:`float`, :attr:`~Rational.numerator`, and
- :attr:`~Rational.denominator`, and bit-string operations: ``<<``,
- ``>>``, ``&``, ``^``, ``|``, ``~``.
+ Subtypes :class:`Rational` and adds a conversion to :class:`int`. Provides
+ defaults for :func:`float`, :attr:`~Rational.numerator`, and
+ :attr:`~Rational.denominator`. Adds abstract methods for ``**`` and
+ bit-string operations: ``<<``, ``>>``, ``&``, ``^``, ``|``, ``~``.
Notes for type implementors
diff --git a/Doc/library/operator.rst b/Doc/library/operator.rst
index 53d45b2..0b46504 100644
--- a/Doc/library/operator.rst
+++ b/Doc/library/operator.rst
@@ -490,13 +490,22 @@ lookups. These are useful for making fast field extractors as arguments for
expect a function argument.
-.. function:: attrgetter(attr[, args...])
+.. function:: attrgetter(attr)
+ attrgetter(*attrs)
- Return a callable object that fetches *attr* from its operand. If more than one
- attribute is requested, returns a tuple of attributes. After,
- ``f = attrgetter('name')``, the call ``f(b)`` returns ``b.name``. After,
- ``f = attrgetter('name', 'date')``, the call ``f(b)`` returns ``(b.name,
- b.date)``. Equivalent to::
+ Return a callable object that fetches *attr* from its operand.
+ If more than one attribute is requested, returns a tuple of attributes.
+ The attribute names can also contain dots. For example:
+
+ * After ``f = attrgetter('name')``, the call ``f(b)`` returns ``b.name``.
+
+ * After ``f = attrgetter('name', 'date')``, the call ``f(b)`` returns
+ ``(b.name, b.date)``.
+
+ * After ``f = attrgetter('name.first', 'name.last')``, the call ``f(b)``
+ returns ``(b.name.first, b.name.last)``.
+
+ Equivalent to::
def attrgetter(*items):
if len(items) == 1:
@@ -505,7 +514,7 @@ expect a function argument.
return resolve_attr(obj, attr)
else:
def g(obj):
- return tuple(resolve_att(obj, attr) for attr in items)
+ return tuple(resolve_attr(obj, attr) for attr in items)
return g
def resolve_attr(obj, attr):
@@ -514,9 +523,6 @@ expect a function argument.
return obj
- The attribute names can also contain dots; after ``f = attrgetter('date.month')``,
- the call ``f(b)`` returns ``b.date.month``.
-
.. versionadded:: 2.4
.. versionchanged:: 2.5
@@ -526,11 +532,19 @@ expect a function argument.
Added support for dotted attributes.
-.. function:: itemgetter(item[, args...])
+.. function:: itemgetter(item)
+ itemgetter(*items)
Return a callable object that fetches *item* from its operand using the
operand's :meth:`__getitem__` method. If multiple items are specified,
- returns a tuple of lookup values. Equivalent to::
+ returns a tuple of lookup values. For example:
+
+ * After ``f = itemgetter(2)``, the call ``f(r)`` returns ``r[2]``.
+
+ * After ``g = itemgetter(2, 5, 3)``, the call ``g(r)`` returns
+ ``(r[2], r[5], r[3])``.
+
+ Equivalent to::
def itemgetter(*items):
if len(items) == 1:
@@ -573,9 +587,14 @@ expect a function argument.
Return a callable object that calls the method *name* on its operand. If
additional arguments and/or keyword arguments are given, they will be given
- to the method as well. After ``f = methodcaller('name')``, the call ``f(b)``
- returns ``b.name()``. After ``f = methodcaller('name', 'foo', bar=1)``, the
- call ``f(b)`` returns ``b.name('foo', bar=1)``. Equivalent to::
+ to the method as well. For example:
+
+ * After ``f = methodcaller('name')``, the call ``f(b)`` returns ``b.name()``.
+
+ * After ``f = methodcaller('name', 'foo', bar=1)``, the call ``f(b)``
+ returns ``b.name('foo', bar=1)``.
+
+ Equivalent to::
def methodcaller(name, *args, **kwargs):
def caller(obj):
diff --git a/Doc/library/optparse.rst b/Doc/library/optparse.rst
index d0783e7..417b3bb 100644
--- a/Doc/library/optparse.rst
+++ b/Doc/library/optparse.rst
@@ -173,10 +173,10 @@ required option
For example, consider this hypothetical command-line::
- prog -v --report /tmp/report.txt foo bar
+ prog -v --report report.txt foo bar
``-v`` and ``--report`` are both options. Assuming that ``--report``
-takes one argument, ``/tmp/report.txt`` is an option argument. ``foo`` and
+takes one argument, ``report.txt`` is an option argument. ``foo`` and
``bar`` are positional arguments.
@@ -275,7 +275,8 @@ You're free to define as many short option strings and as many long option
strings as you like (including zero), as long as there is at least one option
string overall.
-The option strings passed to :meth:`add_option` are effectively labels for the
+The option strings passed to :meth:`OptionParser.add_option` are effectively
+labels for the
option defined by that call. For brevity, we will frequently refer to
*encountering an option* on the command line; in reality, :mod:`optparse`
encounters *option strings* and looks up options from them.
@@ -895,7 +896,8 @@ long option strings, but you must specify at least one overall option string.
The canonical way to create an :class:`Option` instance is with the
:meth:`add_option` method of :class:`OptionParser`.
-.. method:: OptionParser.add_option(opt_str[, ...], attr=value, ...)
+.. method:: OptionParser.add_option(option)
+ OptionParser.add_option(*opt_str, attr=value, ...)
To define an option with only a short option string::
@@ -1168,6 +1170,17 @@ must specify for any option using that action.
options.tracks.append(int("4"))
+ The ``append`` action calls the ``append`` method on the current value of the
+ option. This means that any default value specified must have an ``append``
+ method. It also means that if the default value is non-empty, the default
+ elements will be present in the parsed value for the option, with any values
+ from the command line appended after those default values::
+
+ >>> parser.add_option("--files", action="append", default=['~/.mypkg/defaults'])
+ >>> opts, args = parser.parse_args(['--files', 'overrides.mypkg'])
+ >>> opts.files
+ ['~/.mypkg/defaults', 'overrides.mypkg']
+
* ``"append_const"`` [required: :attr:`~Option.const`; relevant:
:attr:`~Option.dest`]
diff --git a/Doc/library/os.path.rst b/Doc/library/os.path.rst
index 62bbdff..ed3aaf4 100644
--- a/Doc/library/os.path.rst
+++ b/Doc/library/os.path.rst
@@ -16,6 +16,11 @@ write files see :func:`open`, and for accessing the filesystem see the
:func:`splitunc` and :func:`ismount` do handle them correctly.
+Unlike a Unix shell, Python does not do any *automatic* path expansions.
+Functions such as :func:`expanduser` and :func:`expandvars` can be invoked
+explicitly when an application desires shell-like path expansion. (See also
+the :mod:`glob` module.)
+
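For instance (a sketch, not part of this patch; results depend on the caller's
environment)::

   import os.path

   os.path.expanduser('~/data')        # e.g. '/home/someuser/data'
   os.path.expandvars('$HOME/data')    # expansion via environment variables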
.. note::
Since different operating systems have different path name conventions, there
@@ -35,15 +40,17 @@ write files see :func:`open`, and for accessing the filesystem see the
.. function:: abspath(path)
Return a normalized absolutized version of the pathname *path*. On most
- platforms, this is equivalent to ``normpath(join(os.getcwd(), path))``.
+ platforms, this is equivalent to calling the function :func:`normpath` as
+ follows: ``normpath(join(os.getcwd(), path))``.
.. versionadded:: 1.5.2
.. function:: basename(path)
- Return the base name of pathname *path*. This is the second half of the pair
- returned by ``split(path)``. Note that the result of this function is different
+ Return the base name of pathname *path*. This is the second element of the
+ pair returned by passing *path* to the function :func:`split`. Note that
+ the result of this function is different
from the Unix :program:`basename` program; where :program:`basename` for
``'/foo/bar/'`` returns ``'bar'``, the :func:`basename` function returns an
empty string (``''``).
@@ -58,8 +65,8 @@ write files see :func:`open`, and for accessing the filesystem see the
.. function:: dirname(path)
- Return the directory name of pathname *path*. This is the first half of the
- pair returned by ``split(path)``.
+ Return the directory name of pathname *path*. This is the first element of
+ the pair returned by passing *path* to the function :func:`split`.
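   The trailing-slash behaviour described above can be seen directly (a sketch
   using :mod:`posixpath`, the POSIX flavour of :mod:`os.path`, so the output
   does not depend on the host platform)::

      >>> import posixpath
      >>> posixpath.basename('/foo/bar')
      'bar'
      >>> posixpath.basename('/foo/bar/')
      ''
      >>> posixpath.dirname('/foo/bar/')
      '/foo/bar'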
.. function:: exists(path)
@@ -120,7 +127,7 @@ write files see :func:`open`, and for accessing the filesystem see the
.. versionadded:: 1.5.2
.. versionchanged:: 2.3
- If :func:`os.stat_float_times` returns True, the result is a floating point
+ If :func:`os.stat_float_times` returns ``True``, the result is a floating point
number.
@@ -133,14 +140,14 @@ write files see :func:`open`, and for accessing the filesystem see the
.. versionadded:: 1.5.2
.. versionchanged:: 2.3
- If :func:`os.stat_float_times` returns True, the result is a floating point
+ If :func:`os.stat_float_times` returns ``True``, the result is a floating point
number.
.. function:: getctime(path)
Return the system's ctime which, on some systems (like Unix) is the time of the
- last change, and, on others (like Windows), is the creation time for *path*.
+ last metadata change, and, on others (like Windows), is the creation time for *path*.
The return value is a number giving the number of seconds since the epoch (see
the :mod:`time` module). Raise :exc:`os.error` if the file does not exist or
is inaccessible.
@@ -178,7 +185,7 @@ write files see :func:`open`, and for accessing the filesystem see the
.. function:: islink(path)
Return ``True`` if *path* refers to a directory entry that is a symbolic link.
- Always ``False`` if symbolic links are not supported.
+ Always ``False`` if symbolic links are not supported by the Python runtime.
.. function:: ismount(path)
@@ -212,13 +219,11 @@ write files see :func:`open`, and for accessing the filesystem see the
.. function:: normpath(path)
- Normalize a pathname. This collapses redundant separators and up-level
- references so that ``A//B``, ``A/B/``, ``A/./B`` and ``A/foo/../B`` all become
- ``A/B``.
-
- It does not normalize the case (use :func:`normcase` for that). On Windows, it
- converts forward slashes to backward slashes. It should be understood that this
- may change the meaning of the path if it contains symbolic links!
+ Normalize a pathname by collapsing redundant separators and up-level
+ references so that ``A//B``, ``A/B/``, ``A/./B`` and ``A/foo/../B`` all
+ become ``A/B``. This string manipulation may change the meaning of a path
+ that contains symbolic links. On Windows, it converts forward slashes to
+ backward slashes. To normalize case, use :func:`normcase`.
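   For example (a sketch using :mod:`posixpath` so the separators are
   predictable)::

      >>> import posixpath
      >>> posixpath.normpath('A//B/./C/../D')
      'A/B/D'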
.. function:: realpath(path)
@@ -231,8 +236,10 @@ write files see :func:`open`, and for accessing the filesystem see the
.. function:: relpath(path[, start])
- Return a relative filepath to *path* either from the current directory or from
- an optional *start* point.
+ Return a relative filepath to *path* either from the current directory or
+ from an optional *start* directory. This is a path computation: the
+ filesystem is not accessed to confirm the existence or nature of *path* or
+ *start*.
*start* defaults to :attr:`os.curdir`.
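   For example (a sketch using :mod:`posixpath`; neither path needs to exist)::

      >>> import posixpath
      >>> posixpath.relpath('/a/b/c', '/a')
      'b/c'
      >>> posixpath.relpath('/a/b/c', '/a/x')
      '../b/c'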
@@ -260,9 +267,9 @@ write files see :func:`open`, and for accessing the filesystem see the
.. function:: samestat(stat1, stat2)
Return ``True`` if the stat tuples *stat1* and *stat2* refer to the same file.
- These structures may have been returned by :func:`fstat`, :func:`lstat`, or
- :func:`stat`. This function implements the underlying comparison used by
- :func:`samefile` and :func:`sameopenfile`.
+ These structures may have been returned by :func:`os.fstat`,
+ :func:`os.lstat`, or :func:`os.stat`. This function implements the
+ underlying comparison used by :func:`samefile` and :func:`sameopenfile`.
Availability: Unix.
@@ -276,7 +283,8 @@ write files see :func:`open`, and for accessing the filesystem see the
*path* is empty, both *head* and *tail* are empty. Trailing slashes are
stripped from *head* unless it is the root (one or more slashes only). In
all cases, ``join(head, tail)`` returns a path to the same location as *path*
- (but the strings may differ).
+ (but the strings may differ). Also see the functions :func:`dirname` and
+ :func:`basename`.
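   A brief sketch of the *head*/*tail* split and the ``join`` round-trip
   mentioned above (again via :mod:`posixpath`)::

      >>> import posixpath
      >>> posixpath.split('/foo/bar')
      ('/foo', 'bar')
      >>> posixpath.split('/foo/bar/')
      ('/foo/bar', '')
      >>> posixpath.join(*posixpath.split('/foo/bar'))
      '/foo/bar'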
.. function:: splitdrive(path)
@@ -331,13 +339,13 @@ write files see :func:`open`, and for accessing the filesystem see the
.. note::
- This function is deprecated and has been removed in 3.0 in favor of
+ This function is deprecated and has been removed in Python 3 in favor of
:func:`os.walk`.
.. data:: supports_unicode_filenames
- True if arbitrary Unicode strings can be used as file names (within limitations
+ ``True`` if arbitrary Unicode strings can be used as file names (within limitations
imposed by the file system).
.. versionadded:: 2.3
diff --git a/Doc/library/os.rst b/Doc/library/os.rst
index 8c63444..32051c0 100644
--- a/Doc/library/os.rst
+++ b/Doc/library/os.rst
@@ -72,7 +72,7 @@ process and user.
.. data:: environ
- A mapping object representing the string environment. For example,
+ A :term:`mapping` object representing the string environment. For example,
``environ['HOME']`` is the pathname of your home directory (on some platforms),
and is equivalent to ``getenv("HOME")`` in C.
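   For example (a sketch; ``MY_APP_MODE`` is a made-up variable and the value of
   ``HOME`` depends on the calling environment)::

      import os

      home = os.environ.get('HOME')        # None if HOME is not set
      os.environ['MY_APP_MODE'] = 'debug'  # also calls putenv() where available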
@@ -157,6 +157,22 @@ process and user.
Availability: Unix.
+ .. note::
+
+ On Mac OS X, :func:`getgroups` behavior differs somewhat from
+ other Unix platforms. If the Python interpreter was built with a
+ deployment target of :const:`10.5` or earlier, :func:`getgroups` returns
+ the list of effective group ids associated with the current user process;
+ this list is limited to a system-defined number of entries, typically 16,
+ and may be modified by calls to :func:`setgroups` if suitably privileged.
+ If built with a deployment target greater than :const:`10.5`,
+ :func:`getgroups` returns the current group access list for the user
+ associated with the effective user id of the process; the group access
+ list may change over the lifetime of the process, it is not affected by
+ calls to :func:`setgroups`, and its length is not limited to 16. The
+ deployment target value, :const:`MACOSX_DEPLOYMENT_TARGET`, can be
+ obtained with :func:`sysconfig.get_config_var`.
+
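+ For instance, the deployment target mentioned above can be inspected like
+ this (a sketch; the value shown is only illustrative)::
+
+ import sysconfig
+
+ sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')  # e.g. '10.6' on an OS X build
+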
.. function:: initgroups(username, gid)
@@ -241,7 +257,7 @@ process and user.
.. index:: single: user; id
- Return the current process's user id.
+ Return the current process's real user id.
Availability: Unix.
@@ -306,6 +322,10 @@ process and user.
.. versionadded:: 2.2
+ .. note:: On Mac OS X, the length of *groups* may not exceed the
+ system-defined maximum number of effective group ids, typically 16.
+ See the documentation for :func:`getgroups` for cases where it may not
+ return the same group list set by calling :func:`setgroups`.
.. function:: setpgrp()
@@ -443,8 +463,9 @@ These functions create new file objects. (See also :func:`open`.)
.. index:: single: I/O control; buffering
Return an open file object connected to the file descriptor *fd*. The *mode*
- and *bufsize* arguments have the same meaning as the corresponding arguments to
- the built-in :func:`open` function.
+ and *bufsize* arguments have the same meaning as the corresponding arguments
+ to the built-in :func:`open` function. If :func:`fdopen` raises an
+ exception, it leaves *fd* untouched (unclosed).
Availability: Unix, Windows.
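   A minimal sketch (not part of this patch; the file name is made up)::

      import os

      fd = os.open('scratch.txt', os.O_WRONLY | os.O_CREAT, 0600)
      f = os.fdopen(fd, 'w')     # the file object now owns the descriptor
      f.write('hello\n')
      f.close()                  # this also closes fd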
@@ -597,7 +618,7 @@ as internal buffering of data.
This function is intended for low-level I/O and must be applied to a file
descriptor as returned by :func:`os.open` or :func:`pipe`. To close a "file
object" returned by the built-in function :func:`open` or by :func:`popen` or
- :func:`fdopen`, use its :meth:`~file.close` method.
+ :func:`fdopen`, use its :meth:`~io.IOBase.close` method.
.. function:: closerange(fd_low, fd_high)
@@ -719,16 +740,14 @@ as internal buffering of data.
Return ``True`` if the file descriptor *fd* is open and connected to a
tty(-like) device, else ``False``.
- Availability: Unix.
-
.. function:: lseek(fd, pos, how)
Set the current position of file descriptor *fd* to position *pos*, modified
by *how*: :const:`SEEK_SET` or ``0`` to set the position relative to the
beginning of the file; :const:`SEEK_CUR` or ``1`` to set it relative to the
- current position; :const:`os.SEEK_END` or ``2`` to set it relative to the end of
- the file.
+ current position; :const:`SEEK_END` or ``2`` to set it relative to the end of
+ the file. Return the new cursor position in bytes, starting from the beginning.
Availability: Unix, Windows.
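   For example (a sketch; the file name is made up)::

      import os

      fd = os.open('scratch.bin', os.O_RDWR | os.O_CREAT)
      os.write(fd, 'abcdef')
      os.lseek(fd, 0, os.SEEK_SET)   # back to the start, returns 0
      os.read(fd, 3)                 # 'abc'
      os.lseek(fd, 0, os.SEEK_END)   # returns 6, the current size in bytes
      os.close(fd)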
@@ -1163,7 +1182,7 @@ Files and Directories
doesn't open the FIFO --- it just creates the rendezvous point.
-.. function:: mknod(filename[, mode=0600, device])
+.. function:: mknod(filename[, mode=0600[, device=0]])
Create a filesystem node (file, device special file or named pipe) named
*filename*. *mode* specifies both the permissions to use and the type of node to
@@ -1363,15 +1382,14 @@ Files and Directories
.. versionchanged:: 2.3
If :func:`stat_float_times` returns ``True``, the time values are floats, measuring
- seconds. Fractions of a second may be reported if the system supports that. On
- Mac OS, the times are always floats. See :func:`stat_float_times` for further
- discussion.
+ seconds. Fractions of a second may be reported if the system supports that.
+ See :func:`stat_float_times` for further discussion.
On some Unix systems (such as Linux), the following attributes may also be
available:
- * :attr:`st_blocks` - number of blocks allocated for file
- * :attr:`st_blksize` - filesystem blocksize
+ * :attr:`st_blocks` - number of 512-byte blocks allocated for file
+ * :attr:`st_blksize` - filesystem blocksize for efficient file system I/O
* :attr:`st_rdev` - type of device if an inode device
* :attr:`st_flags` - user defined flags for file
@@ -1381,12 +1399,6 @@ Files and Directories
* :attr:`st_gen` - file generation number
* :attr:`st_birthtime` - time of file creation
- On Mac OS systems, the following attributes may also be available:
-
- * :attr:`st_rsize`
- * :attr:`st_creator`
- * :attr:`st_type`
-
On RISCOS systems, the following attributes are also available:
* :attr:`st_ftype` (file type)
@@ -1565,7 +1577,7 @@ Files and Directories
Availability: Unix, Windows.
-.. function:: walk(top[, topdown=True [, onerror=None[, followlinks=False]]])
+.. function:: walk(top, topdown=True, onerror=None, followlinks=False)
.. index::
single: directory; walking
@@ -1585,9 +1597,11 @@ Files and Directories
If optional argument *topdown* is ``True`` or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
- (directories are generated top-down). If *topdown* is ``False``, the triple for a
- directory is generated after the triples for all of its subdirectories
- (directories are generated bottom-up).
+ (directories are generated top-down). If *topdown* is ``False``, the triple
+ for a directory is generated after the triples for all of its subdirectories
+ (directories are generated bottom-up). No matter the value of *topdown*, the
+ list of subdirectories is retrieved before the tuples for the directory and
+ its subdirectories are generated.
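   As a rough usage sketch (not part of this patch; the directory name is made
   up), pruning *dirnames* in place as described in the next paragraph::

      import os

      for dirpath, dirnames, filenames in os.walk('src', topdown=True):
          # skip version-control directories; possible because topdown is True
          dirnames[:] = [d for d in dirnames if d != '.svn']
          for name in filenames:
              print os.path.join(dirpath, name)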
When *topdown* is ``True``, the caller can modify the *dirnames* list in-place
(perhaps using :keyword:`del` or slice assignment), and :func:`walk` will only
@@ -1660,7 +1674,7 @@ Process Management
These functions may be used to create and manage processes.
-The various :func:`exec\*` functions take a list of arguments for the new
+The various :func:`exec\* <execl>` functions take a list of arguments for the new
program loaded into the process. In each case, the first of these arguments is
passed to the new program as its own name rather than as an argument a user may
have typed on a command line. For the C programmer, this is the ``argv[0]``
@@ -1698,9 +1712,9 @@ to be ignored.
descriptors are not flushed, so if there may be data buffered
on these open files, you should flush them using
:func:`sys.stdout.flush` or :func:`os.fsync` before calling an
- :func:`exec\*` function.
+ :func:`exec\* <execl>` function.
- The "l" and "v" variants of the :func:`exec\*` functions differ in how
+ The "l" and "v" variants of the :func:`exec\* <execl>` functions differ in how
command-line arguments are passed. The "l" variants are perhaps the easiest
to work with if the number of parameters is fixed when the code is written; the
individual parameters simply become additional parameters to the :func:`execl\*`
@@ -1712,7 +1726,7 @@ to be ignored.
The variants which include a "p" near the end (:func:`execlp`,
:func:`execlpe`, :func:`execvp`, and :func:`execvpe`) will use the
:envvar:`PATH` environment variable to locate the program *file*. When the
- environment is being replaced (using one of the :func:`exec\*e` variants,
+ environment is being replaced (using one of the :func:`exec\*e <execl>` variants,
discussed in the next paragraph), the new environment is used as the source of
the :envvar:`PATH` variable. The other variants, :func:`execl`, :func:`execle`,
:func:`execv`, and :func:`execve`, will not use the :envvar:`PATH` variable to
@@ -1920,6 +1934,10 @@ written in Python, such as a mail server's external command delivery program.
Note that some platforms including FreeBSD <= 6.3, Cygwin and OS/2 EMX have
known issues when using fork() from a thread.
+ .. warning::
+
+ See :mod:`ssl` for applications that use the SSL module with fork().
+
Availability: Unix.
@@ -2014,7 +2032,7 @@ written in Python, such as a mail server's external command delivery program.
process. On Windows, the process id will actually be the process handle, so can
be used with the :func:`waitpid` function.
- The "l" and "v" variants of the :func:`spawn\*` functions differ in how
+ The "l" and "v" variants of the :func:`spawn\* <spawnl>` functions differ in how
command-line arguments are passed. The "l" variants are perhaps the easiest
to work with if the number of parameters is fixed when the code is written; the
individual parameters simply become additional parameters to the
@@ -2026,7 +2044,7 @@ written in Python, such as a mail server's external command delivery program.
The variants which include a second "p" near the end (:func:`spawnlp`,
:func:`spawnlpe`, :func:`spawnvp`, and :func:`spawnvpe`) will use the
:envvar:`PATH` environment variable to locate the program *file*. When the
- environment is being replaced (using one of the :func:`spawn\*e` variants,
+ environment is being replaced (using one of the :func:`spawn\*e <spawnl>` variants,
discussed in the next paragraph), the new environment is used as the source of
the :envvar:`PATH` variable. The other variants, :func:`spawnl`,
:func:`spawnle`, :func:`spawnv`, and :func:`spawnve`, will not use the
@@ -2062,7 +2080,7 @@ written in Python, such as a mail server's external command delivery program.
.. data:: P_NOWAIT
P_NOWAITO
- Possible values for the *mode* parameter to the :func:`spawn\*` family of
+ Possible values for the *mode* parameter to the :func:`spawn\* <spawnl>` family of
functions. If either of these values is given, the :func:`spawn\*` functions
will return as soon as the new process has been created, with the process id as
the return value.
@@ -2074,7 +2092,7 @@ written in Python, such as a mail server's external command delivery program.
.. data:: P_WAIT
- Possible value for the *mode* parameter to the :func:`spawn\*` family of
+ Possible value for the *mode* parameter to the :func:`spawn\* <spawnl>` family of
functions. If this is given as *mode*, the :func:`spawn\*` functions will not
return until the new process has run to completion and will return the exit code
return until the new process has run to completion and will return the exit code
of the process if the run is successful, or ``-signal`` if a signal kills the
@@ -2088,7 +2106,7 @@ written in Python, such as a mail server's external command delivery program.
.. data:: P_DETACH
P_OVERLAY
- Possible values for the *mode* parameter to the :func:`spawn\*` family of
+ Possible values for the *mode* parameter to the :func:`spawn\* <spawnl>` family of
functions. These are less portable than those listed above. :const:`P_DETACH`
is similar to :const:`P_NOWAIT`, but the new process is detached from the
console of the calling process. If :const:`P_OVERLAY` is used, the current
@@ -2204,17 +2222,18 @@ written in Python, such as a mail server's external command delivery program.
(shifting makes cross-platform use of the function easier). A *pid* less than or
equal to ``0`` has no special meaning on Windows, and raises an exception. The
value of integer *options* has no effect. *pid* can refer to any process whose
- id is known, not necessarily a child process. The :func:`spawn` functions called
- with :const:`P_NOWAIT` return suitable process handles.
+ id is known, not necessarily a child process. The :func:`spawn\* <spawnl>`
+ functions called with :const:`P_NOWAIT` return suitable process handles.
-.. function:: wait3([options])
+.. function:: wait3(options)
Similar to :func:`waitpid`, except no process id argument is given and a
3-element tuple containing the child's process id, exit status indication, and
resource usage information is returned. Refer to :mod:`resource`.\
- :func:`getrusage` for details on resource usage information. The option
- argument is the same as that provided to :func:`waitpid` and :func:`wait4`.
+ :func:`~resource.getrusage` for details on resource usage information. The
+ option argument is the same as that provided to :func:`waitpid` and
+ :func:`wait4`.
Availability: Unix.
@@ -2225,9 +2244,9 @@ written in Python, such as a mail server's external command delivery program.
Similar to :func:`waitpid`, except a 3-element tuple, containing the child's
process id, exit status indication, and resource usage information is returned.
- Refer to :mod:`resource`.\ :func:`getrusage` for details on resource usage
- information. The arguments to :func:`wait4` are the same as those provided to
- :func:`waitpid`.
+ Refer to :mod:`resource`.\ :func:`~resource.getrusage` for details on
+ resource usage information. The arguments to :func:`wait4` are the same as
+ those provided to :func:`waitpid`.
Availability: Unix.
@@ -2451,8 +2470,9 @@ Higher-level operations on pathnames are defined in the :mod:`os.path` module.
.. data:: defpath
- The default search path used by :func:`exec\*p\*` and :func:`spawn\*p\*` if the
- environment doesn't have a ``'PATH'`` key. Also available via :mod:`os.path`.
+ The default search path used by :func:`exec\*p\* <execl>` and
+ :func:`spawn\*p\* <spawnl>` if the environment doesn't have a ``'PATH'``
+ key. Also available via :mod:`os.path`.
.. data:: linesep
@@ -2485,8 +2505,11 @@ Miscellaneous Functions
This function returns random bytes from an OS-specific randomness source. The
returned data should be unpredictable enough for cryptographic applications,
though its exact quality depends on the OS implementation. On a UNIX-like
- system this will query /dev/urandom, and on Windows it will use CryptGenRandom.
- If a randomness source is not found, :exc:`NotImplementedError` will be raised.
+ system this will query ``/dev/urandom``, and on Windows it will use
+ ``CryptGenRandom()``. If a randomness source is not found,
+ :exc:`NotImplementedError` will be raised.
- .. versionadded:: 2.4
+ For an easy-to-use interface to the random number generator
+ provided by your platform, please see :class:`random.SystemRandom`.
+ .. versionadded:: 2.4
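+ A tiny sketch of both interfaces::
+
+ import os, random
+
+ token = os.urandom(16)        # 16 unpredictable bytes (a str in Python 2)
+ rng = random.SystemRandom()   # higher-level interface backed by os.urandom
+ rng.randint(1, 6)             # OS-seeded random integer between 1 and 6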
diff --git a/Doc/library/ossaudiodev.rst b/Doc/library/ossaudiodev.rst
index 00c113b..79c5ea5 100644
--- a/Doc/library/ossaudiodev.rst
+++ b/Doc/library/ossaudiodev.rst
@@ -48,7 +48,7 @@ the standard audio interface for Linux and recent versions of FreeBSD.
the official documentation for the OSS C API
The module defines a large number of constants supplied by the OSS device
- driver; see ``<sys/soundcard.h>`` on either Linux or FreeBSD for a listing .
+ driver; see ``<sys/soundcard.h>`` on either Linux or FreeBSD for a listing.
:mod:`ossaudiodev` defines the following variables and functions:
@@ -66,7 +66,8 @@ the standard audio interface for Linux and recent versions of FreeBSD.
``ossaudiodev.error``.)
-.. function:: open([device, ]mode)
+.. function:: open(mode)
+ open(device, mode)
Open an audio device and return an OSS audio device object. This object
supports many file-like methods, such as :meth:`read`, :meth:`write`, and
@@ -162,11 +163,11 @@ and (read-only) attributes:
is only useful in non-blocking mode. Has no return value, since the amount of
data written is always equal to the amount of data supplied.
-The following methods each map to exactly one :func:`ioctl` system call. The
+The following methods each map to exactly one :c:func:`ioctl` system call. The
correspondence is obvious: for example, :meth:`setfmt` corresponds to the
``SNDCTL_DSP_SETFMT`` ioctl, and :meth:`sync` to ``SNDCTL_DSP_SYNC`` (this can
be useful when consulting the OSS documentation). If the underlying
-:func:`ioctl` fails, they all raise :exc:`IOError`.
+:c:func:`ioctl` fails, they all raise :exc:`IOError`.
.. method:: oss_audio_device.nonblock()
@@ -275,7 +276,7 @@ The following convenience methods combine several ioctls, or one ioctl and some
simple calculations.
-.. method:: oss_audio_device.setparameters(format, nchannels, samplerate [, strict=False])
+.. method:: oss_audio_device.setparameters(format, nchannels, samplerate[, strict=False])
Set the key audio sampling parameters---sample format, number of channels, and
sampling rate---in one method call. *format*, *nchannels*, and *samplerate*
@@ -295,7 +296,7 @@ simple calculations.
fmt = dsp.setfmt(fmt)
channels = dsp.channels(channels)
- rate = dsp.rate(channels)
+ rate = dsp.rate(rate)
.. method:: oss_audio_device.bufsize()
diff --git a/Doc/library/othergui.rst b/Doc/library/othergui.rst
index 69df9df..1ee5c5b 100644
--- a/Doc/library/othergui.rst
+++ b/Doc/library/othergui.rst
@@ -13,8 +13,7 @@ available for Python:
provides an object oriented interface that is slightly higher level than
the C one. It comes with many more widgets than Tkinter provides, and has
good Python-specific reference documentation. There are also bindings to
- `GNOME <http://www.gnome.org>`_. One well known PyGTK application is
- `PythonCAD <http://www.pythoncad.org/>`_. An online `tutorial
+ `GNOME <http://www.gnome.org>`_. An online `tutorial
<http://www.pygtk.org/pygtk2tutorial/index.html>`_ is available.
`PyQt <http://www.riverbankcomputing.co.uk/software/pyqt/>`_
diff --git a/Doc/library/parser.rst b/Doc/library/parser.rst
index c46aeae..acda372 100644
--- a/Doc/library/parser.rst
+++ b/Doc/library/parser.rst
@@ -34,7 +34,7 @@ the code forming the application. It is also faster.
replaced by "ast"; this is a legacy from the time when there was no other
AST and has nothing to do with the AST found in Python 2.5. This is also the
reason for the functions' keyword arguments being called *ast*, not *st*.
- The "ast" functions will be removed in Python 3.0.
+ The "ast" functions have been removed in Python 3.
There are a few things to note about this module which are important to making
use of the data structures created. This is not a tutorial on editing the parse
@@ -200,7 +200,7 @@ numbering information.
information is omitted if the flag is false or omitted.
-.. function:: compilest(ast[, filename='<syntax-tree>'])
+.. function:: compilest(ast, filename='<syntax-tree>')
.. index:: builtin: eval
diff --git a/Doc/library/pickle.rst b/Doc/library/pickle.rst
index 4d417d2..a42eabc 100644
--- a/Doc/library/pickle.rst
+++ b/Doc/library/pickle.rst
@@ -352,8 +352,9 @@ The following types can be pickled:
* classes that are defined at the top level of a module
-* instances of such classes whose :attr:`__dict__` or :meth:`__setstate__` is
- picklable (see section :ref:`pickle-protocol` for details)
+* instances of such classes whose :attr:`~object.__dict__` or the result of
+ calling :meth:`__getstate__` is picklable (see section :ref:`pickle-protocol`
+ for details).
Attempts to pickle unpicklable objects will raise the :exc:`PicklingError`
exception; when this happens, an unspecified number of bytes may have already
@@ -364,8 +365,8 @@ raised in this case. You can carefully raise this limit with
Note that functions (built-in and user-defined) are pickled by "fully qualified"
name reference, not by value. This means that only the function name is
-pickled, along with the name of the module the function is defined in. Neither the
-function's code, nor any of its function attributes are pickled. Thus the
+pickled, along with the name of the module the function is defined in. Neither
+the function's code, nor any of its function attributes are pickled. Thus the
defining module must be importable in the unpickling environment, and the module
must contain the named object, otherwise an exception will be raised. [#]_
@@ -442,7 +443,7 @@ Pickling and unpickling normal class instances
defines the method :meth:`__getstate__`, it is called and the return state is
pickled as the contents for the instance, instead of the contents of the
instance's dictionary. If there is no :meth:`__getstate__` method, the
- instance's :attr:`__dict__` is pickled.
+ instance's :attr:`~object.__dict__` is pickled.
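   A compact sketch of this hook (the ``Connection`` class is made up; the
   larger :class:`TextReader` example later in this section shows the same idea
   in full)::

      import pickle

      class Connection(object):
          def __init__(self, host):
              self.host = host
              self.sock = None           # stand-in for some unpicklable resource

          def __getstate__(self):
              state = self.__dict__.copy()
              del state['sock']          # drop the unpicklable piece
              return state

      data = pickle.dumps(Connection('example.org'))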
.. method:: object.__setstate__(state)
@@ -510,7 +511,8 @@ Pickling and unpickling extension types
* Optionally, the object's state, which will be passed to the object's
:meth:`__setstate__` method as described in section :ref:`pickle-inst`. If
the object has no :meth:`__setstate__` method, then, as above, the value
- must be a dictionary and it will be added to the object's :attr:`__dict__`.
+ must be a dictionary and it will be added to the object's
+ :attr:`~object.__dict__`.
* Optionally, an iterator (and not a sequence) yielding successive list
items. These list items will be pickled, and appended to the object using
@@ -568,19 +570,20 @@ the :mod:`pickle` module; it will delegate this resolution to user defined
functions on the pickler and unpickler. [#]_
To define external persistent id resolution, you need to set the
-:attr:`persistent_id` attribute of the pickler object and the
-:attr:`persistent_load` attribute of the unpickler object.
+:attr:`~Pickler.persistent_id` attribute of the pickler object and the
+:attr:`~Unpickler.persistent_load` attribute of the unpickler object.
To pickle objects that have an external persistent id, the pickler must have a
-custom :func:`persistent_id` method that takes an object as an argument and
-returns either ``None`` or the persistent id for that object. When ``None`` is
-returned, the pickler simply pickles the object as normal. When a persistent id
-string is returned, the pickler will pickle that string, along with a marker so
-that the unpickler will recognize the string as a persistent id.
+custom :func:`~Pickler.persistent_id` method that takes an object as an
+argument and returns either ``None`` or the persistent id for that object.
+When ``None`` is returned, the pickler simply pickles the object as normal.
+When a persistent id string is returned, the pickler will pickle that string,
+along with a marker so that the unpickler will recognize the string as a
+persistent id.
To unpickle external objects, the unpickler must have a custom
-:func:`persistent_load` function that takes a persistent id string and returns
-the referenced object.
+:func:`~Unpickler.persistent_load` function that takes a persistent id string
+and returns the referenced object.
Here's a silly example that *might* shed more light::
@@ -630,13 +633,14 @@ Here's a silly example that *might* shed more light::
j = up.load()
print j
-In the :mod:`cPickle` module, the unpickler's :attr:`persistent_load` attribute
-can also be set to a Python list, in which case, when the unpickler reaches a
-persistent id, the persistent id string will simply be appended to this list.
-This functionality exists so that a pickle data stream can be "sniffed" for
-object references without actually instantiating all the objects in a pickle.
-[#]_ Setting :attr:`persistent_load` to a list is usually used in conjunction
-with the :meth:`noload` method on the Unpickler.
+In the :mod:`cPickle` module, the unpickler's :attr:`~Unpickler.persistent_load`
+attribute can also be set to a Python list, in which case, when the unpickler
+reaches a persistent id, the persistent id string will simply be appended to
+this list. This functionality exists so that a pickle data stream can be
+"sniffed" for object references without actually instantiating all the objects
+in a pickle.
+[#]_ Setting :attr:`~Unpickler.persistent_load` to a list is usually used in
+conjunction with the :meth:`~Unpickler.noload` method on the Unpickler.
.. BAW: Both pickle and cPickle support something called inst_persistent_id()
which appears to give unknown types a second shot at producing a persistent
@@ -674,13 +678,13 @@ want to disallow all unpickling of instances. If this sounds like a hack,
you're right. Refer to the source code to make this work.
Things are a little cleaner with :mod:`cPickle`, but not by much. To control
-what gets unpickled, you can set the unpickler's :attr:`find_global` attribute
-to a function or ``None``. If it is ``None`` then any attempts to unpickle
-instances will raise an :exc:`UnpicklingError`. If it is a function, then it
-should accept a module name and a class name, and return the corresponding class
-object. It is responsible for looking up the class and performing any necessary
-imports, and it may raise an error to prevent instances of the class from being
-unpickled.
+what gets unpickled, you can set the unpickler's :attr:`~Unpickler.find_global`
+attribute to a function or ``None``. If it is ``None`` then any attempts to
+unpickle instances will raise an :exc:`UnpicklingError`. If it is a function,
+then it should accept a module name and a class name, and return the
+corresponding class object. It is responsible for looking up the class and
+performing any necessary imports, and it may raise an error to prevent
+instances of the class from being unpickled.
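+A whitelist sketch of that attribute (not part of this patch; only
+:class:`~collections.OrderedDict` is allowed through here)::
+
+   import cPickle
+   from StringIO import StringIO
+   from collections import OrderedDict
+
+   def restricted_find_global(module, name):
+       if (module, name) != ('collections', 'OrderedDict'):
+           raise cPickle.UnpicklingError('%s.%s is not allowed' % (module, name))
+       mod = __import__(module, fromlist=[name])
+       return getattr(mod, name)
+
+   data = cPickle.dumps(OrderedDict(a=1), 2)
+   up = cPickle.Unpickler(StringIO(data))
+   up.find_global = restricted_find_global
+   print up.load()                    # OrderedDict([('a', 1)])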
The moral of the story is that you should be really careful about the source of
the strings your application unpickles.
@@ -731,7 +735,7 @@ can't be sure if the ASCII or binary format was used. ::
Here's a larger example that shows how to modify pickling behavior for a class.
The :class:`TextReader` class opens a text file, and returns the line number and
-line contents each time its :meth:`readline` method is called. If a
+line contents each time its :meth:`!readline` method is called. If a
:class:`TextReader` instance is pickled, all attributes *except* the file object
member are saved. When the instance is unpickled, the file is reopened, and
reading resumes from the last location. The :meth:`__setstate__` and
diff --git a/Doc/library/pickletools.rst b/Doc/library/pickletools.rst
index ce47c97..ebb30ab 100644
--- a/Doc/library/pickletools.rst
+++ b/Doc/library/pickletools.rst
@@ -20,7 +20,7 @@ useful for Python core developers who are working on the :mod:`pickle` and
probably won't find the :mod:`pickletools` module relevant.
-.. function:: dis(pickle[, out=None, memo=None, indentlevel=4])
+.. function:: dis(pickle, out=None, memo=None, indentlevel=4)
Outputs a symbolic disassembly of the pickle to the file-like object *out*,
defaulting to ``sys.stdout``. *pickle* can be a string or a file-like object.
diff --git a/Doc/library/pipes.rst b/Doc/library/pipes.rst
index 016a720..415d5c7 100644
--- a/Doc/library/pipes.rst
+++ b/Doc/library/pipes.rst
@@ -16,8 +16,6 @@ The :mod:`pipes` module defines a class to abstract the concept of a *pipeline*
Because the module uses :program:`/bin/sh` command lines, a POSIX or compatible
shell for :func:`os.system` and :func:`os.popen` is required.
-The :mod:`pipes` module defines the following class:
-
.. class:: Template()
@@ -26,15 +24,52 @@ The :mod:`pipes` module defines the following class:
Example::
>>> import pipes
- >>> t=pipes.Template()
+ >>> t = pipes.Template()
>>> t.append('tr a-z A-Z', '--')
- >>> f=t.open('/tmp/1', 'w')
+ >>> f = t.open('pipefile', 'w')
>>> f.write('hello world')
>>> f.close()
- >>> open('/tmp/1').read()
+ >>> open('pipefile').read()
'HELLO WORLD'
+.. function:: quote(s)
+
+ .. deprecated:: 2.7
+ Prior to Python 2.7, this function was not publicly documented. It is
+ finally exposed publicly in Python 3.3 as the
+ :func:`quote <shlex.quote>` function in the :mod:`shlex` module.
+
+ Return a shell-escaped version of the string *s*. The returned value is a
+ string that can safely be used as one token in a shell command line, for
+ cases where you cannot use a list.
+
+ This idiom would be unsafe::
+
+ >>> filename = 'somefile; rm -rf ~'
+ >>> command = 'ls -l {}'.format(filename)
+ >>> print command # executed by a shell: boom!
+ ls -l somefile; rm -rf ~
+
+ :func:`quote` lets you plug the security hole::
+
+ >>> command = 'ls -l {}'.format(quote(filename))
+ >>> print command
+ ls -l 'somefile; rm -rf ~'
+ >>> remote_command = 'ssh home {}'.format(quote(command))
+ >>> print remote_command
+ ssh home 'ls -l '"'"'somefile; rm -rf ~'"'"''
+
+ The quoting is compatible with UNIX shells and with :func:`shlex.split`:
+
+ >>> import shlex
+ >>> remote_command = shlex.split(remote_command)
+ >>> remote_command
+ ['ssh', 'home', "ls -l 'somefile; rm -rf ~'"]
+ >>> command = shlex.split(remote_command[-1])
+ >>> command
+ ['ls', '-l', 'somefile; rm -rf ~']
+
+
.. _template-objects:
Template Objects
diff --git a/Doc/library/platform.rst b/Doc/library/platform.rst
index 26f587e..cb7144a 100644
--- a/Doc/library/platform.rst
+++ b/Doc/library/platform.rst
@@ -197,8 +197,8 @@ Windows Platform
.. function:: win32_ver(release='', version='', csd='', ptype='')
Get additional version information from the Windows Registry and return a tuple
- ``(version, csd, ptype)`` referring to version number, CSD level
- (service pack) and OS type (multi/single processor).
+ ``(release, version, csd, ptype)`` referring to OS release, version number,
+ CSD level (service pack) and OS type (multi/single processor).
As a hint: *ptype* is ``'Uniprocessor Free'`` on single processor NT machines
and ``'Multiprocessor Free'`` on multi processor machines. The *'Free'* refers
diff --git a/Doc/library/plistlib.rst b/Doc/library/plistlib.rst
index 11268c2..c6930c6 100644
--- a/Doc/library/plistlib.rst
+++ b/Doc/library/plistlib.rst
@@ -74,7 +74,7 @@ This module defines the following functions:
-.. function:: readPlistFromResource(path[, restype='plst'[, resid=0]])
+.. function:: readPlistFromResource(path, restype='plst', resid=0)
Read a plist from the resource with type *restype* from the resource fork of
*path*. Availability: Mac OS X.
@@ -84,7 +84,7 @@ This module defines the following functions:
In Python 3.x, this function has been removed.
-.. function:: writePlistToResource(rootObject, path[, restype='plst'[, resid=0]])
+.. function:: writePlistToResource(rootObject, path, restype='plst', resid=0)
Write *rootObject* as a resource with type *restype* to the resource fork of
*path*. Availability: Mac OS X.
diff --git a/Doc/library/poplib.rst b/Doc/library/poplib.rst
index 8456304..07c243f 100644
--- a/Doc/library/poplib.rst
+++ b/Doc/library/poplib.rst
@@ -24,7 +24,7 @@ quality of POP3 servers varies widely, and too many are quite poor. If your
mailserver supports IMAP, you would be better off using the
:class:`imaplib.IMAP4` class, as IMAP servers tend to be better implemented.
-A single class is provided by the :mod:`poplib` module:
+The :mod:`poplib` module provides two classes:
.. class:: POP3(host[, port[, timeout]])
@@ -102,7 +102,7 @@ An :class:`POP3` instance has the following methods:
.. method:: POP3.pass_(password)
Send password, response includes message count and mailbox size. Note: the
- mailbox on the server is locked until :meth:`quit` is called.
+ mailbox on the server is locked until :meth:`~poplib.quit` is called.
.. method:: POP3.apop(user, secret)
diff --git a/Doc/library/posix.rst b/Doc/library/posix.rst
index 8b5cac0..d28ed2d 100644
--- a/Doc/library/posix.rst
+++ b/Doc/library/posix.rst
@@ -19,7 +19,7 @@ systems the :mod:`posix` module is not available, but a subset is always
available through the :mod:`os` interface. Once :mod:`os` is imported, there is
*no* performance penalty in using it instead of :mod:`posix`. In addition,
:mod:`os` provides some additional functionality, such as automatically calling
-:func:`putenv` when an entry in ``os.environ`` is changed.
+:func:`~os.putenv` when an entry in ``os.environ`` is changed.
Errors are reported as exceptions; the usual exceptions are given for type
errors, while errors reported by the system calls raise :exc:`OSError`.
@@ -74,9 +74,10 @@ In addition to many functions described in the :mod:`os` module documentation,
directory, equivalent to ``getenv("HOME")`` in C.
Modifying this dictionary does not affect the string environment passed on by
- :func:`execv`, :func:`popen` or :func:`system`; if you need to change the
- environment, pass ``environ`` to :func:`execve` or add variable assignments and
- export statements to the command string for :func:`system` or :func:`popen`.
+ :func:`~os.execv`, :func:`~os.popen` or :func:`~os.system`; if you need to
+ change the environment, pass ``environ`` to :func:`~os.execve` or add
+ variable assignments and export statements to the command string for
+ :func:`~os.system` or :func:`~os.popen`.
.. note::
diff --git a/Doc/library/posixfile.rst b/Doc/library/posixfile.rst
index c27e412..97ef800 100644
--- a/Doc/library/posixfile.rst
+++ b/Doc/library/posixfile.rst
@@ -181,7 +181,7 @@ Examples::
import posixfile
- file = posixfile.open('/tmp/test', 'w')
+ file = posixfile.open('testfile', 'w')
file.lock('w|')
...
file.lock('u')
diff --git a/Doc/library/pprint.rst b/Doc/library/pprint.rst
index a0a7200..8e7baf8 100644
--- a/Doc/library/pprint.rst
+++ b/Doc/library/pprint.rst
@@ -36,7 +36,7 @@ The :mod:`pprint` module defines one class:
.. First the implementation class:
-.. class:: PrettyPrinter(...)
+.. class:: PrettyPrinter(indent=1, width=80, depth=None, stream=None)
Construct a :class:`PrettyPrinter` instance. This constructor understands
several keyword parameters. An output stream may be set using the *stream*
@@ -73,9 +73,7 @@ The :mod:`pprint` module defines one class:
The :class:`PrettyPrinter` class supports several derivative functions:
-.. Now the derivative functions:
-
-.. function:: pformat(object[, indent[, width[, depth]]])
+.. function:: pformat(object, indent=1, width=80, depth=None)
Return the formatted representation of *object* as a string. *indent*, *width*
and *depth* will be passed to the :class:`PrettyPrinter` constructor as
@@ -85,10 +83,10 @@ The :class:`PrettyPrinter` class supports several derivative functions:
The parameters *indent*, *width* and *depth* were added.
-.. function:: pprint(object[, stream[, indent[, width[, depth]]]])
+.. function:: pprint(object, stream=None, indent=1, width=80, depth=None)
Prints the formatted representation of *object* on *stream*, followed by a
- newline. If *stream* is omitted, ``sys.stdout`` is used. This may be used in
+ newline. If *stream* is ``None``, ``sys.stdout`` is used. This may be used in
the interactive interpreter instead of a :keyword:`print` statement for
inspecting values. *indent*, *width* and *depth* will be passed to the
:class:`PrettyPrinter` constructor as formatting parameters.
@@ -206,7 +204,8 @@ are converted to strings. The default implementation uses the internals of the
pprint Example
--------------
-This example demonstrates several uses of the :func:`pprint` function and its parameters.
+This example demonstrates several uses of the :func:`pprint` function and its
+parameters.
>>> import pprint
>>> tup = ('spam', ('eggs', ('lumberjack', ('knights', ('ni', ('dead',
diff --git a/Doc/library/profile.rst b/Doc/library/profile.rst
index 236324d..0fb1489 100644
--- a/Doc/library/profile.rst
+++ b/Doc/library/profile.rst
@@ -4,11 +4,6 @@
The Python Profilers
********************
-.. sectionauthor:: James Roskind
-
-.. module:: profile
- :synopsis: Python source profiler.
-
**Source code:** :source:`Lib/profile.py` and :source:`Lib/pstats.py`
--------------
@@ -22,33 +17,31 @@ Introduction to the profilers
single: deterministic profiling
single: profiling, deterministic
-A :dfn:`profiler` is a program that describes the run time performance
-of a program, providing a variety of statistics. This documentation
-describes the profiler functionality provided in the modules
-:mod:`cProfile`, :mod:`profile` and :mod:`pstats`. This profiler
-provides :dfn:`deterministic profiling` of Python programs. It also
-provides a series of report generation tools to allow users to rapidly
-examine the results of a profile operation.
+:mod:`cProfile` and :mod:`profile` provide :dfn:`deterministic profiling` of
+Python programs. A :dfn:`profile` is a set of statistics that describes how
+often and for how long various parts of the program executed. These statistics
+can be formatted into reports via the :mod:`pstats` module.
-The Python standard library provides three different profilers:
+The Python standard library provides three different implementations of the same
+profiling interface:
-#. :mod:`cProfile` is recommended for most users; it's a C extension
- with reasonable overhead
- that makes it suitable for profiling long-running programs.
- Based on :mod:`lsprof`,
- contributed by Brett Rosen and Ted Czotter.
+1. :mod:`cProfile` is recommended for most users; it's a C extension with
+ reasonable overhead that makes it suitable for profiling long-running
+ programs. Based on :mod:`lsprof`, contributed by Brett Rosen and Ted
+ Czotter.
.. versionadded:: 2.5
-#. :mod:`profile`, a pure Python module whose interface is imitated by
- :mod:`cProfile`. Adds significant overhead to profiled programs.
- If you're trying to extend
- the profiler in some way, the task might be easier with this module.
+2. :mod:`profile`, a pure Python module whose interface is imitated by
+ :mod:`cProfile`, but which adds significant overhead to profiled programs.
+ If you're trying to extend the profiler in some way, the task might be easier
+ with this module.
.. versionchanged:: 2.4
- Now also reports the time spent in calls to built-in functions and methods.
+ Now also reports the time spent in calls to built-in functions
+ and methods.
-#. :mod:`hotshot` was an experimental C module that focused on minimizing
+3. :mod:`hotshot` was an experimental C module that focused on minimizing
the overhead of profiling, at the expense of longer data
post-processing times. It is no longer maintained and may be
dropped in a future version of Python.
@@ -65,6 +58,15 @@ is newer and might not be available on all systems.
:mod:`_lsprof` module. The :mod:`hotshot` module is reserved for specialized
usage.
+.. note::
+
+ The profiler modules are designed to provide an execution profile for a given
+ program, not for benchmarking purposes (for that, there is :mod:`timeit` for
+ reasonably accurate results). This particularly applies to benchmarking
+ Python code against C code: the profilers introduce overhead for Python code,
+ but not for C-level functions, and so the C code would seem faster than any
+ Python one.
+
.. _profile-instant:
@@ -75,57 +77,94 @@ This section is provided for users that "don't want to read the manual." It
provides a very brief overview, and allows a user to rapidly perform profiling
on an existing application.
-To profile an application with a main entry point of :func:`foo`, you would add
-the following to your module::
+To profile a function that takes a single argument, you can do::
import cProfile
- cProfile.run('foo()')
+ import re
+ cProfile.run('re.compile("foo|bar")')
(Use :mod:`profile` instead of :mod:`cProfile` if the latter is not available on
your system.)
-The above action would cause :func:`foo` to be run, and a series of informative
-lines (the profile) to be printed. The above approach is most useful when
-working with the interpreter. If you would like to save the results of a
-profile into a file for later examination, you can supply a file name as the
-second argument to the :func:`run` function::
+The above action would run :func:`re.compile` and print profile results like
+the following::
- import cProfile
- cProfile.run('foo()', 'fooprof')
+ 197 function calls (192 primitive calls) in 0.002 seconds
-The file :file:`cProfile.py` can also be invoked as a script to profile another
-script. For example::
+ Ordered by: standard name
- python -m cProfile myscript.py
+ ncalls tottime percall cumtime percall filename:lineno(function)
+ 1 0.000 0.000 0.001 0.001 <string>:1(<module>)
+ 1 0.000 0.000 0.001 0.001 re.py:212(compile)
+ 1 0.000 0.000 0.001 0.001 re.py:268(_compile)
+ 1 0.000 0.000 0.000 0.000 sre_compile.py:172(_compile_charset)
+ 1 0.000 0.000 0.000 0.000 sre_compile.py:201(_optimize_charset)
+ 4 0.000 0.000 0.000 0.000 sre_compile.py:25(_identityfunction)
+ 3/1 0.000 0.000 0.000 0.000 sre_compile.py:33(_compile)
-:file:`cProfile.py` accepts two optional arguments on the command line::
+The first line indicates that 197 calls were monitored. Of those calls, 192
+were :dfn:`primitive`, meaning that the call was not induced via recursion. The
+next line: ``Ordered by: standard name``, indicates that the text string in the
+far right column was used to sort the output. The column headings include:
- cProfile.py [-o output_file] [-s sort_order]
+ncalls
+ for the number of calls,
-``-s`` only applies to standard output (``-o`` is not supplied).
-Look in the :class:`Stats` documentation for valid sort values.
+tottime
+ for the total time spent in the given function (and excluding time made in
+ calls to sub-functions)
-When you wish to review the profile, you should use the methods in the
-:mod:`pstats` module. Typically you would load the statistics data as follows::
+percall
+ is the quotient of ``tottime`` divided by ``ncalls``
- import pstats
- p = pstats.Stats('fooprof')
+cumtime
+ is the cumulative time spent in this and all subfunctions (from invocation
+ till exit). This figure is accurate *even* for recursive functions.
-The class :class:`Stats` (the above code just created an instance of this class)
-has a variety of methods for manipulating and printing the data that was just
-read into ``p``. When you ran :func:`cProfile.run` above, what was printed was
-the result of three method calls::
+percall
+ is the quotient of ``cumtime`` divided by primitive calls
- p.strip_dirs().sort_stats(-1).print_stats()
+filename:lineno(function)
+ provides the respective data of each function
+
+When there are two numbers in the first column (for example ``3/1``), it means
+that the function recursed. The second value is the number of primitive calls
+and the first value is the total number of calls. Note that when the function does
+not recurse, these two values are the same, and only the single figure is
+printed.
-The first method removed the extraneous path from all the module names. The
-second method sorted all the entries according to the standard module/line/name
-string that is printed. The third method printed out all the statistics. You
-might try the following sort calls:
+Instead of printing the output at the end of the profile run, you can save the
+results to a file by specifying a filename to the :func:`run` function::
-.. (this is to comply with the semantics of the old profiler).
+ import cProfile
+ import re
+ cProfile.run('re.compile("foo|bar")', 'restats')
+
+The :class:`pstats.Stats` class reads profile results from a file and formats
+them in various ways.
+
+The :mod:`cProfile` module can also be invoked as a script to profile another
+script. For example::
-::
+ python -m cProfile [-o output_file] [-s sort_order] myscript.py
+
+``-o`` writes the profile results to a file instead of to stdout.
+
+``-s`` specifies one of the :func:`~pstats.Stats.sort_stats` sort values to sort
+the output by. This only applies when ``-o`` is not supplied.
+
+The :mod:`pstats` module's :class:`~pstats.Stats` class has a variety of methods
+for manipulating and printing the data saved into a profile results file::
+
+ import pstats
+ p = pstats.Stats('restats')
+ p.strip_dirs().sort_stats(-1).print_stats()
+
+The :meth:`~pstats.Stats.strip_dirs` method removed the extraneous path from all
+the module names. The :meth:`~pstats.Stats.sort_stats` method sorted all the
+entries according to the standard module/line/name string that is printed. The
+:meth:`~pstats.Stats.print_stats` method printed out all the statistics. You
+might try the following sort calls::
p.sort_stats('name')
p.print_stats()
@@ -174,315 +213,340 @@ If you want more functionality, you're going to have to read the manual, or
guess what the following functions do::
p.print_callees()
- p.add('fooprof')
+ p.add('restats')
Invoked as a script, the :mod:`pstats` module is a statistics browser for
reading and examining profile dumps. It has a simple line-oriented interface
(implemented using :mod:`cmd`) and interactive help.
+:mod:`profile` and :mod:`cProfile` Module Reference
+=======================================================
-.. _deterministic-profiling:
+.. module:: cProfile
+.. module:: profile
+ :synopsis: Python source profiler.
-What Is Deterministic Profiling?
-================================
+Both the :mod:`profile` and :mod:`cProfile` modules provide the following
+functions:
-:dfn:`Deterministic profiling` is meant to reflect the fact that all *function
-call*, *function return*, and *exception* events are monitored, and precise
-timings are made for the intervals between these events (during which time the
-user's code is executing). In contrast, :dfn:`statistical profiling` (which is
-not done by this module) randomly samples the effective instruction pointer, and
-deduces where time is being spent. The latter technique traditionally involves
-less overhead (as the code does not need to be instrumented), but provides only
-relative indications of where time is being spent.
+.. function:: run(command, filename=None, sort=-1)
-In Python, since there is an interpreter active during execution, the presence
-of instrumented code is not required to do deterministic profiling. Python
-automatically provides a :dfn:`hook` (optional callback) for each event. In
-addition, the interpreted nature of Python tends to add so much overhead to
-execution, that deterministic profiling tends to only add small processing
-overhead in typical applications. The result is that deterministic profiling is
-not that expensive, yet provides extensive run time statistics about the
-execution of a Python program.
+ This function takes a single argument that can be passed to the :func:`exec`
+ function, and an optional file name. In all cases this routine executes::
-Call count statistics can be used to identify bugs in code (surprising counts),
-and to identify possible inline-expansion points (high call counts). Internal
-time statistics can be used to identify "hot loops" that should be carefully
-optimized. Cumulative time statistics should be used to identify high level
-errors in the selection of algorithms. Note that the unusual handling of
-cumulative times in this profiler allows statistics for recursive
-implementations of algorithms to be directly compared to iterative
-implementations.
+ exec(command, __main__.__dict__, __main__.__dict__)
+ and gathers profiling statistics from the execution. If no file name is
+ present, then this function automatically creates a :class:`~pstats.Stats`
+ instance and prints a simple profiling report. If the sort value is specified
+ it is passed to this :class:`~pstats.Stats` instance to control how the
+ results are sorted.
-Reference Manual -- :mod:`profile` and :mod:`cProfile`
-======================================================
+.. function:: runctx(command, globals, locals, filename=None)
-.. module:: cProfile
- :synopsis: Python profiler
+ This function is similar to :func:`run`, with added arguments to supply the
+ globals and locals dictionaries for the *command* string. This routine
+ executes::
+ exec(command, globals, locals)
-The primary entry point for the profiler is the global function
-:func:`profile.run` (resp. :func:`cProfile.run`). It is typically used to create
-any profile information. The reports are formatted and printed using methods of
-the class :class:`pstats.Stats`. The following is a description of all of these
-standard entry points and functions. For a more in-depth view of some of the
-code, consider reading the later section on Profiler Extensions, which includes
-discussion of how to derive "better" profilers from the classes presented, or
-reading the source code for these modules.
+ and gathers profiling statistics as in the :func:`run` function above.
+.. class:: Profile(timer=None, timeunit=0.0, subcalls=True, builtins=True)
-.. function:: run(command[, filename])
+ This class is normally only used if more precise control over profiling is
+ needed than what the :func:`cProfile.run` function provides.
- This function takes a single argument that can be passed to the
- :keyword:`exec` statement, and an optional file name. In all cases this
- routine attempts to :keyword:`exec` its first argument, and gather profiling
- statistics from the execution. If no file name is present, then this function
- automatically prints a simple profiling report, sorted by the standard name
- string (file/line/function-name) that is presented in each line. The
- following is a typical output from such a call::
+ A custom timer can be supplied for measuring how long code takes to run via
+ the *timer* argument. This must be a function that returns a single number
+ representing the current time. If the number is an integer, the *timeunit*
+ specifies a multiplier giving the duration of each unit of time. For
+ example, if the timer returns times measured in thousandths of a second
+ (i.e. milliseconds), the time unit would be ``.001``.
- 2706 function calls (2004 primitive calls) in 4.504 CPU seconds
+ Directly using the :class:`Profile` class allows formatting profile results
+ without writing the profile data to a file::
- Ordered by: standard name
+ import cProfile, pstats, StringIO
+ pr = cProfile.Profile()
+ pr.enable()
+ # ... do something ...
+ pr.disable()
+ s = StringIO.StringIO()
+ sortby = 'cumulative'
+ ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
+ ps.print_stats()
+ print s.getvalue()
- ncalls tottime percall cumtime percall filename:lineno(function)
- 2 0.006 0.003 0.953 0.477 pobject.py:75(save_objects)
- 43/3 0.533 0.012 0.749 0.250 pobject.py:99(evaluate)
- ...
+ .. method:: enable()
- The first line indicates that 2706 calls were monitored. Of those calls, 2004
- were :dfn:`primitive`. We define :dfn:`primitive` to mean that the call was not
- induced via recursion. The next line: ``Ordered by: standard name``, indicates
- that the text string in the far right column was used to sort the output. The
- column headings include:
+ Start collecting profiling data.
- ncalls
- for the number of calls,
+ .. method:: disable()
- tottime
- for the total time spent in the given function (and excluding time made in calls
- to sub-functions),
+ Stop collecting profiling data.
- percall
- is the quotient of ``tottime`` divided by ``ncalls``
+ .. method:: create_stats()
- cumtime
- is the total time spent in this and all subfunctions (from invocation till
- exit). This figure is accurate *even* for recursive functions.
+ Stop collecting profiling data and record the results internally
+ as the current profile.
- percall
- is the quotient of ``cumtime`` divided by primitive calls
+ .. method:: print_stats(sort=-1)
- filename:lineno(function)
- provides the respective data of each function
+ Create a :class:`~pstats.Stats` object based on the current
+ profile and print the results to stdout.
- When there are two numbers in the first column (for example, ``43/3``), then the
- latter is the number of primitive calls, and the former is the actual number of
- calls. Note that when the function does not recurse, these two values are the
- same, and only the single figure is printed.
+ .. method:: dump_stats(filename)
+ Write the results of the current profile to *filename*.
-.. function:: runctx(command, globals, locals[, filename])
+ .. method:: run(cmd)
- This function is similar to :func:`run`, with added arguments to supply the
- globals and locals dictionaries for the *command* string.
+ Profile *cmd* via :func:`exec`.
-Analysis of the profiler data is done using the :class:`Stats` class.
+ .. method:: runctx(cmd, globals, locals)
-.. note::
+ Profile *cmd* via :func:`exec` with the specified global and
+ local environment.
+
+ .. method:: runcall(func, *args, **kwargs)
- The :class:`Stats` class is defined in the :mod:`pstats` module.
+ Profile ``func(*args, **kwargs)``.
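+
+ For example (a sketch; ``compute`` is a hypothetical function)::
+
+    import cProfile
+
+    def compute(n):
+        # hypothetical workload
+        return sum(i * i for i in range(n))
+
+    pr = cProfile.Profile()
+    result = pr.runcall(compute, 100000)
+    pr.print_stats()
+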
+.. _profile-stats:
+
+The :class:`Stats` Class
+========================
+
+Analysis of the profiler data is done using the :class:`~pstats.Stats` class.
.. module:: pstats
:synopsis: Statistics object for use with the profiler.
-
-.. class:: Stats(filename[, stream=sys.stdout[, ...]])
+.. class:: Stats(*filenames or profile, stream=sys.stdout)
This class constructor creates an instance of a "statistics object" from a
- *filename* (or set of filenames). :class:`Stats` objects are manipulated by
- methods, in order to print useful reports. You may specify an alternate output
- stream by giving the keyword argument, ``stream``.
+ *filename* (or list of filenames) or from a :class:`Profile` instance. Output
+ will be printed to the stream specified by *stream*.
The file selected by the above constructor must have been created by the
corresponding version of :mod:`profile` or :mod:`cProfile`. To be specific,
there is *no* file compatibility guaranteed with future versions of this
- profiler, and there is no compatibility with files produced by other profilers.
- If several files are provided, all the statistics for identical functions will
- be coalesced, so that an overall view of several processes can be considered in
- a single report. If additional files need to be combined with data in an
- existing :class:`Stats` object, the :meth:`add` method can be used.
-
- .. (such as the old system profiler).
-
- .. versionchanged:: 2.5
- The *stream* parameter was added.
-
+ profiler, and there is no compatibility with files produced by other
+ profilers. If several files are provided, all the statistics for identical
+ functions will be coalesced, so that an overall view of several processes can
+ be considered in a single report. If additional files need to be combined
+ with data in an existing :class:`~pstats.Stats` object, the
+ :meth:`~pstats.Stats.add` method can be used.
-.. _profile-stats:
+ Instead of reading the profile data from a file, a :class:`cProfile.Profile`
+ or :class:`profile.Profile` object can be used as the profile data source.
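+
+ For example, a minimal sketch that builds a :class:`Stats` object directly
+ from a profiler instance instead of a file::
+
+    import cProfile, pstats
+
+    pr = cProfile.Profile()
+    pr.run('sum(range(100000))')
+    stats = pstats.Stats(pr)
+    stats.sort_stats('time').print_stats(5)
+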
-The :class:`Stats` Class
-------------------------
+ :class:`Stats` objects have the following methods:
-:class:`Stats` objects have the following methods:
+ .. method:: strip_dirs()
+ This method for the :class:`Stats` class removes all leading path
+ information from file names. It is very useful in reducing the size of
+ the printout to fit within (close to) 80 columns. This method modifies
+ the object, and the stripped information is lost. After performing a
+ strip operation, the object is considered to have its entries in a
+ "random" order, as it was just after object initialization and loading.
+ If :meth:`~pstats.Stats.strip_dirs` causes two function names to be
+ indistinguishable (they are on the same line of the same filename, and
+ have the same function name), then the statistics for these two entries
+ are accumulated into a single entry.
-.. method:: Stats.strip_dirs()
- This method for the :class:`Stats` class removes all leading path information
- from file names. It is very useful in reducing the size of the printout to fit
- within (close to) 80 columns. This method modifies the object, and the stripped
- information is lost. After performing a strip operation, the object is
- considered to have its entries in a "random" order, as it was just after object
- initialization and loading. If :meth:`strip_dirs` causes two function names to
- be indistinguishable (they are on the same line of the same filename, and have
- the same function name), then the statistics for these two entries are
- accumulated into a single entry.
+ .. method:: add(*filenames)
+ This method of the :class:`Stats` class accumulates additional profiling
+ information into the current profiling object. Its arguments should refer
+ to filenames created by the corresponding version of :func:`profile.run`
+ or :func:`cProfile.run`. Statistics for identically named (re: file, line,
+ name) functions are automatically accumulated into single function
+ statistics.
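+
+ For example (a sketch; the profile file names are illustrative)::
+
+    stats = pstats.Stats('first_run.prof')
+    stats.add('second_run.prof', 'third_run.prof')
+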
-.. method:: Stats.add(filename[, ...])
- This method of the :class:`Stats` class accumulates additional profiling
- information into the current profiling object. Its arguments should refer to
- filenames created by the corresponding version of :func:`profile.run` or
- :func:`cProfile.run`. Statistics for identically named (re: file, line, name)
- functions are automatically accumulated into single function statistics.
+ .. method:: dump_stats(filename)
-
-.. method:: Stats.dump_stats(filename)
-
- Save the data loaded into the :class:`Stats` object to a file named *filename*.
- The file is created if it does not exist, and is overwritten if it already
- exists. This is equivalent to the method of the same name on the
- :class:`profile.Profile` and :class:`cProfile.Profile` classes.
+ Save the data loaded into the :class:`Stats` object to a file named
+ *filename*. The file is created if it does not exist, and is overwritten
+ if it already exists. This is equivalent to the method of the same name
+ on the :class:`profile.Profile` and :class:`cProfile.Profile` classes.
.. versionadded:: 2.3
-.. method:: Stats.sort_stats(key[, ...])
-
- This method modifies the :class:`Stats` object by sorting it according to the
- supplied criteria. The argument is typically a string identifying the basis of
- a sort (example: ``'time'`` or ``'name'``).
-
- When more than one key is provided, then additional keys are used as secondary
- criteria when there is equality in all keys selected before them. For example,
- ``sort_stats('name', 'file')`` will sort all the entries according to their
- function name, and resolve all ties (identical function names) by sorting by
- file name.
-
- Abbreviations can be used for any key names, as long as the abbreviation is
- unambiguous. The following are the keys currently defined:
-
- +------------------+----------------------+
- | Valid Arg | Meaning |
- +==================+======================+
- | ``'calls'`` | call count |
- +------------------+----------------------+
- | ``'cumulative'`` | cumulative time |
- +------------------+----------------------+
- | ``'file'`` | file name |
- +------------------+----------------------+
- | ``'module'`` | file name |
- +------------------+----------------------+
- | ``'pcalls'`` | primitive call count |
- +------------------+----------------------+
- | ``'line'`` | line number |
- +------------------+----------------------+
- | ``'name'`` | function name |
- +------------------+----------------------+
- | ``'nfl'`` | name/file/line |
- +------------------+----------------------+
- | ``'stdname'`` | standard name |
- +------------------+----------------------+
- | ``'time'`` | internal time |
- +------------------+----------------------+
-
- Note that all sorts on statistics are in descending order (placing most time
- consuming items first), where as name, file, and line number searches are in
- ascending order (alphabetical). The subtle distinction between ``'nfl'`` and
- ``'stdname'`` is that the standard name is a sort of the name as printed, which
- means that the embedded line numbers get compared in an odd way. For example,
- lines 3, 20, and 40 would (if the file names were the same) appear in the string
- order 20, 3 and 40. In contrast, ``'nfl'`` does a numeric compare of the line
- numbers. In fact, ``sort_stats('nfl')`` is the same as ``sort_stats('name',
- 'file', 'line')``.
-
- For backward-compatibility reasons, the numeric arguments ``-1``, ``0``, ``1``,
- and ``2`` are permitted. They are interpreted as ``'stdname'``, ``'calls'``,
- ``'time'``, and ``'cumulative'`` respectively. If this old style format
- (numeric) is used, only one sort key (the numeric key) will be used, and
- additional arguments will be silently ignored.
-
- .. For compatibility with the old profiler,
-
+ .. method:: sort_stats(*keys)
+
+ This method modifies the :class:`Stats` object by sorting it according to
+ the supplied criteria. The argument is typically a string identifying the
+ basis of a sort (example: ``'time'`` or ``'name'``).
+
+ When more than one key is provided, then additional keys are used as
+ secondary criteria when there is equality in all keys selected before
+ them. For example, ``sort_stats('name', 'file')`` will sort all the
+ entries according to their function name, and resolve all ties (identical
+ function names) by sorting by file name.
+
+ Abbreviations can be used for any key names, as long as the abbreviation
+ is unambiguous. The following are the keys currently defined:
+
+ +------------------+----------------------+
+ | Valid Arg | Meaning |
+ +==================+======================+
+ | ``'calls'`` | call count |
+ +------------------+----------------------+
+ | ``'cumulative'`` | cumulative time |
+ +------------------+----------------------+
+ | ``'cumtime'`` | cumulative time |
+ +------------------+----------------------+
+ | ``'file'`` | file name |
+ +------------------+----------------------+
+ | ``'filename'`` | file name |
+ +------------------+----------------------+
+ | ``'module'`` | file name |
+ +------------------+----------------------+
+ | ``'ncalls'`` | call count |
+ +------------------+----------------------+
+ | ``'pcalls'`` | primitive call count |
+ +------------------+----------------------+
+ | ``'line'`` | line number |
+ +------------------+----------------------+
+ | ``'name'`` | function name |
+ +------------------+----------------------+
+ | ``'nfl'`` | name/file/line |
+ +------------------+----------------------+
+ | ``'stdname'`` | standard name |
+ +------------------+----------------------+
+ | ``'time'`` | internal time |
+ +------------------+----------------------+
+ | ``'tottime'`` | internal time |
+ +------------------+----------------------+
+
+ Note that all sorts on statistics are in descending order (placing most
+ time-consuming items first), whereas name, file, and line number searches
+ are in ascending order (alphabetical). The subtle distinction between
+ ``'nfl'`` and ``'stdname'`` is that the standard name is a sort of the
+ name as printed, which means that the embedded line numbers get compared
+ in an odd way. For example, lines 3, 20, and 40 would (if the file names
+ were the same) appear in the string order 20, 3 and 40. In contrast,
+ ``'nfl'`` does a numeric compare of the line numbers. In fact,
+ ``sort_stats('nfl')`` is the same as ``sort_stats('name', 'file',
+ 'line')``.
+
+ For backward-compatibility reasons, the numeric arguments ``-1``, ``0``,
+ ``1``, and ``2`` are permitted. They are interpreted as ``'stdname'``,
+ ``'calls'``, ``'time'``, and ``'cumulative'`` respectively. If this old
+ style format (numeric) is used, only one sort key (the numeric key) will
+ be used, and additional arguments will be silently ignored.
+
+ .. For compatibility with the old profiler.
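+
+ For example, a sketch that sorts first by call count and breaks ties by
+ cumulative time (assuming profile data has already been loaded into
+ ``stats``)::
+
+    stats.sort_stats('calls', 'cumulative').print_stats(20)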
+
+
+ .. method:: reverse_order()
+
+ This method for the :class:`Stats` class reverses the ordering of the
+ basic list within the object. Note that by default ascending vs
+ descending order is properly selected based on the sort key of choice.
+
+ .. This method is provided primarily for compatibility with the old
+ profiler.
+
+
+ .. method:: print_stats(*restrictions)
+
+ This method for the :class:`Stats` class prints out a report as described
+ in the :func:`profile.run` definition.
+
+ The order of the printing is based on the last
+ :meth:`~pstats.Stats.sort_stats` operation done on the object (subject to
+ caveats in :meth:`~pstats.Stats.add` and
+ :meth:`~pstats.Stats.strip_dirs`).
+
+ The arguments provided (if any) can be used to limit the list down to the
+ significant entries. Initially, the list is taken to be the complete set
+ of profiled functions. Each restriction is either an integer (to select a
+ count of lines), or a decimal fraction between 0.0 and 1.0 inclusive (to
+ select a percentage of lines), or a regular expression (to pattern match
+ the standard name that is printed). If several restrictions are provided,
+ then they are applied sequentially. For example::
+
+ print_stats(.1, 'foo:')
+
+ would first limit the printing to the first 10% of the list, and then only print
+ functions that were part of filename :file:`.\*foo:`. In contrast, the
+ command::
+
+ print_stats('foo:', .1)
+
+ would limit the list to all functions having file names :file:`.\*foo:`,
+ and then proceed to only print the first 10% of them.
+
+
+ .. method:: print_callers(*restrictions)
+
+ This method for the :class:`Stats` class prints a list of all functions
+ that called each function in the profiled database. The ordering is
+ identical to that provided by :meth:`~pstats.Stats.print_stats`, and the
+ definition of the restricting argument is also identical. Each caller is
+ reported on its own line. The format differs slightly depending on the
+ profiler that produced the stats:
+
+ * With :mod:`profile`, a number is shown in parentheses after each caller
+ to show how many times this specific call was made. For convenience, a
+ second non-parenthesized number repeats the cumulative time spent in the
+ function at the right.
+
+ * With :mod:`cProfile`, each caller is preceded by three numbers: the
+ number of times this specific call was made, and the total and
+ cumulative times spent in the current function while it was invoked by
+ this specific caller.
+
+
+ .. method:: print_callees(*restrictions)
-.. method:: Stats.reverse_order()
+ This method for the :class:`Stats` class prints a list of all functions
+ that were called by the indicated function. Aside from this reversal of
+ direction of calls (re: called vs was called by), the arguments and
+ ordering are identical to the :meth:`~pstats.Stats.print_callers` method.
- This method for the :class:`Stats` class reverses the ordering of the basic list
- within the object. Note that by default ascending vs descending order is
- properly selected based on the sort key of choice.
- .. This method is provided primarily for compatibility with the old profiler.
-
-
-.. method:: Stats.print_stats([restriction, ...])
-
- This method for the :class:`Stats` class prints out a report as described in the
- :func:`profile.run` definition.
-
- The order of the printing is based on the last :meth:`sort_stats` operation done
- on the object (subject to caveats in :meth:`add` and :meth:`strip_dirs`).
-
- The arguments provided (if any) can be used to limit the list down to the
- significant entries. Initially, the list is taken to be the complete set of
- profiled functions. Each restriction is either an integer (to select a count of
- lines), or a decimal fraction between 0.0 and 1.0 inclusive (to select a
- percentage of lines), or a regular expression (to pattern match the standard
- name that is printed; as of Python 1.5b1, this uses the Perl-style regular
- expression syntax defined by the :mod:`re` module). If several restrictions are
- provided, then they are applied sequentially. For example::
-
- print_stats(.1, 'foo:')
-
- would first limit the printing to first 10% of list, and then only print
- functions that were part of filename :file:`.\*foo:`. In contrast, the
- command::
-
- print_stats('foo:', .1)
-
- would limit the list to all functions having file names :file:`.\*foo:`, and
- then proceed to only print the first 10% of them.
-
-
-.. method:: Stats.print_callers([restriction, ...])
-
- This method for the :class:`Stats` class prints a list of all functions that
- called each function in the profiled database. The ordering is identical to
- that provided by :meth:`print_stats`, and the definition of the restricting
- argument is also identical. Each caller is reported on its own line. The
- format differs slightly depending on the profiler that produced the stats:
-
- * With :mod:`profile`, a number is shown in parentheses after each caller to
- show how many times this specific call was made. For convenience, a second
- non-parenthesized number repeats the cumulative time spent in the function
- at the right.
+.. _deterministic-profiling:
- * With :mod:`cProfile`, each caller is preceded by three numbers: the number of
- times this specific call was made, and the total and cumulative times spent in
- the current function while it was invoked by this specific caller.
+What Is Deterministic Profiling?
+================================
+:dfn:`Deterministic profiling` is meant to reflect the fact that all *function
+call*, *function return*, and *exception* events are monitored, and precise
+timings are made for the intervals between these events (during which time the
+user's code is executing). In contrast, :dfn:`statistical profiling` (which is
+not done by this module) randomly samples the effective instruction pointer, and
+deduces where time is being spent. The latter technique traditionally involves
+less overhead (as the code does not need to be instrumented), but provides only
+relative indications of where time is being spent.
-.. method:: Stats.print_callees([restriction, ...])
+In Python, since there is an interpreter active during execution, the presence
+of instrumented code is not required to do deterministic profiling. Python
+automatically provides a :dfn:`hook` (optional callback) for each event. In
+addition, the interpreted nature of Python tends to add so much overhead to
+execution that deterministic profiling tends to add only a small processing
+overhead in typical applications. The result is that deterministic profiling is
+not that expensive, yet provides extensive run time statistics about the
+execution of a Python program.
- This method for the :class:`Stats` class prints a list of all function that were
- called by the indicated function. Aside from this reversal of direction of
- calls (re: called vs was called by), the arguments and ordering are identical to
- the :meth:`print_callers` method.
+Call count statistics can be used to identify bugs in code (surprising counts),
+and to identify possible inline-expansion points (high call counts). Internal
+time statistics can be used to identify "hot loops" that should be carefully
+optimized. Cumulative time statistics should be used to identify high level
+errors in the selection of algorithms. Note that the unusual handling of
+cumulative times in this profiler allows statistics for recursive
+implementations of algorithms to be directly compared to iterative
+implementations.
-.. _profile-limits:
+.. _profile-limitations:
Limitations
===========
@@ -525,7 +589,7 @@ The profiler of the :mod:`profile` module subtracts a constant from each event
handling time to compensate for the overhead of calling the time function, and
socking away the results. By default, the constant is 0. The following
procedure can be used to obtain a better constant for a given platform (see
-discussion in section Limitations above). ::
+:ref:`profile-limitations`). ::
import profile
pr = profile.Profile()
@@ -535,8 +599,8 @@ discussion in section Limitations above). ::
The method executes the number of Python calls given by the argument, directly
and again under the profiler, measuring the time for both. It then computes the
hidden overhead per profiler event, and returns that as a float. For example,
-on an 800 MHz Pentium running Windows 2000, and using Python's time.clock() as
-the timer, the magical number is about 12.5e-6.
+on a 1.8 GHz Intel Core i5 running Mac OS X, and using Python's time.clock() as
+the timer, the magical number is about 4.04e-6.
The object of this exercise is to get a fairly consistent result. If your
computer is *very* fast, or your timer function has poor resolution, you might
@@ -559,61 +623,54 @@ When you have a consistent answer, there are three ways you can use it: [#]_ ::
If you have a choice, you are better off choosing a smaller constant, and then
your results will "less often" show up as negative in profile statistics.
+.. _profile-timers:
-.. _profiler-extensions:
-
-Extensions --- Deriving Better Profilers
-========================================
-
-The :class:`Profile` class of both modules, :mod:`profile` and :mod:`cProfile`,
-were written so that derived classes could be developed to extend the profiler.
-The details are not described here, as doing this successfully requires an
-expert understanding of how the :class:`Profile` class works internally. Study
-the source code of the module carefully if you want to pursue this.
+Using a custom timer
+====================
-If all you want to do is change how current time is determined (for example, to
-force use of wall-clock time or elapsed process time), pass the timing function
-you want to the :class:`Profile` class constructor::
+If you want to change how current time is determined (for example, to force use
+of wall-clock time or elapsed process time), pass the timing function you want
+to the :class:`Profile` class constructor::
- pr = profile.Profile(your_time_func)
+ pr = profile.Profile(your_time_func)
-The resulting profiler will then call :func:`your_time_func`.
+The resulting profiler will then call ``your_time_func``. Depending on whether
+you are using :class:`profile.Profile` or :class:`cProfile.Profile`,
+``your_time_func``'s return value will be interpreted differently:
:class:`profile.Profile`
- :func:`your_time_func` should return a single number, or a list of numbers whose
- sum is the current time (like what :func:`os.times` returns). If the function
- returns a single time number, or the list of returned numbers has length 2, then
- you will get an especially fast version of the dispatch routine.
+ ``your_time_func`` should return a single number, or a list of numbers whose
+ sum is the current time (like what :func:`os.times` returns). If the
+ function returns a single time number, or the list of returned numbers has
+ length 2, then you will get an especially fast version of the dispatch
+ routine.
Be warned that you should calibrate the profiler class for the timer function
- that you choose. For most machines, a timer that returns a lone integer value
- will provide the best results in terms of low overhead during profiling.
- (:func:`os.times` is *pretty* bad, as it returns a tuple of floating point
- values). If you want to substitute a better timer in the cleanest fashion,
- derive a class and hardwire a replacement dispatch method that best handles your
- timer call, along with the appropriate calibration constant.
+ that you choose (see :ref:`profile-calibration`). For most machines, a timer
+ that returns a lone integer value will provide the best results in terms of
+ low overhead during profiling. (:func:`os.times` is *pretty* bad, as it
+ returns a tuple of floating point values). If you want to substitute a
+ better timer in the cleanest fashion, derive a class and hardwire a
+ replacement dispatch method that best handles your timer call, along with the
+ appropriate calibration constant.
:class:`cProfile.Profile`
- :func:`your_time_func` should return a single number. If it returns plain
- integers, you can also invoke the class constructor with a second argument
- specifying the real duration of one unit of time. For example, if
- :func:`your_integer_time_func` returns times measured in thousands of seconds,
+ ``your_time_func`` should return a single number. If it returns integers,
+ you can also invoke the class constructor with a second argument specifying
+ the real duration of one unit of time. For example, if
+ ``your_integer_time_func`` returns times measured in thousands of seconds,
you would construct the :class:`Profile` instance as follows::
- pr = profile.Profile(your_integer_time_func, 0.001)
+ pr = cProfile.Profile(your_integer_time_func, 0.001)
As the :mod:`cProfile.Profile` class cannot be calibrated, custom timer
- functions should be used with care and should be as fast as possible. For the
- best results with a custom timer, it might be necessary to hard-code it in the C
- source of the internal :mod:`_lsprof` module.
+ functions should be used with care and should be as fast as possible. For
+ the best results with a custom timer, it might be necessary to hard-code it
+ in the C source of the internal :mod:`_lsprof` module.
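+
+For example, a minimal sketch that forces wall-clock measurement by passing
+:func:`time.time` (which returns a float, so no time unit argument is needed)::
+
+   import time
+   import cProfile
+
+   pr = cProfile.Profile(time.time)
+   pr.enable()
+   # ... code to be measured ...
+   pr.disable()
+   pr.print_stats('cumulative')
+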
-.. rubric:: Footnotes
-.. [#] Updated and converted to LaTeX by Guido van Rossum. Further updated by Armin
- Rigo to integrate the documentation for the new :mod:`cProfile` module of Python
- 2.5.
+.. rubric:: Footnotes
-.. [#] Prior to Python 2.2, it was necessary to edit the profiler source code to embed
- the bias as a literal number. You still can, but that method is no longer
+.. [#] Prior to Python 2.2, it was necessary to edit the profiler source code to
+ embed the bias as a literal number. You still can, but that method is no longer
described, because no longer needed.
-
diff --git a/Doc/library/pyclbr.rst b/Doc/library/pyclbr.rst
index 2f81451..13eaabf 100644
--- a/Doc/library/pyclbr.rst
+++ b/Doc/library/pyclbr.rst
@@ -19,7 +19,7 @@ not implemented in Python, including all standard and optional extension
modules.
-.. function:: readmodule(module[, path=None])
+.. function:: readmodule(module, path=None)
Read a module and return a dictionary mapping class names to class
descriptor objects. The parameter *module* should be the name of a
@@ -28,7 +28,7 @@ modules.
of ``sys.path``, which is used to locate module source code.
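+
+ For example, a sketch that lists the classes of the standard :mod:`Queue`
+ module (chosen only as a convenient target)::
+
+    import pyclbr
+
+    classes = pyclbr.readmodule('Queue')
+    for name, descriptor in sorted(classes.items()):
+        print name, descriptor.lineno
+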
-.. function:: readmodule_ex(module[, path=None])
+.. function:: readmodule_ex(module, path=None)
Like :func:`readmodule`, but the returned dictionary, in addition to
mapping class names to class descriptor objects, also maps top-level
diff --git a/Doc/library/pyexpat.rst b/Doc/library/pyexpat.rst
index 8299739..20ca3bc 100644
--- a/Doc/library/pyexpat.rst
+++ b/Doc/library/pyexpat.rst
@@ -14,6 +14,14 @@
directive. Since they are attributes which are set by client code, in-text
references to these attributes should be marked using the :member: role.
+
+.. warning::
+
+ The :mod:`pyexpat` module is not secure against maliciously
+ constructed data. If you need to parse untrusted or unauthenticated data see
+ :ref:`xml-vulnerabilities`.
+
+
.. versionadded:: 2.0
.. index:: single: Expat
@@ -95,6 +103,10 @@ The :mod:`xml.parsers.expat` module contains two functions:
http://www.python.org/ns/ elem1
elem2
+ Due to limitations in the ``Expat`` library used by :mod:`pyexpat`,
+ the :class:`xmlparser` instance returned can only be used to parse a single
+ XML document. Call ``ParserCreate`` for each document to provide unique
+ parser instances.
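+
+ For example, a sketch that creates a fresh parser for each of two trivial,
+ made-up documents::
+
+    from xml.parsers import expat
+
+    documents = ['<root/>', '<doc><item/></doc>']   # sample documents
+    for document in documents:
+        parser = expat.ParserCreate()
+        parser.Parse(document, True)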
.. seealso::
@@ -114,7 +126,9 @@ XMLParser Objects
Parses the contents of the string *data*, calling the appropriate handler
functions to process the parsed data. *isfinal* must be true on the final call
- to this method. *data* can be the empty string at any time.
+ to this method; it allows the parsing of a single file in fragments,
+ not the submission of multiple files.
+ *data* can be the empty string at any time.
.. method:: xmlparser.ParseFile(file)
@@ -437,7 +451,7 @@ otherwise stated.
.. method:: xmlparser.CommentHandler(data)
Called for comments. *data* is the text of the comment, excluding the leading
- '``<!-``\ ``-``' and trailing '``-``\ ``->``'.
+ ``'<!-``\ ``-'`` and trailing ``'-``\ ``->'``.
.. method:: xmlparser.StartCdataSectionHandler()
@@ -898,5 +912,5 @@ The ``errors`` object has the following attributes:
.. [#] The encoding string included in XML output should conform to the
appropriate standards. For example, "UTF-8" is valid, but "UTF8" is
not. See http://www.w3.org/TR/2006/REC-xml11-20060816/#NT-EncodingDecl
- and http://www.iana.org/assignments/character-sets .
+ and http://www.iana.org/assignments/character-sets\ .
diff --git a/Doc/library/queue.rst b/Doc/library/queue.rst
index 36ff346..b525705 100644
--- a/Doc/library/queue.rst
+++ b/Doc/library/queue.rst
@@ -5,9 +5,9 @@
:synopsis: A synchronized queue class.
.. note::
- The :mod:`Queue` module has been renamed to :mod:`queue` in Python 3.0. The
+ The :mod:`Queue` module has been renamed to :mod:`queue` in Python 3. The
:term:`2to3` tool will automatically adapt imports when converting your
- sources to 3.0.
+ sources to Python 3.
**Source code:** :source:`Lib/Queue.py`
@@ -20,8 +20,8 @@ module implements all the required locking semantics. It depends on the
availability of thread support in Python; see the :mod:`threading`
module.
-Implements three types of queue whose only difference is the order that
-the entries are retrieved. In a FIFO queue, the first tasks added are
+The module implements three types of queue, which differ only in the order in
+which the entries are retrieved. In a FIFO queue, the first tasks added are
the first retrieved. In a LIFO queue, the most recently added entry is
the first retrieved (operating like a stack). With a priority queue,
the entries are kept sorted (using the :mod:`heapq` module) and the
@@ -60,13 +60,15 @@ The :mod:`Queue` module defines the following classes and exceptions:
.. exception:: Empty
- Exception raised when non-blocking :meth:`get` (or :meth:`get_nowait`) is called
+ Exception raised when non-blocking :meth:`~Queue.get` (or
+ :meth:`~Queue.get_nowait`) is called
on a :class:`Queue` object which is empty.
.. exception:: Full
- Exception raised when non-blocking :meth:`put` (or :meth:`put_nowait`) is called
+ Exception raised when non-blocking :meth:`~Queue.put` (or
+ :meth:`~Queue.put_nowait`) is called
on a :class:`Queue` object which is full.
.. seealso::
diff --git a/Doc/library/random.rst b/Doc/library/random.rst
index de98c04..1bc9989 100644
--- a/Doc/library/random.rst
+++ b/Doc/library/random.rst
@@ -60,6 +60,13 @@ The :mod:`random` module also provides the :class:`SystemRandom` class which
uses the system function :func:`os.urandom` to generate random numbers
from sources provided by the operating system.
+.. warning::
+
+ The pseudo-random generators of this module should not be used for
+ security purposes. Use :func:`os.urandom` or :class:`SystemRandom` if
+ you require a cryptographically secure pseudo-random number generator.
+
+
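+For example, a sketch that draws random bits from the operating system's
+randomness source instead of the default Mersenne Twister generator::
+
+   import random
+
+   secure = random.SystemRandom()
+   token = secure.getrandbits(128)
+
+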
Bookkeeping functions:
@@ -90,7 +97,7 @@ Bookkeeping functions:
*state* should have been obtained from a previous call to :func:`getstate`, and
:func:`setstate` restores the internal state of the generator to what it was at
- the time :func:`setstate` was called.
+ the time :func:`getstate` was called.
.. versionadded:: 2.1
@@ -124,7 +131,8 @@ Bookkeeping functions:
Functions for integers:
-.. function:: randrange([start,] stop[, step])
+.. function:: randrange(stop)
+ randrange(start, stop[, step])
Return a randomly selected element from ``range(start, stop, step)``. This is
equivalent to ``choice(range(start, stop, step))``, but doesn't actually build a
diff --git a/Doc/library/re.rst b/Doc/library/re.rst
index df3f9e5..e5bbd03 100644
--- a/Doc/library/re.rst
+++ b/Doc/library/re.rst
@@ -237,21 +237,32 @@ The special characters are:
``(?P<name>...)``
Similar to regular parentheses, but the substring matched by the group is
- accessible within the rest of the regular expression via the symbolic group
- name *name*. Group names must be valid Python identifiers, and each group
- name must be defined only once within a regular expression. A symbolic group
- is also a numbered group, just as if the group were not named. So the group
- named ``id`` in the example below can also be referenced as the numbered group
- ``1``.
-
- For example, if the pattern is ``(?P<id>[a-zA-Z_]\w*)``, the group can be
- referenced by its name in arguments to methods of match objects, such as
- ``m.group('id')`` or ``m.end('id')``, and also by name in the regular
- expression itself (using ``(?P=id)``) and replacement text given to
- ``.sub()`` (using ``\g<id>``).
+ accessible via the symbolic group name *name*. Group names must be valid
+ Python identifiers, and each group name must be defined only once within a
+ regular expression. A symbolic group is also a numbered group, just as if
+ the group were not named.
+
+ Named groups can be referenced in three contexts. If the pattern is
+ ``(?P<quote>['"]).*?(?P=quote)`` (i.e. matching a string quoted with either
+ single or double quotes):
+
+ +---------------------------------------+----------------------------------+
+ | Context of reference to group "quote" | Ways to reference it |
+ +=======================================+==================================+
+ | in the same pattern itself | * ``(?P=quote)`` (as shown) |
+ | | * ``\1`` |
+ +---------------------------------------+----------------------------------+
+ | when processing match object ``m`` | * ``m.group('quote')`` |
+ | | * ``m.end('quote')`` (etc.) |
+ +---------------------------------------+----------------------------------+
+ | in a string passed to the ``repl`` | * ``\g<quote>`` |
+ | argument of ``re.sub()`` | * ``\g<1>`` |
+ | | * ``\1`` |
+ +---------------------------------------+----------------------------------+
``(?P=name)``
- Matches whatever text was matched by the earlier group named *name*.
+ A backreference to a named group; it matches whatever text was matched by the
+ earlier group named *name*.
``(?#...)``
A comment; the contents of the parentheses are simply ignored.
@@ -273,7 +284,7 @@ The special characters are:
lookbehind will back up 3 characters and check if the contained pattern matches.
The contained pattern must only match strings of some fixed length, meaning that
``abc`` or ``a|b`` are allowed, but ``a*`` and ``a{3,4}`` are not. Note that
- patterns which start with positive lookbehind assertions will never match at the
+ patterns which start with positive lookbehind assertions will not match at the
beginning of the string being searched; you will most likely want to use the
:func:`search` function rather than the :func:`match` function:
@@ -311,7 +322,7 @@ the second character. For example, ``\$`` matches the character ``'$'``.
``\number``
Matches the contents of the group of the same number. Groups are numbered
starting from 1. For example, ``(.+) \1`` matches ``'the the'`` or ``'55 55'``,
- but not ``'the end'`` (note the space after the group). This special sequence
+ but not ``'thethe'`` (note the space after the group). This special sequence
can only be used to match one of the first 99 groups. If the first digit of
*number* is 0, or *number* is 3 octal digits long, it will not be interpreted as
a group match, but as the character with octal value *number*. Inside the
@@ -325,14 +336,20 @@ the second character. For example, ``\$`` matches the character ``'$'``.
Matches the empty string, but only at the beginning or end of a word. A word is
defined as a sequence of alphanumeric or underscore characters, so the end of a
word is indicated by whitespace or a non-alphanumeric, non-underscore character.
- Note that ``\b`` is defined as the boundary between ``\w`` and ``\W``, so the
- precise set of characters deemed to be alphanumeric depends on the values of the
- ``UNICODE`` and ``LOCALE`` flags. Inside a character range, ``\b`` represents
- the backspace character, for compatibility with Python's string literals.
+ Note that formally, ``\b`` is defined as the boundary between a ``\w`` and
+ a ``\W`` character (or vice versa), or between ``\w`` and the beginning/end
+ of the string, so the precise set of characters deemed to be alphanumeric
+ depends on the values of the ``UNICODE`` and ``LOCALE`` flags.
+ For example, ``r'\bfoo\b'`` matches ``'foo'``, ``'foo.'``, ``'(foo)'``,
+ ``'bar foo baz'`` but not ``'foobar'`` or ``'foo3'``.
+ Inside a character range, ``\b`` represents the backspace character, for
+ compatibility with Python's string literals.
``\B``
Matches the empty string, but only when it is *not* at the beginning or end of a
- word. This is just the opposite of ``\b``, so is also subject to the settings
+ word. This means that ``r'py\B'`` matches ``'python'``, ``'py3'``, ``'py2'``,
+ but not ``'py'``, ``'py.'``, or ``'py!'``.
+ ``\B`` is just the opposite of ``\b``, so is also subject to the settings
of ``LOCALE`` and ``UNICODE``.
``\d``
@@ -348,20 +365,20 @@ the second character. For example, ``\$`` matches the character ``'$'``.
character properties database.
``\s``
- When the :const:`LOCALE` and :const:`UNICODE` flags are not specified, matches
- any whitespace character; this is equivalent to the set ``[ \t\n\r\f\v]``. With
- :const:`LOCALE`, it will match this set plus whatever characters are defined as
- space for the current locale. If :const:`UNICODE` is set, this will match the
- characters ``[ \t\n\r\f\v]`` plus whatever is classified as space in the Unicode
- character properties database.
+ When the :const:`UNICODE` flag is not specified, it matches any whitespace
+ character; this is equivalent to the set ``[ \t\n\r\f\v]``. The
+ :const:`LOCALE` flag has no extra effect on matching of the space.
+ If :const:`UNICODE` is set, this will match the characters ``[ \t\n\r\f\v]``
+ plus whatever is classified as space in the Unicode character properties
+ database.
``\S``
- When the :const:`LOCALE` and :const:`UNICODE` flags are not specified, matches
- any non-whitespace character; this is equivalent to the set ``[^ \t\n\r\f\v]``
- With :const:`LOCALE`, it will match any character not in this set, and not
- defined as space in the current locale. If :const:`UNICODE` is set, this will
- match anything other than ``[ \t\n\r\f\v]`` and characters marked as space in
- the Unicode character properties database.
+ When the :const:`UNICODE` flag is not specified, matches any non-whitespace
+ character; this is equivalent to the set ``[^ \t\n\r\f\v]``. The
+ :const:`LOCALE` flag has no extra effect on non-whitespace match. If
+ :const:`UNICODE` is set, then any character not marked as space in the
+ Unicode character properties database is matched.
+
``\w``
When the :const:`LOCALE` and :const:`UNICODE` flags are not specified, matches
@@ -376,12 +393,16 @@ the second character. For example, ``\$`` matches the character ``'$'``.
any non-alphanumeric character; this is equivalent to the set ``[^a-zA-Z0-9_]``.
With :const:`LOCALE`, it will match any character not in the set ``[0-9_]``, and
not defined as alphanumeric for the current locale. If :const:`UNICODE` is set,
- this will match anything other than ``[0-9_]`` and characters marked as
- alphanumeric in the Unicode character properties database.
+ this will match anything other than ``[0-9_]`` plus characters classified as
+ not alphanumeric in the Unicode character properties database.
``\Z``
Matches only at the end of the string.
+If both the :const:`LOCALE` and :const:`UNICODE` flags are included for a
+particular sequence, then the :const:`LOCALE` flag takes effect first, followed
+by the :const:`UNICODE` flag.
+
Most of the standard escapes supported by Python string literals are also
accepted by the regular expression parser::
@@ -389,37 +410,15 @@ accepted by the regular expression parser::
\r \t \v \x
\\
+(Note that ``\b`` is used to represent word boundaries, and means "backspace"
+only inside character classes.)
+
Octal escapes are included in a limited form: If the first digit is a 0, or if
there are three octal digits, it is considered an octal escape. Otherwise, it is
a group reference. As for string literals, octal escapes are always at most
three digits in length.
-.. _matching-searching:
-
-Matching vs Searching
----------------------
-
-.. sectionauthor:: Fred L. Drake, Jr. <fdrake@acm.org>
-
-
-Python offers two different primitive operations based on regular expressions:
-**match** checks for a match only at the beginning of the string, while
-**search** checks for a match anywhere in the string (this is what Perl does
-by default).
-
-Note that match may differ from search even when using a regular expression
-beginning with ``'^'``: ``'^'`` matches only at the start of the string, or in
-:const:`MULTILINE` mode also immediately following a newline. The "match"
-operation succeeds only if the pattern matches at the start of the string
-regardless of mode, or at the starting position given by the optional *pos*
-argument regardless of whether a newline precedes it.
-
- >>> re.match("c", "abcdef") # No match
- >>> re.search("c", "abcdef") # Match
- <_sre.SRE_Match object at ...>
-
-
.. _contents-of-module-re:
Module Contents
@@ -434,8 +433,8 @@ form.
.. function:: compile(pattern, flags=0)
Compile a regular expression pattern into a regular expression object, which
- can be used for matching using its :func:`match` and :func:`search` methods,
- described below.
+ can be used for matching using its :func:`~RegexObject.match` and
+ :func:`~RegexObject.search` methods, described below.
The expression's behaviour can be modified by specifying a *flags* value.
Values can be any of the following variables, combined using bitwise OR (the
@@ -528,7 +527,7 @@ form.
.. function:: search(pattern, string, flags=0)
- Scan through *string* looking for a location where the regular expression
+ Scan through *string* looking for the first location where the regular expression
*pattern* produces a match, and return a corresponding :class:`MatchObject`
instance. Return ``None`` if no position in the string matches the pattern; note
that this is different from finding a zero-length match at some point in the
@@ -542,10 +541,11 @@ form.
Return ``None`` if the string does not match the pattern; note that this is
different from a zero-length match.
- .. note::
+ Note that even in :const:`MULTILINE` mode, :func:`re.match` will only match
+ at the beginning of the string and not at the beginning of each line.
- If you want to locate a match anywhere in *string*, use :func:`search`
- instead.
+ If you want to locate a match anywhere in *string*, use :func:`search`
+ instead (see also :ref:`search-vs-match`).
.. function:: split(pattern, string, maxsplit=0, flags=0)
@@ -654,7 +654,8 @@ form.
when not adjacent to a previous match, so ``sub('x*', '-', 'abc')`` returns
``'-a-b-c-'``.
- In addition to character escapes and backreferences as described above,
+ In string-type *repl* arguments, in addition to the character escapes and
+ backreferences described above,
``\g<name>`` will use the substring matched by the group named ``name``, as
defined by the ``(?P<name>...)`` syntax. ``\g<number>`` uses the corresponding
group number; ``\g<2>`` is therefore equivalent to ``\2``, but isn't ambiguous
@@ -741,16 +742,14 @@ Regular Expression Objects
The optional *pos* and *endpos* parameters have the same meaning as for the
:meth:`~RegexObject.search` method.
- .. note::
-
- If you want to locate a match anywhere in *string*, use
- :meth:`~RegexObject.search` instead.
-
>>> pattern = re.compile("o")
>>> pattern.match("dog") # No match as "o" is not at the start of "dog".
>>> pattern.match("dog", 1) # Match as "o" is the 2nd character of "dog".
<_sre.SRE_Match object at ...>
+ If you want to locate a match anywhere in *string*, use
+ :meth:`~RegexObject.search` instead (see also :ref:`search-vs-match`).
+
.. method:: RegexObject.split(string, maxsplit=0)
@@ -783,8 +782,8 @@ Regular Expression Objects
.. attribute:: RegexObject.flags
- The flags argument used when the RE object was compiled, or ``0`` if no flags
- were provided.
+ The regex matching flags. This is a combination of the flags given to
+ :func:`.compile` and any ``(?...)`` inline flags in the pattern.
.. attribute:: RegexObject.groups
@@ -811,9 +810,16 @@ Match Objects
.. class:: MatchObject
- Match Objects always have a boolean value of :const:`True`, so that you can test
- whether e.g. :func:`match` resulted in a match with a simple if statement. They
- support the following methods and attributes:
+ Match objects always have a boolean value of ``True``.
+ Since :meth:`~regex.match` and :meth:`~regex.search` return ``None``
+ when there is no match, you can test whether there was a match with a simple
+ ``if`` statement::
+
+ match = re.search(pattern, string)
+ if match:
+ process(match)
+
+ Match objects support the following methods and attributes:
.. method:: MatchObject.expand(template)
@@ -1072,13 +1078,13 @@ expressions.
+--------------------------------+---------------------------------------------+
| ``%i`` | ``[-+]?(0[xX][\dA-Fa-f]+|0[0-7]*|\d+)`` |
+--------------------------------+---------------------------------------------+
-| ``%o`` | ``0[0-7]*`` |
+| ``%o`` | ``[-+]?[0-7]+`` |
+--------------------------------+---------------------------------------------+
| ``%s`` | ``\S+`` |
+--------------------------------+---------------------------------------------+
| ``%u`` | ``\d+`` |
+--------------------------------+---------------------------------------------+
-| ``%x``, ``%X`` | ``0[xX][\dA-Fa-f]+`` |
+| ``%x``, ``%X`` | ``[-+]?(0[xX])?[\dA-Fa-f]+`` |
+--------------------------------+---------------------------------------------+
To extract the filename and numbers from a string like ::
@@ -1094,59 +1100,39 @@ The equivalent regular expression would be ::
(\S+) - (\d+) errors, (\d+) warnings
-Avoiding recursion
-^^^^^^^^^^^^^^^^^^
-
-If you create regular expressions that require the engine to perform a lot of
-recursion, you may encounter a :exc:`RuntimeError` exception with the message
-``maximum recursion limit`` exceeded. For example, ::
-
- >>> s = 'Begin ' + 1000*'a very long string ' + 'end'
- >>> re.match('Begin (\w| )*? end', s).end()
- Traceback (most recent call last):
- File "<stdin>", line 1, in ?
- File "/usr/local/lib/python2.5/re.py", line 132, in match
- return _compile(pattern, flags).match(string)
- RuntimeError: maximum recursion limit exceeded
-
-You can often restructure your regular expression to avoid recursion.
-
-Starting with Python 2.3, simple uses of the ``*?`` pattern are special-cased to
-avoid recursion. Thus, the above regular expression can avoid recursion by
-being recast as ``Begin [a-zA-Z0-9_ ]*?end``. As a further benefit, such
-regular expressions will run faster than their recursive equivalents.
-
+.. _search-vs-match:
search() vs. match()
^^^^^^^^^^^^^^^^^^^^
-In a nutshell, :func:`match` only attempts to match a pattern at the beginning
-of a string where :func:`search` will match a pattern anywhere in a string.
-For example:
+.. sectionauthor:: Fred L. Drake, Jr. <fdrake@acm.org>
- >>> re.match("o", "dog") # No match as "o" is not the first letter of "dog".
- >>> re.search("o", "dog") # Match as search() looks everywhere in the string.
- <_sre.SRE_Match object at ...>
+Python offers two different primitive operations based on regular expressions:
+:func:`re.match` checks for a match only at the beginning of the string, while
+:func:`re.search` checks for a match anywhere in the string (this is what Perl
+does by default).
-.. note::
+For example::
- The following applies only to regular expression objects like those created
- with ``re.compile("pattern")``, not the primitives ``re.match(pattern,
- string)`` or ``re.search(pattern, string)``.
+ >>> re.match("c", "abcdef") # No match
+ >>> re.search("c", "abcdef") # Match
+ <_sre.SRE_Match object at ...>
-:func:`match` has an optional second parameter that gives an index in the string
-where the search is to start::
+Regular expressions beginning with ``'^'`` can be used with :func:`search` to
+restrict the match to the beginning of the string::
- >>> pattern = re.compile("o")
- >>> pattern.match("dog") # No match as "o" is not at the start of "dog."
+ >>> re.match("c", "abcdef") # No match
+ >>> re.search("^c", "abcdef") # No match
+ >>> re.search("^a", "abcdef") # Match
+ <_sre.SRE_Match object at ...>
- # Equivalent to the above expression as 0 is the default starting index:
- >>> pattern.match("dog", 0)
+Note however that in :const:`MULTILINE` mode :func:`match` only matches at the
+beginning of the string, whereas using :func:`search` with a regular expression
+beginning with ``'^'`` will match at the beginning of each line.
- # Match as "o" is the 2nd character of "dog" (index 0 is the first):
- >>> pattern.match("dog", 1)
+ >>> re.match('X', 'A\nB\nX', re.MULTILINE) # No match
+ >>> re.search('^X', 'A\nB\nX', re.MULTILINE) # Match
<_sre.SRE_Match object at ...>
- >>> pattern.match("dog", 2) # No match as "o" is not the 3rd character of "dog."
Making a Phonebook
@@ -1160,7 +1146,7 @@ creates a phonebook.
First, here is the input. Normally it may come from a file, here we are using
triple-quoted string syntax:
- >>> input = """Ross McFluff: 834.345.1254 155 Elm Street
+ >>> text = """Ross McFluff: 834.345.1254 155 Elm Street
...
... Ronald Heathmore: 892.345.3428 436 Finley Avenue
... Frank Burger: 925.541.7625 662 South Dogwood Way
@@ -1174,7 +1160,7 @@ into a list with each nonempty line having its own entry:
.. doctest::
:options: +NORMALIZE_WHITESPACE
- >>> entries = re.split("\n+", input)
+ >>> entries = re.split("\n+", text)
>>> entries
['Ross McFluff: 834.345.1254 155 Elm Street',
'Ronald Heathmore: 892.345.3428 436 Finley Avenue',
diff --git a/Doc/library/repr.rst b/Doc/library/repr.rst
index 11e6ae2..b604186 100644
--- a/Doc/library/repr.rst
+++ b/Doc/library/repr.rst
@@ -6,9 +6,9 @@
.. sectionauthor:: Fred L. Drake, Jr. <fdrake@acm.org>
.. note::
- The :mod:`repr` module has been renamed to :mod:`reprlib` in Python 3.0. The
+ The :mod:`repr` module has been renamed to :mod:`reprlib` in Python 3. The
:term:`2to3` tool will automatically adapt imports when converting your
- sources to 3.0.
+ sources to Python 3.
**Source code:** :source:`Lib/repr.py`
@@ -24,8 +24,9 @@ This module provides a class, an instance, and a function:
.. class:: Repr()
Class which provides formatting services useful in implementing functions
- similar to the built-in :func:`repr`; size limits for different object types
- are added to avoid the generation of representations which are excessively long.
+ similar to the built-in :ref:`repr() <func-repr>`; size limits for different
+ object types are added to avoid the generation of representations which are
+ excessively long.
.. data:: aRepr
@@ -96,8 +97,8 @@ which format specific object types.
.. method:: Repr.repr(obj)
- The equivalent to the built-in :func:`repr` that uses the formatting imposed by
- the instance.
+ The equivalent to the built-in :ref:`repr() <func-repr>` that uses the
+ formatting imposed by the instance.
.. method:: Repr.repr1(obj, level)
diff --git a/Doc/library/resource.rst b/Doc/library/resource.rst
index 834dace..7ca4534 100644
--- a/Doc/library/resource.rst
+++ b/Doc/library/resource.rst
@@ -42,6 +42,11 @@ which cannot be checked or controlled by the operating system are not defined in
this module for those platforms.
+.. data:: RLIM_INFINITY
+
+ Constant used to represent the limit for an unlimited resource.
+
+
.. function:: getrlimit(resource)
Returns a tuple ``(soft, hard)`` with the current soft and hard limits of
@@ -53,12 +58,20 @@ this module for those platforms.
Sets new limits of consumption of *resource*. The *limits* argument must be a
tuple ``(soft, hard)`` of two integers describing the new limits. A value of
- ``-1`` can be used to specify the maximum possible upper limit.
+ :data:`~resource.RLIM_INFINITY` can be used to request a limit that is
+ unlimited.
Raises :exc:`ValueError` if an invalid resource is specified, if the new soft
- limit exceeds the hard limit, or if a process tries to raise its hard limit
- (unless the process has an effective UID of super-user). Can also raise
- :exc:`error` if the underlying system call fails.
+ limit exceeds the hard limit, or if a process tries to raise its hard limit.
+ Specifying a limit of :data:`~resource.RLIM_INFINITY` when the hard or
+ system limit for that resource is not unlimited will result in a
+ :exc:`ValueError`. A process with the effective UID of super-user can
+ request any valid limit value, including unlimited, but :exc:`ValueError`
+ will still be raised if the requested limit exceeds the system imposed
+ limit.
+
+ ``setrlimit`` may also raise :exc:`error` if the underlying system call
+ fails.
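+
+ For example, a sketch that raises the soft limit on open file descriptors
+ up to the current hard limit (assuming :data:`RLIMIT_NOFILE` is available
+ on the platform)::
+
+    import resource
+
+    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
+    resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
+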
These symbols define resources whose consumption can be controlled using the
:func:`setrlimit` and :func:`getrlimit` functions described below. The values of
diff --git a/Doc/library/rexec.rst b/Doc/library/rexec.rst
index 2ce612a..12f6faa 100644
--- a/Doc/library/rexec.rst
+++ b/Doc/library/rexec.rst
@@ -6,7 +6,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`rexec` module has been removed in Python 3.0.
+ The :mod:`rexec` module has been removed in Python 3.
.. versionchanged:: 2.3
Disabled module.
@@ -270,7 +270,7 @@ Let us say that we want a slightly more relaxed policy than the standard
if mode in ('r', 'rb'):
pass
elif mode in ('w', 'wb', 'a', 'ab'):
- # check filename : must begin with /tmp/
+ # check filename: must begin with /tmp/
if file[:5]!='/tmp/':
raise IOError("can't write outside /tmp")
elif (string.find(file, '/../') >= 0 or
diff --git a/Doc/library/rfc822.rst b/Doc/library/rfc822.rst
index 8e563dd..33aa851 100644
--- a/Doc/library/rfc822.rst
+++ b/Doc/library/rfc822.rst
@@ -10,7 +10,7 @@
.. deprecated:: 2.3
The :mod:`email` package should be used in preference to the :mod:`rfc822`
module. This module is present only to maintain backward compatibility, and
- has been removed in 3.0.
+ has been removed in Python 3.
This module defines a class, :class:`Message`, which represents an "email
message" as defined by the Internet standard :rfc:`2822`. [#]_ Such messages
diff --git a/Doc/library/robotparser.rst b/Doc/library/robotparser.rst
index ba7e557..d95b629 100644
--- a/Doc/library/robotparser.rst
+++ b/Doc/library/robotparser.rst
@@ -16,9 +16,9 @@
.. note::
The :mod:`robotparser` module has been renamed :mod:`urllib.robotparser` in
- Python 3.0.
+ Python 3.
The :term:`2to3` tool will automatically adapt imports when converting
- your sources to 3.0.
+ your sources to Python 3.
This module provides a single class, :class:`RobotFileParser`, which answers
questions about whether or not a particular user agent can fetch a URL on the
@@ -26,10 +26,10 @@ Web site that published the :file:`robots.txt` file. For more details on the
structure of :file:`robots.txt` files, see http://www.robotstxt.org/orig.html.
-.. class:: RobotFileParser()
+.. class:: RobotFileParser(url='')
- This class provides a set of methods to read, parse and answer questions
- about a single :file:`robots.txt` file.
+ This class provides methods to read, parse and answer questions about the
+ :file:`robots.txt` file at *url*.
.. method:: set_url(url)
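A minimal usage sketch for :class:`RobotFileParser` (the URL is a placeholder,
and :meth:`read` fetches it over the network)::

    import robotparser

    rp = robotparser.RobotFileParser("http://www.example.com/robots.txt")
    rp.read()    # download and parse the robots.txt file
    print rp.can_fetch("*", "http://www.example.com/private/page.html")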
diff --git a/Doc/library/runpy.rst b/Doc/library/runpy.rst
index 31562fe..fc9e7c1 100644
--- a/Doc/library/runpy.rst
+++ b/Doc/library/runpy.rst
@@ -22,6 +22,9 @@ The :mod:`runpy` module provides two functions:
.. function:: run_module(mod_name, init_globals=None, run_name=None, alter_sys=False)
+ .. index::
+ module: __main__
+
Execute the code of the specified module and return the resulting module
globals dictionary. The module's code is first located using the standard
import mechanism (refer to :pep:`302` for details) and then executed in a
@@ -77,6 +80,9 @@ The :mod:`runpy` module provides two functions:
.. function:: run_path(file_path, init_globals=None, run_name=None)
+ .. index::
+ module: __main__
+
Execute the code at the named filesystem location and return the resulting
module globals dictionary. As with a script name supplied to the CPython
command line, the supplied path may refer to a Python source file, a
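A brief sketch of both entry points (the script path in the second call is
hypothetical)::

    import runpy

    # Execute a standard library module and inspect the globals it produced.
    mod_globals = runpy.run_module("platform")
    print mod_globals["__name__"]    # prints: platform

    # Execute a source file as though it had been named on the command line.
    # (hypothetical path; the file must exist for this to work)
    script_globals = runpy.run_path("/tmp/example_script.py", run_name="__main__")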
diff --git a/Doc/library/scrolledtext.rst b/Doc/library/scrolledtext.rst
index 5c666c3..6af59dc 100644
--- a/Doc/library/scrolledtext.rst
+++ b/Doc/library/scrolledtext.rst
@@ -16,8 +16,8 @@ as that of the :class:`Tkinter.Text` class.
.. note::
:mod:`ScrolledText` has been renamed to :mod:`tkinter.scrolledtext` in Python
- 3.0. The :term:`2to3` tool will automatically adapt imports when converting
- your sources to 3.0.
+ 3. The :term:`2to3` tool will automatically adapt imports when converting
+ your sources to Python 3.
The text widget and scrollbar are packed together in a :class:`Frame`, and the
methods of the :class:`Grid` and :class:`Pack` geometry managers are acquired
diff --git a/Doc/library/select.rst b/Doc/library/select.rst
index d131cb9..24cb756 100644
--- a/Doc/library/select.rst
+++ b/Doc/library/select.rst
@@ -63,7 +63,7 @@ The module defines the following:
This is a straightforward interface to the Unix :c:func:`select` system call.
The first three arguments are sequences of 'waitable objects': either
integers representing file descriptors or objects with a parameterless method
- named :meth:`fileno` returning such an integer:
+ named :meth:`~io.IOBase.fileno` returning such an integer:
* *rlist*: wait until ready for reading
* *wlist*: wait until ready for writing
@@ -88,8 +88,8 @@ The module defines the following:
Among the acceptable object types in the sequences are Python file objects (e.g.
``sys.stdin``, or objects returned by :func:`open` or :func:`os.popen`), socket
objects returned by :func:`socket.socket`. You may also define a :dfn:`wrapper`
- class yourself, as long as it has an appropriate :meth:`fileno` method (that
- really returns a file descriptor, not just a random integer).
+ class yourself, as long as it has an appropriate :meth:`~io.IOBase.fileno`
+ method (that really returns a file descriptor, not just a random integer).
.. note::
@@ -207,10 +207,10 @@ linearly scanned again. :c:func:`select` is O(highest file descriptor), while
.. method:: poll.register(fd[, eventmask])
Register a file descriptor with the polling object. Future calls to the
- :meth:`poll` method will then check whether the file descriptor has any pending
- I/O events. *fd* can be either an integer, or an object with a :meth:`fileno`
- method that returns an integer. File objects implement :meth:`fileno`, so they
- can also be used as the argument.
+ :meth:`poll` method will then check whether the file descriptor has any
+ pending I/O events. *fd* can be either an integer, or an object with a
+ :meth:`~io.IOBase.fileno` method that returns an integer. File objects
+ implement :meth:`!fileno`, so they can also be used as the argument.
*eventmask* is an optional bitmask describing the type of events you want to
check for, and can be a combination of the constants :const:`POLLIN`,
@@ -251,7 +251,7 @@ linearly scanned again. :c:func:`select` is O(highest file descriptor), while
Remove a file descriptor being tracked by a polling object. Just like the
:meth:`register` method, *fd* can be an integer or an object with a
- :meth:`fileno` method that returns an integer.
+ :meth:`~io.IOBase.fileno` method that returns an integer.
Attempting to remove a file descriptor that was never registered causes a
:exc:`KeyError` exception to be raised.
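A short sketch contrasting :func:`~select.select` with a polling object (the
socket and timeouts are arbitrary, and :func:`~select.poll` is only available
on Unix)::

    import select
    import socket

    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind(("localhost", 0))

    # select() takes sequences of waitable objects and a timeout in seconds.
    rlist, wlist, xlist = select.select([s], [], [], 1.0)

    # A poll object tracks registered descriptors; its timeout is in milliseconds.
    p = select.poll()
    p.register(s, select.POLLIN)
    events = p.poll(1000)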
diff --git a/Doc/library/sgmllib.rst b/Doc/library/sgmllib.rst
index f50b02c..1da19cf 100644
--- a/Doc/library/sgmllib.rst
+++ b/Doc/library/sgmllib.rst
@@ -6,7 +6,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`sgmllib` module has been removed in Python 3.0.
+ The :mod:`sgmllib` module has been removed in Python 3.
.. index:: single: SGML
diff --git a/Doc/library/shelve.rst b/Doc/library/shelve.rst
index de12420..b02f763 100644
--- a/Doc/library/shelve.rst
+++ b/Doc/library/shelve.rst
@@ -18,7 +18,7 @@ This includes most class instances, recursive data types, and objects containing
lots of shared sub-objects. The keys are ordinary strings.
-.. function:: open(filename[, flag='c'[, protocol=None[, writeback=False]]])
+.. function:: open(filename, flag='c', protocol=None, writeback=False)
Open a persistent dictionary. The filename specified is the base filename for
the underlying database. As a side-effect, an extension may be added to the
@@ -47,9 +47,11 @@ lots of shared sub-objects. The keys are ordinary strings.
Like file objects, shelve objects should be closed explicitly to ensure
that the persistent data is flushed to disk.
- Since the :mod:`shelve` module stores objects using :mod:`pickle`, the same
- security precautions apply. Accordingly, you should avoid loading a shelf
- from an untrusted source.
+.. warning::
+
+ Because the :mod:`shelve` module is backed by :mod:`pickle`, it is insecure
+ to load a shelf from an untrusted source. Like with pickle, loading a shelf
+ can execute arbitrary code.
Shelf objects support all methods supported by dictionaries. This eases the
transition from dictionary based scripts to those requiring persistent storage.
@@ -100,7 +102,7 @@ Restrictions
implementation used.
-.. class:: Shelf(dict[, protocol=None[, writeback=False]])
+.. class:: Shelf(dict, protocol=None, writeback=False)
A subclass of :class:`UserDict.DictMixin` which stores pickled values in the
*dict* object.
@@ -118,7 +120,7 @@ Restrictions
memory and make sync and close take a long time.
-.. class:: BsdDbShelf(dict[, protocol=None[, writeback=False]])
+.. class:: BsdDbShelf(dict, protocol=None, writeback=False)
A subclass of :class:`Shelf` which exposes :meth:`first`, :meth:`!next`,
:meth:`previous`, :meth:`last` and :meth:`set_location` which are available in
@@ -129,7 +131,7 @@ Restrictions
the same interpretation as for the :class:`Shelf` class.
-.. class:: DbfilenameShelf(filename[, flag='c'[, protocol=None[, writeback=False]]])
+.. class:: DbfilenameShelf(filename, flag='c', protocol=None, writeback=False)
A subclass of :class:`Shelf` which accepts a *filename* instead of a dict-like
object. The underlying file will be opened using :func:`anydbm.open`. By
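A small persistence sketch for :func:`shelve.open` (the filename is
illustrative, and the pickle warning above applies: only open shelves you
trust)::

    import shelve

    db = shelve.open("example_shelf")    # an extension may be added by the backend
    db["prices"] = {"RHAT": 35.14, "IBM": 45.0}
    db.close()                           # flush the data to disk

    db = shelve.open("example_shelf")    # the data is available in a later session
    print db["prices"]["RHAT"]
    db.close()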
diff --git a/Doc/library/shlex.rst b/Doc/library/shlex.rst
index bb05c7d..be08e01 100644
--- a/Doc/library/shlex.rst
+++ b/Doc/library/shlex.rst
@@ -16,9 +16,9 @@
--------------
-The :class:`shlex` class makes it easy to write lexical analyzers for simple
-syntaxes resembling that of the Unix shell. This will often be useful for
-writing minilanguages, (for example, in run control files for Python
+The :class:`~shlex.shlex` class makes it easy to write lexical analyzers for
+simple syntaxes resembling that of the Unix shell. This will often be useful
+for writing minilanguages (for example, in run control files for Python
applications) or for parsing quoted strings.
Prior to Python 2.7.3, this module did not support Unicode input.
@@ -30,9 +30,10 @@ The :mod:`shlex` module defines the following functions:
Split the string *s* using shell-like syntax. If *comments* is :const:`False`
(the default), the parsing of comments in the given string will be disabled
- (setting the :attr:`commenters` attribute of the :class:`shlex` instance to
- the empty string). This function operates in POSIX mode by default, but uses
- non-POSIX mode if the *posix* argument is false.
+ (setting the :attr:`~shlex.commenters` attribute of the
+ :class:`~shlex.shlex` instance to the empty string). This function operates
+ in POSIX mode by default, but uses non-POSIX mode if the *posix* argument is
+ false.
.. versionadded:: 2.3
@@ -41,26 +42,28 @@ The :mod:`shlex` module defines the following functions:
.. note::
- Since the :func:`split` function instantiates a :class:`shlex` instance, passing
- ``None`` for *s* will read the string to split from standard input.
+ Since the :func:`split` function instantiates a :class:`~shlex.shlex`
+ instance, passing ``None`` for *s* will read the string to split from
+ standard input.
The :mod:`shlex` module defines the following class:
.. class:: shlex([instream[, infile[, posix]]])
- A :class:`shlex` instance or subclass instance is a lexical analyzer object.
- The initialization argument, if present, specifies where to read characters
- from. It must be a file-/stream-like object with :meth:`read` and
- :meth:`readline` methods, or a string (strings are accepted since Python 2.3).
- If no argument is given, input will be taken from ``sys.stdin``. The second
- optional argument is a filename string, which sets the initial value of the
- :attr:`infile` attribute. If the *instream* argument is omitted or equal to
- ``sys.stdin``, this second argument defaults to "stdin". The *posix* argument
- was introduced in Python 2.3, and defines the operational mode. When *posix* is
- not true (default), the :class:`shlex` instance will operate in compatibility
- mode. When operating in POSIX mode, :class:`shlex` will try to be as close as
- possible to the POSIX shell parsing rules.
+ A :class:`~shlex.shlex` instance or subclass instance is a lexical analyzer
+ object. The initialization argument, if present, specifies where to read
+ characters from. It must be a file-/stream-like object with
+ :meth:`~io.TextIOBase.read` and :meth:`~io.TextIOBase.readline` methods, or
+ a string (strings are accepted since Python 2.3). If no argument is given,
+ input will be taken from ``sys.stdin``. The second optional argument is a
+ filename string, which sets the initial value of the :attr:`~shlex.infile`
+ attribute. If the *instream* argument is omitted or equal to ``sys.stdin``,
+ this second argument defaults to "stdin". The *posix* argument was
+ introduced in Python 2.3, and defines the operational mode. When *posix* is
+ not true (default), the :class:`~shlex.shlex` instance will operate in
+ compatibility mode. When operating in POSIX mode, :class:`~shlex.shlex`
+ will try to be as close as possible to the POSIX shell parsing rules.
.. seealso::
@@ -74,14 +77,14 @@ The :mod:`shlex` module defines the following class:
shlex Objects
-------------
-A :class:`shlex` instance has the following methods:
+A :class:`~shlex.shlex` instance has the following methods:
.. method:: shlex.get_token()
Return a token. If tokens have been stacked using :meth:`push_token`, pop a
token off the stack. Otherwise, read one from the input stream. If reading
- encounters an immediate end-of-file, :attr:`self.eof` is returned (the empty
+ encounters an immediate end-of-file, :attr:`eof` is returned (the empty
string (``''``) in non-POSIX mode, and ``None`` in POSIX mode).
@@ -99,9 +102,9 @@ A :class:`shlex` instance has the following methods:
.. method:: shlex.sourcehook(filename)
- When :class:`shlex` detects a source request (see :attr:`source` below) this
- method is given the following token as argument, and expected to return a tuple
- consisting of a filename and an open file-like object.
+ When :class:`~shlex.shlex` detects a source request (see :attr:`source`
+ below) this method is given the following token as argument, and expected
+ to return a tuple consisting of a filename and an open file-like object.
Normally, this method first strips any quotes off the argument. If the result
is an absolute pathname, or there was no previous source request in effect, or
@@ -118,8 +121,9 @@ A :class:`shlex` instance has the following methods:
This hook is exposed so that you can use it to implement directory search paths,
addition of file extensions, and other namespace hacks. There is no
- corresponding 'close' hook, but a shlex instance will call the :meth:`close`
- method of the sourced input stream when it returns EOF.
+ corresponding 'close' hook, but a shlex instance will call the
+ :meth:`~io.IOBase.close` method of the sourced input stream when it returns
+ EOF.
For more explicit control of source stacking, use the :meth:`push_source` and
:meth:`pop_source` methods.
@@ -153,8 +157,8 @@ A :class:`shlex` instance has the following methods:
messages in the standard, parseable format understood by Emacs and other Unix
tools.
-Instances of :class:`shlex` subclasses have some public instance variables which
-either control lexical analysis or can be used for debugging:
+Instances of :class:`~shlex.shlex` subclasses have some public instance
+variables which either control lexical analysis or can be used for debugging:
.. attribute:: shlex.commenters
@@ -203,8 +207,8 @@ either control lexical analysis or can be used for debugging:
.. attribute:: shlex.whitespace_split
If ``True``, tokens will only be split in whitespaces. This is useful, for
- example, for parsing command lines with :class:`shlex`, getting tokens in a
- similar way to shell arguments.
+ example, for parsing command lines with :class:`~shlex.shlex`, getting
+ tokens in a similar way to shell arguments.
.. versionadded:: 2.3
@@ -218,7 +222,8 @@ either control lexical analysis or can be used for debugging:
.. attribute:: shlex.instream
- The input stream from which this :class:`shlex` instance is reading characters.
+ The input stream from which this :class:`~shlex.shlex` instance is reading
+ characters.
.. attribute:: shlex.source
@@ -227,16 +232,16 @@ either control lexical analysis or can be used for debugging:
string will be recognized as a lexical-level inclusion request similar to the
``source`` keyword in various shells. That is, the immediately following token
will opened as a filename and input taken from that stream until EOF, at which
- point the :meth:`close` method of that stream will be called and the input
- source will again become the original input stream. Source requests may be
- stacked any number of levels deep.
+ point the :meth:`~io.IOBase.close` method of that stream will be called and
+ the input source will again become the original input stream. Source
+ requests may be stacked any number of levels deep.
.. attribute:: shlex.debug
- If this attribute is numeric and ``1`` or more, a :class:`shlex` instance will
- print verbose progress output on its behavior. If you need to use this, you can
- read the module source code to learn the details.
+ If this attribute is numeric and ``1`` or more, a :class:`~shlex.shlex`
+ instance will print verbose progress output on its behavior. If you need
+ to use this, you can read the module source code to learn the details.
.. attribute:: shlex.lineno
@@ -262,7 +267,7 @@ either control lexical analysis or can be used for debugging:
Parsing Rules
-------------
-When operating in non-POSIX mode, :class:`shlex` will try to obey to the
+When operating in non-POSIX mode, :class:`~shlex.shlex` will try to obey the
following rules.
* Quote characters are not recognized within words (``Do"Not"Separate`` is
@@ -276,16 +281,17 @@ following rules.
* Closing quotes separate words (``"Do"Separate`` is parsed as ``"Do"`` and
``Separate``);
-* If :attr:`whitespace_split` is ``False``, any character not declared to be a
- word character, whitespace, or a quote will be returned as a single-character
- token. If it is ``True``, :class:`shlex` will only split words in whitespaces;
+* If :attr:`~shlex.whitespace_split` is ``False``, any character not
+ declared to be a word character, whitespace, or a quote will be returned as
+ a single-character token. If it is ``True``, :class:`~shlex.shlex` will only
+ split words on whitespace;
* EOF is signaled with an empty string (``''``);
* It's not possible to parse empty strings, even if quoted.
-When operating in POSIX mode, :class:`shlex` will try to obey to the following
-parsing rules.
+When operating in POSIX mode, :class:`~shlex.shlex` will try to obey the
+following parsing rules.
* Quotes are stripped out, and do not separate words (``"Do"Not"Separate"`` is
parsed as the single word ``DoNotSeparate``);
@@ -293,14 +299,16 @@ parsing rules.
* Non-quoted escape characters (e.g. ``'\'``) preserve the literal value of the
next character that follows;
-* Enclosing characters in quotes which are not part of :attr:`escapedquotes`
- (e.g. ``"'"``) preserve the literal value of all characters within the quotes;
+* Enclosing characters in quotes which are not part of
+ :attr:`~shlex.escapedquotes` (e.g. ``"'"``) preserve the literal value
+ of all characters within the quotes;
-* Enclosing characters in quotes which are part of :attr:`escapedquotes` (e.g.
- ``'"'``) preserves the literal value of all characters within the quotes, with
- the exception of the characters mentioned in :attr:`escape`. The escape
- characters retain its special meaning only when followed by the quote in use, or
- the escape character itself. Otherwise the escape character will be considered a
+* Enclosing characters in quotes which are part of
+ :attr:`~shlex.escapedquotes` (e.g. ``'"'``) preserve the literal value
+ of all characters within the quotes, with the exception of the characters
+ mentioned in :attr:`~shlex.escape`. The escape character retains its
+ special meaning only when followed by the quote in use, or the escape
+ character itself. Otherwise the escape character will be considered a
normal character.
* EOF is signaled with a :const:`None` value;
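A compact sketch of :func:`~shlex.split` and of a :class:`~shlex.shlex`
instance in POSIX mode (the input strings are made up)::

    import shlex

    # split() tokenizes like a POSIX shell, honouring quotes.
    print shlex.split('cp "My Documents/notes.txt" /tmp')
    # ['cp', 'My Documents/notes.txt', '/tmp']

    # A shlex instance gives token-by-token control over the same rules.
    lexer = shlex.shlex('a b "c d"', posix=True)
    lexer.whitespace_split = True
    print list(lexer)
    # ['a', 'b', 'c d']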
diff --git a/Doc/library/shutil.rst b/Doc/library/shutil.rst
index 78c38e5..e897483 100644
--- a/Doc/library/shutil.rst
+++ b/Doc/library/shutil.rst
@@ -31,6 +31,8 @@ copying and removal. For operations on individual files, see also the
are not copied.
+.. _file-operations:
+
Directory and files operations
------------------------------
@@ -94,7 +96,7 @@ Directory and files operations
.. versionadded:: 2.6
-.. function:: copytree(src, dst[, symlinks=False[, ignore=None]])
+.. function:: copytree(src, dst, symlinks=False, ignore=None)
Recursively copy an entire directory tree rooted at *src*. The destination
directory, named by *dst*, must not already exist; it will be created as
@@ -185,7 +187,7 @@ Directory and files operations
.. versionadded:: 2.3
-.. _shutil-example:
+.. _copytree-example:
copytree example
::::::::::::::::
@@ -217,18 +219,18 @@ provided by this module. ::
else:
copy2(srcname, dstname)
# XXX What about devices, sockets etc.?
- except (IOError, os.error), why:
+ except (IOError, os.error) as why:
errors.append((srcname, dstname, str(why)))
# catch the Error from the recursive copytree so that we can
# continue with other files
- except Error, err:
+ except Error as err:
errors.extend(err.args[0])
try:
copystat(src, dst)
except WindowsError:
# can't copy file access times on Windows
pass
- except OSError, why:
+ except OSError as why:
errors.extend((src, dst, str(why)))
if errors:
raise Error(errors)
@@ -254,8 +256,13 @@ Another example that uses the *ignore* argument to add a logging call::
copytree(source, destination, ignore=_logpath)
-Archives operations
--------------------
+.. _archiving-operations:
+
+Archiving operations
+--------------------
+
+High-level utilities to create and read compressed and archived files are also
+provided. They rely on the :mod:`zipfile` and :mod:`tarfile` modules.
.. function:: make_archive(base_name, format, [root_dir, [base_dir, [verbose, [dry_run, [owner, [group, [logger]]]]]]])
@@ -278,7 +285,8 @@ Archives operations
*owner* and *group* are used when creating a tar archive. By default,
uses the current owner and group.
- *logger* is an instance of :class:`logging.Logger`.
+ *logger* must be an object compatible with :pep:`282`, usually an instance of
+ :class:`logging.Logger`.
.. versionadded:: 2.7
@@ -322,6 +330,8 @@ Archives operations
.. versionadded:: 2.7
+.. _archiving-example:
+
Archiving example
:::::::::::::::::
@@ -346,5 +356,3 @@ The resulting archive contains::
-rw------- tarek/staff 1675 2008-06-09 13:26:54 ./id_rsa
-rw-r--r-- tarek/staff 397 2008-06-09 13:26:54 ./id_rsa.pub
-rw-r--r-- tarek/staff 37192 2010-02-06 18:23:10 ./known_hosts
-
-
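A brief sketch combining :func:`copytree` with an ignore callable and
:func:`make_archive` (the directory names are placeholders)::

    import shutil

    # Copy a tree, skipping compiled files and temporary directories.
    shutil.copytree("src_project", "backup_project",
                    ignore=shutil.ignore_patterns("*.pyc", "tmp*"))

    # Bundle the copy into a gzip'ed tar archive named backup_project.tar.gz.
    shutil.make_archive("backup_project", "gztar", root_dir="backup_project")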
diff --git a/Doc/library/simplehttpserver.rst b/Doc/library/simplehttpserver.rst
index a92c7c9..2e7e97a 100644
--- a/Doc/library/simplehttpserver.rst
+++ b/Doc/library/simplehttpserver.rst
@@ -8,8 +8,8 @@
.. note::
The :mod:`SimpleHTTPServer` module has been merged into :mod:`http.server` in
- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
- converting your sources to 3.0.
+ Python 3. The :term:`2to3` tool will automatically adapt imports when
+ converting your sources to Python 3.
The :mod:`SimpleHTTPServer` module defines a single class,
diff --git a/Doc/library/simplexmlrpcserver.rst b/Doc/library/simplexmlrpcserver.rst
index 3618728..8f805e9 100644
--- a/Doc/library/simplexmlrpcserver.rst
+++ b/Doc/library/simplexmlrpcserver.rst
@@ -8,8 +8,8 @@
.. note::
The :mod:`SimpleXMLRPCServer` module has been merged into
- :mod:`xmlrpc.server` in Python 3.0. The :term:`2to3` tool will automatically
- adapt imports when converting your sources to 3.0.
+ :mod:`xmlrpc.server` in Python 3. The :term:`2to3` tool will automatically
+ adapt imports when converting your sources to Python 3.
.. versionadded:: 2.2
@@ -197,6 +197,38 @@ server::
# Print list of available methods
print s.system.listMethods()
+The following :class:`SimpleXMLRPCServer` example is included in the module
+:file:`Lib/SimpleXMLRPCServer.py`::
+
+ server = SimpleXMLRPCServer(("localhost", 8000))
+ server.register_function(pow)
+ server.register_function(lambda x,y: x+y, 'add')
+ server.register_multicall_functions()
+ server.serve_forever()
+
+This demo server can be run from the command line as::
+
+ python -m SimpleXMLRPCServer
+
+Example client code which talks to the above server is included with
+:file:`Lib/xmlrpclib.py`::
+
+ server = ServerProxy("http://localhost:8000")
+ print server
+ multi = MultiCall(server)
+ multi.pow(2, 9)
+ multi.add(5, 1)
+ multi.add(24, 11)
+ try:
+ for response in multi():
+ print response
+ except Error as v:
+ print "ERROR", v
+
+And the client can be invoked directly using the following command::
+
+ python -m xmlrpclib
+
CGIXMLRPCRequestHandler
-----------------------
@@ -247,7 +279,7 @@ requests sent to Python CGI scripts.
Example::
class MyFuncs:
- def div(self, x, y) : return x // y
+ def div(self, x, y): return x // y
handler = CGIXMLRPCRequestHandler()
diff --git a/Doc/library/site.rst b/Doc/library/site.rst
index c0fafed..ff7195d 100644
--- a/Doc/library/site.rst
+++ b/Doc/library/site.rst
@@ -26,7 +26,7 @@ It starts by constructing up to four directories from a head and a tail part.
For the head part, it uses ``sys.prefix`` and ``sys.exec_prefix``; empty heads
are skipped. For the tail part, it uses the empty string and then
:file:`lib/site-packages` (on Windows) or
-:file:`lib/python|version|/site-packages` and then :file:`lib/site-python` (on
+:file:`lib/python{X.Y}/site-packages` and then :file:`lib/site-python` (on
Unix and Macintosh). For each of the distinct head-tail combinations, it sees
if it refers to an existing directory, and if so, adds it to ``sys.path`` and
also inspects the newly added path for configuration files.
@@ -83,7 +83,11 @@ After these path manipulations, an attempt is made to import a module named
:mod:`sitecustomize`, which can perform arbitrary site-specific customizations.
It is typically created by a system administrator in the site-packages
directory. If this import fails with an :exc:`ImportError` exception, it is
-silently ignored.
+silently ignored. If Python is started without output streams available, as
+with :file:`pythonw.exe` on Windows (which is used by default to start IDLE),
+attempted output from :mod:`sitecustomize` is ignored. Any exception other
+than :exc:`ImportError` causes a silent and perhaps mysterious failure of the
+process.
.. index:: module: usercustomize
@@ -181,8 +185,8 @@ command line:
.. code-block:: sh
- $ python3 -m site --user-site
- /home/user/.local/lib/python3.3/site-packages
+ $ python -m site --user-site
+ /home/user/.local/lib/python2.7/site-packages
.. program:: site
diff --git a/Doc/library/smtplib.rst b/Doc/library/smtplib.rst
index b0b58e8..5b2808d 100644
--- a/Doc/library/smtplib.rst
+++ b/Doc/library/smtplib.rst
@@ -24,15 +24,20 @@ Protocol) and :rfc:`1869` (SMTP Service Extensions).
A :class:`SMTP` instance encapsulates an SMTP connection. It has methods
that support a full repertoire of SMTP and ESMTP operations. If the optional
- host and port parameters are given, the SMTP :meth:`connect` method is called
- with those parameters during initialization. An :exc:`SMTPConnectError` is
- raised if the specified host doesn't respond correctly. The optional
+ host and port parameters are given, the SMTP :meth:`connect` method is
+ called with those parameters during initialization. If specified,
+ *local_hostname* is used as the FQDN of the local host in the HELO/EHLO
+ command. Otherwise, the local hostname is found using
+ :func:`socket.getfqdn`. If the :meth:`connect` call returns anything other
+ than a success code, an :exc:`SMTPConnectError` is raised. The optional
*timeout* parameter specifies a timeout in seconds for blocking operations
like the connection attempt (if not specified, the global default timeout
- setting will be used).
+ setting will be used). If the timeout expires, :exc:`socket.timeout`
+ is raised.
For normal use, you should only require the initialization/connect,
- :meth:`sendmail`, and :meth:`quit` methods. An example is included below.
+ :meth:`sendmail`, and :meth:`~smtplib.quit` methods.
+ An example is included below.
.. versionchanged:: 2.6
*timeout* was added.
@@ -44,12 +49,14 @@ Protocol) and :rfc:`1869` (SMTP Service Extensions).
:class:`SMTP`. :class:`SMTP_SSL` should be used for situations where SSL is
required from the beginning of the connection and using :meth:`starttls` is
not appropriate. If *host* is not specified, the local host is used. If
- *port* is omitted, the standard SMTP-over-SSL port (465) is used. *keyfile*
- and *certfile* are also optional, and can contain a PEM formatted private key
- and certificate chain file for the SSL connection. The optional *timeout*
- parameter specifies a timeout in seconds for blocking operations like the
- connection attempt (if not specified, the global default timeout setting
- will be used).
+ *port* is omitted, the standard SMTP-over-SSL port (465) is used.
+ *local_hostname* has the same meaning as it does for the :class:`SMTP`
+ class. *keyfile* and *certfile* are also optional, and can contain a PEM
+ formatted private key and certificate chain file for the SSL connection. The
+ optional *timeout* parameter specifies a timeout in seconds for blocking
+ operations like the connection attempt (if not specified, the global default
+ timeout setting will be used). If the timeout expires, :exc:`socket.timeout`
+ is raised.
.. versionadded:: 2.6
@@ -57,13 +64,15 @@ Protocol) and :rfc:`1869` (SMTP Service Extensions).
.. class:: LMTP([host[, port[, local_hostname]]])
The LMTP protocol, which is very similar to ESMTP, is heavily based on the
- standard SMTP client. It's common to use Unix sockets for LMTP, so our :meth:`connect`
- method must support that as well as a regular host:port server. To specify a
- Unix socket, you must use an absolute path for *host*, starting with a '/'.
+ standard SMTP client. It's common to use Unix sockets for LMTP, so our
+ :meth:`connect` method must support that as well as a regular host:port
+ server. *local_hostname* has the same meaning as it does for the
+ :class:`SMTP` class. To specify a Unix socket, you must use an absolute
+ path for *host*, starting with a '/'.
- Authentication is supported, using the regular SMTP mechanism. When using a Unix
- socket, LMTP generally don't support or require any authentication, but your
- mileage might vary.
+ Authentication is supported, using the regular SMTP mechanism. When using a
+ Unix socket, LMTP generally doesn't support or require any authentication, but
+ your mileage might vary.
.. versionadded:: 2.6
@@ -72,7 +81,8 @@ A nice selection of exceptions is defined as well:
.. exception:: SMTPException
- Base exception class for all exceptions raised by this module.
+ The base exception class for all the other exceptions provided by this
+ module.
.. exception:: SMTPServerDisconnected
@@ -151,15 +161,6 @@ An :class:`SMTP` instance has the following methods:
for connection and for all messages sent to and received from the server.
-.. method:: SMTP.connect([host[, port]])
-
- Connect to a host on a given port. The defaults are to connect to the local
- host at the standard SMTP port (25). If the hostname ends with a colon (``':'``)
- followed by a number, that suffix will be stripped off and the number
- interpreted as the port number to use. This method is automatically invoked by
- the constructor if a host is specified during instantiation.
-
-
.. method:: SMTP.docmd(cmd, [, argstring])
Send a command *cmd* to the server. The optional argument *argstring* is simply
@@ -176,6 +177,17 @@ An :class:`SMTP` instance has the following methods:
:exc:`SMTPServerDisconnected` will be raised.
+.. method:: SMTP.connect([host[, port]])
+
+ Connect to a host on a given port. The defaults are to connect to the local
+ host at the standard SMTP port (25). If the hostname ends with a colon (``':'``)
+ followed by a number, that suffix will be stripped off and the number
+ interpreted as the port number to use. This method is automatically invoked by
+ the constructor if a host is specified during instantiation. Returns a
+ 2-tuple of the response code and message sent by the server in its
+ connection response.
+
+
.. method:: SMTP.helo([hostname])
Identify yourself to the SMTP server using ``HELO``. The hostname argument
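A minimal sending sketch built on the behaviour described above; it assumes an
SMTP server is reachable on localhost port 25 and uses placeholder addresses::

    import smtplib

    # The constructor calls connect() itself when a host is given.
    server = smtplib.SMTP("localhost", 25, timeout=10)
    try:
        server.sendmail("from@example.com", ["to@example.com"],
                        "Subject: test\r\n\r\nHello from smtplib.")
    finally:
        server.quit()    # politely end the session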
diff --git a/Doc/library/socket.rst b/Doc/library/socket.rst
index 8ac47fb..f6c711e 100644
--- a/Doc/library/socket.rst
+++ b/Doc/library/socket.rst
@@ -28,7 +28,7 @@ want to refer to :rfc:`3493` titled Basic Socket Interface Extensions for IPv6.
The Python interface is a straightforward transliteration of the Unix system
call and library interface for sockets to Python's object-oriented style: the
-:func:`socket` function returns a :dfn:`socket object` whose methods implement
+:func:`.socket` function returns a :dfn:`socket object` whose methods implement
the various socket system calls. Parameter types are somewhat higher-level than
in the C interface: as with :meth:`read` and :meth:`write` operations on Python
files, buffer allocation on receive operations is automatic, and buffer length
@@ -38,7 +38,7 @@ Socket addresses are represented as follows: A single string is used for the
:const:`AF_UNIX` address family. A pair ``(host, port)`` is used for the
:const:`AF_INET` address family, where *host* is a string representing either a
hostname in Internet domain notation like ``'daring.cwi.nl'`` or an IPv4 address
-like ``'100.50.200.5'``, and *port* is an integral port number. For
+like ``'100.50.200.5'``, and *port* is an integer. For
:const:`AF_INET6` address family, a four-tuple ``(host, port, flowinfo,
scopeid)`` is used, where *flowinfo* and *scopeid* represents ``sin6_flowinfo``
and ``sin6_scope_id`` member in :const:`struct sockaddr_in6` in C. For
@@ -72,17 +72,17 @@ numeric address in *host* portion.
tuple, and the fields depend on the address type. The general tuple form is
``(addr_type, v1, v2, v3 [, scope])``, where:
- - *addr_type* is one of TIPC_ADDR_NAMESEQ, TIPC_ADDR_NAME, or
- TIPC_ADDR_ID.
- - *scope* is one of TIPC_ZONE_SCOPE, TIPC_CLUSTER_SCOPE, and
- TIPC_NODE_SCOPE.
- - If *addr_type* is TIPC_ADDR_NAME, then *v1* is the server type, *v2* is
+ - *addr_type* is one of :const:`TIPC_ADDR_NAMESEQ`, :const:`TIPC_ADDR_NAME`,
+ or :const:`TIPC_ADDR_ID`.
+ - *scope* is one of :const:`TIPC_ZONE_SCOPE`, :const:`TIPC_CLUSTER_SCOPE`,
+ and :const:`TIPC_NODE_SCOPE`.
+ - If *addr_type* is :const:`TIPC_ADDR_NAME`, then *v1* is the server type, *v2* is
the port identifier, and *v3* should be 0.
- If *addr_type* is TIPC_ADDR_NAMESEQ, then *v1* is the server type, *v2*
+ If *addr_type* is :const:`TIPC_ADDR_NAMESEQ`, then *v1* is the server type, *v2*
is the lower port number, and *v3* is the upper port number.
- If *addr_type* is TIPC_ADDR_ID, then *v1* is the node, *v2* is the
+ If *addr_type* is :const:`TIPC_ADDR_ID`, then *v1* is the node, *v2* is the
reference, and *v3* should be set to 0.
@@ -146,7 +146,7 @@ The module :mod:`socket` exports the following constants and functions:
AF_INET6
These constants represent the address (and protocol) families, used for the
- first argument to :func:`socket`. If the :const:`AF_UNIX` constant is not
+ first argument to :func:`.socket`. If the :const:`AF_UNIX` constant is not
defined then this protocol is unsupported.
@@ -186,7 +186,7 @@ The module :mod:`socket` exports the following constants and functions:
RCVALL_*
Constants for Windows' WSAIoctl(). The constants are used as arguments to the
- :meth:`ioctl` method of socket objects.
+ :meth:`~socket.socket.ioctl` method of socket objects.
.. versionadded:: 2.6
@@ -230,7 +230,7 @@ The module :mod:`socket` exports the following constants and functions:
*source_address* was added.
-.. function:: getaddrinfo(host, port, family=0, socktype=0, proto=0, flags=0)
+.. function:: getaddrinfo(host, port[, family[, socktype[, proto[, flags]]]])
Translate the *host*/*port* argument into a sequence of 5-tuples that contain
all the necessary arguments for creating a socket connected to that service.
@@ -240,19 +240,19 @@ The module :mod:`socket` exports the following constants and functions:
and *port*, you can pass ``NULL`` to the underlying C API.
The *family*, *socktype* and *proto* arguments can be optionally specified
- in order to narrow the list of addresses returned. Passing zero as a
- value for each of these arguments selects the full range of results.
+ in order to narrow the list of addresses returned. By default, their value
+ is ``0``, meaning that the full range of results is selected.
The *flags* argument can be one or several of the ``AI_*`` constants,
- and will influence how results are computed and returned.
- For example, :const:`AI_NUMERICHOST` will disable domain name resolution
- and will raise an error if *host* is a domain name.
+ and will influence how results are computed and returned. Its default value
+ is ``0``. For example, :const:`AI_NUMERICHOST` will disable domain name
+ resolution and will raise an error if *host* is a domain name.
The function returns a list of 5-tuples with the following structure:
``(family, socktype, proto, canonname, sockaddr)``
In these tuples, *family*, *socktype*, *proto* are all integers and are
- meant to be passed to the :func:`socket` function. *canonname* will be
+ meant to be passed to the :func:`.socket` function. *canonname* will be
a string representing the canonical name of the *host* if
:const:`AI_CANONNAME` is part of the *flags* argument; else *canonname*
will be empty. *sockaddr* is a tuple describing a socket address, whose
@@ -343,7 +343,7 @@ The module :mod:`socket` exports the following constants and functions:
.. function:: getprotobyname(protocolname)
Translate an Internet protocol name (for example, ``'icmp'``) to a constant
- suitable for passing as the (optional) third argument to the :func:`socket`
+ suitable for passing as the (optional) third argument to the :func:`.socket`
function. This is usually only needed for sockets opened in "raw" mode
(:const:`SOCK_RAW`); for the normal socket modes, the correct protocol is chosen
automatically if the protocol is omitted or zero.
@@ -377,7 +377,7 @@ The module :mod:`socket` exports the following constants and functions:
Build a pair of connected socket objects using the given address family, socket
type, and protocol number. Address family, socket type, and protocol number are
- as for the :func:`socket` function above. The default family is :const:`AF_UNIX`
+ as for the :func:`.socket` function above. The default family is :const:`AF_UNIX`
if defined on the platform; otherwise, the default is :const:`AF_INET`.
Availability: Unix.
@@ -388,7 +388,7 @@ The module :mod:`socket` exports the following constants and functions:
Duplicate the file descriptor *fd* (an integer as returned by a file object's
:meth:`fileno` method) and build a socket object from the result. Address
- family, socket type and protocol number are as for the :func:`socket` function
+ family, socket type and protocol number are as for the :func:`.socket` function
above. The file descriptor should refer to a socket, but this is not checked ---
subsequent operations on the object may fail if the file descriptor is invalid.
This function is rarely needed, but can be used to get or set socket options on
@@ -562,6 +562,7 @@ correspond to Unix system calls applicable to sockets.
automatically closed when they are garbage-collected.
.. note::
+
:meth:`close()` releases the resource associated with a connection but
does not necessarily close the connection immediately. If you want
to close the connection in a timely fashion, call :meth:`shutdown()`
@@ -739,7 +740,8 @@ correspond to Unix system calls applicable to sockets.
much data, if any, was successfully sent.
-.. method:: socket.sendto(string[, flags], address)
+.. method:: socket.sendto(string, address)
+ socket.sendto(string, flags, address)
Send data to the socket. The socket should not be connected to a remote socket,
since the destination socket is specified by *address*. The optional *flags*
@@ -860,10 +862,10 @@ Example
Here are four minimal example programs using the TCP/IP protocol: a server that
echoes all data that it receives back (servicing only one client), and a client
-using it. Note that a server must perform the sequence :func:`socket`,
+using it. Note that a server must perform the sequence :func:`.socket`,
:meth:`~socket.bind`, :meth:`~socket.listen`, :meth:`~socket.accept` (possibly
repeating the :meth:`~socket.accept` to service more than one client), while a
-client only needs the sequence :func:`socket`, :meth:`~socket.connect`. Also
+client only needs the sequence :func:`.socket`, :meth:`~socket.connect`. Also
note that the server does not :meth:`~socket.sendall`/:meth:`~socket.recv` on
the socket it is listening on but on the new socket returned by
:meth:`~socket.accept`.
@@ -919,13 +921,13 @@ sends traffic to the first one connected successfully. ::
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
- except socket.error, msg:
+ except socket.error as msg:
s = None
continue
try:
s.bind(sa)
s.listen(1)
- except socket.error, msg:
+ except socket.error as msg:
s.close()
s = None
continue
@@ -954,12 +956,12 @@ sends traffic to the first one connected successfully. ::
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
- except socket.error, msg:
+ except socket.error as msg:
s = None
continue
try:
s.connect(sa)
- except socket.error, msg:
+ except socket.error as msg:
s.close()
s = None
continue
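A tiny UDP sketch showing both :meth:`~socket.socket.sendto` call forms
documented above (the port number is arbitrary; nothing needs to be listening
for the sends themselves to succeed)::

    import socket

    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.sendto("ping", ("localhost", 9999))        # without flags
    s.sendto("ping", 0, ("localhost", 9999))     # with an explicit flags argument
    s.close()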
diff --git a/Doc/library/socketserver.rst b/Doc/library/socketserver.rst
index 62caf2b..a9053d1 100644
--- a/Doc/library/socketserver.rst
+++ b/Doc/library/socketserver.rst
@@ -7,8 +7,8 @@
.. note::
The :mod:`SocketServer` module has been renamed to :mod:`socketserver` in
- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
- converting your sources to 3.0.
+ Python 3. The :term:`2to3` tool will automatically adapt imports when
+ converting your sources to Python 3.
**Source code:** :source:`Lib/SocketServer.py`
@@ -116,13 +116,13 @@ can be implemented by using a synchronous server and doing an explicit fork in
the request handler class :meth:`handle` method.
Another approach to handling multiple simultaneous requests in an environment
-that supports neither threads nor :func:`fork` (or where these are too expensive
-or inappropriate for the service) is to maintain an explicit table of partially
-finished requests and to use :func:`select` to decide which request to work on
-next (or whether to handle a new incoming request). This is particularly
-important for stream services where each client can potentially be connected for
-a long time (if threads or subprocesses cannot be used). See :mod:`asyncore` for
-another way to manage this.
+that supports neither threads nor :func:`~os.fork` (or where these are too
+expensive or inappropriate for the service) is to maintain an explicit table of
+partially finished requests and to use :func:`~select.select` to decide which
+request to work on next (or whether to handle a new incoming request). This is
+particularly important for stream services where each client can potentially be
+connected for a long time (if threads or subprocesses cannot be used). See
+:mod:`asyncore` for another way to manage this.
.. XXX should data and methods be intermingled, or separate?
how should the distinction between class and instance variables be drawn?
@@ -306,8 +306,8 @@ request.
.. method:: RequestHandler.finish()
Called after the :meth:`handle` method to perform any clean-up actions
- required. The default implementation does nothing. If :meth:`setup` or
- :meth:`handle` raise an exception, this function will not be called.
+ required. The default implementation does nothing. If :meth:`setup`
+ raises an exception, this function will not be called.
.. method:: RequestHandler.handle()
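A minimal synchronous server sketch using the handler methods described above
(Python 2 module spelling; the port number is arbitrary)::

    import SocketServer

    class EchoHandler(SocketServer.BaseRequestHandler):
        def handle(self):
            # self.request is the TCP socket connected to the client.
            data = self.request.recv(1024)
            self.request.sendall(data)

    server = SocketServer.TCPServer(("localhost", 9999), EchoHandler)
    server.serve_forever()    # service one request at a time, forever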
diff --git a/Doc/library/sqlite3.rst b/Doc/library/sqlite3.rst
index 80803aa..ff634c0 100644
--- a/Doc/library/sqlite3.rst
+++ b/Doc/library/sqlite3.rst
@@ -3,7 +3,7 @@
.. module:: sqlite3
:synopsis: A DB-API 2.0 implementation using SQLite 3.x.
-.. sectionauthor:: Gerhard Häring <gh@ghaering.de>
+.. sectionauthor:: Gerhard Häring <gh@ghaering.de>
.. versionadded:: 2.5
@@ -15,15 +15,15 @@ SQLite for internal data storage. It's also possible to prototype an
application using SQLite and then port the code to a larger database such as
PostgreSQL or Oracle.
-sqlite3 was written by Gerhard Häring and provides a SQL interface compliant
-with the DB-API 2.0 specification described by :pep:`249`.
+The sqlite3 module was written by Gerhard Häring. It provides a SQL interface
+compliant with the DB-API 2.0 specification described by :pep:`249`.
To use the module, you must first create a :class:`Connection` object that
represents the database. Here the data will be stored in the
-:file:`/tmp/example` file::
+:file:`example.db` file::
import sqlite3
- conn = sqlite3.connect('/tmp/example')
+ conn = sqlite3.connect('example.db')
You can also supply the special name ``:memory:`` to create a database in RAM.
@@ -33,23 +33,29 @@ and call its :meth:`~Cursor.execute` method to perform SQL commands::
c = conn.cursor()
# Create table
- c.execute('''create table stocks
- (date text, trans text, symbol text,
- qty real, price real)''')
+ c.execute('''CREATE TABLE stocks
+ (date text, trans text, symbol text, qty real, price real)''')
# Insert a row of data
- c.execute("""insert into stocks
- values ('2006-01-05','BUY','RHAT',100,35.14)""")
+ c.execute("INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14)")
# Save (commit) the changes
conn.commit()
- # We can also close the cursor if we are done with it
- c.close()
+ # We can also close the connection if we are done with it.
+ # Just be sure any changes have been committed or they will be lost.
+ conn.close()
+
+The data you've saved is persistent and is available in subsequent sessions::
+
+ import sqlite3
+ conn = sqlite3.connect('example.db')
+ c = conn.cursor()
Usually your SQL operations will need to use values from Python variables. You
shouldn't assemble your query using Python's string operations because doing so
-is insecure; it makes your program vulnerable to an SQL injection attack.
+is insecure; it makes your program vulnerable to an SQL injection attack
+(see http://xkcd.com/327/ for a humorous example of what can go wrong).
Instead, use the DB-API's parameter substitution. Put ``?`` as a placeholder
wherever you want to use a value, and then provide a tuple of values as the
@@ -58,19 +64,20 @@ modules may use a different placeholder, such as ``%s`` or ``:1``.) For
example::
# Never do this -- insecure!
- symbol = 'IBM'
- c.execute("select * from stocks where symbol = '%s'" % symbol)
+ symbol = 'RHAT'
+ c.execute("SELECT * FROM stocks WHERE symbol = '%s'" % symbol)
# Do this instead
- t = (symbol,)
- c.execute('select * from stocks where symbol=?', t)
+ t = ('RHAT',)
+ c.execute('SELECT * FROM stocks WHERE symbol=?', t)
+ print c.fetchone()
- # Larger example
- for t in [('2006-03-28', 'BUY', 'IBM', 1000, 45.00),
- ('2006-04-05', 'BUY', 'MSFT', 1000, 72.00),
- ('2006-04-06', 'SELL', 'IBM', 500, 53.00),
- ]:
- c.execute('insert into stocks values (?,?,?,?,?)', t)
+ # Larger example that inserts many records at a time
+ purchases = [('2006-03-28', 'BUY', 'IBM', 1000, 45.00),
+ ('2006-04-05', 'BUY', 'MSFT', 1000, 72.00),
+ ('2006-04-06', 'SELL', 'IBM', 500, 53.00),
+ ]
+ c.executemany('INSERT INTO stocks VALUES (?,?,?,?,?)', purchases)
To retrieve data after executing a SELECT statement, you can either treat the
cursor as an :term:`iterator`, call the cursor's :meth:`~Cursor.fetchone` method to
@@ -79,21 +86,18 @@ matching rows.
This example uses the iterator form::
- >>> c = conn.cursor()
- >>> c.execute('select * from stocks order by price')
- >>> for row in c:
- ... print row
- ...
+ >>> for row in c.execute('SELECT * FROM stocks ORDER BY price'):
+ print row
+
(u'2006-01-05', u'BUY', u'RHAT', 100, 35.14)
(u'2006-03-28', u'BUY', u'IBM', 1000, 45.0)
(u'2006-04-06', u'SELL', u'IBM', 500, 53.0)
(u'2006-04-05', u'BUY', u'MSFT', 1000, 72.0)
- >>>
.. seealso::
- http://code.google.com/p/pysqlite/
+ https://github.com/ghaering/pysqlite
The pysqlite web page -- sqlite3 is developed externally under the name
"pysqlite".
@@ -101,6 +105,9 @@ This example uses the iterator form::
The SQLite web page; the documentation describes the syntax and the
available data types for the supported SQL dialect.
+ http://www.w3schools.com/sql/
+ Tutorial, reference and examples for learning SQL syntax.
+
:pep:`249` - Database API Specification 2.0
PEP written by Marc-André Lemburg.
@@ -111,6 +118,24 @@ Module functions and constants
------------------------------
+.. data:: version
+
+ The version number of this module, as a string. This is not the version of
+ the SQLite library.
+
+.. data:: version_info
+
+ The version number of this module, as a tuple of integers. This is not the
+ version of the SQLite library.
+
+.. data:: sqlite_version
+
+ The version number of the run-time SQLite library, as a string.
+
+.. data:: sqlite_version_info
+
+ The version number of the run-time SQLite library, as a tuple of integers.
+
.. data:: PARSE_DECLTYPES
This constant is meant to be used with the *detect_types* parameter of the
@@ -154,7 +179,7 @@ Module functions and constants
For the *isolation_level* parameter, please see the
:attr:`Connection.isolation_level` property of :class:`Connection` objects.
- SQLite natively supports only the types TEXT, INTEGER, FLOAT, BLOB and NULL. If
+ SQLite natively supports only the types TEXT, INTEGER, REAL, BLOB and NULL. If
you want to use other types you must add support for them yourself. The
*detect_types* parameter and the using custom **converters** registered with the
module-level :func:`register_converter` function allow you to easily do that.
@@ -209,10 +234,10 @@ Module functions and constants
.. function:: enable_callback_tracebacks(flag)
By default you will not get any tracebacks in user-defined functions,
- aggregates, converters, authorizer callbacks etc. If you want to debug them, you
- can call this function with *flag* as True. Afterwards, you will get tracebacks
- from callbacks on ``sys.stderr``. Use :const:`False` to disable the feature
- again.
+ aggregates, converters, authorizer callbacks etc. If you want to debug them,
+ you can call this function with *flag* set to ``True``. Afterwards, you will
+ get tracebacks from callbacks on ``sys.stderr``. Use :const:`False` to
+ disable the feature again.
.. _sqlite3-connection-objects:
@@ -224,237 +249,236 @@ Connection Objects
A SQLite database connection has the following attributes and methods:
-.. attribute:: Connection.isolation_level
+ .. attribute:: isolation_level
- Get or set the current isolation level. :const:`None` for autocommit mode or
- one of "DEFERRED", "IMMEDIATE" or "EXCLUSIVE". See section
- :ref:`sqlite3-controlling-transactions` for a more detailed explanation.
+ Get or set the current isolation level. :const:`None` for autocommit mode or
+ one of "DEFERRED", "IMMEDIATE" or "EXCLUSIVE". See section
+ :ref:`sqlite3-controlling-transactions` for a more detailed explanation.
-.. method:: Connection.cursor([cursorClass])
+ .. method:: cursor([cursorClass])
- The cursor method accepts a single optional parameter *cursorClass*. If
- supplied, this must be a custom cursor class that extends
- :class:`sqlite3.Cursor`.
+ The cursor method accepts a single optional parameter *cursorClass*. If
+ supplied, this must be a custom cursor class that extends
+ :class:`sqlite3.Cursor`.
+ .. method:: commit()
-.. method:: Connection.commit()
+ This method commits the current transaction. If you don't call this method,
+ anything you did since the last call to ``commit()`` is not visible from
+ other database connections. If you wonder why you don't see the data you've
+ written to the database, please check you didn't forget to call this method.
- This method commits the current transaction. If you don't call this method,
- anything you did since the last call to ``commit()`` is not visible from
- other database connections. If you wonder why you don't see the data you've
- written to the database, please check you didn't forget to call this method.
+ .. method:: rollback()
-.. method:: Connection.rollback()
+ This method rolls back any changes to the database since the last call to
+ :meth:`commit`.
- This method rolls back any changes to the database since the last call to
- :meth:`commit`.
+ .. method:: close()
-.. method:: Connection.close()
+ This closes the database connection. Note that this does not automatically
+ call :meth:`commit`. If you just close your database connection without
+ calling :meth:`commit` first, your changes will be lost!
- This closes the database connection. Note that this does not automatically
- call :meth:`commit`. If you just close your database connection without
- calling :meth:`commit` first, your changes will be lost!
+ .. method:: execute(sql, [parameters])
-.. method:: Connection.execute(sql, [parameters])
+ This is a nonstandard shortcut that creates an intermediate cursor object by
+ calling the cursor method, then calls the cursor's :meth:`execute
+ <Cursor.execute>` method with the parameters given.
- This is a nonstandard shortcut that creates an intermediate cursor object by
- calling the cursor method, then calls the cursor's :meth:`execute
- <Cursor.execute>` method with the parameters given.
+ .. method:: executemany(sql, [parameters])
-.. method:: Connection.executemany(sql, [parameters])
+ This is a nonstandard shortcut that creates an intermediate cursor object by
+ calling the cursor method, then calls the cursor's :meth:`executemany
+ <Cursor.executemany>` method with the parameters given.
- This is a nonstandard shortcut that creates an intermediate cursor object by
- calling the cursor method, then calls the cursor's :meth:`executemany
- <Cursor.executemany>` method with the parameters given.
+ .. method:: executescript(sql_script)
-.. method:: Connection.executescript(sql_script)
+ This is a nonstandard shortcut that creates an intermediate cursor object by
+ calling the cursor method, then calls the cursor's :meth:`executescript
+ <Cursor.executescript>` method with the parameters given.
- This is a nonstandard shortcut that creates an intermediate cursor object by
- calling the cursor method, then calls the cursor's :meth:`executescript
- <Cursor.executescript>` method with the parameters given.
+ .. method:: create_function(name, num_params, func)
-.. method:: Connection.create_function(name, num_params, func)
+ Creates a user-defined function that you can later use from within SQL
+ statements under the function name *name*. *num_params* is the number of
+ parameters the function accepts, and *func* is a Python callable that is called
+ as the SQL function.
- Creates a user-defined function that you can later use from within SQL
- statements under the function name *name*. *num_params* is the number of
- parameters the function accepts, and *func* is a Python callable that is called
- as the SQL function.
+ The function can return any of the types supported by SQLite: unicode, str, int,
+ long, float, buffer and None.
- The function can return any of the types supported by SQLite: unicode, str, int,
- long, float, buffer and None.
+ Example:
- Example:
+ .. literalinclude:: ../includes/sqlite3/md5func.py
- .. literalinclude:: ../includes/sqlite3/md5func.py
+ .. method:: create_aggregate(name, num_params, aggregate_class)
-.. method:: Connection.create_aggregate(name, num_params, aggregate_class)
+ Creates a user-defined aggregate function.
- Creates a user-defined aggregate function.
+ The aggregate class must implement a ``step`` method, which accepts the number
+ of parameters *num_params*, and a ``finalize`` method which will return the
+ final result of the aggregate.
- The aggregate class must implement a ``step`` method, which accepts the number
- of parameters *num_params*, and a ``finalize`` method which will return the
- final result of the aggregate.
+ The ``finalize`` method can return any of the types supported by SQLite:
+ unicode, str, int, long, float, buffer and None.
- The ``finalize`` method can return any of the types supported by SQLite:
- unicode, str, int, long, float, buffer and None.
+ Example:
- Example:
+ .. literalinclude:: ../includes/sqlite3/mysumaggr.py
- .. literalinclude:: ../includes/sqlite3/mysumaggr.py
+ .. method:: create_collation(name, callable)
-.. method:: Connection.create_collation(name, callable)
+ Creates a collation with the specified *name* and *callable*. The callable will
+ be passed two string arguments. It should return -1 if the first is ordered
+ lower than the second, 0 if they are ordered equal and 1 if the first is ordered
+ higher than the second. Note that this controls sorting (ORDER BY in SQL) so
+ your comparisons don't affect other SQL operations.
- Creates a collation with the specified *name* and *callable*. The callable will
- be passed two string arguments. It should return -1 if the first is ordered
- lower than the second, 0 if they are ordered equal and 1 if the first is ordered
- higher than the second. Note that this controls sorting (ORDER BY in SQL) so
- your comparisons don't affect other SQL operations.
+ Note that the callable will get its parameters as Python bytestrings, which will
+ normally be encoded in UTF-8.
- Note that the callable will get its parameters as Python bytestrings, which will
- normally be encoded in UTF-8.
+ The following example shows a custom collation that sorts "the wrong way":
- The following example shows a custom collation that sorts "the wrong way":
+ .. literalinclude:: ../includes/sqlite3/collation_reverse.py
- .. literalinclude:: ../includes/sqlite3/collation_reverse.py
+ To remove a collation, call ``create_collation`` with None as callable::
- To remove a collation, call ``create_collation`` with None as callable::
+ con.create_collation("reverse", None)
- con.create_collation("reverse", None)
+ .. method:: interrupt()
-.. method:: Connection.interrupt()
+ You can call this method from a different thread to abort any queries that might
+ be executing on the connection. The query will then abort and the caller will
+ get an exception.
- You can call this method from a different thread to abort any queries that might
- be executing on the connection. The query will then abort and the caller will
- get an exception.
+ .. method:: set_authorizer(authorizer_callback)
-.. method:: Connection.set_authorizer(authorizer_callback)
+ This routine registers a callback. The callback is invoked for each attempt to
+ access a column of a table in the database. The callback should return
+ :const:`SQLITE_OK` if access is allowed, :const:`SQLITE_DENY` if the entire SQL
+ statement should be aborted with an error and :const:`SQLITE_IGNORE` if the
+ column should be treated as a NULL value. These constants are available in the
+ :mod:`sqlite3` module.
- This routine registers a callback. The callback is invoked for each attempt to
- access a column of a table in the database. The callback should return
- :const:`SQLITE_OK` if access is allowed, :const:`SQLITE_DENY` if the entire SQL
- statement should be aborted with an error and :const:`SQLITE_IGNORE` if the
- column should be treated as a NULL value. These constants are available in the
- :mod:`sqlite3` module.
+ The first argument to the callback signifies what kind of operation is to be
+ authorized. The second and third argument will be arguments or :const:`None`
+ depending on the first argument. The 4th argument is the name of the database
+ ("main", "temp", etc.) if applicable. The 5th argument is the name of the
+ inner-most trigger or view that is responsible for the access attempt or
+ :const:`None` if this access attempt is directly from input SQL code.
- The first argument to the callback signifies what kind of operation is to be
- authorized. The second and third argument will be arguments or :const:`None`
- depending on the first argument. The 4th argument is the name of the database
- ("main", "temp", etc.) if applicable. The 5th argument is the name of the
- inner-most trigger or view that is responsible for the access attempt or
- :const:`None` if this access attempt is directly from input SQL code.
+ Please consult the SQLite documentation about the possible values for the first
+ argument and the meaning of the second and third argument depending on the first
+ one. All necessary constants are available in the :mod:`sqlite3` module.
- Please consult the SQLite documentation about the possible values for the first
- argument and the meaning of the second and third argument depending on the first
- one. All necessary constants are available in the :mod:`sqlite3` module.
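A hypothetical authorizer that hides a single column (the column name "salary" is invented; see the SQLite documentation for the full set of action codes)::

    import sqlite3

    def authorizer(action, arg1, arg2, db_name, trigger_or_view):
        # for SQLITE_READ, arg1 is the table name and arg2 the column name
        if action == sqlite3.SQLITE_READ and arg2 == "salary":
            return sqlite3.SQLITE_IGNORE    # the column reads back as NULL
        return sqlite3.SQLITE_OK

    con = sqlite3.connect(":memory:")
    con.set_authorizer(authorizer)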
+ .. method:: set_progress_handler(handler, n)
-.. method:: Connection.set_progress_handler(handler, n)
+ This routine registers a callback. The callback is invoked for every *n*
+ instructions of the SQLite virtual machine. This is useful if you want to
+ get called from SQLite during long-running operations, for example to update
+ a GUI.
- .. versionadded:: 2.6
+ If you want to clear any previously installed progress handler, call the
+ method with :const:`None` for *handler*.
- This routine registers a callback. The callback is invoked for every *n*
- instructions of the SQLite virtual machine. This is useful if you want to
- get called from SQLite during long-running operations, for example to update
- a GUI.
-
- If you want to clear any previously installed progress handler, call the
- method with :const:`None` for *handler*.
+ .. versionadded:: 2.6
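A small sketch of installing and then clearing a progress handler (the interval and the handler body are arbitrary choices)::

    import sqlite3

    def progress():
        print "still working..."
        return 0    # SQLite treats a non-zero return value as a request to abort

    con = sqlite3.connect(":memory:")
    con.set_progress_handler(progress, 10000)   # every 10000 VM instructions
    # ... long-running statements ...
    con.set_progress_handler(None, 10000)       # remove the handler again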
-.. method:: Connection.enable_load_extension(enabled)
+ .. method:: enable_load_extension(enabled)
- .. versionadded:: 2.7
+ This routine allows/disallows the SQLite engine to load SQLite extensions
+ from shared libraries. SQLite extensions can define new functions,
+ aggregates or whole new virtual table implementations. One well-known
+ extension is the fulltext-search extension distributed with SQLite.
- This routine allows/disallows the SQLite engine to load SQLite extensions
- from shared libraries. SQLite extensions can define new functions,
- aggregates or whole new virtual table implementations. One well-known
- extension is the fulltext-search extension distributed with SQLite.
+ Loadable extensions are disabled by default. See [#f1]_.
- .. literalinclude:: ../includes/sqlite3/load_extension.py
+ .. versionadded:: 2.7
- Loadable extensions are disabled by default. See [#f1]_
+ .. literalinclude:: ../includes/sqlite3/load_extension.py
-.. method:: Connection.load_extension(path)
+ .. method:: load_extension(path)
- .. versionadded:: 2.7
+ This routine loads a SQLite extension from a shared library. You have to
+ enable extension loading with :meth:`enable_load_extension` before you can
+ use this routine.
- This routine loads a SQLite extension from a shared library. You have to
- enable extension loading with :meth:`enable_load_extension` before you can
- use this routine.
+ Loadable extensions are disabled by default. See [#f1]_.
- Loadable extensions are disabled by default. See [#f1]_
+ .. versionadded:: 2.7
-.. attribute:: Connection.row_factory
+ .. attribute:: row_factory
- You can change this attribute to a callable that accepts the cursor and the
- original row as a tuple and will return the real result row. This way, you can
- implement more advanced ways of returning results, such as returning an object
- that can also access columns by name.
+ You can change this attribute to a callable that accepts the cursor and the
+ original row as a tuple and will return the real result row. This way, you can
+ implement more advanced ways of returning results, such as returning an object
+ that can also access columns by name.
- Example:
+ Example:
- .. literalinclude:: ../includes/sqlite3/row_factory.py
+ .. literalinclude:: ../includes/sqlite3/row_factory.py
- If returning a tuple doesn't suffice and you want name-based access to
- columns, you should consider setting :attr:`row_factory` to the
- highly-optimized :class:`sqlite3.Row` type. :class:`Row` provides both
- index-based and case-insensitive name-based access to columns with almost no
- memory overhead. It will probably be better than your own custom
- dictionary-based approach or even a db_row based solution.
+ If returning a tuple doesn't suffice and you want name-based access to
+ columns, you should consider setting :attr:`row_factory` to the
+ highly-optimized :class:`sqlite3.Row` type. :class:`Row` provides both
+ index-based and case-insensitive name-based access to columns with almost no
+ memory overhead. It will probably be better than your own custom
+ dictionary-based approach or even a db_row based solution.
- .. XXX what's a db_row-based solution?
+ .. XXX what's a db_row-based solution?
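To make the comparison concrete, here is a rough sketch contrasting a hand-rolled factory with :class:`sqlite3.Row` (column names invented)::

    import sqlite3

    def dict_factory(cursor, row):
        # build a {column name: value} dict from the raw tuple
        return dict((col[0], row[idx]) for idx, col in enumerate(cursor.description))

    con = sqlite3.connect(":memory:")
    con.row_factory = dict_factory
    print con.execute("select 1 as a, 2 as b").fetchone()   # {'a': 1, 'b': 2}

    con.row_factory = sqlite3.Row                           # the optimized built-in type
    row = con.execute("select 1 as a, 2 as b").fetchone()
    print row["a"], row["B"]                                 # name lookup is case-insensitive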
-.. attribute:: Connection.text_factory
+ .. attribute:: text_factory
- Using this attribute you can control what objects are returned for the ``TEXT``
- data type. By default, this attribute is set to :class:`unicode` and the
- :mod:`sqlite3` module will return Unicode objects for ``TEXT``. If you want to
- return bytestrings instead, you can set it to :class:`str`.
+ Using this attribute you can control what objects are returned for the ``TEXT``
+ data type. By default, this attribute is set to :class:`unicode` and the
+ :mod:`sqlite3` module will return Unicode objects for ``TEXT``. If you want to
+ return bytestrings instead, you can set it to :class:`str`.
- For efficiency reasons, there's also a way to return Unicode objects only for
- non-ASCII data, and bytestrings otherwise. To activate it, set this attribute to
- :const:`sqlite3.OptimizedUnicode`.
+ For efficiency reasons, there's also a way to return Unicode objects only for
+ non-ASCII data, and bytestrings otherwise. To activate it, set this attribute to
+ :const:`sqlite3.OptimizedUnicode`.
- You can also set it to any other callable that accepts a single bytestring
- parameter and returns the resulting object.
+ You can also set it to any other callable that accepts a single bytestring
+ parameter and returns the resulting object.
- See the following example code for illustration:
+ See the following example code for illustration:
- .. literalinclude:: ../includes/sqlite3/text_factory.py
+ .. literalinclude:: ../includes/sqlite3/text_factory.py
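A short sketch of the three settings described above (not the referenced text_factory.py)::

    import sqlite3

    con = sqlite3.connect(":memory:")
    cur = con.cursor()

    print type(cur.execute("select 'abc'").fetchone()[0])   # unicode (the default)

    con.text_factory = str
    print type(cur.execute("select 'abc'").fetchone()[0])   # str (bytestrings)

    con.text_factory = sqlite3.OptimizedUnicode
    print type(cur.execute("select 'abc'").fetchone()[0])   # str again, since the data is pure ASCII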
-.. attribute:: Connection.total_changes
+ .. attribute:: total_changes
- Returns the total number of database rows that have been modified, inserted, or
- deleted since the database connection was opened.
+ Returns the total number of database rows that have been modified, inserted, or
+ deleted since the database connection was opened.
-.. attribute:: Connection.iterdump
+ .. attribute:: iterdump
- Returns an iterator to dump the database in an SQL text format. Useful when
- saving an in-memory database for later restoration. This function provides
- the same capabilities as the :kbd:`.dump` command in the :program:`sqlite3`
- shell.
+ Returns an iterator to dump the database in an SQL text format. Useful when
+ saving an in-memory database for later restoration. This function provides
+ the same capabilities as the :kbd:`.dump` command in the :program:`sqlite3`
+ shell.
- .. versionadded:: 2.6
+ .. versionadded:: 2.6
- Example::
+ Example::
- # Convert file existing_db.db to SQL dump file dump.sql
- import sqlite3, os
+ # Convert file existing_db.db to SQL dump file dump.sql
+ import sqlite3, os
- con = sqlite3.connect('existing_db.db')
- with open('dump.sql', 'w') as f:
- for line in con.iterdump():
- f.write('%s\n' % line)
+ con = sqlite3.connect('existing_db.db')
+ with open('dump.sql', 'w') as f:
+ for line in con.iterdump():
+ f.write('%s\n' % line)
.. _sqlite3-cursor-objects:
@@ -466,114 +490,110 @@ Cursor Objects
A :class:`Cursor` instance has the following attributes and methods.
-.. method:: Cursor.execute(sql, [parameters])
-
- Executes an SQL statement. The SQL statement may be parametrized (i. e.
- placeholders instead of SQL literals). The :mod:`sqlite3` module supports two
- kinds of placeholders: question marks (qmark style) and named placeholders
- (named style).
+ .. method:: execute(sql, [parameters])
- This example shows how to use parameters with qmark style:
+ Executes an SQL statement. The SQL statement may be parameterized (i. e.
+ placeholders instead of SQL literals). The :mod:`sqlite3` module supports two
+ kinds of placeholders: question marks (qmark style) and named placeholders
+ (named style).
- .. literalinclude:: ../includes/sqlite3/execute_1.py
+ Here's an example of both styles:
- This example shows how to use the named style:
+ .. literalinclude:: ../includes/sqlite3/execute_1.py
- .. literalinclude:: ../includes/sqlite3/execute_2.py
+ :meth:`execute` will only execute a single SQL statement. If you try to execute
+ more than one statement with it, it will raise a Warning. Use
+ :meth:`executescript` if you want to execute multiple SQL statements with one
+ call.
- :meth:`execute` will only execute a single SQL statement. If you try to execute
- more than one statement with it, it will raise a Warning. Use
- :meth:`executescript` if you want to execute multiple SQL statements with one
- call.
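As a quick illustration of the two placeholder styles (not the referenced execute_1.py; the table is invented)::

    import sqlite3

    con = sqlite3.connect(":memory:")
    cur = con.cursor()
    cur.execute("create table people (name text, age integer)")

    # qmark style: parameters given as a sequence
    cur.execute("insert into people values (?, ?)", ("C. Programmer", 30))

    # named style: parameters given as a mapping
    cur.execute("select * from people where name = :who and age = :age",
                {"who": "C. Programmer", "age": 30})
    print cur.fetchone()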
+ .. method:: executemany(sql, seq_of_parameters)
-.. method:: Cursor.executemany(sql, seq_of_parameters)
+ Executes an SQL command against all parameter sequences or mappings found in
+ the sequence *sql*. The :mod:`sqlite3` module also allows using an
+ :term:`iterator` yielding parameters instead of a sequence.
- Executes an SQL command against all parameter sequences or mappings found in
- the sequence *sql*. The :mod:`sqlite3` module also allows using an
- :term:`iterator` yielding parameters instead of a sequence.
+ .. literalinclude:: ../includes/sqlite3/executemany_1.py
- .. literalinclude:: ../includes/sqlite3/executemany_1.py
+ Here's a shorter example using a :term:`generator`:
- Here's a shorter example using a :term:`generator`:
+ .. literalinclude:: ../includes/sqlite3/executemany_2.py
- .. literalinclude:: ../includes/sqlite3/executemany_2.py
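A comparable sketch, assuming a generator that yields one-element tuples (independent of the referenced include files)::

    import sqlite3
    import string

    def char_generator():
        for c in string.lowercase:
            yield (c,)

    con = sqlite3.connect(":memory:")
    cur = con.cursor()
    cur.execute("create table characters (c text)")
    cur.executemany("insert into characters (c) values (?)", char_generator())
    print cur.execute("select count(*) from characters").fetchone()[0]   # 26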
+ .. method:: executescript(sql_script)
-.. method:: Cursor.executescript(sql_script)
+ This is a nonstandard convenience method for executing multiple SQL statements
+ at once. It issues a ``COMMIT`` statement first, then executes the SQL script it
+ gets as a parameter.
- This is a nonstandard convenience method for executing multiple SQL statements
- at once. It issues a ``COMMIT`` statement first, then executes the SQL script it
- gets as a parameter.
+ *sql_script* can be a bytestring or a Unicode string.
- *sql_script* can be a bytestring or a Unicode string.
+ Example:
- Example:
+ .. literalinclude:: ../includes/sqlite3/executescript.py
- .. literalinclude:: ../includes/sqlite3/executescript.py
+ .. method:: fetchone()
-.. method:: Cursor.fetchone()
+ Fetches the next row of a query result set, returning a single sequence,
+ or :const:`None` when no more data is available.
- Fetches the next row of a query result set, returning a single sequence,
- or :const:`None` when no more data is available.
+ .. method:: fetchmany([size=cursor.arraysize])
-.. method:: Cursor.fetchmany([size=cursor.arraysize])
+ Fetches the next set of rows of a query result, returning a list. An empty
+ list is returned when no more rows are available.
- Fetches the next set of rows of a query result, returning a list. An empty
- list is returned when no more rows are available.
+ The number of rows to fetch per call is specified by the *size* parameter.
+ If it is not given, the cursor's arraysize determines the number of rows
+ to be fetched. The method should try to fetch as many rows as indicated by
+ the size parameter. If this is not possible due to the specified number of
+ rows not being available, fewer rows may be returned.
- The number of rows to fetch per call is specified by the *size* parameter.
- If it is not given, the cursor's arraysize determines the number of rows
- to be fetched. The method should try to fetch as many rows as indicated by
- the size parameter. If this is not possible due to the specified number of
- rows not being available, fewer rows may be returned.
+ Note there are performance considerations involved with the *size* parameter.
+ For optimal performance, it is usually best to use the arraysize attribute.
+ If the *size* parameter is used, then it is best for it to retain the same
+ value from one :meth:`fetchmany` call to the next.
- Note there are performance considerations involved with the *size* parameter.
- For optimal performance, it is usually best to use the arraysize attribute.
- If the *size* parameter is used, then it is best for it to retain the same
- value from one :meth:`fetchmany` call to the next.
+ .. method:: fetchall()
-.. method:: Cursor.fetchall()
+ Fetches all (remaining) rows of a query result, returning a list. Note that
+ the cursor's arraysize attribute can affect the performance of this operation.
+ An empty list is returned when no rows are available.
- Fetches all (remaining) rows of a query result, returning a list. Note that
- the cursor's arraysize attribute can affect the performance of this operation.
- An empty list is returned when no rows are available.
+ .. attribute:: rowcount
-.. attribute:: Cursor.rowcount
+ Although the :class:`Cursor` class of the :mod:`sqlite3` module implements this
+ attribute, the database engine's own support for the determination of "rows
+ affected"/"rows selected" is quirky.
- Although the :class:`Cursor` class of the :mod:`sqlite3` module implements this
- attribute, the database engine's own support for the determination of "rows
- affected"/"rows selected" is quirky.
+ For :meth:`executemany` statements, the number of modifications is summed up
+ into :attr:`rowcount`.
- For :meth:`executemany` statements, the number of modifications are summed up
- into :attr:`rowcount`.
+ As required by the Python DB API Spec, the :attr:`rowcount` attribute "is -1 in
+ case no ``executeXX()`` has been performed on the cursor or the rowcount of the
+ last operation is not determinable by the interface". This includes ``SELECT``
+ statements because we cannot determine the number of rows a query produced
+ until all rows were fetched.
- As required by the Python DB API Spec, the :attr:`rowcount` attribute "is -1 in
- case no ``executeXX()`` has been performed on the cursor or the rowcount of the
- last operation is not determinable by the interface". This includes ``SELECT``
- statements because we cannot determine the number of rows a query produced
- until all rows were fetched.
+ With SQLite versions before 3.6.5, :attr:`rowcount` is set to 0 if
+ you make a ``DELETE FROM table`` without any condition.
- With SQLite versions before 3.6.5, :attr:`rowcount` is set to 0 if
- you make a ``DELETE FROM table`` without any condition.
+ .. attribute:: lastrowid
-.. attribute:: Cursor.lastrowid
+ This read-only attribute provides the rowid of the last modified row. It is
+ only set if you issued an ``INSERT`` statement using the :meth:`execute`
+ method. For operations other than ``INSERT`` or when :meth:`executemany` is
+ called, :attr:`lastrowid` is set to :const:`None`.
- This read-only attribute provides the rowid of the last modified row. It is
- only set if you issued a ``INSERT`` statement using the :meth:`execute`
- method. For operations other than ``INSERT`` or when :meth:`executemany` is
- called, :attr:`lastrowid` is set to :const:`None`.
+ .. attribute:: description
-.. attribute:: Cursor.description
+ This read-only attribute provides the column names of the last query. To
+ remain compatible with the Python DB API, it returns a 7-tuple for each
+ column where the last six items of each tuple are :const:`None`.
- This read-only attribute provides the column names of the last query. To
- remain compatible with the Python DB API, it returns a 7-tuple for each
- column where the last six items of each tuple are :const:`None`.
-
- It is set for ``SELECT`` statements without any matching rows as well.
+ It is set for ``SELECT`` statements without any matching rows as well.
.. _sqlite3-row-objects:
@@ -597,7 +617,7 @@ Row Objects
.. method:: keys
- This method returns a tuple of column names. Immediately after a query,
+ This method returns a list of column names. Immediately after a query,
it is the first member of each tuple in :attr:`Cursor.description`.
.. versionadded:: 2.6
@@ -633,7 +653,8 @@ Now we plug :class:`Row` in::
['date', 'trans', 'symbol', 'qty', 'price']
>>> r['qty']
100.0
- >>> for member in r: print member
+ >>> for member in r:
+ ... print member
...
2006-01-05
BUY
@@ -706,9 +727,6 @@ use other Python types with SQLite, you must **adapt** them to one of the
sqlite3 module's supported types for SQLite: one of NoneType, int, long, float,
str, unicode, buffer.
-The :mod:`sqlite3` module uses Python object adaptation, as described in
-:pep:`246` for this. The protocol to use is :class:`PrepareProtocol`.
-
There are two ways to enable the :mod:`sqlite3` module to adapt a custom Python
type to one of the supported ones.
@@ -811,6 +829,10 @@ The following example demonstrates this.
.. literalinclude:: ../includes/sqlite3/pysqlite_datetime.py
+If a timestamp stored in SQLite has a fractional part longer than 6
+digits, its value will be truncated to microsecond precision by the
+timestamp converter.
+
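A sketch of the truncation described above, assuming a column declared as ``timestamp`` and :const:`PARSE_DECLTYPES` enabled (table name invented)::

    import sqlite3

    con = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
    cur = con.cursor()
    cur.execute("create table log (t timestamp)")
    cur.execute("insert into log values ('2014-01-01 12:00:00.123456789')")
    print cur.execute("select t from log").fetchone()[0]
    # 2014-01-01 12:00:00.123456 -- nanoseconds truncated to microseconds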
.. _sqlite3-controlling-transactions:
diff --git a/Doc/library/ssl.rst b/Doc/library/ssl.rst
index 8782439..c115976 100644
--- a/Doc/library/ssl.rst
+++ b/Doc/library/ssl.rst
@@ -30,6 +30,18 @@ probably additional platforms, as long as OpenSSL is installed on that platform.
operating system socket APIs. The installed version of OpenSSL may also
cause variations in behavior.
+.. warning::
+ The ssl module won't validate certificates by default. When used in
+ client mode, this means you are vulnerable to man-in-the-middle attacks.
+
+.. warning::
+
+ OpenSSL's internal random number generator does not properly handle fork.
+ Applications must change the PRNG state of the parent process if they use
+ any SSL feature with :func:`os.fork`. Any successful call of
+ :func:`~ssl.RAND_add`, :func:`~ssl.RAND_bytes` or
+ :func:`~ssl.RAND_pseudo_bytes` is sufficient.
+
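One possible way to follow this advice is to re-seed the parent's PRNG state right after the fork; the 16-byte read and the entropy estimate of 0.0 below are arbitrary choices, not a prescribed recipe::

    import os
    import ssl

    pid = os.fork()
    if pid > 0:
        # parent: mix fresh bytes into the OpenSSL PRNG so its state
        # no longer matches the child's
        ssl.RAND_add(os.urandom(16), 0.0)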
This section documents the objects and functions in the ``ssl`` module; for more
general information about TLS, SSL, and certificates, the reader is referred to
the documents in the "See Also" section at the bottom.
@@ -57,13 +69,16 @@ Functions, Constants, and Exceptions
Takes an instance ``sock`` of :class:`socket.socket`, and returns an instance
of :class:`ssl.SSLSocket`, a subtype of :class:`socket.socket`, which wraps
- the underlying socket in an SSL context. For client-side sockets, the
- context construction is lazy; if the underlying socket isn't connected yet,
- the context construction will be performed after :meth:`connect` is called on
- the socket. For server-side sockets, if the socket has no remote peer, it is
- assumed to be a listening socket, and the server-side SSL wrapping is
- automatically performed on client connections accepted via the :meth:`accept`
- method. :func:`wrap_socket` may raise :exc:`SSLError`.
+ the underlying socket in an SSL context. ``sock`` must be a
+ :data:`~socket.SOCK_STREAM` socket; other socket types are unsupported.
+
+ For client-side sockets, the context construction is lazy; if the
+ underlying socket isn't connected yet, the context construction will be
+ performed after :meth:`connect` is called on the socket. For
+ server-side sockets, if the socket has no remote peer, it is assumed
+ to be a listening socket, and the server-side SSL wrapping is
+ automatically performed on client connections accepted via the
+ :meth:`accept` method. :func:`wrap_socket` may raise :exc:`SSLError`.
The ``keyfile`` and ``certfile`` parameters specify optional files which
contain a certificate to be used to identify the local side of the
@@ -154,7 +169,7 @@ Functions, Constants, and Exceptions
.. function:: RAND_status()
- Returns True if the SSL pseudo-random number generator has been seeded with
+ Returns ``True`` if the SSL pseudo-random number generator has been seeded with
'enough' randomness, and False otherwise. You can use :func:`ssl.RAND_egd`
and :func:`ssl.RAND_add` to increase the randomness of the pseudo-random
number generator.
@@ -298,21 +313,37 @@ Functions, Constants, and Exceptions
SSLSocket Objects
-----------------
-.. method:: SSLSocket.read([nbytes=1024])
-
- Reads up to ``nbytes`` bytes from the SSL-encrypted channel and returns them.
-
-.. method:: SSLSocket.write(data)
-
- Writes the ``data`` to the other side of the connection, using the SSL
- channel to encrypt. Returns the number of bytes written.
+SSL sockets provide the following methods of :ref:`socket-objects`:
+
+- :meth:`~socket.socket.accept()`
+- :meth:`~socket.socket.bind()`
+- :meth:`~socket.socket.close()`
+- :meth:`~socket.socket.connect()`
+- :meth:`~socket.socket.fileno()`
+- :meth:`~socket.socket.getpeername()`, :meth:`~socket.socket.getsockname()`
+- :meth:`~socket.socket.getsockopt()`, :meth:`~socket.socket.setsockopt()`
+- :meth:`~socket.socket.gettimeout()`, :meth:`~socket.socket.settimeout()`,
+ :meth:`~socket.socket.setblocking()`
+- :meth:`~socket.socket.listen()`
+- :meth:`~socket.socket.makefile()`
+- :meth:`~socket.socket.recv()`, :meth:`~socket.socket.recv_into()`
+ (but passing a non-zero ``flags`` argument is not allowed)
+- :meth:`~socket.socket.send()`, :meth:`~socket.socket.sendall()` (with
+ the same limitation)
+- :meth:`~socket.socket.shutdown()`
+
+However, since the SSL (and TLS) protocol has its own framing atop TCP,
+the SSL sockets abstraction can, in certain respects, diverge from
+the specification of normal, OS-level sockets.
+
+SSL sockets also have the following additional methods and attributes:
.. method:: SSLSocket.getpeercert(binary_form=False)
If there is no certificate for the peer on the other end of the connection,
returns ``None``.
- If the parameter ``binary_form`` is :const:`False`, and a certificate was
+ If the ``binary_form`` parameter is :const:`False`, and a certificate was
received from the peer, this method returns a :class:`dict` instance. If the
certificate was not validated, the dict is empty. If the certificate was
validated, it returns a dict with the keys ``subject`` (the principal for
@@ -338,10 +369,16 @@ SSLSocket Objects
If the ``binary_form`` parameter is :const:`True`, and a certificate was
provided, this method returns the DER-encoded form of the entire certificate
as a sequence of bytes, or :const:`None` if the peer did not provide a
- certificate. This return value is independent of validation; if validation
- was required (:const:`CERT_OPTIONAL` or :const:`CERT_REQUIRED`), it will have
- been validated, but if :const:`CERT_NONE` was used to establish the
- connection, the certificate, if present, will not have been validated.
+ certificate. Whether the peer provides a certificate depends on the SSL
+ socket's role:
+
+ * for a client SSL socket, the server will always provide a certificate,
+ regardless of whether validation was required;
+
+ * for a server SSL socket, the client will only provide a certificate
+ when requested by the server; therefore :meth:`getpeercert` will return
+ :const:`None` if you used :const:`CERT_NONE` (rather than
+ :const:`CERT_OPTIONAL` or :const:`CERT_REQUIRED`).
.. method:: SSLSocket.cipher()
@@ -361,7 +398,7 @@ SSLSocket Objects
try:
s.do_handshake()
break
- except ssl.SSLError, err:
+ except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
select.select([s], [], [])
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
@@ -453,8 +490,7 @@ these chains concatenated together. For validation, Python will use the first
chain it finds in the file which matches.
Some "standard" root certificates are available from various certification
-authorities: `CACert.org <http://www.cacert.org/index.php?id=3>`_, `Thawte
-<http://www.thawte.com/roots/>`_, `Verisign
+authorities: `Thawte <http://www.thawte.com/roots/>`_, `Verisign
<http://www.verisign.com/support/roots.html>`_, `Positive SSL
<http://www.PositiveSSL.com/ssl-certificate-support/cert_installation/UTN-USERFirst-Hardware.crt>`_
(used by python.org), `Equifax and GeoTrust
@@ -619,10 +655,10 @@ And go back to listening for new client connections.
.. seealso::
Class :class:`socket.socket`
- Documentation of underlying :mod:`socket` class
+ Documentation of underlying :mod:`socket` class
- `TLS (Transport Layer Security) and SSL (Secure Socket Layer) <http://www3.rad.com/networks/applications/secure/tls.htm>`_
- Debby Koren
+ `SSL/TLS Strong Encryption: An Introduction <http://httpd.apache.org/docs/trunk/en/ssl/ssl_intro.html>`_
+ Intro from the Apache webserver documentation
`RFC 1422: Privacy Enhancement for Internet Electronic Mail: Part II: Certificate-Based Key Management <http://www.ietf.org/rfc/rfc1422>`_
Steve Kent
diff --git a/Doc/library/stat.rst b/Doc/library/stat.rst
index 6b0e1b4..a8f411a 100644
--- a/Doc/library/stat.rst
+++ b/Doc/library/stat.rst
@@ -1,5 +1,5 @@
-:mod:`stat` --- Interpreting :func:`stat` results
-=================================================
+:mod:`stat` --- Interpreting :func:`~os.stat` results
+=====================================================
.. module:: stat
:synopsis: Utilities for interpreting the results of os.stat(), os.lstat() and os.fstat().
@@ -171,10 +171,6 @@ The variables below define the flags used in the :data:`ST_MODE` field.
Use of the functions above is more portable than use of the first set of flags:
-.. data:: S_IFMT
-
- Bit mask for the file type bit fields.
-
.. data:: S_IFSOCK
Socket.
diff --git a/Doc/library/statvfs.rst b/Doc/library/statvfs.rst
index 748b7f9..6f44b2c 100644
--- a/Doc/library/statvfs.rst
+++ b/Doc/library/statvfs.rst
@@ -6,7 +6,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`statvfs` module has been deprecated for removal in Python 3.0.
+ The :mod:`statvfs` module has been removed in Python 3.
.. sectionauthor:: Moshe Zadka <moshez@zadka.site.co.il>
diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst
index e560360..f229967 100644
--- a/Doc/library/stdtypes.rst
+++ b/Doc/library/stdtypes.rst
@@ -26,7 +26,7 @@ instances and exceptions.
Some operations are supported by several object types; in particular,
practically all objects can be compared, tested for truth value, and converted
-to a string (with the :func:`repr` function or the slightly different
+to a string (with the :ref:`repr() <func-repr>` function or the slightly different
:func:`str` function). The latter function is implicitly used when an object is
written by the :func:`print` function.
@@ -189,11 +189,22 @@ such objects are ordered arbitrarily but consistently. The ``<``, ``<=``, ``>``
and ``>=`` operators will raise a :exc:`TypeError` exception when any operand is
a complex number.
-.. index:: single: __cmp__() (instance method)
-
-Instances of a class normally compare as non-equal unless the class defines the
-:meth:`__cmp__` method. Refer to :ref:`customization`) for information on the
-use of this method to effect object comparisons.
+.. index::
+ single: __cmp__() (instance method)
+ single: __eq__() (instance method)
+ single: __ne__() (instance method)
+ single: __lt__() (instance method)
+ single: __le__() (instance method)
+ single: __gt__() (instance method)
+ single: __ge__() (instance method)
+
+Non-identical instances of a class normally compare as non-equal unless the
+class defines the :meth:`__eq__` method or the :meth:`__cmp__` method.
+
+Instances of a class cannot be ordered with respect to other instances of the
+same class, or other types of object, unless the class defines either enough of
+the rich comparison methods (:meth:`__lt__`, :meth:`__le__`, :meth:`__gt__`, and
+:meth:`__ge__`) or the :meth:`__cmp__` method.
.. impl-detail::
@@ -388,8 +399,8 @@ All :class:`numbers.Real` types (:class:`int`, :class:`long`, and
| ``math.trunc(x)`` | *x* truncated to Integral | |
+--------------------+------------------------------------+--------+
| ``round(x[, n])`` | *x* rounded to n digits, | |
-| | rounding half to even. If n is | |
-| | omitted, it defaults to 0. | |
+| | rounding ties away from zero. If n | |
+| | is omitted, it defaults to 0. | |
+--------------------+------------------------------------+--------+
| ``math.floor(x)`` | the greatest integral float <= *x* | |
+--------------------+------------------------------------+--------+
@@ -614,7 +625,7 @@ support:
iterators for those iteration types. (An example of an object supporting
multiple forms of iteration would be a tree structure which supports both
breadth-first and depth-first traversal.) This method corresponds to the
- :attr:`tp_iter` slot of the type structure for Python objects in the Python/C
+ :c:member:`~PyTypeObject.tp_iter` slot of the type structure for Python objects in the Python/C
API.
The iterator objects themselves are required to support the following two
@@ -625,7 +636,7 @@ methods, which together form the :dfn:`iterator protocol`:
Return the iterator object itself. This is required to allow both containers
and iterators to be used with the :keyword:`for` and :keyword:`in` statements.
- This method corresponds to the :attr:`tp_iter` slot of the type structure for
+ This method corresponds to the :c:member:`~PyTypeObject.tp_iter` slot of the type structure for
Python objects in the Python/C API.
@@ -633,7 +644,7 @@ methods, which together form the :dfn:`iterator protocol`:
Return the next item from the container. If there are no further items, raise
the :exc:`StopIteration` exception. This method corresponds to the
- :attr:`tp_iternext` slot of the type structure for Python objects in the
+ :c:member:`~PyTypeObject.tp_iternext` slot of the type structure for Python objects in the
Python/C API.
Python defines several iterator objects to support iteration over general and
@@ -743,11 +754,11 @@ are sequences of the same type; *n*, *i* and *j* are integers:
+------------------+--------------------------------+----------+
| ``max(s)`` | largest item of *s* | |
+------------------+--------------------------------+----------+
-| ``s.index(i)`` | index of the first occurence | |
-| | of *i* in *s* | |
+| ``s.index(x)`` | index of the first occurrence | |
+| | of *x* in *s* | |
+------------------+--------------------------------+----------+
-| ``s.count(i)`` | total number of occurences of | |
-| | *i* in *s* | |
+| ``s.count(x)`` | total number of occurrences of | |
+| | *x* in *s* | |
+------------------+--------------------------------+----------+
Sequence types also support comparisons. In particular, tuples and lists
@@ -931,10 +942,22 @@ string functions based on regular expressions.
.. method:: str.expandtabs([tabsize])
Return a copy of the string where all tab characters are replaced by one or
- more spaces, depending on the current column and the given tab size. The
- column number is reset to zero after each newline occurring in the string.
- If *tabsize* is not given, a tab size of ``8`` characters is assumed. This
- doesn't understand other non-printing characters or escape sequences.
+ more spaces, depending on the current column and the given tab size. Tab
+ positions occur every *tabsize* characters (default is 8, giving tab
+ positions at columns 0, 8, 16 and so on). To expand the string, the current
+ column is set to zero and the string is examined character by character. If
+ the character is a tab (``\t``), one or more space characters are inserted
+ in the result until the current column is equal to the next tab position.
+ (The tab character itself is not copied.) If the character is a newline
+ (``\n``) or return (``\r``), it is copied and the current column is reset to
+ zero. Any other character is copied unchanged and the current column is
+ incremented by one regardless of how the character is represented when
+ printed.
+
+ >>> '01\t012\t0123\t01234'.expandtabs()
+ '01 012 0123 01234'
+ >>> '01\t012\t0123\t01234'.expandtabs(4)
+ '01 012 0123 01234'
.. method:: str.find(sub[, start[, end]])
@@ -969,7 +992,7 @@ string functions based on regular expressions.
See :ref:`formatstrings` for a description of the various formatting options
that can be specified in format strings.
- This method of string formatting is the new standard in Python 3.0, and
+ This method of string formatting is the new standard in Python 3, and
should be preferred to the ``%`` formatting described in
:ref:`string-formatting` in new code.
@@ -1161,8 +1184,8 @@ string functions based on regular expressions.
Return a list of the words in the string, using *sep* as the delimiter
string. If *maxsplit* is given, at most *maxsplit* splits are done (thus,
the list will have at most ``maxsplit+1`` elements). If *maxsplit* is not
- specified, then there is no limit on the number of splits (all possible
- splits are made).
+ specified or ``-1``, then there is no limit on the number of splits
+ (all possible splits are made).
If *sep* is given, consecutive delimiters are not grouped together and are
deemed to delimit empty strings (for example, ``'1,,2'.split(',')`` returns
@@ -1181,11 +1204,23 @@ string functions based on regular expressions.
``' 1 2 3 '.split(None, 1)`` returns ``['1', '2 3 ']``.
+.. index::
+ single: universal newlines; str.splitlines method
+
.. method:: str.splitlines([keepends])
- Return a list of the lines in the string, breaking at line boundaries. Line
- breaks are not included in the resulting list unless *keepends* is given and
- true.
+ Return a list of the lines in the string, breaking at line boundaries.
+ This method uses the :term:`universal newlines` approach to splitting lines.
+ Line breaks are not included in the resulting list unless *keepends* is
+ given and true.
+
+ For example, ``'ab c\n\nde fg\rkl\r\n'.splitlines()`` returns
+ ``['ab c', '', 'de fg', 'kl']``, while the same call with ``splitlines(True)``
+ returns ``['ab c\n', '\n', 'de fg\r', 'kl\r\n']``.
+
+ Unlike :meth:`~str.split` when a delimiter string *sep* is given, this
+ method returns an empty list for the empty string, and a terminal line
+ break does not result in an extra line.
.. method:: str.startswith(prefix[, start[, end]])
@@ -1241,11 +1276,11 @@ string functions based on regular expressions.
>>> import re
>>> def titlecase(s):
- return re.sub(r"[A-Za-z]+('[A-Za-z]+)?",
- lambda mo: mo.group(0)[0].upper() +
- mo.group(0)[1:].lower(),
- s)
-
+ ... return re.sub(r"[A-Za-z]+('[A-Za-z]+)?",
+ ... lambda mo: mo.group(0)[0].upper() +
+ ... mo.group(0)[1:].lower(),
+ ... s)
+ ...
>>> titlecase("they're bill's friends.")
"They're Bill's Friends."
@@ -1440,7 +1475,7 @@ The conversion types are:
| | character string). | |
+------------+-----------------------------------------------------+-------+
| ``'r'`` | String (converts any Python object using | \(5) |
-| | :func:`repr`). | |
+| | :ref:`repr() <func-repr>`). | |
+------------+-----------------------------------------------------+-------+
| ``'s'`` | String (converts any Python object using | \(6) |
| | :func:`str`). | |
@@ -1633,9 +1668,8 @@ Notes:
Previously, all negative indices were truncated to zero.
(6)
- The :meth:`pop` method is only supported by the list and array types. The
- optional argument *i* defaults to ``-1``, so that by default the last item is
- removed and returned.
+ The :meth:`pop` method's optional argument *i* defaults to ``-1``, so that
+ by default the last item is removed and returned.
(7)
The :meth:`sort` and :meth:`reverse` methods modify the list in place for
@@ -1710,11 +1744,11 @@ other sequence-like behavior.
There are currently two built-in set types, :class:`set` and :class:`frozenset`.
The :class:`set` type is mutable --- the contents can be changed using methods
-like :meth:`add` and :meth:`remove`. Since it is mutable, it has no hash value
-and cannot be used as either a dictionary key or as an element of another set.
-The :class:`frozenset` type is immutable and :term:`hashable` --- its contents
-cannot be altered after it is created; it can therefore be used as a dictionary
-key or as an element of another set.
+like :meth:`~set.add` and :meth:`~set.remove`. Since it is mutable, it has no
+hash value and cannot be used as either a dictionary key or as an element of
+another set. The :class:`frozenset` type is immutable and :term:`hashable` ---
+its contents cannot be altered after it is created; it can therefore be used as
+a dictionary key or as an element of another set.
As of Python 2.7, non-empty sets (not frozensets) can be created by placing a
comma-separated list of elements within braces, for example: ``{'jack',
@@ -1726,9 +1760,10 @@ The constructors for both classes work the same:
frozenset([iterable])
Return a new set or frozenset object whose elements are taken from
- *iterable*. The elements of a set must be hashable. To represent sets of
- sets, the inner sets must be :class:`frozenset` objects. If *iterable* is
- not specified, a new empty set is returned.
+ *iterable*. The elements of a set must be :term:`hashable`. To
+ represent sets of sets, the inner sets must be :class:`frozenset`
+ objects. If *iterable* is not specified, a new empty set is
+ returned.
Instances of :class:`set` and :class:`frozenset` provide the following
operations:
@@ -1747,7 +1782,7 @@ The constructors for both classes work the same:
.. method:: isdisjoint(other)
- Return True if the set has no elements in common with *other*. Sets are
+ Return ``True`` if the set has no elements in common with *other*. Sets are
disjoint if and only if their intersection is the empty set.
.. versionadded:: 2.6
@@ -1759,7 +1794,7 @@ The constructors for both classes work the same:
.. method:: set < other
- Test whether the set is a true subset of *other*, that is,
+ Test whether the set is a proper subset of *other*, that is,
``set <= other and set != other``.
.. method:: issuperset(other)
@@ -1769,7 +1804,7 @@ The constructors for both classes work the same:
.. method:: set > other
- Test whether the set is a true superset of *other*, that is, ``set >=
+ Test whether the set is a proper superset of *other*, that is, ``set >=
other and set != other``.
.. method:: union(other, ...)
@@ -1824,8 +1859,8 @@ The constructors for both classes work the same:
based on their members. For example, ``set('abc') == frozenset('abc')``
returns ``True`` and so does ``set('abc') in set([frozenset('abc')])``.
- The subset and equality comparisons do not generalize to a complete ordering
- function. For example, any two disjoint sets are not equal and are not
+ The subset and equality comparisons do not generalize to a total ordering
+ function. For example, any two non-empty disjoint sets are not equal and are not
subsets of each other, so *all* of the following return ``False``: ``a<b``,
``a==b``, or ``a>b``. Accordingly, sets do not implement the :meth:`__cmp__`
method.
@@ -1925,7 +1960,7 @@ Mapping Types --- :class:`dict`
statement: del
builtin: len
-A :dfn:`mapping` object maps :term:`hashable` values to arbitrary objects.
+A :term:`mapping` object maps :term:`hashable` values to arbitrary objects.
Mappings are mutable objects. There is currently only one standard mapping
type, the :dfn:`dictionary`. (For other containers see the built in
:class:`list`, :class:`set`, and :class:`tuple` classes, and the
@@ -1944,32 +1979,41 @@ Dictionaries can be created by placing a comma-separated list of ``key: value``
pairs within braces, for example: ``{'jack': 4098, 'sjoerd': 4127}`` or ``{4098:
'jack', 4127: 'sjoerd'}``, or by the :class:`dict` constructor.
-.. class:: dict([arg])
-
- Return a new dictionary initialized from an optional positional argument or from
- a set of keyword arguments. If no arguments are given, return a new empty
- dictionary. If the positional argument *arg* is a mapping object, return a
- dictionary mapping the same keys to the same values as does the mapping object.
- Otherwise the positional argument must be a sequence, a container that supports
- iteration, or an iterator object. The elements of the argument must each also
- be of one of those kinds, and each must in turn contain exactly two objects.
- The first is used as a key in the new dictionary, and the second as the key's
- value. If a given key is seen more than once, the last value associated with it
- is retained in the new dictionary.
-
- If keyword arguments are given, the keywords themselves with their associated
- values are added as items to the dictionary. If a key is specified both in the
- positional argument and as a keyword argument, the value associated with the
- keyword is retained in the dictionary. For example, these all return a
- dictionary equal to ``{"one": 1, "two": 2}``:
-
- * ``dict(one=1, two=2)``
- * ``dict({'one': 1, 'two': 2})``
- * ``dict(zip(('one', 'two'), (1, 2)))``
- * ``dict([['two', 2], ['one', 1]])``
-
- The first example only works for keys that are valid Python
- identifiers; the others work with any valid keys.
+.. class:: dict(**kwarg)
+ dict(mapping, **kwarg)
+ dict(iterable, **kwarg)
+
+ Return a new dictionary initialized from an optional positional argument
+ and a possibly empty set of keyword arguments.
+
+ If no positional argument is given, an empty dictionary is created.
+ If a positional argument is given and it is a mapping object, a dictionary
+ is created with the same key-value pairs as the mapping object. Otherwise,
+ the positional argument must be an :term:`iterable` object. Each item in
+ the iterable must itself be an iterable with exactly two objects. The
+ first object of each item becomes a key in the new dictionary, and the
+ second object the corresponding value. If a key occurs more than once, the
+ last value for that key becomes the corresponding value in the new
+ dictionary.
+
+ If keyword arguments are given, the keyword arguments and their values are
+ added to the dictionary created from the positional argument. If a key
+ being added is already present, the value from the keyword argument
+ replaces the value from the positional argument.
+
+ To illustrate, the following examples all return a dictionary equal to
+ ``{"one": 1, "two": 2, "three": 3}``::
+
+ >>> a = dict(one=1, two=2, three=3)
+ >>> b = {'one': 1, 'two': 2, 'three': 3}
+ >>> c = dict(zip(['one', 'two', 'three'], [1, 2, 3]))
+ >>> d = dict([('two', 2), ('one', 1), ('three', 3)])
+ >>> e = dict({'three': 3, 'one': 1, 'two': 2})
+ >>> a == b == c == d == e
+ True
+
+ Providing keyword arguments as in the first example only works for keys that
+ are valid Python identifiers. Otherwise, any valid keys can be used.
.. versionadded:: 2.2
@@ -2309,7 +2353,7 @@ Files have the following methods:
with open("hello.txt") as f:
for line in f:
- print line
+ print line,
In older versions of Python, you would have needed to do this to get the same
effect::
@@ -2317,7 +2361,7 @@ Files have the following methods:
f = open("hello.txt")
try:
for line in f:
- print line
+ print line,
finally:
f.close()
@@ -2371,14 +2415,14 @@ Files have the following methods:
A file object is its own iterator, for example ``iter(f)`` returns *f* (unless
*f* is closed). When a file is used as an iterator, typically in a
- :keyword:`for` loop (for example, ``for line in f: print line``), the
+ :keyword:`for` loop (for example, ``for line in f: print line.strip()``), the
:meth:`~file.next` method is called repeatedly. This method returns the next input
line, or raises :exc:`StopIteration` when EOF is hit when the file is open for
reading (behavior is undefined when the file is open for writing). In order to
make a :keyword:`for` loop the most efficient way of looping over the lines of a
file (a very common operation), the :meth:`~file.next` method uses a hidden read-ahead
buffer. As a consequence of using a read-ahead buffer, combining :meth:`~file.next`
- with other file methods (like :meth:`readline`) does not work right. However,
+ with other file methods (like :meth:`~file.readline`) does not work right. However,
using :meth:`seek` to reposition the file to an absolute position will flush the
read-ahead buffer.
@@ -2420,7 +2464,7 @@ Files have the following methods:
.. method:: file.readlines([sizehint])
- Read until EOF using :meth:`readline` and return a list containing the lines
+ Read until EOF using :meth:`~file.readline` and return a list containing the lines
thus read. If the optional *sizehint* argument is present, instead of
reading up to EOF, whole lines totalling approximately *sizehint* bytes
(possibly after rounding up to an internal buffer size) are read. Objects
@@ -2500,7 +2544,7 @@ Files have the following methods:
add line separators.)
Files support the iterator protocol. Each iteration returns the same result as
-``file.readline()``, and iteration ends when the :meth:`readline` method returns
+:meth:`~file.readline`, and iteration ends when the :meth:`~file.readline` method returns
an empty string.
File objects also offer a number of other interesting attributes. These are not
@@ -2549,16 +2593,19 @@ the particular object.
form ``<...>``. This is a read-only attribute and may not be present on all
file-like objects.
+ .. index::
+ single: universal newlines; file.newlines attribute
+
.. attribute:: file.newlines
- If Python was built with universal newlines enabled (the default) this
+ If Python was built with :term:`universal newlines` enabled (the default) this
read-only attribute exists, and for files opened in universal newline read
mode it keeps track of the types of newlines encountered while reading the
file. The values it can take are ``'\r'``, ``'\n'``, ``'\r\n'``, ``None``
(unknown, no newlines read yet) or a tuple containing all the newline types
seen, to indicate that multiple newline conventions were encountered. For
- files not opened in universal newline read mode the value of this attribute
+ files not opened in universal newlines read mode the value of this attribute
will be ``None``.
@@ -2791,12 +2838,12 @@ statement is not, strictly speaking, an operation on a module object; ``import
foo`` does not require a module object named *foo* to exist, rather it requires
an (external) *definition* for a module named *foo* somewhere.)
-A special attribute of every module is :attr:`__dict__`. This is the dictionary
-containing the module's symbol table. Modifying this dictionary will actually
-change the module's symbol table, but direct assignment to the :attr:`__dict__`
-attribute is not possible (you can write ``m.__dict__['a'] = 1``, which defines
-``m.a`` to be ``1``, but you can't write ``m.__dict__ = {}``). Modifying
-:attr:`__dict__` directly is not recommended.
+A special attribute of every module is :attr:`~object.__dict__`. This is the
+dictionary containing the module's symbol table. Modifying this dictionary will
+actually change the module's symbol table, but direct assignment to the
+:attr:`__dict__` attribute is not possible (you can write
+``m.__dict__['a'] = 1``, which defines ``m.a`` to be ``1``, but you can't write
+``m.__dict__ = {}``). Modifying :attr:`__dict__` directly is not recommended.
Modules built into the interpreter are written like this: ``<module 'sys'
(built-in)>``. If loaded from a file, they are written as ``<module 'os' from
@@ -2854,16 +2901,23 @@ that class), otherwise a :exc:`TypeError` is raised.
Like function objects, methods objects support getting arbitrary attributes.
However, since method attributes are actually stored on the underlying function
object (``meth.im_func``), setting method attributes on either bound or unbound
-methods is disallowed. Attempting to set a method attribute results in a
-:exc:`TypeError` being raised. In order to set a method attribute, you need to
-explicitly set it on the underlying function object::
-
- class C:
- def method(self):
- pass
+methods is disallowed. Attempting to set an attribute on a method results in
+an :exc:`AttributeError` being raised. In order to set a method attribute, you
+need to explicitly set it on the underlying function object::
+
+ >>> class C:
+ ... def method(self):
+ ... pass
+ ...
+ >>> c = C()
+ >>> c.method.whoami = 'my name is method' # can't set on the method
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ AttributeError: 'instancemethod' object has no attribute 'whoami'
+ >>> c.method.im_func.whoami = 'my name is method'
+ >>> c.method.whoami
+ 'my name is method'
- c = C()
- c.method.im_func.whoami = 'my name is c'
See :ref:`types` for more information.
@@ -3033,7 +3087,7 @@ The following attributes are only supported by :term:`new-style class`\ es.
This method can be overridden by a metaclass to customize the method
resolution order for its instances. It is called at class instantiation, and
- its result is stored in :attr:`__mro__`.
+ its result is stored in :attr:`~class.__mro__`.
.. method:: class.__subclasses__
diff --git a/Doc/library/string.rst b/Doc/library/string.rst
index 4c3abcd..b0ffb6a 100644
--- a/Doc/library/string.rst
+++ b/Doc/library/string.rst
@@ -123,8 +123,8 @@ string formatting behaviors using the same implementation as the built-in
.. method:: format(format_string, *args, **kwargs)
- :meth:`format` is the primary API method. It takes a format template
- string, and an arbitrary set of positional and keyword argument.
+ :meth:`format` is the primary API method. It takes a format string and
+ an arbitrary set of positional and keyword arguments.
:meth:`format` is just a wrapper that calls :meth:`vformat`.
.. method:: vformat(format_string, args, kwargs)
@@ -132,9 +132,9 @@ string formatting behaviors using the same implementation as the built-in
This function does the actual work of formatting. It is exposed as a
separate function for cases where you want to pass in a predefined
dictionary of arguments, rather than unpacking and repacking the
- dictionary as individual arguments using the ``*args`` and ``**kwds``
- syntax. :meth:`vformat` does the work of breaking up the format template
- string into character data and replacement fields. It calls the various
+ dictionary as individual arguments using the ``*args`` and ``**kwargs``
+ syntax. :meth:`vformat` does the work of breaking up the format string
+ into character data and replacement fields. It calls the various
methods described below.
In addition, the :class:`Formatter` defines a number of methods that are
@@ -205,7 +205,8 @@ string formatting behaviors using the same implementation as the built-in
Converts the value (returned by :meth:`get_field`) given a conversion type
(as in the tuple returned by the :meth:`parse` method). The default
- version understands 'r' (repr) and 's' (str) conversion types.
+ version understands 's' (str), 'r' (repr) and 'a' (ascii) conversion
+ types.
.. _formatstrings:
@@ -322,18 +323,18 @@ The general form of a *standard format specifier* is:
.. productionlist:: sf
format_spec: [[`fill`]`align`][`sign`][#][0][`width`][,][.`precision`][`type`]
- fill: <a character other than '}'>
+ fill: <any character>
align: "<" | ">" | "=" | "^"
sign: "+" | "-" | " "
width: `integer`
precision: `integer`
type: "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"
-The *fill* character can be any character other than '{' or '}'. The presence
-of a fill character is signaled by the character following it, which must be
-one of the alignment options. If the second character of *format_spec* is not
-a valid alignment option, then it is assumed that both the fill character and
-the alignment option are absent.
+If a valid *align* value is specified, it can be preceded by a *fill*
+character that can be any character and defaults to a space if omitted.
+Note that it is not possible to use ``{`` and ``}`` as *fill* char while
+using the :meth:`str.format` method; this limitation however doesn't
+affect the :func:`format` function.
The meaning of the various alignment options is as follows:
@@ -389,9 +390,9 @@ instead.
*width* is a decimal integer defining the minimum field width. If not
specified, then the field width will be determined by the content.
-If the *width* field is preceded by a zero (``'0'``) character, this enables
-zero-padding. This is equivalent to an *alignment* type of ``'='`` and a *fill*
-character of ``'0'``.
+Preceding the *width* field by a zero (``'0'``) character enables
+sign-aware zero-padding for numeric types. This is equivalent to a *fill*
+character of ``'0'`` with an *alignment* type of ``'='``.
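A few interactive examples of fill, alignment and sign-aware zero-padding (the values are chosen arbitrarily)::

    >>> '{:*>10}'.format('abc')        # fill '*', align right, width 10
    '*******abc'
    >>> '{:*^11}'.format('abc')        # centered, padded on both sides
    '****abc****'
    >>> '{:08.2f}'.format(-3.14159)    # '0' enables sign-aware zero-padding
    '-0003.14'
    >>> format(3, '}<5d')              # format() accepts '}' as the fill character
    '3}}}}'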
The *precision* is a decimal number indicating how many digits should be
displayed after the decimal point for a floating point value formatted with
@@ -452,12 +453,13 @@ The available presentation types for floating point and decimal values are:
+=========+==========================================================+
| ``'e'`` | Exponent notation. Prints the number in scientific |
| | notation using the letter 'e' to indicate the exponent. |
+ | | The default precision is ``6``. |
+---------+----------------------------------------------------------+
| ``'E'`` | Exponent notation. Same as ``'e'`` except it uses an |
| | upper case 'E' as the separator character. |
+---------+----------------------------------------------------------+
| ``'f'`` | Fixed point. Displays the number as a fixed-point |
- | | number. |
+ | | number. The default precision is ``6``. |
+---------+----------------------------------------------------------+
| ``'F'`` | Fixed point. Same as ``'f'``. |
+---------+----------------------------------------------------------+
@@ -483,7 +485,7 @@ The available presentation types for floating point and decimal values are:
| | the precision. |
| | |
| | A precision of ``0`` is treated as equivalent to a |
- | | precision of ``1``. |
+ | | precision of ``1``. The default precision is ``6``. |
+---------+----------------------------------------------------------+
| ``'G'`` | General format. Same as ``'g'`` except switches to |
| | ``'E'`` if the number gets too large. The |
@@ -706,7 +708,7 @@ these rules. The methods of :class:`Template` are:
This is the object passed to the constructor's *template* argument. In
general, you shouldn't change it, but read-only access is not enforced.
-Here is an example of how to use a Template:
+Here is an example of how to use a Template::
>>> from string import Template
>>> s = Template('$who likes $what')
@@ -715,11 +717,11 @@ Here is an example of how to use a Template:
>>> d = dict(who='tim')
>>> Template('Give $who $100').substitute(d)
Traceback (most recent call last):
- [...]
- ValueError: Invalid placeholder in string: line 1, col 10
+ ...
+ ValueError: Invalid placeholder in string: line 1, col 11
>>> Template('$who likes $what').substitute(d)
Traceback (most recent call last):
- [...]
+ ...
KeyError: 'what'
>>> Template('$who likes $what').safe_substitute(d)
'tim likes $what'
@@ -793,7 +795,7 @@ Deprecated string functions
The following list of functions are also defined as methods of string and
Unicode objects; see section :ref:`string-methods` for more information on
those. You should consider these functions as deprecated, although they will
-not be removed until Python 3.0. The functions defined in this module are:
+not be removed until Python 3. The functions defined in this module are:
.. function:: atof(s)
@@ -905,14 +907,15 @@ not be removed until Python 3.0. The functions defined in this module are:
Return a list of the words of the string *s*. If the optional second argument
*sep* is absent or ``None``, the words are separated by arbitrary strings of
- whitespace characters (space, tab, newline, return, formfeed). If the second
+ whitespace characters (space, tab, newline, return, formfeed). If the second
argument *sep* is present and not ``None``, it specifies a string to be used as
the word separator. The returned list will then have one more item than the
- number of non-overlapping occurrences of the separator in the string. The
- optional third argument *maxsplit* defaults to 0. If it is nonzero, at most
- *maxsplit* number of splits occur, and the remainder of the string is returned
- as the final element of the list (thus, the list will have at most
- ``maxsplit+1`` elements).
+ number of non-overlapping occurrences of the separator in the string.
+ If *maxsplit* is given, at most *maxsplit* number of splits occur, and the
+ remainder of the string is returned as the final element of the list (thus,
+ the list will have at most ``maxsplit+1`` elements). If *maxsplit* is not
+ specified or ``-1``, then there is no limit on the number of splits (all
+ possible splits are made).
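+
+ A small doctest-style sketch of the *maxsplit* behaviour just described,
+ using the deprecated function alongside the equivalent string method::
+
+    >>> import string
+    >>> string.split('one two three four', None, 2)
+    ['one', 'two', 'three four']
+    >>> 'one two three four'.split(None, 2)
+    ['one', 'two', 'three four']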
The behavior of split on an empty string depends on the value of *sep*. If *sep*
is not specified, or specified as ``None``, the result will be an empty list.
@@ -925,7 +928,7 @@ not be removed until Python 3.0. The functions defined in this module are:
Return a list of the words of the string *s*, scanning *s* from the end. To all
intents and purposes, the resulting list of words is the same as returned by
:func:`split`, except when the optional third argument *maxsplit* is explicitly
- specified and nonzero. When *maxsplit* is nonzero, at most *maxsplit* number of
+ specified and nonzero. If *maxsplit* is given, at most *maxsplit* number of
splits -- the *rightmost* ones -- occur, and the remainder of the string is
returned as the first element of the list (thus, the list will have at most
``maxsplit+1`` elements).
@@ -1023,13 +1026,14 @@ not be removed until Python 3.0. The functions defined in this module are:
.. function:: zfill(s, width)
- Pad a numeric string on the left with zero digits until the given width is
- reached. Strings starting with a sign are handled correctly.
+ Pad a numeric string *s* on the left with zero digits until the
+ given *width* is reached. Strings starting with a sign are handled
+ correctly.
-.. function:: replace(str, old, new[, maxreplace])
+.. function:: replace(s, old, new[, maxreplace])
- Return a copy of string *str* with all occurrences of substring *old* replaced
+ Return a copy of string *s* with all occurrences of substring *old* replaced
by *new*. If the optional argument *maxreplace* is given, the first
*maxreplace* occurrences are replaced.
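+
+ For example, a quick doctest-style sketch of the two functions described
+ above::
+
+    >>> import string
+    >>> string.zfill('-42', 6)        # the sign stays in front of the zeros
+    '-00042'
+    >>> string.replace('spam spam spam', 'spam', 'eggs', 2)
+    'eggs eggs spam'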
diff --git a/Doc/library/stringprep.rst b/Doc/library/stringprep.rst
index d2f269c..b0944e4 100644
--- a/Doc/library/stringprep.rst
+++ b/Doc/library/stringprep.rst
@@ -4,7 +4,6 @@
.. module:: stringprep
:synopsis: String preparation, as per RFC 3454
- :deprecated:
.. moduleauthor:: Martin v. Löwis <martin@v.loewis.de>
.. sectionauthor:: Martin v. Löwis <martin@v.loewis.de>
diff --git a/Doc/library/struct.rst b/Doc/library/struct.rst
index 4331665..74e3206 100644
--- a/Doc/library/struct.rst
+++ b/Doc/library/struct.rst
@@ -284,7 +284,7 @@ platforms use 32-bit pointers and will use a Python integer.
For the ``'?'`` format character, the return value is either :const:`True` or
:const:`False`. When packing, the truth value of the argument object is used.
Either 0 or 1 in the native or standard bool representation will be packed, and
-any non-zero value will be True when unpacking.
+any non-zero value will be ``True`` when unpacking.
@@ -386,7 +386,7 @@ The :mod:`struct` module also defines the following type:
(``len(string)`` must equal :attr:`self.size`).
- .. method:: unpack_from(buffer[, offset=0])
+ .. method:: unpack_from(buffer, offset=0)
Identical to the :func:`unpack_from` function, using the compiled format.
(``len(buffer[offset:])`` must be at least :attr:`self.size`).
diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst
index 2ba960a..114907f 100644
--- a/Doc/library/subprocess.rst
+++ b/Doc/library/subprocess.rst
@@ -12,7 +12,7 @@
The :mod:`subprocess` module allows you to spawn new processes, connect to their
input/output/error pipes, and obtain their return codes. This module intends to
-replace several other, older modules and functions, such as::
+replace several older modules and functions::
os.system
os.spawn*
@@ -20,20 +20,26 @@ replace several other, older modules and functions, such as::
popen2.*
commands.*
-Information about how the :mod:`subprocess` module can be used to replace these
-modules and functions can be found in the following sections.
+Information about how this module can be used to replace the older
+functions can be found in the subprocess-replacements_ section.
.. seealso::
+ POSIX users (Linux, BSD, etc.) are strongly encouraged to install
+ and use the much more recent subprocess32_ module instead of the
+ version included with Python 2.7. It is a drop-in replacement with
+ better behavior in many situations.
+
:pep:`324` -- PEP proposing the subprocess module
+.. _subprocess32: https://pypi.python.org/pypi/subprocess32/
-Using the subprocess Module
----------------------------
+Using the :mod:`subprocess` Module
+----------------------------------
-The recommended approach to invoking subprocesses is to use the following
-convenience functions for all use cases they can handle. For more advanced
-use cases, the underlying :class:`Popen` interface can be used directly.
+The recommended way to launch subprocesses is to use the following
+convenience functions. For more advanced use cases when these do not
+meet your needs, use the underlying :class:`Popen` interface.
.. function:: call(args, *, stdin=None, stdout=None, stderr=None, shell=False)
@@ -57,16 +63,15 @@ use cases, the underlying :class:`Popen` interface can be used directly.
.. warning::
- Invoking the system shell with ``shell=True`` can be a security hazard
- if combined with untrusted input. See the warning under
- :ref:`frequently-used-arguments` for details.
+ Using ``shell=True`` can be a security hazard. See the warning
+ under :ref:`frequently-used-arguments` for details.
.. note::
- Do not use ``stdout=PIPE`` or ``stderr=PIPE`` with this function. As
- the pipes are not being read in the current process, the child
- process may block if it generates enough output to a pipe to fill up
- the OS pipe buffer.
+ Do not use ``stdout=PIPE`` or ``stderr=PIPE`` with this function
+ as that can deadlock based on the child process output volume.
+ Use :class:`Popen` with the :meth:`communicate` method when you
+ need pipes.
.. function:: check_call(args, *, stdin=None, stdout=None, stderr=None, shell=False)
@@ -74,7 +79,7 @@ use cases, the underlying :class:`Popen` interface can be used directly.
Run command with arguments. Wait for command to complete. If the return
code was zero then return, otherwise raise :exc:`CalledProcessError`. The
:exc:`CalledProcessError` object will have the return code in the
- :attr:`returncode` attribute.
+ :attr:`~CalledProcessError.returncode` attribute.
The arguments shown above are merely the most common ones, described below
in :ref:`frequently-used-arguments` (hence the slightly odd notation in
@@ -96,16 +101,15 @@ use cases, the underlying :class:`Popen` interface can be used directly.
.. warning::
- Invoking the system shell with ``shell=True`` can be a security hazard
- if combined with untrusted input. See the warning under
- :ref:`frequently-used-arguments` for details.
+ Using ``shell=True`` can be a security hazard. See the warning
+ under :ref:`frequently-used-arguments` for details.
.. note::
- Do not use ``stdout=PIPE`` or ``stderr=PIPE`` with this function. As
- the pipes are not being read in the current process, the child
- process may block if it generates enough output to a pipe to fill up
- the OS pipe buffer.
+ Do not use ``stdout=PIPE`` or ``stderr=PIPE`` with this function
+ as that can deadlock based on the child process output volume.
+ Use :class:`Popen` with the :meth:`communicate` method when you
+ need pipes.
.. function:: check_output(args, *, stdin=None, stderr=None, shell=False, universal_newlines=False)
@@ -114,8 +118,8 @@ use cases, the underlying :class:`Popen` interface can be used directly.
If the return code was non-zero it raises a :exc:`CalledProcessError`. The
:exc:`CalledProcessError` object will have the return code in the
- :attr:`returncode` attribute and any output in the :attr:`output`
- attribute.
+ :attr:`~CalledProcessError.returncode` attribute and any output in the
+ :attr:`~CalledProcessError.output` attribute.
The arguments shown above are merely the most common ones, described below
in :ref:`frequently-used-arguments` (hence the slightly odd notation in
@@ -147,15 +151,14 @@ use cases, the underlying :class:`Popen` interface can be used directly.
.. warning::
- Invoking the system shell with ``shell=True`` can be a security hazard
- if combined with untrusted input. See the warning under
- :ref:`frequently-used-arguments` for details.
+ Using ``shell=True`` can be a security hazard. See the warning
+ under :ref:`frequently-used-arguments` for details.
.. note::
- Do not use ``stderr=PIPE`` with this function. As the pipe is not being
- read in the current process, the child process may block if it
- generates enough output to the pipe to fill up the OS pipe buffer.
+ Do not use ``stderr=PIPE`` with this function as that can deadlock
+ based on the child process error volume. Use :class:`Popen` with
+ the :meth:`communicate` method when you need a stderr pipe.
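+
+ As a minimal sketch of the recommended :class:`Popen`/:meth:`communicate`
+ pattern when pipes are needed (the command and the input fed to it are only
+ illustrative)::
+
+    from subprocess import Popen, PIPE
+
+    p = Popen(['grep', 'hda'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
+    out, err = p.communicate('hda1 mounted\nsda1 mounted\n')
+    # out == 'hda1 mounted\n'; communicate() avoids the pipe-buffer deadlock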
.. data:: PIPE
@@ -172,6 +175,26 @@ use cases, the underlying :class:`Popen` interface can be used directly.
output.
+.. exception:: CalledProcessError
+
+ Exception raised when a process run by :func:`check_call` or
+ :func:`check_output` returns a non-zero exit status.
+
+ .. attribute:: returncode
+
+ Exit status of the child process.
+
+ .. attribute:: cmd
+
+ Command that was used to spawn the child process.
+
+ .. attribute:: output
+
+ Output of the child process if this exception is raised by
+ :func:`check_output`. Otherwise, ``None``.
+
+
+
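+ A short sketch of how these attributes are typically inspected, assuming a
+ ``false`` executable is available on the system::
+
+    from subprocess import check_output, CalledProcessError
+
+    try:
+        check_output(['false'])
+    except CalledProcessError as e:
+        print e.returncode, e.cmd, repr(e.output)   # 1 ['false'] ''
+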
.. _frequently-used-arguments:
Frequently Used Arguments
@@ -200,15 +223,22 @@ default values. The arguments that are most commonly needed are:
the stderr data from the child process should be captured into the same file
handle as for stdout.
+ .. index::
+ single: universal newlines; subprocess module
+
When *stdout* or *stderr* are pipes and *universal_newlines* is
- :const:`True` then all line endings will be converted to ``'\n'`` as
- described for the universal newlines `'U'`` mode argument to :func:`open`.
+ ``True`` then all line endings will be converted to ``'\n'`` as described
+ for the :term:`universal newlines` ``'U'`` mode argument to :func:`open`.
- If *shell* is :const:`True`, the specified command will be executed through
- the shell. This can be useful if you are using Python primarily for the
+ If *shell* is ``True``, the specified command will be executed through
+ the shell. This can be useful if you are using Python primarily for the
enhanced control flow it offers over most system shells and still want
- access to other shell features such as filename wildcards, shell pipes and
- environment variable expansion.
+ convenient access to other shell features such as shell pipes, filename
+ wildcards, environment variable expansion, and expansion of ``~`` to a
+ user's home directory. However, note that Python itself offers
+ implementations of many shell-like features (in particular, :mod:`glob`,
+ :mod:`fnmatch`, :func:`os.walk`, :func:`os.path.expandvars`,
+ :func:`os.path.expanduser`, and :mod:`shutil`).
.. warning::
@@ -216,8 +246,8 @@ default values. The arguments that are most commonly needed are:
untrusted source makes a program vulnerable to `shell injection
<http://en.wikipedia.org/wiki/Shell_injection#Shell_injection>`_,
a serious security flaw which can result in arbitrary command execution.
- For this reason, the use of *shell=True* is **strongly discouraged** in cases
- where the command string is constructed from external input::
+ For this reason, the use of ``shell=True`` is **strongly discouraged**
+ in cases where the command string is constructed from external input::
>>> from subprocess import call
>>> filename = input("What file would you like to display?\n")
@@ -229,6 +259,10 @@ default values. The arguments that are most commonly needed are:
from this vulnerability; see the Note in the :class:`Popen` constructor
documentation for helpful hints in getting ``shell=False`` to work.
+ When using ``shell=True``, :func:`pipes.quote` can be used to properly
+ escape whitespace and shell metacharacters in strings that are going to
+ be used to construct shell commands.
+
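+ For illustration, a minimal sketch of quoting untrusted input with
+ :func:`pipes.quote` before building a shell command (the filename is
+ hypothetical)::
+
+    import pipes
+    from subprocess import call
+
+    filename = "my file; rm -rf ~"              # untrusted input
+    call("cat " + pipes.quote(filename), shell=True)
+    # runs: cat 'my file; rm -rf ~'  (the metacharacters are not interpreted)
+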
These options, along with all of the other options, are described in more
detail in the :class:`Popen` constructor documentation.
@@ -242,23 +276,26 @@ are able to handle the less common cases not covered by the convenience
functions.
-.. class:: Popen(args, bufsize=0, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=False, shell=False, cwd=None, env=None, universal_newlines=False, startupinfo=None, creationflags=0)
+.. class:: Popen(args, bufsize=0, executable=None, stdin=None, stdout=None, \
+ stderr=None, preexec_fn=None, close_fds=False, shell=False, \
+ cwd=None, env=None, universal_newlines=False, \
+ startupinfo=None, creationflags=0)
- Arguments are:
+ Execute a child program in a new process. On Unix, the class uses
+ :meth:`os.execvp`-like behavior to execute the child program. On Windows,
+ the class uses the Windows ``CreateProcess()`` function. The arguments to
+ :class:`Popen` are as follows.
- *args* should be a string, or a sequence of program arguments. The program
- to execute is normally the first item in the args sequence or the string if
- a string is given, but can be explicitly set by using the *executable*
- argument. When *executable* is given, the first item in the args sequence
- is still treated by most programs as the command name, which can then be
- different from the actual executable name. On Unix, it becomes the display
- name for the executing program in utilities such as :program:`ps`.
+ *args* should be a sequence of program arguments or else a single string.
+ By default, the program to execute is the first item in *args* if *args* is
+ a sequence. If *args* is a string, the interpretation is
+ platform-dependent and described below. See the *shell* and *executable*
+ arguments for additional differences from the default behavior. Unless
+ otherwise stated, it is recommended to pass *args* as a sequence.
- On Unix, with *shell=False* (default): In this case, the Popen class uses
- :meth:`os.execvp` to execute the child program. *args* should normally be a
- sequence. If a string is specified for *args*, it will be used as the name
- or path of the program to execute; this will only work if the program is
- being given no arguments.
+ On Unix, if *args* is a string, the string is interpreted as the name or
+ path of the program to execute. However, this can only be done if not
+ passing arguments to the program.
.. note::
@@ -279,20 +316,36 @@ functions.
used in the shell (such as filenames containing spaces or the *echo* command
shown above) are single list elements.
- On Unix, with *shell=True*: If args is a string, it specifies the command
- string to execute through the shell. This means that the string must be
+ On Windows, if *args* is a sequence, it will be converted to a string in a
+ manner described in :ref:`converting-argument-sequence`. This is because
+ the underlying ``CreateProcess()`` operates on strings.
+
+ The *shell* argument (which defaults to *False*) specifies whether to use
+ the shell as the program to execute. If *shell* is *True*, it is
+ recommended to pass *args* as a string rather than as a sequence.
+
+ On Unix with ``shell=True``, the shell defaults to :file:`/bin/sh`. If
+ *args* is a string, the string specifies the command
+ to execute through the shell. This means that the string must be
formatted exactly as it would be when typed at the shell prompt. This
includes, for example, quoting or backslash escaping filenames with spaces in
them. If *args* is a sequence, the first item specifies the command string, and
any additional items will be treated as additional arguments to the shell
- itself. That is to say, *Popen* does the equivalent of::
+ itself. That is to say, :class:`Popen` does the equivalent of::
Popen(['/bin/sh', '-c', args[0], args[1], ...])
- On Windows: the :class:`Popen` class uses CreateProcess() to execute the child
- child program, which operates on strings. If *args* is a sequence, it will
- be converted to a string in a manner described in
- :ref:`converting-argument-sequence`.
+ On Windows with ``shell=True``, the :envvar:`COMSPEC` environment variable
+ specifies the default shell. The only time you need to specify
+ ``shell=True`` on Windows is when the command you wish to execute is built
+ into the shell (e.g. :command:`dir` or :command:`copy`). You do not need
+ ``shell=True`` to run a batch file or console-based executable.
+
+ .. warning::
+
+ Passing ``shell=True`` can be a security hazard if combined with
+ untrusted input. See the warning under :ref:`frequently-used-arguments`
+ for details.
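+
+ A rough sketch contrasting the two ways of passing *args* discussed above
+ (the commands are only illustrative)::
+
+    from subprocess import Popen
+
+    # preferred: a sequence of arguments, no shell involved
+    Popen(['ls', '-l', '/tmp']).wait()
+
+    # shell=True: a single string, written exactly as at the shell prompt
+    Popen('ls -l /tmp | wc -l', shell=True).wait()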
*bufsize*, if given, has the same meaning as the corresponding argument to the
built-in open() function: :const:`0` means unbuffered, :const:`1` means line
@@ -306,15 +359,15 @@ functions.
enable buffering by setting *bufsize* to either -1 or a large enough
positive value (such as 4096).
- The *executable* argument specifies the program to execute. It is very seldom
- needed: Usually, the program to execute is defined by the *args* argument. If
- ``shell=True``, the *executable* argument specifies which shell to use. On Unix,
- the default shell is :file:`/bin/sh`. On Windows, the default shell is
- specified by the :envvar:`COMSPEC` environment variable. The only reason you
- would need to specify ``shell=True`` on Windows is where the command you
- wish to execute is actually built in to the shell, eg ``dir``, ``copy``.
- You don't need ``shell=True`` to run a batch file, nor to run a console-based
- executable.
+ The *executable* argument specifies a replacement program to execute. It
+ is very seldom needed. When ``shell=False``, *executable* replaces the
+ program to execute specified by *args*. However, the original *args* is
+ still passed to the program. Most programs treat the program specified
+ by *args* as the command name, which can then be different from the program
+ actually executed. On Unix, the *args* name
+ becomes the display name for the executable in utilities such as
+ :program:`ps`. If ``shell=True``, on Unix the *executable* argument
+ specifies a replacement shell for the default :file:`/bin/sh`.
*stdin*, *stdout* and *stderr* specify the executed program's standard input,
standard output and standard error file handles, respectively. Valid values
@@ -335,15 +388,6 @@ functions.
child process. Note that on Windows, you cannot set *close_fds* to true and
also redirect the standard handles by setting *stdin*, *stdout* or *stderr*.
- If *shell* is :const:`True`, the specified command will be executed through the
- shell.
-
- .. warning::
-
- Enabling this option can be a security hazard if combined with untrusted
- input. See the warning under :ref:`frequently-used-arguments`
- for details.
-
If *cwd* is not ``None``, the child's current directory will be changed to *cwd*
before it is executed. Note that this directory is not considered when
searching the executable, so you can't specify the program's path relative to
@@ -362,11 +406,11 @@ functions.
.. _side-by-side assembly: http://en.wikipedia.org/wiki/Side-by-Side_Assembly
- If *universal_newlines* is :const:`True`, the file objects stdout and stderr are
- opened as text files, but lines may be terminated by any of ``'\n'``, the Unix
- end-of-line convention, ``'\r'``, the old Macintosh convention or ``'\r\n'``, the
- Windows convention. All of these external representations are seen as ``'\n'``
- by the Python program.
+ If *universal_newlines* is ``True``, the file objects *stdout* and *stderr*
+ are opened as text files in :term:`universal newlines` mode. Lines may be
+ terminated by any of ``'\n'``, the Unix end-of-line convention, ``'\r'``,
+ the old Macintosh convention or ``'\r\n'``, the Windows convention. All of
+ these external representations are seen as ``'\n'`` by the Python program.
.. note::
@@ -419,14 +463,14 @@ Instances of the :class:`Popen` class have the following methods:
.. method:: Popen.poll()
- Check if child process has terminated. Set and return :attr:`returncode`
- attribute.
+ Check if child process has terminated. Set and return
+ :attr:`~Popen.returncode` attribute.
.. method:: Popen.wait()
- Wait for child process to terminate. Set and return :attr:`returncode`
- attribute.
+ Wait for child process to terminate. Set and return
+ :attr:`~Popen.returncode` attribute.
.. warning::
@@ -490,8 +534,8 @@ The following attributes are also available:
.. warning::
- Use :meth:`communicate` rather than :attr:`.stdin.write <stdin>`,
- :attr:`.stdout.read <stdout>` or :attr:`.stderr.read <stderr>` to avoid
+ Use :meth:`~Popen.communicate` rather than :attr:`.stdin.write <Popen.stdin>`,
+ :attr:`.stdout.read <Popen.stdout>` or :attr:`.stderr.read <Popen.stderr>` to avoid
deadlocks due to any of the other OS pipe buffers filling up and blocking the
child process.
@@ -639,8 +683,8 @@ The :mod:`subprocess` module exposes the following constants.
.. _subprocess-replacements:
-Replacing Older Functions with the subprocess Module
-----------------------------------------------------
+Replacing Older Functions with the :mod:`subprocess` Module
+-----------------------------------------------------------
In this section, "a becomes b" means that b can be used as a replacement for a.
@@ -652,11 +696,11 @@ In this section, "a becomes b" means that b can be used as a replacement for a.
In addition, the replacements using :func:`check_output` will fail with a
:exc:`CalledProcessError` if the requested operation produces a non-zero
- return code. The output is still available as the ``output`` attribute of
- the raised exception.
+ return code. The output is still available as the
+ :attr:`~CalledProcessError.output` attribute of the raised exception.
In the following examples, we assume that the relevant functions have already
-been imported from the subprocess module.
+been imported from the :mod:`subprocess` module.
Replacing /bin/sh shell backquote
@@ -685,7 +729,7 @@ The p1.stdout.close() call after starting the p2 is important in order for p1
to receive a SIGPIPE if p2 exits before p1.
Alternatively, for trusted input, the shell's own pipeline support may still
-be used directly:
+be used directly::
output=`dmesg | grep hda`
# becomes
@@ -697,9 +741,9 @@ Replacing :func:`os.system`
::
- sts = os.system("mycmd" + " myarg")
+ status = os.system("mycmd" + " myarg")
# becomes
- sts = call("mycmd" + " myarg", shell=True)
+ status = subprocess.call("mycmd" + " myarg", shell=True)
Notes:
@@ -713,7 +757,7 @@ A more realistic example would look like this::
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
- except OSError, e:
+ except OSError as e:
print >>sys.stderr, "Execution failed:", e
@@ -822,7 +866,7 @@ Replacing functions from the :mod:`popen2` module
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
- p = Popen(["somestring"], shell=True, bufsize=bufsize,
+ p = Popen("somestring", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
diff --git a/Doc/library/sunaudio.rst b/Doc/library/sunaudio.rst
index 148eb5e..187204e 100644
--- a/Doc/library/sunaudio.rst
+++ b/Doc/library/sunaudio.rst
@@ -8,7 +8,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`sunaudiodev` module has been deprecated for removal in Python 3.0.
+ The :mod:`sunaudiodev` module has been removed in Python 3.
@@ -153,7 +153,7 @@ the SIGPOLL signal. Here's an example of how you might enable this in Python::
:deprecated:
.. deprecated:: 2.6
- The :mod:`SUNAUDIODEV` module has been deprecated for removal in Python 3.0.
+ The :mod:`SUNAUDIODEV` module has been removed in Python 3.
diff --git a/Doc/library/sys.rst b/Doc/library/sys.rst
index 3873eb8..48d2b5b 100644
--- a/Doc/library/sys.rst
+++ b/Doc/library/sys.rst
@@ -1,4 +1,3 @@
-
:mod:`sys` --- System-specific parameters and functions
=======================================================
@@ -208,7 +207,7 @@ always available.
be set at build time with the ``--exec-prefix`` argument to the
:program:`configure` script. Specifically, all configuration files (e.g. the
:file:`pyconfig.h` header file) are installed in the directory
- :file:`{exec_prefix}/lib/python{X.Y}/config', and shared library modules are
+ :file:`{exec_prefix}/lib/python{X.Y}/config`, and shared library modules are
installed in :file:`{exec_prefix}/lib/python{X.Y}/lib-dynload`, where *X.Y*
is the version number of Python, for example ``2.7``.
@@ -291,6 +290,8 @@ always available.
.. versionadded:: 2.6
+ .. versionadded:: 2.7.3
+ The ``hash_randomization`` attribute.
.. data:: float_info
@@ -301,6 +302,8 @@ always available.
5.2.4.2.2 of the 1999 ISO/IEC C standard [C99]_, 'Characteristics of
floating types', for details.
+ .. tabularcolumns:: |l|l|L|
+
+---------------------+----------------+--------------------------------------------------+
| attribute | float.h macro | explanation |
+=====================+================+==================================================+
@@ -598,6 +601,8 @@ always available.
A struct sequence that holds information about Python's
internal representation of integers. The attributes are read only.
+ .. tabularcolumns:: |l|L|
+
+-------------------------+----------------------------------------------+
| Attribute | Explanation |
+=========================+==============================================+
@@ -773,9 +778,9 @@ always available.
independent Python files are installed; by default, this is the string
``'/usr/local'``. This can be set at build time with the ``--prefix``
argument to the :program:`configure` script. The main collection of Python
- library modules is installed in the directory :file:`{prefix}/lib/python{X.Y}``
+ library modules is installed in the directory :file:`{prefix}/lib/python{X.Y}`
while the platform independent header files (all except :file:`pyconfig.h`) are
- stored in :file:`{prefix}/include/python{X.Y}``, where *X.Y* is the version
+ stored in :file:`{prefix}/include/python{X.Y}`, where *X.Y* is the version
number of Python, for example ``2.7``.
@@ -796,10 +801,10 @@ always available.
.. data:: py3kwarning
- Bool containing the status of the Python 3.0 warning flag. It's ``True``
+ Bool containing the status of the Python 3 warning flag. It's ``True``
when Python is started with the -3 option. (This should be considered
read-only; setting it to a different value doesn't have an effect on
- Python 3.0 warnings.)
+ Python 3 warnings.)
.. versionadded:: 2.6
@@ -1072,5 +1077,5 @@ always available.
.. rubric:: Citations
-.. [C99] ISO/IEC 9899:1999. "Programming languages -- C." A public draft of this standard is available at http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1256.pdf .
+.. [C99] ISO/IEC 9899:1999. "Programming languages -- C." A public draft of this standard is available at http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1256.pdf\ .
diff --git a/Doc/library/sysconfig.rst b/Doc/library/sysconfig.rst
index 5ba6fa2..a745a3d 100644
--- a/Doc/library/sysconfig.rst
+++ b/Doc/library/sysconfig.rst
@@ -129,7 +129,7 @@ identifier. Python currently uses eight paths:
one may call this function and get the default value.
If *scheme* is provided, it must be a value from the list returned by
- :func:`get_path_names`. Otherwise, the default scheme for the current
+ :func:`get_scheme_names`. Otherwise, the default scheme for the current
platform is used.
If *vars* is provided, it must be a dictionary of variables that will update
diff --git a/Doc/library/syslog.rst b/Doc/library/syslog.rst
index 21eee1e..9b66abf 100644
--- a/Doc/library/syslog.rst
+++ b/Doc/library/syslog.rst
@@ -17,7 +17,8 @@ library that can speak to a syslog server is available in the
The module defines the following functions:
-.. function:: syslog([priority,] message)
+.. function:: syslog(message)
+ syslog(priority, message)
Send the string *message* to the system logger. A trailing newline is added
if necessary. Each message is tagged with a priority composed of a
@@ -73,7 +74,8 @@ Priority levels (high to low):
Facilities:
:const:`LOG_KERN`, :const:`LOG_USER`, :const:`LOG_MAIL`, :const:`LOG_DAEMON`,
:const:`LOG_AUTH`, :const:`LOG_LPR`, :const:`LOG_NEWS`, :const:`LOG_UUCP`,
- :const:`LOG_CRON` and :const:`LOG_LOCAL0` to :const:`LOG_LOCAL7`.
+ :const:`LOG_CRON`, :const:`LOG_SYSLOG` and :const:`LOG_LOCAL0` to
+ :const:`LOG_LOCAL7`.
Log options:
:const:`LOG_PID`, :const:`LOG_CONS`, :const:`LOG_NDELAY`, :const:`LOG_NOWAIT`
diff --git a/Doc/library/tarfile.rst b/Doc/library/tarfile.rst
index 5502adc..bd218e1 100644
--- a/Doc/library/tarfile.rst
+++ b/Doc/library/tarfile.rst
@@ -16,7 +16,8 @@
The :mod:`tarfile` module makes it possible to read and write tar
archives, including those using gzip or bz2 compression.
-(:file:`.zip` files can be read and written using the :mod:`zipfile` module.)
+Use the :mod:`zipfile` module to read or write :file:`.zip` files, or the
+higher-level functions in :ref:`shutil <archiving-operations>`.
Some facts and figures:
@@ -76,6 +77,10 @@ Some facts and figures:
If *fileobj* is specified, it is used as an alternative to a file object opened
for *name*. It is supposed to be at position 0.
+ For modes ``'w:gz'``, ``'r:gz'``, ``'w:bz2'``, ``'r:bz2'``, :func:`tarfile.open`
+ accepts the keyword argument *compresslevel* to specify the compression level of
+ the file.
+
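+ For example, a minimal sketch of passing *compresslevel* (the archive and
+ member names are hypothetical)::
+
+    import tarfile
+
+    with tarfile.open('example.tar.gz', 'w:gz', compresslevel=9) as tar:
+        tar.add('data.txt')
+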
For special purposes, there is a second format for *mode*:
``'filemode|[compression]'``. :func:`tarfile.open` will return a :class:`TarFile`
object that processes its data as a stream of blocks. No random seeking will
@@ -142,7 +147,7 @@ Some facts and figures:
.. deprecated:: 2.6
- The :class:`TarFileCompat` class has been deprecated for removal in Python 3.0.
+ The :class:`TarFileCompat` class has been removed in Python 3.
.. exception:: TarError
@@ -304,7 +309,7 @@ be finalized; only the internally used file object will be closed. See the
.. versionadded:: 2.6
-.. method:: TarFile.open(...)
+.. classmethod:: TarFile.open(...)
Alternative constructor. The :func:`tarfile.open` function is actually a
shortcut to this classmethod.
@@ -543,7 +548,7 @@ A ``TarInfo`` object has the following public data attributes:
:const:`AREGTYPE`, :const:`LNKTYPE`, :const:`SYMTYPE`, :const:`DIRTYPE`,
:const:`FIFOTYPE`, :const:`CONTTYPE`, :const:`CHRTYPE`, :const:`BLKTYPE`,
:const:`GNUTYPE_SPARSE`. To determine the type of a :class:`TarInfo` object
- more conveniently, use the ``is_*()`` methods below.
+ more conveniently, use the ``is*()`` methods below.
.. attribute:: TarInfo.linkname
diff --git a/Doc/library/telnetlib.rst b/Doc/library/telnetlib.rst
index f6340a9..a3019f5 100644
--- a/Doc/library/telnetlib.rst
+++ b/Doc/library/telnetlib.rst
@@ -189,7 +189,7 @@ Telnet Objects
Read until one from a list of a regular expressions matches.
The first argument is a list of regular expressions, either compiled
- (:class:`re.RegexObject` instances) or uncompiled (strings). The optional second
+ (:ref:`regex objects <re-objects>`) or uncompiled (strings). The optional second
argument is a timeout, in seconds; the default is to block indefinitely.
Return a tuple of three items: the index in the list of the first regular
@@ -208,7 +208,7 @@ Telnet Objects
.. method:: Telnet.set_option_negotiation_callback(callback)
Each time a telnet option is read on the input flow, this *callback* (if set) is
- called with the following parameters : callback(telnet socket, command
+ called with the following parameters: callback(telnet socket, command
(DO/DONT/WILL/WONT), option). No other action is done afterwards by telnetlib.
diff --git a/Doc/library/tempfile.rst b/Doc/library/tempfile.rst
index 936f06a..827f5f5 100644
--- a/Doc/library/tempfile.rst
+++ b/Doc/library/tempfile.rst
@@ -86,13 +86,14 @@ The module defines the following user-callable functions:
data is spooled in memory until the file size exceeds *max_size*, or
until the file's :func:`fileno` method is called, at which point the
contents are written to disk and operation proceeds as with
- :func:`TemporaryFile`.
+ :func:`TemporaryFile`. Also, its ``truncate`` method does not
+ accept a ``size`` argument.
The resulting file has one additional method, :func:`rollover`, which
causes the file to roll over to an on-disk file regardless of its size.
The returned object is a file-like object whose :attr:`_file` attribute
- is either a :class:`StringIO` object or a true file object, depending on
+ is either a :class:`~StringIO.StringIO` object or a true file object, depending on
whether :func:`rollover` has been called. This file-like object can be
used in a :keyword:`with` statement, just like a normal file.
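+
+ A minimal sketch of the spooling behaviour described above::
+
+    import tempfile
+
+    f = tempfile.SpooledTemporaryFile(max_size=1024)
+    f.write('x' * 2048)      # exceeds max_size, so the data rolls over to disk
+    f.seek(0)
+    data = f.read()
+    f.close()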
diff --git a/Doc/library/test.rst b/Doc/library/test.rst
index 3a0b5cb..434ef5a 100644
--- a/Doc/library/test.rst
+++ b/Doc/library/test.rst
@@ -169,10 +169,10 @@ be passed to the script. Specifying a single regression test (:program:`python
the test passed or failed and thus minimize output.
Running :mod:`test.regrtest` directly allows what resources are available for
-tests to use to be set. You do this by using the :option:`-u` command-line
-option. Run :program:`python -m test.regrtest -uall` to turn on all
-resources; specifying ``all`` as an option for ``-u`` enables all
-possible resources. If all but one resource is desired (a more common case), a
+tests to use to be set. You do this by using the ``-u`` command-line
+option. Specifying ``all`` as the value for the ``-u`` option enables all
+possible resources: :program:`python -m test.regrtest -uall`.
+If all but one resource is desired (a more common case), a
comma-separated list of resources that are not desired may be listed after
``all``. The command :program:`python -m test.regrtest -uall,-audio,-largefile`
will run :mod:`test.regrtest` with all resources except the ``audio`` and
@@ -380,7 +380,7 @@ The :mod:`test.test_support` module defines the following functions:
with captured_stdout() as s:
print "hello"
- assert s.getvalue() == "hello"
+ assert s.getvalue() == "hello\n"
.. versionadded:: 2.6
diff --git a/Doc/library/textwrap.rst b/Doc/library/textwrap.rst
index 84e6ee1..a50600e 100644
--- a/Doc/library/textwrap.rst
+++ b/Doc/library/textwrap.rst
@@ -26,6 +26,9 @@ otherwise, you should use an instance of :class:`TextWrapper` for efficiency.
Optional keyword arguments correspond to the instance attributes of
:class:`TextWrapper`, documented below. *width* defaults to ``70``.
+ See the :meth:`TextWrapper.wrap` method for additional details on how
+ :func:`wrap` behaves.
+
.. function:: fill(text[, width[, ...]])
@@ -112,9 +115,11 @@ indentation from strings that have unwanted whitespace to the left of the text.
.. attribute:: replace_whitespace
- (default: ``True``) If true, each whitespace character (as defined by
- ``string.whitespace``) remaining after tab expansion will be replaced by a
- single space.
+ (default: ``True``) If true, after tab expansion but before wrapping,
+ the :meth:`wrap` method will replace each whitespace character
+ with a single space. The whitespace characters replaced are
+ as follows: tab, newline, vertical tab, formfeed, and carriage
+ return (``'\t\n\v\f\r'``).
.. note::
@@ -132,9 +137,11 @@ indentation from strings that have unwanted whitespace to the left of the text.
.. attribute:: drop_whitespace
- (default: ``True``) If true, whitespace that, after wrapping, happens to
- end up at the beginning or end of a line is dropped (leading whitespace in
- the first line is always preserved, though).
+ (default: ``True``) If true, whitespace at the beginning and ending of
+ every line (after wrapping but before indenting) is dropped.
+ Whitespace at the beginning of the paragraph, however, is not dropped
+ if non-whitespace follows it. If whitespace being dropped takes up an
+ entire line, the whole line is dropped.
.. versionadded:: 2.6
Whitespace was always dropped in earlier versions.
@@ -143,7 +150,8 @@ indentation from strings that have unwanted whitespace to the left of the text.
.. attribute:: initial_indent
(default: ``''``) String that will be prepended to the first line of
- wrapped output. Counts towards the length of the first line.
+ wrapped output. Counts towards the length of the first line. The empty
+ string is not indented.
.. attribute:: subsequent_indent
@@ -206,8 +214,9 @@ indentation from strings that have unwanted whitespace to the left of the text.
Wraps the single paragraph in *text* (a string) so every line is at most
:attr:`width` characters long. All wrapping options are taken from
- instance attributes of the :class:`TextWrapper` instance. Returns a list
- of output lines, without final newlines.
+ instance attributes of the :class:`TextWrapper` instance. Returns a list
+ of output lines, without final newlines. If the wrapped output has no
+ content, the returned list is empty.
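+
+ A doctest-style sketch of :meth:`wrap`, including the empty-result case just
+ mentioned::
+
+    >>> import textwrap
+    >>> w = textwrap.TextWrapper(width=15)
+    >>> w.wrap("The quick brown fox jumps over the lazy dog")
+    ['The quick brown', 'fox jumps over', 'the lazy dog']
+    >>> w.wrap("   ")        # nothing left after whitespace handling
+    []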
.. method:: fill(text)
diff --git a/Doc/library/thread.rst b/Doc/library/thread.rst
index 7e8d5c8..15859d1 100644
--- a/Doc/library/thread.rst
+++ b/Doc/library/thread.rst
@@ -5,9 +5,9 @@
:synopsis: Create multiple threads of control within one interpreter.
.. note::
- The :mod:`thread` module has been renamed to :mod:`_thread` in Python 3.0.
+ The :mod:`thread` module has been renamed to :mod:`_thread` in Python 3.
The :term:`2to3` tool will automatically adapt imports when converting your
- sources to 3.0; however, you should consider using the high-level
+ sources to Python 3; however, you should consider using the high-level
:mod:`threading` module instead.
diff --git a/Doc/library/threading.rst b/Doc/library/threading.rst
index 28a3f81..a24c385 100644
--- a/Doc/library/threading.rst
+++ b/Doc/library/threading.rst
@@ -31,10 +31,10 @@ The :mod:`dummy_threading` module is provided for situations where
.. impl-detail::
- Due to the :term:`Global Interpreter Lock`, in CPython only one thread
+ In CPython, due to the :term:`Global Interpreter Lock`, only one thread
can execute Python code at once (even though certain performance-oriented
libraries might overcome this limitation).
- If you want your application to make better of use of the computational
+ If you want your application to make better use of the computational
resources of multi-core machines, you are advised to use
:mod:`multiprocessing`. However, threading is still an appropriate model
if you want to run multiple I/O-bound tasks simultaneously.
@@ -48,6 +48,9 @@ This module defines the following functions and objects:
Return the number of :class:`Thread` objects currently alive. The returned
count is equal to the length of the list returned by :func:`.enumerate`.
+ .. versionchanged:: 2.6
+ Added ``active_count()`` spelling.
+
.. function:: Condition()
:noindex:
@@ -67,6 +70,9 @@ This module defines the following functions and objects:
:mod:`threading` module, a dummy thread object with limited functionality is
returned.
+ .. versionchanged:: 2.6
+ Added ``current_thread()`` spelling.
+
.. function:: enumerate()
@@ -167,7 +173,7 @@ This module defines the following functions and objects:
Set a trace function for all threads started from the :mod:`threading` module.
The *func* will be passed to :func:`sys.settrace` for each thread, before its
- :meth:`run` method is called.
+ :meth:`~Thread.run` method is called.
.. versionadded:: 2.3
@@ -178,7 +184,7 @@ This module defines the following functions and objects:
Set a profile function for all threads started from the :mod:`threading` module.
The *func* will be passed to :func:`sys.setprofile` for each thread, before its
- :meth:`run` method is called.
+ :meth:`~Thread.run` method is called.
.. versionadded:: 2.3
@@ -202,6 +208,13 @@ This module defines the following functions and objects:
.. versionadded:: 2.5
+
+.. exception:: ThreadError
+
+ Raised for various threading-related errors as described below. Note that
+ many interfaces use :exc:`RuntimeError` instead of :exc:`ThreadError`.
+
+
Detailed interfaces for the objects are documented below.
The design of this module is loosely based on Java's threading model. However,
@@ -247,6 +260,12 @@ that the entire Python program exits when only daemon threads are left. The
initial value is inherited from the creating thread. The flag can be set
through the :attr:`daemon` property.
+.. note::
+ Daemon threads are abruptly stopped at shutdown. Their resources (such
+ as open files, database transactions, etc.) may not be released properly.
+ If you want your threads to stop gracefully, make them non-daemonic and
+ use a suitable signalling mechanism such as an :class:`Event`.
+
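+
+For illustration, a minimal sketch of the suggested :class:`Event`-based
+signalling for a non-daemonic worker thread::
+
+   import threading
+
+   stop = threading.Event()
+
+   def worker():
+       while not stop.is_set():
+           stop.wait(0.1)       # placeholder for a unit of periodic work
+
+   t = threading.Thread(target=worker)
+   t.start()
+   stop.set()                   # ask the thread to finish gracefully
+   t.join()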
There is a "main thread" object; this corresponds to the initial thread of
control in the Python program. It is not a daemon thread.
@@ -322,17 +341,19 @@ impossible to detect the termination of alien threads.
:meth:`join` a thread before it has been started and attempts to do so
raises the same exception.
- .. method:: getName()
- setName()
-
- Old API for :attr:`~Thread.name`.
-
.. attribute:: name
A string used for identification purposes only. It has no semantics.
Multiple threads may be given the same name. The initial name is set by
the constructor.
+ .. versionadded:: 2.6
+
+ .. method:: getName()
+ setName()
+
+ Pre-2.6 API for :attr:`~Thread.name`.
+
.. attribute:: ident
The 'thread identifier' of this thread or ``None`` if the thread has not
@@ -352,10 +373,8 @@ impossible to detect the termination of alien threads.
until just after the :meth:`run` method terminates. The module function
:func:`.enumerate` returns a list of all alive threads.
- .. method:: isDaemon()
- setDaemon()
-
- Old API for :attr:`~Thread.daemon`.
+ .. versionchanged:: 2.6
+ Added ``is_alive()`` spelling.
.. attribute:: daemon
@@ -368,6 +387,13 @@ impossible to detect the termination of alien threads.
The entire Python program exits when no alive non-daemon threads are left.
+ .. versionadded:: 2.6
+
+ .. method:: isDaemon()
+ setDaemon()
+
+ Pre-2.6 API for :attr:`~Thread.daemon`.
+
.. _lock-objects:
@@ -387,7 +413,7 @@ blocks until a call to :meth:`release` in another thread changes it to unlocked,
then the :meth:`acquire` call resets it to locked and returns. The
:meth:`release` method should only be called in the locked state; it changes the
state to unlocked and returns immediately. If an attempt is made to release an
-unlocked lock, a :exc:`RuntimeError` will be raised.
+unlocked lock, a :exc:`ThreadError` will be raised.
When more than one thread is blocked in :meth:`acquire` waiting for the state to
turn to unlocked, only one thread proceeds when a :meth:`release` call resets
@@ -401,15 +427,12 @@ All methods are executed atomically.
Acquire a lock, blocking or non-blocking.
- When invoked without arguments, block until the lock is unlocked, then set it to
- locked, and return true.
+ When invoked with the *blocking* argument set to ``True`` (the default),
+ block until the lock is unlocked, then set it to locked and return ``True``.
- When invoked with the *blocking* argument set to true, do the same thing as when
- called without arguments, and return true.
-
- When invoked with the *blocking* argument set to false, do not block. If a call
- without an argument would block, return false immediately; otherwise, do the
- same thing as when called without arguments, and return true.
+ When invoked with the *blocking* argument set to ``False``, do not block.
+ If a call with *blocking* set to ``True`` would block, return ``False``
+ immediately; otherwise, set the lock to locked and return ``True``.
.. method:: Lock.release()
@@ -420,7 +443,7 @@ All methods are executed atomically.
are blocked waiting for the lock to become unlocked, allow exactly one of them
to proceed.
- Do not call this method when the lock is unlocked.
+ When invoked on an unlocked lock, a :exc:`ThreadError` is raised.
There is no return value.
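+
+ For illustration, a small sketch of the blocking and non-blocking forms of
+ :meth:`acquire` and of the error raised when releasing an unlocked lock::
+
+    import threading
+
+    lock = threading.Lock()
+    lock.acquire()                  # blocks until acquired, returns True
+    print lock.acquire(False)       # non-blocking on a held lock -> False
+    lock.release()
+
+    try:
+        lock.release()              # the lock is already unlocked
+    except threading.ThreadError:
+        print 'cannot release an unlocked lock'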
@@ -599,6 +622,9 @@ needs to wake up one consumer thread.
calling thread has not acquired the lock when this method is called, a
:exc:`RuntimeError` is raised.
+ .. versionchanged:: 2.6
+ Added ``notify_all()`` spelling.
+
.. _semaphore-objects:
@@ -698,7 +724,7 @@ An event object manages an internal flag that can be set to true with the
Return true if and only if the internal flag is true.
.. versionchanged:: 2.6
- The ``is_set()`` syntax is new.
+ Added ``is_set()`` spelling.
.. method:: set()
@@ -739,10 +765,11 @@ This class represents an action that should be run only after a certain amount
of time has passed --- a timer. :class:`Timer` is a subclass of :class:`Thread`
and as such also functions as an example of creating custom threads.
-Timers are started, as with threads, by calling their :meth:`start` method. The
-timer can be stopped (before its action has begun) by calling the :meth:`cancel`
-method. The interval the timer will wait before executing its action may not be
-exactly the same as the interval specified by the user.
+Timers are started, as with threads, by calling their :meth:`~Timer.start`
+method. The timer can be stopped (before its action has begun) by calling the
+:meth:`~Timer.cancel` method. The interval the timer will wait before
+executing its action may not be exactly the same as the interval specified by
+the user.
For example::
diff --git a/Doc/library/time.rst b/Doc/library/time.rst
index 56e9019..304bf0c 100644
--- a/Doc/library/time.rst
+++ b/Doc/library/time.rst
@@ -71,9 +71,9 @@ An explanation of some terminology and conventions is in order.
the units in which their value or argument is expressed. E.g. on most Unix
systems, the clock "ticks" only 50 or 100 times a second.
-* On the other hand, the precision of :func:`time` and :func:`sleep` is better
+* On the other hand, the precision of :func:`.time` and :func:`sleep` is better
than their Unix equivalents: times are expressed as floating point numbers,
- :func:`time` returns the most accurate time available (using Unix
+ :func:`.time` returns the most accurate time available (using Unix
:c:func:`gettimeofday` where available), and :func:`sleep` will accept a time
with a nonzero fraction (Unix :c:func:`select` is used to implement this, where
available).
@@ -164,7 +164,7 @@ The module defines the following functions and data items:
Convert a time expressed in seconds since the epoch to a string representing
local time. If *secs* is not provided or :const:`None`, the current time as
- returned by :func:`time` is used. ``ctime(secs)`` is equivalent to
+ returned by :func:`.time` is used. ``ctime(secs)`` is equivalent to
``asctime(localtime(secs))``. Locale information is not used by :func:`ctime`.
.. versionchanged:: 2.1
@@ -183,7 +183,7 @@ The module defines the following functions and data items:
Convert a time expressed in seconds since the epoch to a :class:`struct_time` in
UTC in which the dst flag is always zero. If *secs* is not provided or
- :const:`None`, the current time as returned by :func:`time` is used. Fractions
+ :const:`None`, the current time as returned by :func:`.time` is used. Fractions
of a second are ignored. See above for a description of the
:class:`struct_time` object. See :func:`calendar.timegm` for the inverse of this
function.
@@ -198,7 +198,7 @@ The module defines the following functions and data items:
.. function:: localtime([secs])
Like :func:`gmtime` but converts to local time. If *secs* is not provided or
- :const:`None`, the current time as returned by :func:`time` is used. The dst
+ :const:`None`, the current time as returned by :func:`.time` is used. The dst
flag is set to ``1`` when DST applies to the given time.
.. versionchanged:: 2.1
@@ -213,7 +213,7 @@ The module defines the following functions and data items:
This is the inverse function of :func:`localtime`. Its argument is the
:class:`struct_time` or full 9-tuple (since the dst flag is needed; use ``-1``
as the dst flag if it is unknown) which expresses the time in *local* time, not
- UTC. It returns a floating point number, for compatibility with :func:`time`.
+ UTC. It returns a floating point number, for compatibility with :func:`.time`.
If the input value cannot be represented as a valid time, either
:exc:`OverflowError` or :exc:`ValueError` will be raised (which depends on
whether the invalid value is caught by Python or the underlying C libraries).
@@ -236,7 +236,9 @@ The module defines the following functions and data items:
:func:`gmtime` or :func:`localtime` to a string as specified by the *format*
argument. If *t* is not provided, the current time as returned by
:func:`localtime` is used. *format* must be a string. :exc:`ValueError` is
- raised if any field in *t* is outside of the allowed range.
+ raised if any field in *t* is outside of the allowed range. :func:`strftime`
+ returns a locale-dependent byte string; the result may be converted to unicode
+ by doing ``strftime(<myformat>).decode(locale.getlocale()[1])``.
.. versionchanged:: 2.1
Allowed *t* to be omitted.
@@ -350,8 +352,10 @@ The module defines the following functions and data items:
>>> strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
'Thu, 28 Jun 2001 14:17:15 +0000'
- Additional directives may be supported on certain platforms, but only the ones
- listed here have a meaning standardized by ANSI C.
+ Additional directives may be supported on certain platforms, but only the
+ ones listed here have a meaning standardized by ANSI C. To see the full set
+ of format codes supported on your platform, consult the :manpage:`strftime(3)`
+ documentation.
On some platforms, an optional field width and precision specification can
immediately follow the initial ``'%'`` of a directive in the following order;
@@ -410,7 +414,7 @@ The module defines the following functions and data items:
+-------+-------------------+---------------------------------+
| 4 | :attr:`tm_min` | range [0, 59] |
+-------+-------------------+---------------------------------+
- | 5 | :attr:`tm_sec` | range [0, 61]; see **(1)** in |
+ | 5 | :attr:`tm_sec` | range [0, 61]; see **(2)** in |
| | | :func:`strftime` description |
+-------+-------------------+---------------------------------+
| 6 | :attr:`tm_wday` | range [0, 6], Monday is 0 |
@@ -435,8 +439,8 @@ The module defines the following functions and data items:
.. function:: time()
- Return the time as a floating point number expressed in seconds since the epoch,
- in UTC. Note that even though the time is always returned as a floating point
+ Return the time in seconds since the epoch as a floating point number.
+ Note that even though the time is always returned as a floating point
number, not all systems provide time with a better precision than 1 second.
While this function normally returns non-decreasing values, it can return a
lower value than a previous call if the system clock has been set back between
@@ -547,12 +551,12 @@ The module defines the following functions and data items:
More object-oriented interface to dates and times.
Module :mod:`locale`
- Internationalization services. The locale settings can affect the return values
- for some of the functions in the :mod:`time` module.
+ Internationalization services. The locale setting affects the interpretation
+ of many format specifiers in :func:`strftime` and :func:`strptime`.
Module :mod:`calendar`
- General calendar-related functions. :func:`timegm` is the inverse of
- :func:`gmtime` from this module.
+ General calendar-related functions. :func:`~calendar.timegm` is the
+ inverse of :func:`gmtime` from this module.
.. rubric:: Footnotes
diff --git a/Doc/library/timeit.rst b/Doc/library/timeit.rst
index 7fbe19e..2cb3c9d 100644
--- a/Doc/library/timeit.rst
+++ b/Doc/library/timeit.rst
@@ -16,112 +16,163 @@
--------------
This module provides a simple way to time small bits of Python code. It has both
-command line as well as callable interfaces. It avoids a number of common traps
-for measuring execution times. See also Tim Peters' introduction to the
-"Algorithms" chapter in the Python Cookbook, published by O'Reilly.
+a :ref:`command-line-interface` as well as a :ref:`callable <python-interface>`
+one. It avoids a number of common traps for measuring execution times.
+See also Tim Peters' introduction to the "Algorithms" chapter in the *Python
+Cookbook*, published by O'Reilly.
-The module defines the following public class:
+Basic Examples
+--------------
-.. class:: Timer([stmt='pass' [, setup='pass' [, timer=<timer function>]]])
+The following example shows how the :ref:`command-line-interface`
+can be used to compare three different expressions:
- Class for timing execution speed of small code snippets.
+.. code-block:: sh
- The constructor takes a statement to be timed, an additional statement used for
- setup, and a timer function. Both statements default to ``'pass'``; the timer
- function is platform-dependent (see the module doc string). *stmt* and *setup*
- may also contain multiple statements separated by ``;`` or newlines, as long as
- they don't contain multi-line string literals.
+ $ python -m timeit '"-".join(str(n) for n in range(100))'
+ 10000 loops, best of 3: 40.3 usec per loop
+ $ python -m timeit '"-".join([str(n) for n in range(100)])'
+ 10000 loops, best of 3: 33.4 usec per loop
+ $ python -m timeit '"-".join(map(str, range(100)))'
+ 10000 loops, best of 3: 25.2 usec per loop
- To measure the execution time of the first statement, use the :meth:`timeit`
- method. The :meth:`repeat` method is a convenience to call :meth:`timeit`
- multiple times and return a list of results.
+This can be achieved from the :ref:`python-interface` with::
- .. versionchanged:: 2.6
- The *stmt* and *setup* parameters can now also take objects that are callable
- without arguments. This will embed calls to them in a timer function that will
- then be executed by :meth:`timeit`. Note that the timing overhead is a little
- larger in this case because of the extra function calls.
+ >>> import timeit
+ >>> timeit.timeit('"-".join(str(n) for n in range(100))', number=10000)
+ 0.8187260627746582
+ >>> timeit.timeit('"-".join([str(n) for n in range(100)])', number=10000)
+ 0.7288308143615723
+ >>> timeit.timeit('"-".join(map(str, range(100)))', number=10000)
+ 0.5858950614929199
+Note however that :mod:`timeit` will automatically determine the number of
+repetitions only when the command-line interface is used. In the
+:ref:`timeit-examples` section you can find more advanced examples.
-.. method:: Timer.print_exc([file=None])
- Helper to print a traceback from the timed code.
+.. _python-interface:
- Typical use::
+Python Interface
+----------------
- t = Timer(...) # outside the try/except
- try:
- t.timeit(...) # or t.repeat(...)
- except:
- t.print_exc()
+The module defines three convenience functions and a public class:
- The advantage over the standard traceback is that source lines in the compiled
- template will be displayed. The optional *file* argument directs where the
- traceback is sent; it defaults to ``sys.stderr``.
+.. function:: timeit(stmt='pass', setup='pass', timer=<default timer>, number=1000000)
-.. method:: Timer.repeat([repeat=3 [, number=1000000]])
+ Create a :class:`Timer` instance with the given statement, *setup* code and
+ *timer* function and run its :meth:`.timeit` method with *number* executions.
- Call :meth:`timeit` a few times.
+ .. versionadded:: 2.6
- This is a convenience function that calls the :meth:`timeit` repeatedly,
- returning a list of results. The first argument specifies how many times to
- call :meth:`timeit`. The second argument specifies the *number* argument for
- :func:`timeit`.
- .. note::
+.. function:: repeat(stmt='pass', setup='pass', timer=<default timer>, repeat=3, number=1000000)
- It's tempting to calculate mean and standard deviation from the result vector
- and report these. However, this is not very useful. In a typical case, the
- lowest value gives a lower bound for how fast your machine can run the given
- code snippet; higher values in the result vector are typically not caused by
- variability in Python's speed, but by other processes interfering with your
- timing accuracy. So the :func:`min` of the result is probably the only number
- you should be interested in. After that, you should look at the entire vector
- and apply common sense rather than statistics.
+ Create a :class:`Timer` instance with the given statement, *setup* code and
+ *timer* function and run its :meth:`.repeat` method with the given *repeat*
+ count and *number* executions.
+ .. versionadded:: 2.6
-.. method:: Timer.timeit([number=1000000])
- Time *number* executions of the main statement. This executes the setup
- statement once, and then returns the time it takes to execute the main statement
- a number of times, measured in seconds as a float. The argument is the number
- of times through the loop, defaulting to one million. The main statement, the
- setup statement and the timer function to be used are passed to the constructor.
+.. function:: default_timer()
- .. note::
+ Define a default timer, in a platform-specific manner. On Windows,
+ :func:`time.clock` has microsecond granularity, but :func:`time.time`'s
+ granularity is 1/60th of a second. On Unix, :func:`time.clock` has 1/100th of
+ a second granularity, and :func:`time.time` is much more precise. On either
+ platform, :func:`default_timer` measures wall clock time, not the CPU
+ time. This means that other processes running on the same computer may
+ interfere with the timing.
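+
+   As an illustration only (this sketch is not part of the original module
+   documentation; elapsed values are machine-dependent), the default timer
+   can also be called directly to time a block of code::
+
+      import timeit
+
+      start = timeit.default_timer()
+      "-".join(map(str, range(100)))
+      elapsed = timeit.default_timer() - start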
- By default, :meth:`timeit` temporarily turns off :term:`garbage collection`
- during the timing. The advantage of this approach is that it makes
- independent timings more comparable. This disadvantage is that GC may be
- an important component of the performance of the function being measured.
- If so, GC can be re-enabled as the first statement in the *setup* string.
- For example::
- timeit.Timer('for i in xrange(10): oct(i)', 'gc.enable()').timeit()
+.. class:: Timer(stmt='pass', setup='pass', timer=<timer function>)
-Starting with version 2.6, the module also defines two convenience functions:
+ Class for timing execution speed of small code snippets.
+ The constructor takes a statement to be timed, an additional statement used
+ for setup, and a timer function. Both statements default to ``'pass'``;
+ the timer function is platform-dependent (see the module doc string).
+ *stmt* and *setup* may also contain multiple statements separated by ``;``
+ or newlines, as long as they don't contain multi-line string literals.
-.. function:: repeat(stmt[, setup[, timer[, repeat=3 [, number=1000000]]]])
+ To measure the execution time of the first statement, use the :meth:`.timeit`
+ method. The :meth:`.repeat` method is a convenience to call :meth:`.timeit`
+ multiple times and return a list of results.
- Create a :class:`Timer` instance with the given statement, setup code and timer
- function and run its :meth:`repeat` method with the given repeat count and
- *number* executions.
+ .. versionchanged:: 2.6
+ The *stmt* and *setup* parameters can now also take objects that are
+ callable without arguments. This will embed calls to them in a timer
+ function that will then be executed by :meth:`.timeit`. Note that the
+ timing overhead is a little larger in this case because of the extra
+ function calls.
- .. versionadded:: 2.6
+ .. method:: Timer.timeit(number=1000000)
-.. function:: timeit(stmt[, setup[, timer[, number=1000000]]])
+ Time *number* executions of the main statement. This executes the setup
+ statement once, and then returns the time it takes to execute the main
+ statement a number of times, measured in seconds as a float.
+ The argument is the number of times through the loop, defaulting to one
+ million. The main statement, the setup statement and the timer function
+ to be used are passed to the constructor.
- Create a :class:`Timer` instance with the given statement, setup code and timer
- function and run its :meth:`timeit` method with *number* executions.
+ .. note::
- .. versionadded:: 2.6
+ By default, :meth:`.timeit` temporarily turns off :term:`garbage
+ collection` during the timing. The advantage of this approach is that
+ it makes independent timings more comparable. The disadvantage is
+ that GC may be an important component of the performance of the
+ function being measured. If so, GC can be re-enabled as the first
+ statement in the *setup* string. For example::
+
+ timeit.Timer('for i in xrange(10): oct(i)', 'gc.enable()').timeit()
+
+
+ .. method:: Timer.repeat(repeat=3, number=1000000)
+
+ Call :meth:`.timeit` a few times.
+
+ This is a convenience function that calls the :meth:`.timeit` repeatedly,
+ returning a list of results. The first argument specifies how many times
+ to call :meth:`.timeit`. The second argument specifies the *number*
+ argument for :meth:`.timeit`.
+
+ .. note::
+
+ It's tempting to calculate mean and standard deviation from the result
+ vector and report these. However, this is not very useful.
+ In a typical case, the lowest value gives a lower bound for how fast
+ your machine can run the given code snippet; higher values in the
+ result vector are typically not caused by variability in Python's
+ speed, but by other processes interfering with your timing accuracy.
+ So the :func:`min` of the result is probably the only number you
+ should be interested in. After that, you should look at the entire
+ vector and apply common sense rather than statistics.
+
+
+ .. method:: Timer.print_exc(file=None)
+
+ Helper to print a traceback from the timed code.
+ Typical use::
-Command Line Interface
+ t = Timer(...) # outside the try/except
+ try:
+ t.timeit(...) # or t.repeat(...)
+ except:
+ t.print_exc()
+
+ The advantage over the standard traceback is that source lines in the
+ compiled template will be displayed. The optional *file* argument directs
+ where the traceback is sent; it defaults to :data:`sys.stderr`.
+
+
+.. _command-line-interface:
+
+Command-Line Interface
----------------------
When called as a program from the command line, the following form is used::
@@ -168,13 +219,9 @@ similarly.
If :option:`-n` is not given, a suitable number of loops is calculated by trying
successive powers of 10 until the total time is at least 0.2 seconds.
-The default timer function is platform dependent. On Windows,
-:func:`time.clock` has microsecond granularity but :func:`time.time`'s
-granularity is 1/60th of a second; on Unix, :func:`time.clock` has 1/100th of a
-second granularity and :func:`time.time` is much more precise. On either
-platform, the default timer functions measure wall clock time, not the CPU time.
-This means that other processes running on the same computer may interfere with
-the timing. The best thing to do when accurate timing is necessary is to repeat
+:func:`default_timer` measurements can be affected by other programs running on
+the same machine, so the best thing to do when accurate timing is necessary is
+to repeat
the timing a few times and use the best time. The :option:`-r` option is good
for this; the default of 3 repetitions is probably enough in most cases. On
Unix, you can use :func:`time.clock` to measure CPU time.
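+
+For example, a hypothetical invocation that repeats the timing five times over
+a fixed number of loops (output not shown, as it varies by machine):
+
+.. code-block:: sh
+
+   $ python -m timeit -r 5 -n 10000 '"-".join(map(str, range(100)))'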
@@ -183,25 +230,55 @@ Unix, you can use :func:`time.clock` to measure CPU time.
There is a certain baseline overhead associated with executing a pass statement.
The code here doesn't try to hide it, but you should be aware of it. The
- baseline overhead can be measured by invoking the program without arguments.
+ baseline overhead can be measured by invoking the program without arguments, and
+ it might differ between Python versions. Also, to fairly compare older Python
+ versions to Python 2.3, you may want to use Python's :option:`-O` option for
+ the older versions to avoid timing ``SET_LINENO`` instructions.
-The baseline overhead differs between Python versions! Also, to fairly compare
-older Python versions to Python 2.3, you may want to use Python's :option:`-O`
-option for the older versions to avoid timing ``SET_LINENO`` instructions.
+.. _timeit-examples:
Examples
--------
-Here are two example sessions (one using the command line, one using the module
-interface) that compare the cost of using :func:`hasattr` vs.
-:keyword:`try`/:keyword:`except` to test for missing and present object
-attributes. ::
+It is possible to provide a setup statement that is executed only once at the beginning:
+
+.. code-block:: sh
+
+ $ python -m timeit -s 'text = "sample string"; char = "g"' 'char in text'
+ 10000000 loops, best of 3: 0.0877 usec per loop
+ $ python -m timeit -s 'text = "sample string"; char = "g"' 'text.find(char)'
+ 1000000 loops, best of 3: 0.342 usec per loop
+
+::
+
+ >>> import timeit
+ >>> timeit.timeit('char in text', setup='text = "sample string"; char = "g"')
+ 0.41440500499993504
+ >>> timeit.timeit('text.find(char)', setup='text = "sample string"; char = "g"')
+ 1.7246671520006203
+
+The same can be done using the :class:`Timer` class and its methods::
+
+ >>> import timeit
+ >>> t = timeit.Timer('char in text', setup='text = "sample string"; char = "g"')
+ >>> t.timeit()
+ 0.3955516149999312
+ >>> t.repeat()
+ [0.40193588800002544, 0.3960157959998014, 0.39594301399984033]
+
+
+The following examples show how to time expressions that contain multiple lines.
+Here we compare the cost of using :func:`hasattr` vs. :keyword:`try`/:keyword:`except`
+to test for missing and present object attributes:
+
+.. code-block:: sh
$ python -m timeit 'try:' ' str.__nonzero__' 'except AttributeError:' ' pass'
100000 loops, best of 3: 15.7 usec per loop
$ python -m timeit 'if hasattr(str, "__nonzero__"): pass'
100000 loops, best of 3: 4.26 usec per loop
+
$ python -m timeit 'try:' ' int.__nonzero__' 'except AttributeError:' ' pass'
1000000 loops, best of 3: 1.43 usec per loop
$ python -m timeit 'if hasattr(int, "__nonzero__"): pass'
@@ -210,39 +287,34 @@ attributes. ::
::
>>> import timeit
+ >>> # attribute is missing
>>> s = """\
... try:
... str.__nonzero__
... except AttributeError:
... pass
... """
- >>> t = timeit.Timer(stmt=s)
- >>> print "%.2f usec/pass" % (1000000 * t.timeit(number=100000)/100000)
- 17.09 usec/pass
- >>> s = """\
- ... if hasattr(str, '__nonzero__'): pass
- ... """
- >>> t = timeit.Timer(stmt=s)
- >>> print "%.2f usec/pass" % (1000000 * t.timeit(number=100000)/100000)
- 4.85 usec/pass
+ >>> timeit.timeit(stmt=s, number=100000)
+ 0.9138244460009446
+ >>> s = "if hasattr(str, '__bool__'): pass"
+ >>> timeit.timeit(stmt=s, number=100000)
+ 0.5829014980008651
+ >>>
+ >>> # attribute is present
>>> s = """\
... try:
... int.__nonzero__
... except AttributeError:
... pass
... """
- >>> t = timeit.Timer(stmt=s)
- >>> print "%.2f usec/pass" % (1000000 * t.timeit(number=100000)/100000)
- 1.97 usec/pass
- >>> s = """\
- ... if hasattr(int, '__nonzero__'): pass
- ... """
- >>> t = timeit.Timer(stmt=s)
- >>> print "%.2f usec/pass" % (1000000 * t.timeit(number=100000)/100000)
- 3.15 usec/pass
+ >>> timeit.timeit(stmt=s, number=100000)
+ 0.04215312199994514
+ >>> s = "if hasattr(int, '__bool__'): pass"
+ >>> timeit.timeit(stmt=s, number=100000)
+ 0.08588060699912603
To give the :mod:`timeit` module access to functions you define, you can pass a
-``setup`` parameter which contains an import statement::
+*setup* parameter which contains an import statement::
def test():
"""Stupid test function"""
@@ -251,7 +323,5 @@ To give the :mod:`timeit` module access to functions you define, you can pass a
L.append(i)
if __name__ == '__main__':
- from timeit import Timer
- t = Timer("test()", "from __main__ import test")
- print t.timeit()
-
+ import timeit
+ print(timeit.timeit("test()", setup="from __main__ import test"))
diff --git a/Doc/library/tix.rst b/Doc/library/tix.rst
index 8b5355d..a2f31a0 100644
--- a/Doc/library/tix.rst
+++ b/Doc/library/tix.rst
@@ -24,9 +24,9 @@ special needs of your application and users.
.. note::
- :mod:`Tix` has been renamed to :mod:`tkinter.tix` in Python 3.0. The
+ :mod:`Tix` has been renamed to :mod:`tkinter.tix` in Python 3. The
:term:`2to3` tool will automatically adapt imports when converting your
- sources to 3.0.
+ sources to Python 3.
.. seealso::
@@ -514,7 +514,7 @@ Tix Commands
print root.tix_configure()
-.. method:: tixCommand.tix_configure([cnf,] **kw)
+.. method:: tixCommand.tix_configure(cnf=None, **kw)
Query or modify the configuration options of the Tix application context. If no
option is specified, returns a dictionary of all of the available options. If
diff --git a/Doc/library/tkinter.rst b/Doc/library/tkinter.rst
index 3431f86..ddaeec7 100644
--- a/Doc/library/tkinter.rst
+++ b/Doc/library/tkinter.rst
@@ -13,22 +13,34 @@ is maintained at ActiveState.)
.. note::
- :mod:`Tkinter` has been renamed to :mod:`tkinter` in Python 3.0. The
+ :mod:`Tkinter` has been renamed to :mod:`tkinter` in Python 3. The
:term:`2to3` tool will automatically adapt imports when converting your
- sources to 3.0.
+ sources to Python 3.
.. seealso::
- `Python Tkinter Resources <http://www.python.org/topics/tkinter/>`_
+ `Python Tkinter Resources <https://wiki.python.org/moin/TkInter>`_
The Python Tkinter Topic Guide provides a great deal of information on using Tk
from Python and links to other sources of information on Tk.
- `An Introduction to Tkinter <http://www.pythonware.com/library/an-introduction-to-tkinter.htm>`_
- Fredrik Lundh's on-line reference material.
+ `TKDocs <http://www.tkdocs.com/>`_
+ Extensive tutorial plus friendlier widget pages for some of the widgets.
- `Tkinter reference: a GUI for Python <http://infohost.nmt.edu/tcc/help/pubs/lang.html>`_
+ `Tkinter reference: a GUI for Python <http://infohost.nmt.edu/tcc/help/pubs/tkinter/>`_
On-line reference material.
+ `Tkinter docs from effbot <http://effbot.org/tkinterbook/>`_
+ Online reference for tkinter supported by effbot.org.
+
+ `Tcl/Tk manual <http://www.tcl.tk/man/tcl8.5/>`_
+ Official manual for the latest tcl/tk version.
+
+ `Programming Python <http://www.amazon.com/Programming-Python-Mark-Lutz/dp/0596158106/>`_
+ Book by Mark Lutz, has excellent coverage of Tkinter.
+
+ `Modern Tkinter for Busy Python Developers <http://www.amazon.com/Modern-Tkinter-Python-Developers-ebook/dp/B0071QDNLO/>`_
+ Book by Mark Rozerman about building attractive and modern graphical user interfaces with Python and Tkinter.
+
`Python and Tkinter Programming <http://www.amazon.com/exec/obidos/ASIN/1884777813>`_
The book by John Grayson (ISBN 1-884777-81-3).
@@ -109,7 +121,7 @@ Other modules that provide Tk support include:
:mod:`turtle`
Turtle graphics in a Tk window.
-These have been renamed as well in Python 3.0; they were all made submodules of
+These have been renamed as well in Python 3; they were all made submodules of
the new ``tkinter`` package.
@@ -176,7 +188,7 @@ documentation that exists. Here are some hints:
The Tk/Tcl development is largely taking place at ActiveState.
`Tcl and the Tk Toolkit <http://www.amazon.com/exec/obidos/ASIN/020163337X>`_
- The book by John Ousterhout, the inventor of Tcl .
+ The book by John Ousterhout, the inventor of Tcl.
`Practical Programming in Tcl and Tk <http://www.amazon.com/exec/obidos/ASIN/0130220280>`_
Brent Welch's encyclopedic book.
@@ -440,7 +452,7 @@ back will contain the name of the synonym and the "real" option (such as
Example::
>>> print fred.config()
- {'relief' : ('relief', 'relief', 'Relief', 'raised', 'groove')}
+ {'relief': ('relief', 'relief', 'Relief', 'raised', 'groove')}
Of course, the dictionary printed will include all the options available and
their values. This is meant only as an example.
@@ -613,7 +625,7 @@ bitmap
preceded with an ``@``, as in ``"@/usr/contrib/bitmap/gumby.bit"``.
boolean
- You can pass integers 0 or 1 or the strings ``"yes"`` or ``"no"`` .
+ You can pass integers 0 or 1 or the strings ``"yes"`` or ``"no"``.
callback
This is any Python function that takes no arguments. For example::
diff --git a/Doc/library/tokenize.rst b/Doc/library/tokenize.rst
index 7075035..3f25a2c 100644
--- a/Doc/library/tokenize.rst
+++ b/Doc/library/tokenize.rst
@@ -17,9 +17,10 @@ for on-screen displays.
To simplify token stream handling, all :ref:`operators` and :ref:`delimiters`
tokens are returned using the generic :data:`token.OP` token type. The exact
-type can be determined by checking the token ``string`` field on the
-:term:`named tuple` returned from :func:`tokenize.tokenize` for the character
-sequence that identifies a specific operator token.
+type can be determined by checking the second field (containing the actual
+token string matched) of the tuple returned from
+:func:`tokenize.generate_tokens` for the character sequence that identifies a
+specific operator token.
The primary entry point is a :term:`generator`:
@@ -29,7 +30,8 @@ The primary entry point is a :term:`generator`:
which must be a callable object which provides the same interface as the
:meth:`readline` method of built-in file objects (see section
:ref:`bltin-file-objects`). Each call to the function should return one line
- of input as a string.
+ of input as a string. Alternately, *readline* may be a callable object that
+ signals completion by raising :exc:`StopIteration`.
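+
+   As a minimal sketch (not part of the original documentation), the tokens of
+   a small string can be generated by wrapping the source in a
+   ``StringIO.StringIO`` object to supply *readline*::
+
+      from StringIO import StringIO
+      import tokenize
+
+      # each item is the 5-tuple described below
+      for tok in tokenize.generate_tokens(StringIO("x = 3 + 4\n").readline):
+          print tok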
The generator produces 5-tuples with these members: the token type; the token
string; a 2-tuple ``(srow, scol)`` of ints specifying the row and column
@@ -96,6 +98,24 @@ back the modified script.
.. versionadded:: 2.5
+.. exception:: TokenError
+
+ Raised when either a docstring or expression that may be split over several
+ lines is not completed anywhere in the file, for example::
+
+ """Beginning of
+ docstring
+
+ or::
+
+ [1,
+ 2,
+ 3
+
+Note that unclosed single-quoted strings do not cause an error to be
+raised. They are tokenized as ``ERRORTOKEN``, followed by the tokenization of
+their contents.
+
Example of a script re-writer that transforms float literals into Decimal
objects::
diff --git a/Doc/library/trace.rst b/Doc/library/trace.rst
index a2afda1..e00f118 100644
--- a/Doc/library/trace.rst
+++ b/Doc/library/trace.rst
@@ -41,8 +41,8 @@ Main options
At least one of the following options must be specified when invoking
:mod:`trace`. The :option:`--listfuncs <-l>` option is mutually exclusive with
-the :option:`--trace <-t>` and :option:`--counts <-c>` options . When
-:option:`--listfuncs <-l>` is provided, neither :option:`--counts <-c>` nor
+the :option:`--trace <-t>` and :option:`--count <-c>` options. When
+:option:`--listfuncs <-l>` is provided, neither :option:`--count <-c>` nor
:option:`--trace <-t>` are accepted, and vice versa.
.. program:: trace
@@ -149,7 +149,7 @@ Programmatic Interface
the current tracing parameters. *cmd* must be a string or code object,
suitable for passing into :func:`exec`.
- .. method:: runctx(cmd[, globals=None[, locals=None]])
+ .. method:: runctx(cmd, globals=None, locals=None)
Execute the command and gather statistics from the execution with the
current tracing parameters, in the defined global and local
@@ -200,7 +200,7 @@ A simple example demonstrating the use of the programmatic interface::
# run the new command using the given tracer
tracer.run('main()')
- # make a report, placing output in /tmp
+ # make a report, placing output in the current directory
r = tracer.results()
- r.write_results(show_missing=True, coverdir="/tmp")
+ r.write_results(show_missing=True, coverdir=".")
diff --git a/Doc/library/traceback.rst b/Doc/library/traceback.rst
index 15eaa6e..6859d4b 100644
--- a/Doc/library/traceback.rst
+++ b/Doc/library/traceback.rst
@@ -75,7 +75,7 @@ The module defines the following functions:
Return a list of up to *limit* "pre-processed" stack trace entries extracted
from the traceback object *traceback*. It is useful for alternate formatting of
stack traces. If *limit* is omitted or ``None``, all entries are extracted. A
- "pre-processed" stack trace entry is a quadruple (*filename*, *line number*,
+ "pre-processed" stack trace entry is a 4-tuple (*filename*, *line number*,
*function name*, *text*) representing the information that is usually printed
for a stack trace. The *text* is a string with leading and trailing whitespace
stripped; if the source is not available it is ``None``.
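+
+   A small sketch (not from the original documentation) that prints the
+   4-tuples for the current exception::
+
+      import sys
+      import traceback
+
+      try:
+          1 / 0
+      except ZeroDivisionError:
+          tb = sys.exc_info()[2]
+          for filename, lineno, funcname, text in traceback.extract_tb(tb):
+              print filename, lineno, funcname, text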
diff --git a/Doc/library/ttk.rst b/Doc/library/ttk.rst
index 0721234..6cab3e0 100644
--- a/Doc/library/ttk.rst
+++ b/Doc/library/ttk.rst
@@ -265,10 +265,10 @@ Besides the methods described below, the :class:`ttk.Widget` class supports the
*x* and *y* are pixel coordinates relative to the widget.
- .. method:: instate(statespec[, callback=None[, *args[, **kw]]])
+ .. method:: instate(statespec, callback=None, *args, **kw)
- Test the widget's state. If a callback is not specified, returns True
- if the widget state matches *statespec* and False otherwise. If callback
+ Test the widget's state. If a callback is not specified, returns ``True``
+ if the widget state matches *statespec* and ``False`` otherwise. If callback
is specified then it is called with *args* if widget state matches
*statespec*.
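+
+      A brief sketch (it assumes a running Tk display; the widget and the
+      states used are only illustrative)::
+
+         import Tkinter as tk
+         import ttk
+
+         root = tk.Tk()
+         button = ttk.Button(root, text="Quit", command=root.destroy)
+         button.pack()
+
+         # True while the button is not disabled
+         print button.instate(["!disabled"])
+
+         # invoke the callback only if the widget is currently disabled
+         button.instate(["disabled"], root.bell)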
@@ -523,7 +523,7 @@ ttk.Notebook
omitted, returns the widget name of the currently selected pane.
- .. method:: tab(tab_id[, option=None[, **kw]])
+ .. method:: tab(tab_id, option=None, **kw)
Query or modify the options of the specific *tab_id*.
@@ -846,7 +846,7 @@ ttk.Treeview
.. class:: Treeview
- .. method:: bbox(item[, column=None])
+ .. method:: bbox(item, column=None)
Returns the bounding box (relative to the treeview widget's window) of
the specified *item* in the form (x, y, width, height).
@@ -873,7 +873,7 @@ ttk.Treeview
*item*'s children.
- .. method:: column(column[, option=None[, **kw]])
+ .. method:: column(column, option=None, **kw)
Query or modify the options for the specified *column*.
@@ -919,7 +919,7 @@ ttk.Treeview
.. method:: exists(item)
- Returns True if the specified *item* is present in the tree.
+ Returns ``True`` if the specified *item* is present in the tree.
.. method:: focus([item=None])
@@ -928,7 +928,7 @@ ttk.Treeview
the current focus item, or '' if there is none.
- .. method:: heading(column[, option=None[, **kw]])
+ .. method:: heading(column, option=None, **kw)
Query or modify the heading options for the specified *column*.
@@ -1001,7 +1001,7 @@ ttk.Treeview
Returns the integer index of *item* within its parent's list of children.
- .. method:: insert(parent, index[, iid=None[, **kw]])
+ .. method:: insert(parent, index, iid=None, **kw)
Creates a new item and returns the item identifier of the newly created
item.
@@ -1065,7 +1065,7 @@ ttk.Treeview
Ensure that *item* is visible.
- Sets all of *item*'s ancestors open option to True, and scrolls the
+ Sets all of *item*'s ancestors open option to ``True``, and scrolls the
widget if necessary so that *item* is within the visible portion of
the tree.
@@ -1096,7 +1096,7 @@ ttk.Treeview
Toggle the selection state of each item in *items*.
- .. method:: set(item[, column=None[, value=None]])
+ .. method:: set(item, column=None, value=None)
With one argument, returns a dictionary of column/value pairs for the
specified *item*. With two arguments, returns the current value of the
@@ -1104,14 +1104,14 @@ ttk.Treeview
*column* in given *item* to the specified *value*.
- .. method:: tag_bind(tagname[, sequence=None[, callback=None]])
+ .. method:: tag_bind(tagname, sequence=None, callback=None)
Bind a callback for the given event *sequence* to the tag *tagname*.
When an event is delivered to an item, the callbacks for each of the
item's tags option are called.
- .. method:: tag_configure(tagname[, option=None[, **kw]])
+ .. method:: tag_configure(tagname, option=None, **kw)
Query or modify the options for the specified *tagname*.
@@ -1220,7 +1220,7 @@ option. If the class name of a widget is unknown, use the method
foreground option, for example, you would get a blue foreground
when the widget is in the active or pressed states.
- .. method:: lookup(style, option[, state=None[, default=None]])
+ .. method:: lookup(style, option, state=None, default=None)
Returns the value specified for *option* in *style*.
@@ -1235,7 +1235,7 @@ option. If the class name of a widget is unknown, use the method
print ttk.Style().lookup("TButton", "font")
- .. method:: layout(style[, layoutspec=None])
+ .. method:: layout(style, layoutspec=None)
Define the widget layout for given *style*. If *layoutspec* is omitted,
return the layout specification for given style.
@@ -1318,7 +1318,7 @@ option. If the class name of a widget is unknown, use the method
Returns the list of *elementname*'s options.
- .. method:: theme_create(themename[, parent=None[, settings=None]])
+ .. method:: theme_create(themename, parent=None, settings=None)
Create a new theme.
diff --git a/Doc/library/turtle.rst b/Doc/library/turtle.rst
index e05305a..f04a07a 100644
--- a/Doc/library/turtle.rst
+++ b/Doc/library/turtle.rst
@@ -1049,8 +1049,8 @@ More drawing control
Write text - the string representation of *arg* - at the current turtle
position according to *align* ("left", "center" or right") and with the given
- font. If *move* is True, the pen is moved to the bottom-right corner of the
- text. By default, *move* is False.
+ font. If *move* is true, the pen is moved to the bottom-right corner of the
+ text. By default, *move* is ``False``.
>>> turtle.write("Home = ", True, align="center")
>>> turtle.write((0,0), True)
@@ -1086,7 +1086,7 @@ Visibility
.. function:: isvisible()
- Return True if the Turtle is shown, False if it's hidden.
+ Return ``True`` if the Turtle is shown, ``False`` if it's hidden.
>>> turtle.hideturtle()
>>> turtle.isvisible()
@@ -2173,9 +2173,11 @@ It contains:
The demoscripts are:
+.. tabularcolumns:: |l|L|L|
+
+----------------+------------------------------+-----------------------+
| Name | Description | Features |
-+----------------+------------------------------+-----------------------+
++================+==============================+=======================+
| bytedesign | complex classical | :func:`tracer`, delay,|
| | turtlegraphics pattern | :func:`update` |
+----------------+------------------------------+-----------------------+
diff --git a/Doc/library/unicodedata.rst b/Doc/library/unicodedata.rst
index 4d9b3c5..a3a7c96 100644
--- a/Doc/library/unicodedata.rst
+++ b/Doc/library/unicodedata.rst
@@ -66,7 +66,7 @@ It defines the following functions:
.. function:: bidirectional(unichr)
- Returns the bidirectional category assigned to the Unicode character *unichr* as
+ Returns the bidirectional class assigned to the Unicode character *unichr* as
string. If no such value is defined, an empty string is returned.
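+
+   For instance (a doctest-style sketch added for illustration; ``u'\u0660'``
+   is ARABIC-INDIC DIGIT ZERO, whose bidirectional class is ``'AN'``)::
+
+      >>> import unicodedata
+      >>> unicodedata.bidirectional(u'\u0660')
+      'AN'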
diff --git a/Doc/library/unittest.rst b/Doc/library/unittest.rst
index b53c029..d82c407 100644
--- a/Doc/library/unittest.rst
+++ b/Doc/library/unittest.rst
@@ -91,7 +91,7 @@ need to derive from a specific class.
Third-party unittest frameworks with a lighter-weight syntax for writing
tests. For example, ``assert func(10) == 42``.
- `The Python Testing Tools Taxonomy <http://pycheesecake.org/wiki/PythonTestingToolsTaxonomy>`_
+ `The Python Testing Tools Taxonomy <http://wiki.python.org/moin/PythonTestingToolsTaxonomy>`_
An extensive list of Python testing tools including functional testing
frameworks and mock object libraries.
@@ -279,15 +279,15 @@ The ``discover`` sub-command has the following options:
Verbose output
-.. cmdoption:: -s directory
+.. cmdoption:: -s, --start-directory directory
- Directory to start discovery ('.' default)
+ Directory to start discovery (``.`` default)
-.. cmdoption:: -p pattern
+.. cmdoption:: -p, --pattern pattern
- Pattern to match test files ('test*.py' default)
+ Pattern to match test files (``test*.py`` default)
-.. cmdoption:: -t directory
+.. cmdoption:: -t, --top-level-directory directory
Top level directory of project (defaults to start directory)
@@ -584,7 +584,7 @@ that is broken and will fail, but shouldn't be counted as a failure on a
Skipping a test is simply a matter of using the :func:`skip` :term:`decorator`
or one of its conditional variants.
-Basic skipping looks like this: ::
+Basic skipping looks like this::
class MyTestCase(unittest.TestCase):
@@ -603,7 +603,7 @@ Basic skipping looks like this: ::
# windows specific testing code
pass
-This is the output of running the example above in verbose mode: ::
+This is the output of running the example above in verbose mode::
test_format (__main__.MyTestCase) ... skipped 'not supported in this library version'
test_nothing (__main__.MyTestCase) ... skipped 'demonstrating skipping'
@@ -614,9 +614,9 @@ This is the output of running the example above in verbose mode: ::
OK (skipped=3)
-Classes can be skipped just like methods: ::
+Classes can be skipped just like methods::
- @skip("showing class skipping")
+ @unittest.skip("showing class skipping")
class MySkippedTestCase(unittest.TestCase):
def test_not_run(self):
pass
@@ -633,12 +633,12 @@ Expected failures use the :func:`expectedFailure` decorator. ::
It's easy to roll your own skipping decorators by making a decorator that calls
:func:`skip` on the test when it wants it to be skipped. This decorator skips
-the test unless the passed object has a certain attribute: ::
+the test unless the passed object has a certain attribute::
def skipUnlessHasattr(obj, attr):
if hasattr(obj, attr):
return lambda func: func
- return unittest.skip("{0!r} doesn't have {1!r}".format(obj, attr))
+ return unittest.skip("{!r} doesn't have {!r}".format(obj, attr))
The following decorators implement test skipping and expected failures:
@@ -660,6 +660,13 @@ The following decorators implement test skipping and expected failures:
Mark the test as an expected failure. If the test fails when run, the test
is not counted as a failure.
+.. exception:: SkipTest(reason)
+
+ This exception is raised to skip a test.
+
+ Usually you can use :meth:`TestCase.skipTest` or one of the skipping
+ decorators instead of raising this directly.
+
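+
+A minimal sketch of skipping from inside a test method via
+:meth:`TestCase.skipTest` (the platform check below is only an example)::
+
+   import sys
+   import unittest
+
+   class ExampleTest(unittest.TestCase):
+       def test_windows_only(self):
+           if not sys.platform.startswith("win"):
+               self.skipTest("requires Windows")
+           # Windows-specific assertions would go here.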
Skipped tests will not have :meth:`setUp` or :meth:`tearDown` run around them.
Skipped classes will not have :meth:`setUpClass` or :meth:`tearDownClass` run.
@@ -712,9 +719,9 @@ Test cases
.. method:: setUp()
Method called to prepare the test fixture. This is called immediately
- before calling the test method; any exception raised by this method will
- be considered an error rather than a test failure. The default
- implementation does nothing.
+ before calling the test method; other than :exc:`AssertionError` or :exc:`SkipTest`,
+ any exception raised by this method will be considered an error rather than
+ a test failure. The default implementation does nothing.
.. method:: tearDown()
@@ -722,10 +729,10 @@ Test cases
Method called immediately after the test method has been called and the
result recorded. This is called even if the test method raised an
exception, so the implementation in subclasses may need to be particularly
- careful about checking internal state. Any exception raised by this
- method will be considered an error rather than a test failure. This
- method will only be called if the :meth:`setUp` succeeds, regardless of
- the outcome of the test method. The default implementation does nothing.
+ careful about checking internal state. Any exception, other than :exc:`AssertionError`
+ or :exc:`SkipTest`, raised by this method will be considered an error rather than a
+ test failure. This method will only be called if the :meth:`setUp` succeeds,
+ regardless of the outcome of the test method. The default implementation does nothing.
.. method:: setUpClass()
@@ -909,8 +916,8 @@ Test cases
| :meth:`assertRaises(exc, fun, *args, **kwds) | ``fun(*args, **kwds)`` raises *exc* | |
| <TestCase.assertRaises>` | | |
+---------------------------------------------------------+--------------------------------------+------------+
- | :meth:`assertRaisesRegexp(exc, re, fun, *args, **kwds) | ``fun(*args, **kwds)`` raises *exc* | 2.7 |
- | <TestCase.assertRaisesRegexp>` | and the message matches *re* | |
+ | :meth:`assertRaisesRegexp(exc, r, fun, *args, **kwds) | ``fun(*args, **kwds)`` raises *exc* | 2.7 |
+ | <TestCase.assertRaisesRegexp>` | and the message matches regex *r* | |
+---------------------------------------------------------+--------------------------------------+------------+
.. method:: assertRaises(exception, callable, *args, **kwds)
@@ -951,7 +958,7 @@ Test cases
a regular expression object or a string containing a regular expression
suitable for use by :func:`re.search`. Examples::
- self.assertRaisesRegexp(ValueError, 'invalid literal for.*XYZ$',
+ self.assertRaisesRegexp(ValueError, "invalid literal for.*XYZ'$",
int, 'XYZ')
or::
@@ -986,10 +993,10 @@ Test cases
| :meth:`assertLessEqual(a, b) | ``a <= b`` | 2.7 |
| <TestCase.assertLessEqual>` | | |
+---------------------------------------+--------------------------------+--------------+
- | :meth:`assertRegexpMatches(s, re) | ``regex.search(s)`` | 2.7 |
+ | :meth:`assertRegexpMatches(s, r) | ``r.search(s)`` | 2.7 |
| <TestCase.assertRegexpMatches>` | | |
+---------------------------------------+--------------------------------+--------------+
- | :meth:`assertNotRegexpMatches(s, re) | ``not regex.search(s)`` | 2.7 |
+ | :meth:`assertNotRegexpMatches(s, r) | ``not r.search(s)`` | 2.7 |
| <TestCase.assertNotRegexpMatches>` | | |
+---------------------------------------+--------------------------------+--------------+
| :meth:`assertItemsEqual(a, b) | sorted(a) == sorted(b) and | 2.7 |
@@ -1010,7 +1017,7 @@ Test cases
like the :func:`round` function) and not *significant digits*.
If *delta* is supplied instead of *places* then the difference
- between *first* and *second* must be less (or more) than *delta*.
+ between *first* and *second* must be less than or equal to (or greater than) *delta*.
Supplying both *delta* and *places* raises a ``TypeError``.
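+
+      For example (a sketch added for illustration)::
+
+         self.assertAlmostEqual(1.001, 1.0, delta=0.01)   # |diff| = 0.001 <= delta
+         self.assertNotAlmostEqual(1.1, 1.0, delta=0.05)  # |diff| = 0.1 > delta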
@@ -1068,6 +1075,8 @@ Test cases
sorted(actual))`` but it works with sequences of unhashable objects as
well.
+ In Python 3, this method is named ``assertCountEqual``.
+
.. versionadded:: 2.7
@@ -1157,7 +1166,7 @@ Test cases
.. method:: assertListEqual(list1, list2, msg=None)
assertTupleEqual(tuple1, tuple2, msg=None)
- Tests that two lists or tuples are equal. If not an error message is
+ Tests that two lists or tuples are equal. If not, an error message is
constructed that shows only the differences between the two. An error
is also raised if either of the parameters is of the wrong type.
These methods are used by default when comparing lists or tuples with
@@ -1426,8 +1435,8 @@ Loading and running tests
The :class:`TestLoader` class is used to create test suites from classes and
modules. Normally, there is no need to create an instance of this class; the
:mod:`unittest` module provides an instance that can be shared as
- ``unittest.defaultTestLoader``. Using a subclass or instance, however, allows
- customization of some configurable properties.
+ :data:`unittest.defaultTestLoader`. Using a subclass or instance, however,
+ allows customization of some configurable properties.
:class:`TestLoader` objects have the following methods:
@@ -1501,11 +1510,11 @@ Loading and running tests
.. method:: discover(start_dir, pattern='test*.py', top_level_dir=None)
- Find and return all test modules from the specified start directory,
- recursing into subdirectories to find them. Only test files that match
- *pattern* will be loaded. (Using shell style pattern matching.) Only
- module names that are importable (i.e. are valid Python identifiers) will
- be loaded.
+ Find all the test modules by recursing into subdirectories from the
+ specified start directory, and return a TestSuite object containing them.
+ Only test files that match *pattern* will be loaded. (Using shell style
+ pattern matching.) Only module names that are importable (i.e. are valid
+ Python identifiers) will be loaded.
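+
+      A short sketch (``'tests'`` is an assumed directory name)::
+
+         import unittest
+
+         loader = unittest.TestLoader()
+         suite = loader.discover('tests', pattern='test*.py')
+         unittest.TextTestRunner(verbosity=2).run(suite)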
All test modules must be importable from the top level of the project. If
the start directory is not the top level directory then the top level
@@ -1594,8 +1603,7 @@ Loading and running tests
A list containing 2-tuples of :class:`TestCase` instances and strings
holding formatted tracebacks. Each tuple represents a test where a failure
- was explicitly signalled using the :meth:`TestCase.fail\*` or
- :meth:`TestCase.assert\*` methods.
+ was explicitly signalled using the :meth:`TestCase.assert\*` methods.
.. versionchanged:: 2.2
Contains formatted tracebacks instead of :func:`sys.exc_info` results.
@@ -1679,14 +1687,14 @@ Loading and running tests
Called after the test case *test* has been executed, regardless of the
outcome.
- .. method:: startTestRun(test)
+ .. method:: startTestRun()
Called once before any tests are executed.
.. versionadded:: 2.7
- .. method:: stopTestRun(test)
+ .. method:: stopTestRun()
Called once after all tests are executed.
@@ -1695,7 +1703,7 @@ Loading and running tests
.. method:: addError(test, err)
- Called when the test case *test* raises an unexpected exception *err* is a
+ Called when the test case *test* raises an unexpected exception. *err* is a
tuple of the form returned by :func:`sys.exc_info`: ``(type, value,
traceback)``.
@@ -1784,11 +1792,12 @@ Loading and running tests
stream, descriptions, verbosity
-.. function:: main([module[, defaultTest[, argv[, testRunner[, testLoader[, exit[, verbosity[, failfast[, catchbreak[,buffer]]]]]]]]]])
+.. function:: main([module[, defaultTest[, argv[, testRunner[, testLoader[, exit[, verbosity[, failfast[, catchbreak[, buffer]]]]]]]]]])
- A command-line program that runs a set of tests; this is primarily for making
- test modules conveniently executable. The simplest use for this function is to
- include the following line at the end of a test script::
+ A command-line program that loads a set of tests from *module* and runs them;
+ this is primarily for making test modules conveniently executable.
+ The simplest use for this function is to include the following line at the
+ end of a test script::
if __name__ == '__main__':
unittest.main()
@@ -1799,10 +1808,21 @@ Loading and running tests
if __name__ == '__main__':
unittest.main(verbosity=2)
+ The *defaultTest* argument is the name of the test to run if no test names
+ are specified via *argv*. If not specified or ``None`` and no test names are
+ provided via *argv*, all tests found in *module* are run.
+
+ The *argv* argument can be a list of options passed to the program, with the
+ first element being the program name. If not specified or ``None``,
+ the values of :data:`sys.argv` are used.
+
The *testRunner* argument can either be a test runner class or an already
created instance of it. By default ``main`` calls :func:`sys.exit` with
an exit code indicating success or failure of the tests run.
+ The *testLoader* argument has to be a :class:`TestLoader` instance,
+ and defaults to :data:`defaultTestLoader`.
+
``main`` supports being used from the interactive interpreter by passing in the
argument ``exit=False``. This displays the result on standard output without
calling :func:`sys.exit`::
@@ -1810,14 +1830,14 @@ Loading and running tests
>>> from unittest import main
>>> main(module='test_module', exit=False)
- The ``failfast``, ``catchbreak`` and ``buffer`` parameters have the same
+ The *failfast*, *catchbreak* and *buffer* parameters have the same
effect as the same-name `command-line options`_.
Calling ``main`` actually returns an instance of the ``TestProgram`` class.
This stores the result of the tests run as the ``result`` attribute.
.. versionchanged:: 2.7
- The ``exit``, ``verbosity``, ``failfast``, ``catchbreak`` and ``buffer``
+ The *exit*, *verbosity*, *failfast*, *catchbreak* and *buffer*
parameters were added.
@@ -1860,10 +1880,10 @@ name then the package :file:`__init__.py` will be checked for ``load_tests``.
.. note::
- The default pattern is 'test*.py'. This matches all Python files
- that start with 'test' but *won't* match any test directories.
+ The default pattern is ``'test*.py'``. This matches all Python files
+ that start with ``'test'`` but *won't* match any test directories.
- A pattern like 'test*' will match test packages as well as
+ A pattern like ``'test*'`` will match test packages as well as
modules.
If the package :file:`__init__.py` defines ``load_tests`` then it will be
@@ -1948,7 +1968,7 @@ then you must call up to them yourself. The implementations in
If an exception is raised during a ``setUpClass`` then the tests in the class
are not run and the ``tearDownClass`` is not run. Skipped classes will not
have ``setUpClass`` or ``tearDownClass`` run. If the exception is a
-``SkipTest`` exception then the class will be reported as having been skipped
+:exc:`SkipTest` exception then the class will be reported as having been skipped
instead of as an error.
@@ -1965,7 +1985,7 @@ These should be implemented as functions::
If an exception is raised in a ``setUpModule`` then none of the tests in the
module will be run and the ``tearDownModule`` will not be run. If the exception is a
-``SkipTest`` exception then the module will be reported as having been skipped
+:exc:`SkipTest` exception then the module will be reported as having been skipped
instead of as an error.
diff --git a/Doc/library/urllib.rst b/Doc/library/urllib.rst
index 1f5d994..62f198f 100644
--- a/Doc/library/urllib.rst
+++ b/Doc/library/urllib.rst
@@ -6,11 +6,12 @@
.. note::
The :mod:`urllib` module has been split into parts and renamed in
- Python 3.0 to :mod:`urllib.request`, :mod:`urllib.parse`,
+ Python 3 to :mod:`urllib.request`, :mod:`urllib.parse`,
and :mod:`urllib.error`. The :term:`2to3` tool will automatically adapt
- imports when converting your sources to 3.0.
- Also note that the :func:`urllib.urlopen` function has been removed in
- Python 3.0 in favor of :func:`urllib2.urlopen`.
+ imports when converting your sources to Python 3.
+ Also note that the :func:`urllib.request.urlopen` function in Python 3 is
+ equivalent to :func:`urllib2.urlopen` and that :func:`urllib.urlopen` has
+ been removed.
.. index::
single: WWW
@@ -32,16 +33,17 @@ High-level interface
.. function:: urlopen(url[, data[, proxies]])
- Open a network object denoted by a URL for reading. If the URL does not have a
- scheme identifier, or if it has :file:`file:` as its scheme identifier, this
- opens a local file (without universal newlines); otherwise it opens a socket to
- a server somewhere on the network. If the connection cannot be made the
- :exc:`IOError` exception is raised. If all went well, a file-like object is
- returned. This supports the following methods: :meth:`read`, :meth:`readline`,
- :meth:`readlines`, :meth:`fileno`, :meth:`close`, :meth:`info`, :meth:`getcode` and
- :meth:`geturl`. It also has proper support for the :term:`iterator` protocol. One
- caveat: the :meth:`read` method, if the size argument is omitted or negative,
- may not read until the end of the data stream; there is no good way to determine
+ Open a network object denoted by a URL for reading. If the URL does not
+ have a scheme identifier, or if it has :file:`file:` as its scheme
+ identifier, this opens a local file (without :term:`universal newlines`);
+ otherwise it opens a socket to a server somewhere on the network. If the
+ connection cannot be made the :exc:`IOError` exception is raised. If all
+ went well, a file-like object is returned. This supports the following
+ methods: :meth:`read`, :meth:`readline`, :meth:`readlines`, :meth:`fileno`,
+ :meth:`close`, :meth:`info`, :meth:`getcode` and :meth:`geturl`. It also
+ has proper support for the :term:`iterator` protocol. One caveat: the
+ :meth:`read` method, if the size argument is omitted or negative, may not
+ read until the end of the data stream; there is no good way to determine
that the entire stream from a socket has been read in the general case.
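+
+   As an illustration (the URL is only an example and network access is
+   required; this sketch is not from the original documentation)::
+
+      import urllib
+
+      f = urllib.urlopen('http://www.python.org/')
+      try:
+          print f.getcode()    # HTTP status code, e.g. 200
+          print f.geturl()     # final URL, after any redirects
+          data = f.read(100)   # read at most 100 bytes
+      finally:
+          f.close()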
Except for the :meth:`info`, :meth:`getcode` and :meth:`geturl` methods,
@@ -131,7 +133,7 @@ High-level interface
:envvar:`no_proxy` environment variable.
.. deprecated:: 2.6
- The :func:`urlopen` function has been removed in Python 3.0 in favor
+ The :func:`urlopen` function has been removed in Python 3 in favor
of :func:`urllib2.urlopen`.
@@ -279,6 +281,13 @@ Utility functions
find it, looks for proxy information from Mac OSX System Configuration for
Mac OS X and Windows Systems Registry for Windows.
+.. note::
+ urllib also exposes certain utility functions like splittype, splithost and
+ others that parse a URL into various components. However, it is recommended
+ to use :mod:`urlparse` for parsing URLs rather than using these functions
+ directly. Python 3 does not expose these helper functions from the
+ :mod:`urllib.parse` module.
+
URL Opener objects
------------------
diff --git a/Doc/library/urllib2.rst b/Doc/library/urllib2.rst
index b66ebd7..0411e18 100644
--- a/Doc/library/urllib2.rst
+++ b/Doc/library/urllib2.rst
@@ -9,9 +9,9 @@
.. note::
The :mod:`urllib2` module has been split across several modules in
- Python 3.0 named :mod:`urllib.request` and :mod:`urllib.error`.
+ Python 3 named :mod:`urllib.request` and :mod:`urllib.error`.
The :term:`2to3` tool will automatically adapt imports when converting
- your sources to 3.0.
+ your sources to Python 3.
The :mod:`urllib2` module defines functions and classes which help in opening
@@ -43,7 +43,7 @@ The :mod:`urllib2` module defines the following functions:
timeout setting will be used). This actually only works for HTTP, HTTPS and
FTP connections.
- This function returns a file-like object with two additional methods:
+ This function returns a file-like object with three additional methods:
* :meth:`geturl` --- return the URL of the resource retrieved, commonly used to
determine if a redirect was followed
@@ -52,14 +52,18 @@ The :mod:`urllib2` module defines the following functions:
in the form of an :class:`mimetools.Message` instance
(see `Quick Reference to HTTP Headers <http://www.cs.tut.fi/~jkorpela/http.html>`_)
+ * :meth:`getcode` --- return the HTTP status code of the response.
+
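+
+   A brief sketch of the returned object (the URL and the values in the
+   comments are illustrative only)::
+
+      import urllib2
+
+      response = urllib2.urlopen('http://www.python.org/')
+      print response.getcode()           # e.g. 200
+      print response.info().gettype()    # e.g. 'text/html'
+      print response.geturl()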
Raises :exc:`URLError` on errors.
Note that ``None`` may be returned if no handler handles the request (though the
default installed global :class:`OpenerDirector` uses :class:`UnknownHandler` to
ensure this never happens).
- In addition, default installed :class:`ProxyHandler` makes sure the requests
- are handled through the proxy when they are set.
+ In addition, if proxy settings are detected (for example, when a ``*_proxy``
+ environment variable like :envvar:`http_proxy` is set),
+ :class:`ProxyHandler` is installed by default and makes sure the requests are
+ handled through the proxy.
.. versionchanged:: 2.6
*timeout* was added.
@@ -81,7 +85,8 @@ The :mod:`urllib2` module defines the following functions:
subclasses of :class:`BaseHandler` (in which case it must be possible to call
the constructor without any parameters). Instances of the following classes
will be in front of the *handler*\s, unless the *handler*\s contain them,
- instances of them or subclasses of them: :class:`ProxyHandler`,
+ instances of them or subclasses of them: :class:`ProxyHandler` (if proxy
+ settings are detected),
:class:`UnknownHandler`, :class:`HTTPHandler`, :class:`HTTPDefaultErrorHandler`,
:class:`HTTPRedirectHandler`, :class:`FTPHandler`, :class:`FileHandler`,
:class:`HTTPErrorProcessor`.
@@ -121,7 +126,10 @@ The following exceptions are raised as appropriate:
This numeric value corresponds to a value found in the dictionary of
codes as found in :attr:`BaseHTTPServer.BaseHTTPRequestHandler.responses`.
+ .. attribute:: reason
+
+    The reason for this error. It can be a message string or another exception
+    instance.
The following classes are provided:
@@ -158,7 +166,7 @@ The following classes are provided:
should be the request-host of the request for the page containing the image.
*unverifiable* should indicate whether the request is unverifiable, as defined
- by RFC 2965. It defaults to False. An unverifiable request is one whose URL
+ by RFC 2965. It defaults to ``False``. An unverifiable request is one whose URL
the user did not have the option to approve. For example, if the request is for
an image in an HTML document, and the user had no option to approve the
automatic fetching of the image, this should be true.
@@ -197,9 +205,9 @@ The following classes are provided:
Cause requests to go through a proxy. If *proxies* is given, it must be a
dictionary mapping protocol names to URLs of proxies. The default is to read
the list of proxies from the environment variables
- :envvar:`<protocol>_proxy`. If no proxy environment variables are set, in a
- Windows environment, proxy settings are obtained from the registry's
- Internet Settings section and in a Mac OS X environment, proxy information
+ :envvar:`<protocol>_proxy`. If no proxy environment variables are set, then
+ in a Windows environment proxy settings are obtained from the registry's
+ Internet Settings section, and in a Mac OS X environment proxy information
is retrieved from the OS X System Configuration Framework.
To disable autodetected proxy, pass an empty dictionary.
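+
+   For example (the proxy URL below is only a placeholder)::
+
+      import urllib2
+
+      proxy_handler = urllib2.ProxyHandler({'http': 'http://proxy.example.com:3128/'})
+      opener = urllib2.build_opener(proxy_handler)
+      opener.open('http://www.python.org/')
+
+      # an empty dictionary disables proxy autodetection entirely
+      no_proxy_opener = urllib2.build_opener(urllib2.ProxyHandler({}))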
@@ -380,6 +388,17 @@ so all must be overridden in subclasses.
Return the selector --- the part of the URL that is sent to the server.
+.. method:: Request.get_header(header_name, default=None)
+
+ Return the value of the given header. If the header is not present, return
+ the default value.
+
+
+.. method:: Request.header_items()
+
+ Return a list of tuples (header_name, header_value) of the Request headers.
+
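+
+For instance (a sketch added for illustration; the URLs and header values are
+placeholders)::
+
+   import urllib2
+
+   req = urllib2.Request('http://www.python.org/')
+   req.add_header('Referer', 'http://www.example.com/')
+   print req.get_header('Referer')        # 'http://www.example.com/'
+   print req.get_header('Accept', '*/*')  # header not set, default returned
+   print req.header_items()               # [('Referer', 'http://www.example.com/')]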
+
.. method:: Request.set_proxy(host, type)
Prepare the request by connecting to a proxy server. The *host* and *type* will
diff --git a/Doc/library/urlparse.rst b/Doc/library/urlparse.rst
index f118845..1bc361d 100644
--- a/Doc/library/urlparse.rst
+++ b/Doc/library/urlparse.rst
@@ -13,9 +13,9 @@
pair: relative; URL
.. note::
- The :mod:`urlparse` module is renamed to :mod:`urllib.parse` in Python 3.0.
+ The :mod:`urlparse` module is renamed to :mod:`urllib.parse` in Python 3.
The :term:`2to3` tool will automatically adapt imports when converting
- your sources to 3.0.
+ your sources to Python 3.
**Source code:** :source:`Lib/urlparse.py`
@@ -27,11 +27,11 @@ combine the components back into a URL string, and to convert a "relative URL"
to an absolute URL given a "base URL."
The module has been designed to match the Internet RFC on Relative Uniform
-Resource Locators (and discovered a bug in an earlier draft!). It supports the
-following URL schemes: ``file``, ``ftp``, ``gopher``, ``hdl``, ``http``,
-``https``, ``imap``, ``mailto``, ``mms``, ``news``, ``nntp``, ``prospero``,
-``rsync``, ``rtsp``, ``rtspu``, ``sftp``, ``shttp``, ``sip``, ``sips``,
-``snews``, ``svn``, ``svn+ssh``, ``telnet``, ``wais``.
+Resource Locators. It supports the following URL schemes: ``file``, ``ftp``,
+``gopher``, ``hdl``, ``http``, ``https``, ``imap``, ``mailto``, ``mms``,
+``news``, ``nntp``, ``prospero``, ``rsync``, ``rtsp``, ``rtspu``, ``sftp``,
+``shttp``, ``sip``, ``sips``, ``snews``, ``svn``, ``svn+ssh``, ``telnet``,
+``wais``.
.. versionadded:: 2.5
Support for the ``sftp`` and ``sips`` schemes.
@@ -71,8 +71,8 @@ The :mod:`urlparse` module defines the following functions:
>>> urlparse('//www.cwi.nl:80/%7Eguido/Python.html')
ParseResult(scheme='', netloc='www.cwi.nl:80', path='/%7Eguido/Python.html',
params='', query='', fragment='')
- >>> urlparse('www.cwi.nl:80/%7Eguido/Python.html')
- ParseResult(scheme='', netloc='', path='www.cwi.nl:80/%7Eguido/Python.html',
+ >>> urlparse('www.cwi.nl/%7Eguido/Python.html')
+ ParseResult(scheme='', netloc='', path='www.cwi.nl/%7Eguido/Python.html',
params='', query='', fragment='')
>>> urlparse('help/Python.html')
ParseResult(scheme='', netloc='', path='help/Python.html', params='',
diff --git a/Doc/library/user.rst b/Doc/library/user.rst
index 920f429..5acd7ce 100644
--- a/Doc/library/user.rst
+++ b/Doc/library/user.rst
@@ -7,7 +7,7 @@
:deprecated:
.. deprecated:: 2.6
- The :mod:`user` module has been removed in Python 3.0.
+ The :mod:`user` module has been removed in Python 3.
.. index::
pair: .pythonrc.py; file
diff --git a/Doc/library/userdict.rst b/Doc/library/userdict.rst
index 2e14c12..0585bda 100644
--- a/Doc/library/userdict.rst
+++ b/Doc/library/userdict.rst
@@ -114,8 +114,8 @@ The :mod:`UserList` module defines the :class:`UserList` class:
.. note::
The :class:`UserList` class has been moved to the :mod:`collections`
- module in Python 3.0. The :term:`2to3` tool will automatically adapt
- imports when converting your sources to 3.0.
+ module in Python 3. The :term:`2to3` tool will automatically adapt
+ imports when converting your sources to Python 3.
In addition to supporting the methods and operations of mutable sequences (see
@@ -128,7 +128,7 @@ attribute:
A real Python list object used to store the contents of the :class:`UserList`
class.
-**Subclassing requirements:** Subclasses of :class:`UserList` are expect to
+**Subclassing requirements:** Subclasses of :class:`UserList` are expected to
offer a constructor which can be called with either no arguments or one
argument. List operations which return a new sequence attempt to create an
instance of the actual implementation class. To do so, it assumes that the
@@ -187,8 +187,8 @@ The :mod:`UserString` module defines the following classes:
.. note::
The :class:`UserString` class has been moved to the :mod:`collections`
- module in Python 3.0. The :term:`2to3` tool will automatically adapt
- imports when converting your sources to 3.0.
+ module in Python 3. The :term:`2to3` tool will automatically adapt
+ imports when converting your sources to Python 3.
@@ -203,7 +203,7 @@ The :mod:`UserString` module defines the following classes:
hard to track down.
.. deprecated:: 2.6
- The :class:`MutableString` class has been removed in Python 3.0.
+ The :class:`MutableString` class has been removed in Python 3.
In addition to supporting the methods and operations of string and Unicode
objects (see section :ref:`string-methods`), :class:`UserString` instances
diff --git a/Doc/library/warnings.rst b/Doc/library/warnings.rst
index 6f3c105..27658d6 100644
--- a/Doc/library/warnings.rst
+++ b/Doc/library/warnings.rst
@@ -57,6 +57,8 @@ There are a number of built-in exceptions that represent warning categories.
This categorization is useful to be able to filter out groups of warnings. The
following warnings category classes are currently defined:
+.. tabularcolumns:: |l|p{0.6\linewidth}|
+
+----------------------------------+-----------------------------------------------+
| Class | Description |
+==================================+===============================================+
@@ -167,7 +169,8 @@ By default, Python installs several warning filters, which can be overridden by
the command-line options passed to :option:`-W` and calls to
:func:`filterwarnings`.
-* :exc:`PendingDeprecationWarning`, and :exc:`ImportWarning` are ignored.
+* :exc:`DeprecationWarning`, :exc:`PendingDeprecationWarning`, and
+  :exc:`ImportWarning` are ignored.
* :exc:`BytesWarning` is ignored unless the :option:`-b` option is given once or
twice; in this case this warning is either printed (``-b``) or turned into an
@@ -418,7 +421,7 @@ Available Context Managers
.. note::
- In Python 3.0, the arguments to the constructor for
+ In Python 3, the arguments to the constructor for
:class:`catch_warnings` are keyword-only arguments.
.. versionadded:: 2.6
diff --git a/Doc/library/weakref.rst b/Doc/library/weakref.rst
index 7929c51..1c3cdbb 100644
--- a/Doc/library/weakref.rst
+++ b/Doc/library/weakref.rst
@@ -53,12 +53,6 @@ own weak references directly. The low-level machinery used by the weak
dictionary implementations is exposed by the :mod:`weakref` module for the
benefit of advanced uses.
-.. note::
-
- Weak references to an object are cleared before the object's :meth:`__del__`
- is called, to ensure that the weak reference callback (if any) finds the
- object still alive.
-
Not all objects can be weakly referenced; those objects which can include class
instances, functions written in Python (but not in C), methods (both bound and
unbound), sets, frozensets, file objects, :term:`generator`\s, type objects,
@@ -169,7 +163,7 @@ than needed.
.. method:: WeakKeyDictionary.iterkeyrefs()
- Return an :term:`iterator` that yields the weak references to the keys.
+ Return an iterable of the weak references to the keys.
.. versionadded:: 2.5
@@ -201,7 +195,7 @@ methods of :class:`WeakKeyDictionary` objects.
.. method:: WeakValueDictionary.itervaluerefs()
- Return an :term:`iterator` that yields the weak references to the values.
+ Return an iterable of the weak references to the values.
.. versionadded:: 2.5
diff --git a/Doc/library/webbrowser.rst b/Doc/library/webbrowser.rst
index 1a27d07..e6e7ea2 100644
--- a/Doc/library/webbrowser.rst
+++ b/Doc/library/webbrowser.rst
@@ -36,7 +36,9 @@ The script :program:`webbrowser` can be used as a command-line interface for the
module. It accepts a URL as the argument. It accepts the following optional
parameters: ``-n`` opens the URL in a new browser window, if possible;
``-t`` opens the URL in a new browser page ("tab"). The options are,
-naturally, mutually exclusive.
+naturally, mutually exclusive. Usage example::
+
+ python -m webbrowser -t "http://www.python.org"
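
The same thing can be done from Python code; a small sketch (the URL is only
an example)::

    import webbrowser

    # open the page in a new browser tab, if the browser supports it
    webbrowser.open_new_tab("http://www.python.org")
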
The following exception is defined:
@@ -48,7 +50,7 @@ The following exception is defined:
The following functions are defined:
-.. function:: open(url[, new=0[, autoraise=True]])
+.. function:: open(url, new=0, autoraise=True)
Display *url* using the default browser. If *new* is 0, the *url* is opened
in the same browser window if possible. If *new* is 1, a new browser window
@@ -138,9 +140,9 @@ for the controller classes, all defined in this module.
+-----------------------+-----------------------------------------+-------+
| ``'windows-default'`` | :class:`WindowsDefault` | \(2) |
+-----------------------+-----------------------------------------+-------+
-| ``'internet-config'`` | :class:`InternetConfig` | \(3) |
+| ``'macosx'`` | :class:`MacOSX('default')` | \(3) |
+-----------------------+-----------------------------------------+-------+
-| ``'macosx'`` | :class:`MacOSX('default')` | \(4) |
+| ``'safari'`` | :class:`MacOSX('safari')` | \(3) |
+-----------------------+-----------------------------------------+-------+
Notes:
@@ -156,9 +158,6 @@ Notes:
Only on Windows platforms.
(3)
- Only on Mac OS platforms; requires the standard MacPython :mod:`ic` module.
-
-(4)
Only on Mac OS X platform.
Here are some simple examples::
@@ -181,7 +180,7 @@ Browser controllers provide these methods which parallel three of the
module-level convenience functions:
-.. method:: controller.open(url[, new=0[, autoraise=True]])
+.. method:: controller.open(url, new=0, autoraise=True)
Display *url* using the browser handled by this controller. If *new* is 1, a new
browser window is opened if possible. If *new* is 2, a new browser page ("tab")
diff --git a/Doc/library/whichdb.rst b/Doc/library/whichdb.rst
index 7048a0e..3bcb57c 100644
--- a/Doc/library/whichdb.rst
+++ b/Doc/library/whichdb.rst
@@ -6,8 +6,8 @@
.. note::
The :mod:`whichdb` module's only function has been put into the :mod:`dbm`
- module in Python 3.0. The :term:`2to3` tool will automatically adapt imports
- when converting your sources to 3.0.
+ module in Python 3. The :term:`2to3` tool will automatically adapt imports
+ when converting your sources to Python 3.
The single function in this module attempts to guess which of the several simple
diff --git a/Doc/library/winsound.rst b/Doc/library/winsound.rst
index 2325081..5c590ff 100644
--- a/Doc/library/winsound.rst
+++ b/Doc/library/winsound.rst
@@ -133,6 +133,10 @@ provided by Windows platforms. It includes functions and several constants.
Return immediately if the sound driver is busy.
+ .. note::
+
+ This flag is not supported on modern Windows platforms.
+
.. data:: MB_ICONASTERISK
diff --git a/Doc/library/wsgiref.rst b/Doc/library/wsgiref.rst
index 3163497..0b0c7c2 100644
--- a/Doc/library/wsgiref.rst
+++ b/Doc/library/wsgiref.rst
@@ -59,7 +59,7 @@ parameter expect a WSGI-compliant dictionary to be supplied; please see
found, and "http" otherwise.
-.. function:: request_uri(environ [, include_query=1])
+.. function:: request_uri(environ, include_query=1)
Return the full request URI, optionally including the query string, using the
algorithm found in the "URL Reconstruction" section of :pep:`333`. If
@@ -148,7 +148,7 @@ also provides these miscellaneous utilities:
:rfc:`2616`.
-.. class:: FileWrapper(filelike [, blksize=8192])
+.. class:: FileWrapper(filelike, blksize=8192)
A wrapper to convert a file-like object to an :term:`iterator`. The resulting objects
support both :meth:`__getitem__` and :meth:`__iter__` iteration styles, for
@@ -271,7 +271,7 @@ request. (E.g., using the :func:`shift_path_info` function from
:mod:`wsgiref.util`.)
-.. function:: make_server(host, port, app [, server_class=WSGIServer [, handler_class=WSGIRequestHandler]])
+.. function:: make_server(host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler)
Create a new WSGI server listening on *host* and *port*, accepting connections
for *app*. The return value is an instance of the supplied *server_class*, and
@@ -460,7 +460,7 @@ input, output, and error streams.
environment.
-.. class:: BaseCGIHandler(stdin, stdout, stderr, environ [, multithread=True [, multiprocess=False]])
+.. class:: BaseCGIHandler(stdin, stdout, stderr, environ, multithread=True, multiprocess=False)
Similar to :class:`CGIHandler`, but instead of using the :mod:`sys` and
:mod:`os` modules, the CGI environment and I/O streams are specified explicitly.
@@ -475,7 +475,7 @@ input, output, and error streams.
instead of :class:`SimpleHandler`.
-.. class:: SimpleHandler(stdin, stdout, stderr, environ [,multithread=True [, multiprocess=False]])
+.. class:: SimpleHandler(stdin, stdout, stderr, environ, multithread=True, multiprocess=False)
Similar to :class:`BaseCGIHandler`, but designed for use with HTTP origin
servers. If you are writing an HTTP server implementation, you will probably
diff --git a/Doc/library/xdrlib.rst b/Doc/library/xdrlib.rst
index e56650c..6f05306 100644
--- a/Doc/library/xdrlib.rst
+++ b/Doc/library/xdrlib.rst
@@ -274,6 +274,5 @@ Here is an example of how you would catch one of these exceptions::
p = xdrlib.Packer()
try:
p.pack_double(8.01)
- except xdrlib.ConversionError, instance:
+ except xdrlib.ConversionError as instance:
print 'packing the double failed:', instance.msg
-
diff --git a/Doc/library/xml.dom.minidom.rst b/Doc/library/xml.dom.minidom.rst
index 69e9e56..1ff7024 100644
--- a/Doc/library/xml.dom.minidom.rst
+++ b/Doc/library/xml.dom.minidom.rst
@@ -1,8 +1,8 @@
-:mod:`xml.dom.minidom` --- Lightweight DOM implementation
-=========================================================
+:mod:`xml.dom.minidom` --- Minimal DOM implementation
+=====================================================
.. module:: xml.dom.minidom
- :synopsis: Lightweight Document Object Model (DOM) implementation.
+ :synopsis: Minimal Document Object Model (DOM) implementation.
.. moduleauthor:: Paul Prescod <paul@prescod.net>
.. sectionauthor:: Paul Prescod <paul@prescod.net>
.. sectionauthor:: Martin v. Löwis <martin@v.loewis.de>
@@ -14,9 +14,19 @@
--------------
-:mod:`xml.dom.minidom` is a light-weight implementation of the Document Object
-Model interface. It is intended to be simpler than the full DOM and also
-significantly smaller.
+:mod:`xml.dom.minidom` is a minimal implementation of the Document Object
+Model interface, with an API similar to that in other languages. It is intended
+to be simpler than the full DOM and also significantly smaller. Users who are
+not already proficient with the DOM should consider using the
+:mod:`xml.etree.ElementTree` module for their XML processing instead.
+
+
+.. warning::
+
+ The :mod:`xml.dom.minidom` module is not secure against
+ maliciously constructed data. If you need to parse untrusted or
+ unauthenticated data see :ref:`xml-vulnerabilities`.
+
DOM applications typically start by parsing some XML into a DOM. With
:mod:`xml.dom.minidom`, this is done through the parse functions::
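
    # Illustrative sketch only (not part of this patch); the file name is
    # hypothetical.
    from xml.dom.minidom import parse, parseString

    dom1 = parse('myfile.xml')                      # parse an XML file by name
    dom2 = parseString('<myxml>Some data</myxml>')  # parse an XML string
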
@@ -48,7 +58,7 @@ instead:
.. function:: parseString(string[, parser])
Return a :class:`Document` that represents the *string*. This method creates a
- :class:`StringIO` object for the string and passes that on to :func:`parse`.
+ :class:`~StringIO.StringIO` object for the string and passes that on to :func:`parse`.
Both functions return a :class:`Document` object representing the content of the
document.
@@ -121,7 +131,7 @@ module documentation. This section lists the differences between the API and
to discard children of that node.
-.. method:: Node.writexml(writer[, indent=""[, addindent=""[, newl=""]]])
+.. method:: Node.writexml(writer, indent="", addindent="", newl="")
Write XML to the writer object. The writer should have a :meth:`write` method
which matches that of the file object interface. The *indent* parameter is the
@@ -266,4 +276,4 @@ utility to most DOM users.
.. [#] The encoding string included in XML output should conform to the
appropriate standards. For example, "UTF-8" is valid, but "UTF8" is
not. See http://www.w3.org/TR/2006/REC-xml11-20060816/#NT-EncodingDecl
- and http://www.iana.org/assignments/character-sets .
+ and http://www.iana.org/assignments/character-sets\ .
diff --git a/Doc/library/xml.dom.pulldom.rst b/Doc/library/xml.dom.pulldom.rst
index bad0daa..9032706 100644
--- a/Doc/library/xml.dom.pulldom.rst
+++ b/Doc/library/xml.dom.pulldom.rst
@@ -16,6 +16,13 @@
Object Model representation of a document from SAX events.
+.. warning::
+
+ The :mod:`xml.dom.pulldom` module is not secure against
+ maliciously constructed data. If you need to parse untrusted or
+ unauthenticated data see :ref:`xml-vulnerabilities`.
+
+
.. class:: PullDOM([documentFactory])
:class:`xml.sax.handler.ContentHandler` implementation that ...
diff --git a/Doc/library/xml.dom.rst b/Doc/library/xml.dom.rst
index 1069615..541d649 100644
--- a/Doc/library/xml.dom.rst
+++ b/Doc/library/xml.dom.rst
@@ -374,7 +374,7 @@ All of the components of an XML document are subclasses of :class:`Node`.
Add a new child node to this node at the end of the list of
children, returning *newChild*. If the node was already in
- in the tree, it is removed first.
+ the tree, it is removed first.
.. method:: Node.insertBefore(newChild, refChild)
@@ -441,14 +441,15 @@ objects:
In addition, the Python DOM interface requires that some additional support is
provided to allow :class:`NodeList` objects to be used as Python sequences. All
-:class:`NodeList` implementations must include support for :meth:`__len__` and
-:meth:`__getitem__`; this allows iteration over the :class:`NodeList` in
+:class:`NodeList` implementations must include support for
+:meth:`~object.__len__` and
+:meth:`~object.__getitem__`; this allows iteration over the :class:`NodeList` in
:keyword:`for` statements and proper support for the :func:`len` built-in
function.
If a DOM implementation supports modification of the document, the
-:class:`NodeList` implementation must also support the :meth:`__setitem__` and
-:meth:`__delitem__` methods.
+:class:`NodeList` implementation must also support the
+:meth:`~object.__setitem__` and :meth:`~object.__delitem__` methods.
.. _dom-documenttype-objects:
diff --git a/Doc/library/xml.etree.elementtree.rst b/Doc/library/xml.etree.elementtree.rst
index 5fbbf20..f14742e 100644
--- a/Doc/library/xml.etree.elementtree.rst
+++ b/Doc/library/xml.etree.elementtree.rst
@@ -16,6 +16,14 @@ The :class:`Element` type is a flexible container object, designed to store
hierarchical data structures in memory. The type can be described as a cross
between a list and a dictionary.
+
+.. warning::
+
+ The :mod:`xml.etree.ElementTree` module is not secure against
+ maliciously constructed data. If you need to parse untrusted or
+ unauthenticated data see :ref:`xml-vulnerabilities`.
+
+
Each element has a number of properties associated with it:
* a tag which is a string identifying what kind of data this element represents
@@ -46,11 +54,315 @@ the xml.etree.ElementTree.
`Introducing ElementTree 1.3
<http://effbot.org/zone/elementtree-13-intro.htm>`_.
+Tutorial
+--------
+
+This is a short tutorial for using :mod:`xml.etree.ElementTree` (``ET`` in
+short). The goal is to demonstrate some of the building blocks and basic
+concepts of the module.
+
+XML tree and elements
+^^^^^^^^^^^^^^^^^^^^^
+
+XML is an inherently hierarchical data format, and the most natural way to
+represent it is with a tree. ``ET`` has two classes for this purpose -
+:class:`ElementTree` represents the whole XML document as a tree, and
+:class:`Element` represents a single node in this tree. Interactions with
+the whole document (reading and writing to/from files) are usually done
+on the :class:`ElementTree` level. Interactions with a single XML element
+and its sub-elements are done on the :class:`Element` level.
+
+.. _elementtree-parsing-xml:
+
+Parsing XML
+^^^^^^^^^^^
+
+We'll be using the following XML document as the sample data for this section:
+
+.. code-block:: xml
+
+ <?xml version="1.0"?>
+ <data>
+ <country name="Liechtenstein">
+ <rank>1</rank>
+ <year>2008</year>
+ <gdppc>141100</gdppc>
+ <neighbor name="Austria" direction="E"/>
+ <neighbor name="Switzerland" direction="W"/>
+ </country>
+ <country name="Singapore">
+ <rank>4</rank>
+ <year>2011</year>
+ <gdppc>59900</gdppc>
+ <neighbor name="Malaysia" direction="N"/>
+ </country>
+ <country name="Panama">
+ <rank>68</rank>
+ <year>2011</year>
+ <gdppc>13600</gdppc>
+ <neighbor name="Costa Rica" direction="W"/>
+ <neighbor name="Colombia" direction="E"/>
+ </country>
+ </data>
+
+We have a number of ways to import the data. Reading the file from disk::
+
+ import xml.etree.ElementTree as ET
+ tree = ET.parse('country_data.xml')
+ root = tree.getroot()
+
+Reading the data from a string::
+
+ root = ET.fromstring(country_data_as_string)
+
+:func:`fromstring` parses XML from a string directly into an :class:`Element`,
+which is the root element of the parsed tree. Other parsing functions may
+create an :class:`ElementTree`. Check the documentation to be sure.
+
+As an :class:`Element`, ``root`` has a tag and a dictionary of attributes::
+
+ >>> root.tag
+ 'data'
+ >>> root.attrib
+ {}
+
+It also has child nodes over which we can iterate::
+
+ >>> for child in root:
+ ... print child.tag, child.attrib
+ ...
+ country {'name': 'Liechtenstein'}
+ country {'name': 'Singapore'}
+ country {'name': 'Panama'}
+
+Children are nested, and we can access specific child nodes by index::
+
+ >>> root[0][1].text
+ '2008'
+
+Finding interesting elements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:class:`Element` has some useful methods that help iterate recursively over all
+the sub-tree below it (its children, their children, and so on). For example,
+:meth:`Element.iter`::
+
+ >>> for neighbor in root.iter('neighbor'):
+ ... print neighbor.attrib
+ ...
+ {'name': 'Austria', 'direction': 'E'}
+ {'name': 'Switzerland', 'direction': 'W'}
+ {'name': 'Malaysia', 'direction': 'N'}
+ {'name': 'Costa Rica', 'direction': 'W'}
+ {'name': 'Colombia', 'direction': 'E'}
+
+:meth:`Element.findall` finds only elements with a tag which are direct
+children of the current element. :meth:`Element.find` finds the *first* child
+with a particular tag, and :attr:`Element.text` accesses the element's text
+content. :meth:`Element.get` accesses the element's attributes::
+
+ >>> for country in root.findall('country'):
+ ... rank = country.find('rank').text
+ ... name = country.get('name')
+ ... print name, rank
+ ...
+ Liechtenstein 1
+ Singapore 4
+ Panama 68
+
+More sophisticated specification of which elements to look for is possible by
+using :ref:`XPath <elementtree-xpath>`.
+
+Modifying an XML File
+^^^^^^^^^^^^^^^^^^^^^
+
+:class:`ElementTree` provides a simple way to build XML documents and write them to files.
+The :meth:`ElementTree.write` method serves this purpose.
+
+Once created, an :class:`Element` object may be manipulated by directly changing
+its fields (such as :attr:`Element.text`), adding and modifying attributes
+(:meth:`Element.set` method), as well as adding new children (for example
+with :meth:`Element.append`).
+
+Let's say we want to add one to each country's rank, and add an ``updated``
+attribute to the rank element::
+
+ >>> for rank in root.iter('rank'):
+ ... new_rank = int(rank.text) + 1
+ ... rank.text = str(new_rank)
+ ... rank.set('updated', 'yes')
+ ...
+ >>> tree.write('output.xml')
+
+Our XML now looks like this:
+
+.. code-block:: xml
+
+ <?xml version="1.0"?>
+ <data>
+ <country name="Liechtenstein">
+ <rank updated="yes">2</rank>
+ <year>2008</year>
+ <gdppc>141100</gdppc>
+ <neighbor name="Austria" direction="E"/>
+ <neighbor name="Switzerland" direction="W"/>
+ </country>
+ <country name="Singapore">
+ <rank updated="yes">5</rank>
+ <year>2011</year>
+ <gdppc>59900</gdppc>
+ <neighbor name="Malaysia" direction="N"/>
+ </country>
+ <country name="Panama">
+ <rank updated="yes">69</rank>
+ <year>2011</year>
+ <gdppc>13600</gdppc>
+ <neighbor name="Costa Rica" direction="W"/>
+ <neighbor name="Colombia" direction="E"/>
+ </country>
+ </data>
+
+We can remove elements using :meth:`Element.remove`. Let's say we want to
+remove all countries with a rank higher than 50::
+
+ >>> for country in root.findall('country'):
+ ... rank = int(country.find('rank').text)
+ ... if rank > 50:
+ ... root.remove(country)
+ ...
+ >>> tree.write('output.xml')
+
+Our XML now looks like this:
+
+.. code-block:: xml
+
+ <?xml version="1.0"?>
+ <data>
+ <country name="Liechtenstein">
+ <rank updated="yes">2</rank>
+ <year>2008</year>
+ <gdppc>141100</gdppc>
+ <neighbor name="Austria" direction="E"/>
+ <neighbor name="Switzerland" direction="W"/>
+ </country>
+ <country name="Singapore">
+ <rank updated="yes">5</rank>
+ <year>2011</year>
+ <gdppc>59900</gdppc>
+ <neighbor name="Malaysia" direction="N"/>
+ </country>
+ </data>
+
+Building XML documents
+^^^^^^^^^^^^^^^^^^^^^^
+
+The :func:`SubElement` function also provides a convenient way to create new
+sub-elements for a given element::
+
+ >>> a = ET.Element('a')
+ >>> b = ET.SubElement(a, 'b')
+ >>> c = ET.SubElement(a, 'c')
+ >>> d = ET.SubElement(c, 'd')
+ >>> ET.dump(a)
+ <a><b /><c><d /></c></a>
+
+Additional resources
+^^^^^^^^^^^^^^^^^^^^
+
+See http://effbot.org/zone/element-index.htm for tutorials and links to other
+docs.
+
+.. _elementtree-xpath:
+
+XPath support
+-------------
+
+This module provides limited support for
+`XPath expressions <http://www.w3.org/TR/xpath>`_ for locating elements in a
+tree. The goal is to support a small subset of the abbreviated syntax; a full
+XPath engine is outside the scope of the module.
+
+Example
+^^^^^^^
+
+Here's an example that demonstrates some of the XPath capabilities of the
+module. We'll be using the ``countrydata`` XML document from the
+:ref:`Parsing XML <elementtree-parsing-xml>` section::
+
+ import xml.etree.ElementTree as ET
+
+ root = ET.fromstring(countrydata)
+
+ # Top-level elements
+ root.findall(".")
+
+ # All 'neighbor' grand-children of 'country' children of the top-level
+ # elements
+ root.findall("./country/neighbor")
+
+ # Nodes with name='Singapore' that have a 'year' child
+ root.findall(".//year/..[@name='Singapore']")
+
+ # 'year' nodes that are children of nodes with name='Singapore'
+ root.findall(".//*[@name='Singapore']/year")
+
+ # All 'neighbor' nodes that are the second child of their parent
+ root.findall(".//neighbor[2]")
+
+Supported XPath syntax
+^^^^^^^^^^^^^^^^^^^^^^
+
+.. tabularcolumns:: |l|L|
+
++-----------------------+------------------------------------------------------+
+| Syntax | Meaning |
++=======================+======================================================+
+| ``tag`` | Selects all child elements with the given tag. |
+| | For example, ``spam`` selects all child elements |
+| | named ``spam``, and ``spam/egg`` selects all |
+| | grandchildren named ``egg`` in all children named |
+| | ``spam``. |
++-----------------------+------------------------------------------------------+
+| ``*`` | Selects all child elements. For example, ``*/egg`` |
+| | selects all grandchildren named ``egg``. |
++-----------------------+------------------------------------------------------+
+| ``.`` | Selects the current node. This is mostly useful |
+| | at the beginning of the path, to indicate that it's |
+| | a relative path. |
++-----------------------+------------------------------------------------------+
+| ``//`` | Selects all subelements, on all levels beneath the |
+| | current element. For example, ``.//egg`` selects |
+| | all ``egg`` elements in the entire tree. |
++-----------------------+------------------------------------------------------+
+| ``..`` | Selects the parent element. |
++-----------------------+------------------------------------------------------+
+| ``[@attrib]`` | Selects all elements that have the given attribute. |
++-----------------------+------------------------------------------------------+
+| ``[@attrib='value']`` | Selects all elements for which the given attribute |
+| | has the given value. The value cannot contain |
+| | quotes. |
++-----------------------+------------------------------------------------------+
+| ``[tag]`` | Selects all elements that have a child named |
+| | ``tag``. Only immediate children are supported. |
++-----------------------+------------------------------------------------------+
+| ``[position]`` | Selects all elements that are located at the given |
+| | position. The position can be either an integer |
+| | (1 is the first position), the expression ``last()`` |
+| | (for the last position), or a position relative to |
+| | the last position (e.g. ``last()-1``). |
++-----------------------+------------------------------------------------------+
+
+Predicates (expressions within square brackets) must be preceded by a tag
+name, an asterisk, or another predicate. ``position`` predicates must be
+preceded by a tag name.
+
+Reference
+---------
.. _elementtree-functions:
Functions
----------
+^^^^^^^^^
.. function:: Comment(text=None)
@@ -101,8 +413,9 @@ Functions
going on to the user. *source* is a filename or file object containing XML
data. *events* is a list of events to report back. If omitted, only "end"
events are reported. *parser* is an optional parser instance. If not
- given, the standard :class:`XMLParser` parser is used. Returns an
- :term:`iterator` providing ``(event, elem)`` pairs.
+ given, the standard :class:`XMLParser` parser is used. *parser* is not
+ supported by ``cElementTree``. Returns an :term:`iterator` providing
+ ``(event, elem)`` pairs.
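
A minimal sketch of incremental parsing with :func:`iterparse`, reusing the
sample document from the tutorial above::

    import xml.etree.ElementTree as ET

    for event, elem in ET.iterparse('country_data.xml'):
        # only "end" events are reported by default, so elem is complete here
        if elem.tag == 'country':
            print elem.get('name')
            elem.clear()    # free the element once we are done with it
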
.. note::
@@ -196,8 +509,7 @@ Functions
.. _elementtree-element-objects:
Element Objects
----------------
-
+^^^^^^^^^^^^^^^
.. class:: Element(tag, attrib={}, **extra)
@@ -368,8 +680,9 @@ Element Objects
or contents.
:class:`Element` objects also support the following sequence type methods
- for working with subelements: :meth:`__delitem__`, :meth:`__getitem__`,
- :meth:`__setitem__`, :meth:`__len__`.
+ for working with subelements: :meth:`~object.__delitem__`,
+ :meth:`~object.__getitem__`, :meth:`~object.__setitem__`,
+ :meth:`~object.__len__`.
Caution: Elements with no subelements will test as ``False``. This behavior
will change in future versions. Use specific ``len(elem)`` or ``elem is
@@ -387,7 +700,7 @@ Element Objects
.. _elementtree-elementtree-objects:
ElementTree Objects
--------------------
+^^^^^^^^^^^^^^^^^^^
.. class:: ElementTree(element=None, file=None)
@@ -409,26 +722,17 @@ ElementTree Objects
.. method:: find(match)
- Finds the first toplevel element matching *match*. *match* may be a tag
- name or path. Same as getroot().find(match). Returns the first matching
- element, or ``None`` if no element was found.
+ Same as :meth:`Element.find`, starting at the root of the tree.
.. method:: findall(match)
- Finds all matching subelements, by tag name or path. Same as
- getroot().findall(match). *match* may be a tag name or path. Returns a
- list containing all matching elements, in document order.
+ Same as :meth:`Element.findall`, starting at the root of the tree.
.. method:: findtext(match, default=None)
- Finds the element text for the first toplevel element with given tag.
- Same as getroot().findtext(match). *match* may be a tag name or path.
- *default* is the value to return if the element was not found. Returns
- the text content of the first matching element, or the default value no
- element was found. Note that if the element is found, but has no text
- content, this method returns an empty string.
+ Same as :meth:`Element.findtext`, starting at the root of the tree.
.. method:: getiterator(tag=None)
@@ -466,13 +770,15 @@ ElementTree Objects
root element.
- .. method:: write(file, encoding="us-ascii", xml_declaration=None, method="xml")
+ .. method:: write(file, encoding="us-ascii", xml_declaration=None, \
+ default_namespace=None, method="xml")
Writes the element tree to a file, as XML. *file* is a file name, or a
file object opened for writing. *encoding* [1]_ is the output encoding
(default is US-ASCII). *xml_declaration* controls if an XML declaration
should be added to the file. Use False for never, True for always, None
- for only if not US-ASCII or UTF-8 (default is None). *method* is either
+ for only if not US-ASCII or UTF-8 (default is None). *default_namespace*
+ sets the default XML namespace (for "xmlns"). *method* is either
``"xml"``, ``"html"`` or ``"text"`` (default is ``"xml"``). Returns an
encoded string.
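
A short sketch of the call described above (file names are examples only)::

    import xml.etree.ElementTree as ET

    tree = ET.parse('country_data.xml')
    tree.write('output.xml', encoding='utf-8', xml_declaration=True)
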
@@ -507,7 +813,7 @@ Example of changing the attribute "target" of every link in first paragraph::
.. _elementtree-qname-objects:
QName Objects
--------------
+^^^^^^^^^^^^^
.. class:: QName(text_or_uri, tag=None)
@@ -523,7 +829,7 @@ QName Objects
.. _elementtree-treebuilder-objects:
TreeBuilder Objects
--------------------
+^^^^^^^^^^^^^^^^^^^
.. class:: TreeBuilder(element_factory=None)
@@ -574,7 +880,7 @@ TreeBuilder Objects
.. _elementtree-xmlparser-objects:
XMLParser Objects
------------------
+^^^^^^^^^^^^^^^^^
.. class:: XMLParser(html=0, target=None, encoding=None)
diff --git a/Doc/library/xml.rst b/Doc/library/xml.rst
new file mode 100644
index 0000000..e56eb2c
--- /dev/null
+++ b/Doc/library/xml.rst
@@ -0,0 +1,136 @@
+.. _xml:
+
+XML Processing Modules
+======================
+
+.. module:: xml
+ :synopsis: Package containing XML processing modules
+.. sectionauthor:: Christian Heimes <christian@python.org>
+.. sectionauthor:: Georg Brandl <georg@python.org>
+
+
+Python's interfaces for processing XML are grouped in the ``xml`` package.
+
+.. warning::
+
+ The XML modules are not secure against erroneous or maliciously
+ constructed data. If you need to parse untrusted or unauthenticated data see
+ :ref:`xml-vulnerabilities`.
+
+It is important to note that modules in the :mod:`xml` package require that
+there be at least one SAX-compliant XML parser available. The Expat parser is
+included with Python, so the :mod:`xml.parsers.expat` module will always be
+available.
+
+The documentation for the :mod:`xml.dom` and :mod:`xml.sax` packages is the
+definition of the Python bindings for the DOM and SAX interfaces.
+
+The XML handling submodules are:
+
+* :mod:`xml.etree.ElementTree`: the ElementTree API, a simple and lightweight
+ XML processor
+
+..
+
+* :mod:`xml.dom`: the DOM API definition
+* :mod:`xml.dom.minidom`: a minimal DOM implementation
+* :mod:`xml.dom.pulldom`: support for building partial DOM trees
+
+..
+
+* :mod:`xml.sax`: SAX2 base classes and convenience functions
+* :mod:`xml.parsers.expat`: the Expat parser binding
+
+
+.. _xml-vulnerabilities:
+
+XML vulnerabilities
+===================
+
+The XML processing modules are not secure against maliciously constructed data.
+An attacker can abuse such vulnerabilities for, e.g., denial of service
+attacks, to access local files, to generate network connections to other
+machines, or to circumvent firewalls. The attacks on XML abuse unfamiliar
+features like inline `DTD`_ (document type definition) with entities.
+
+The following table gives an overview of the known attacks and whether the
+various modules are vulnerable to them.
+
+========================= ======== ========= ========= ======== =========
+kind sax etree minidom pulldom xmlrpc
+========================= ======== ========= ========= ======== =========
+billion laughs **Yes** **Yes** **Yes** **Yes** **Yes**
+quadratic blowup **Yes** **Yes** **Yes** **Yes** **Yes**
+external entity expansion **Yes** No (1) No (2) **Yes** No (3)
+DTD retrieval **Yes** No No **Yes** No
+decompression bomb No No No No **Yes**
+========================= ======== ========= ========= ======== =========
+
+1. :mod:`xml.etree.ElementTree` doesn't expand external entities and raises a
+ :exc:`ParseError` when an entity occurs.
+2. :mod:`xml.dom.minidom` doesn't expand external entities and simply returns
+ the unexpanded entity verbatim.
+3. :mod:`xmlrpclib` doesn't expand external entities and omits them.
+
+
+billion laughs / exponential entity expansion
+ The `Billion Laughs`_ attack -- also known as exponential entity expansion --
+ uses multiple levels of nested entities. Each entity refers to another entity
+ several times, and the final entity definition contains a small string. Eventually
+ the small string is expanded to several gigabytes. The exponential expansion
+ consumes lots of CPU time, too.
+
+quadratic blowup entity expansion
+ A quadratic blowup attack is similar to a `Billion Laughs`_ attack; it abuses
+ entity expansion, too. Instead of nested entities it repeats one large entity
+ with a few thousand characters over and over again. The attack isn't as
+ efficient as the exponential case but it avoids triggering countermeasures of
+ parsers against heavily nested entities.
+
+external entity expansion
+ Entity declarations can contain more than just text for replacement. They can
+ also point to external resources by public identifiers or system identifiers.
+ System identifiers are standard URIs or can refer to local files. The XML
+ parser retrieves the resource with e.g. HTTP or FTP requests and embeds the
+ content into the XML document.
+
+DTD retrieval
+ Some XML libraries like Python's :mod:`xml.dom.pulldom` retrieve document type
+ definitions from remote or local locations. The feature has implications
+ similar to those of the external entity expansion issue.
+
+decompression bomb
+ The issue of decompression bombs (aka `ZIP bomb`_) applies to all XML
+ libraries that can parse compressed XML streams, such as gzipped HTTP
+ streams or LZMA-compressed files. For an attacker it can reduce the amount
+ of transmitted data by three orders of magnitude or more.
+
+The documentation of `defusedxml`_ on PyPI has further information about
+all known attack vectors with examples and references.
+
+defused packages
+----------------
+
+These external packages are recommended for any code that parses
+untrusted XML data.
+
+`defusedxml`_ is a pure Python package with modified subclasses of all stdlib
+XML parsers that prevent any potentially malicious operation. The
+package also ships with example exploits and extended documentation on more
+XML exploits like xpath injection.
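As an illustration only (the module and exception names are those documented
by the `defusedxml`_ project, not by this manual), parsing untrusted input
with the drop-in replacement might look like::

    import defusedxml.ElementTree as ET

    try:
        tree = ET.parse('untrusted.xml')
    except Exception as exc:
        # defusedxml raises dedicated exceptions (e.g. EntitiesForbidden)
        # for constructs it refuses to process
        print 'refusing to parse:', exc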
+
+`defusedexpat`_ provides a modified libexpat and patched replacement
+:mod:`pyexpat` extension module with countermeasures against entity expansion
+DoS attacks. Defusedexpat still allows a sane and configurable number of entity
+expansions. The modifications will be merged into future releases of Python.
+
+The workarounds and modifications are not included in patch releases as they
+break backward compatibility. After all, inline DTDs and entity expansion are
+well-defined XML features.
+
+
+.. _defusedxml: https://pypi.python.org/pypi/defusedxml/
+.. _defusedexpat: https://pypi.python.org/pypi/defusedexpat/
+.. _Billion Laughs: http://en.wikipedia.org/wiki/Billion_laughs
+.. _ZIP bomb: http://en.wikipedia.org/wiki/Zip_bomb
+.. _DTD: http://en.wikipedia.org/wiki/Document_Type_Definition
diff --git a/Doc/library/xml.sax.handler.rst b/Doc/library/xml.sax.handler.rst
index 23f429e..15140a3 100644
--- a/Doc/library/xml.sax.handler.rst
+++ b/Doc/library/xml.sax.handler.rst
@@ -240,7 +240,8 @@ events in the input document:
Signals the start of an element in non-namespace mode.
The *name* parameter contains the raw XML 1.0 name of the element type as a
- string and the *attrs* parameter holds an object of the :class:`Attributes`
+ string and the *attrs* parameter holds an object of the
+ :class:`~xml.sax.xmlreader.Attributes`
interface (see :ref:`attributes-objects`) containing the attributes of
the element. The object passed as *attrs* may be re-used by the parser; holding
on to a reference to it is not a reliable way to keep a copy of the attributes.
@@ -263,7 +264,8 @@ events in the input document:
The *name* parameter contains the name of the element type as a ``(uri,
localname)`` tuple, the *qname* parameter contains the raw XML 1.0 name used in
the source document, and the *attrs* parameter holds an instance of the
- :class:`AttributesNS` interface (see :ref:`attributes-ns-objects`)
+ :class:`~xml.sax.xmlreader.AttributesNS` interface (see
+ :ref:`attributes-ns-objects`)
containing the attributes of the element. If no namespace is associated with
the element, the *uri* component of *name* will be ``None``. The object passed
as *attrs* may be re-used by the parser; holding on to a reference to it is not
@@ -379,8 +381,9 @@ ErrorHandler Objects
--------------------
Objects with this interface are used to receive error and warning information
-from the :class:`XMLReader`. If you create an object that implements this
-interface, then register the object with your :class:`XMLReader`, the parser
+from the :class:`~xml.sax.xmlreader.XMLReader`. If you create an object that
+implements this interface, then register the object with your
+:class:`~xml.sax.xmlreader.XMLReader`, the parser
will call the methods in your object to report all warnings and errors. There
are three levels of errors available: warnings, (possibly) recoverable errors,
and unrecoverable errors. All methods take a :exc:`SAXParseException` as the
diff --git a/Doc/library/xml.sax.reader.rst b/Doc/library/xml.sax.reader.rst
index 4b3c18a..6956cd1 100644
--- a/Doc/library/xml.sax.reader.rst
+++ b/Doc/library/xml.sax.reader.rst
@@ -109,47 +109,50 @@ The :class:`XMLReader` interface supports the following methods:
.. method:: XMLReader.getContentHandler()
- Return the current :class:`ContentHandler`.
+ Return the current :class:`~xml.sax.handler.ContentHandler`.
.. method:: XMLReader.setContentHandler(handler)
- Set the current :class:`ContentHandler`. If no :class:`ContentHandler` is set,
- content events will be discarded.
+ Set the current :class:`~xml.sax.handler.ContentHandler`. If no
+ :class:`~xml.sax.handler.ContentHandler` is set, content events will be
+ discarded.
.. method:: XMLReader.getDTDHandler()
- Return the current :class:`DTDHandler`.
+ Return the current :class:`~xml.sax.handler.DTDHandler`.
.. method:: XMLReader.setDTDHandler(handler)
- Set the current :class:`DTDHandler`. If no :class:`DTDHandler` is set, DTD
+ Set the current :class:`~xml.sax.handler.DTDHandler`. If no
+ :class:`~xml.sax.handler.DTDHandler` is set, DTD
events will be discarded.
.. method:: XMLReader.getEntityResolver()
- Return the current :class:`EntityResolver`.
+ Return the current :class:`~xml.sax.handler.EntityResolver`.
.. method:: XMLReader.setEntityResolver(handler)
- Set the current :class:`EntityResolver`. If no :class:`EntityResolver` is set,
+ Set the current :class:`~xml.sax.handler.EntityResolver`. If no
+ :class:`~xml.sax.handler.EntityResolver` is set,
attempts to resolve an external entity will result in opening the system
identifier for the entity, and fail if it is not available.
.. method:: XMLReader.getErrorHandler()
- Return the current :class:`ErrorHandler`.
+ Return the current :class:`~xml.sax.handler.ErrorHandler`.
.. method:: XMLReader.setErrorHandler(handler)
- Set the current error handler. If no :class:`ErrorHandler` is set, errors will
- be raised as exceptions, and warnings will be printed.
+ Set the current error handler. If no :class:`~xml.sax.handler.ErrorHandler`
+ is set, errors will be raised as exceptions, and warnings will be printed.
.. method:: XMLReader.setLocale(locale)
@@ -326,8 +329,13 @@ The :class:`Attributes` Interface
---------------------------------
:class:`Attributes` objects implement a portion of the mapping protocol,
-including the methods :meth:`copy`, :meth:`get`, :meth:`has_key`, :meth:`items`,
-:meth:`keys`, and :meth:`values`. The following methods are also provided:
+including the methods :meth:`~collections.Mapping.copy`,
+:meth:`~collections.Mapping.get`,
+:meth:`~collections.Mapping.has_key`,
+:meth:`~collections.Mapping.items`,
+:meth:`~collections.Mapping.keys`,
+and :meth:`~collections.Mapping.values`. The following methods
+are also provided:
.. method:: Attributes.getLength()
diff --git a/Doc/library/xml.sax.rst b/Doc/library/xml.sax.rst
index 43d17c2..25e4fa9 100644
--- a/Doc/library/xml.sax.rst
+++ b/Doc/library/xml.sax.rst
@@ -16,12 +16,21 @@ Simple API for XML (SAX) interface for Python. The package itself provides the
SAX exceptions and the convenience functions which will be most used by users of
the SAX API.
+
+.. warning::
+
+ The :mod:`xml.sax` module is not secure against maliciously
+ constructed data. If you need to parse untrusted or unauthenticated data see
+ :ref:`xml-vulnerabilities`.
+
+
The convenience functions are:
.. function:: make_parser([parser_list])
- Create and return a SAX :class:`XMLReader` object. The first parser found will
+ Create and return a SAX :class:`~xml.sax.xmlreader.XMLReader` object. The
+ first parser found will
be used. If *parser_list* is provided, it must be a sequence of strings which
name modules that have a function named :func:`create_parser`. Modules listed
in *parser_list* will be used before modules in the default list of parsers.
@@ -31,8 +40,9 @@ The convenience functions are:
Create a SAX parser and use it to parse a document. The document, passed in as
*filename_or_stream*, can be a filename or a file object. The *handler*
- parameter needs to be a SAX :class:`ContentHandler` instance. If
- *error_handler* is given, it must be a SAX :class:`ErrorHandler` instance; if
+ parameter needs to be a SAX :class:`~handler.ContentHandler` instance. If
+ *error_handler* is given, it must be a SAX :class:`~handler.ErrorHandler`
+ instance; if
omitted, :exc:`SAXParseException` will be raised on all errors. There is no
return value; all work must be done by the *handler* passed in.
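
A minimal sketch of this convenience function with a content handler (the
file name and element name are hypothetical)::

    import xml.sax

    class TitleCounter(xml.sax.ContentHandler):
        def __init__(self):
            xml.sax.ContentHandler.__init__(self)
            self.count = 0

        def startElement(self, name, attrs):
            if name == 'title':
                self.count += 1

    handler = TitleCounter()
    xml.sax.parse('books.xml', handler)
    print 'titles seen:', handler.count
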
@@ -57,10 +67,12 @@ For these objects, only the interfaces are relevant; they are normally not
instantiated by the application itself. Since Python does not have an explicit
notion of interface, they are formally introduced as classes, but applications
may use implementations which do not inherit from the provided classes. The
-:class:`InputSource`, :class:`Locator`, :class:`Attributes`,
-:class:`AttributesNS`, and :class:`XMLReader` interfaces are defined in the
+:class:`~xml.sax.xmlreader.InputSource`, :class:`~xml.sax.xmlreader.Locator`,
+:class:`~xml.sax.xmlreader.Attributes`, :class:`~xml.sax.xmlreader.AttributesNS`,
+and :class:`~xml.sax.xmlreader.XMLReader` interfaces are defined in the
module :mod:`xml.sax.xmlreader`. The handler interfaces are defined in
-:mod:`xml.sax.handler`. For convenience, :class:`InputSource` (which is often
+:mod:`xml.sax.handler`. For convenience,
+:class:`~xml.sax.xmlreader.InputSource` (which is often
instantiated directly) and the handler classes are also available from
:mod:`xml.sax`. These interfaces are described below.
@@ -73,7 +85,8 @@ classes.
Encapsulate an XML error or warning. This class can contain basic error or
warning information from either the XML parser or the application: it can be
subclassed to provide additional functionality or to add localization. Note
- that although the handlers defined in the :class:`ErrorHandler` interface
+ that although the handlers defined in the
+ :class:`~xml.sax.handler.ErrorHandler` interface
receive instances of this exception, it is not required to actually raise the
exception --- it is also useful as a container for information.
@@ -86,22 +99,26 @@ classes.
.. exception:: SAXParseException(msg, exception, locator)
- Subclass of :exc:`SAXException` raised on parse errors. Instances of this class
- are passed to the methods of the SAX :class:`ErrorHandler` interface to provide
- information about the parse error. This class supports the SAX :class:`Locator`
- interface as well as the :class:`SAXException` interface.
+ Subclass of :exc:`SAXException` raised on parse errors. Instances of this
+ class are passed to the methods of the SAX
+ :class:`~xml.sax.handler.ErrorHandler` interface to provide information
+ about the parse error. This class supports the SAX
+ :class:`~xml.sax.xmlreader.Locator` interface as well as the
+ :class:`SAXException` interface.
.. exception:: SAXNotRecognizedException(msg[, exception])
- Subclass of :exc:`SAXException` raised when a SAX :class:`XMLReader` is
+ Subclass of :exc:`SAXException` raised when a SAX
+ :class:`~xml.sax.xmlreader.XMLReader` is
confronted with an unrecognized feature or property. SAX applications and
extensions may use this class for similar purposes.
.. exception:: SAXNotSupportedException(msg[, exception])
- Subclass of :exc:`SAXException` raised when a SAX :class:`XMLReader` is asked to
+ Subclass of :exc:`SAXException` raised when a SAX
+ :class:`~xml.sax.xmlreader.XMLReader` is asked to
enable a feature that is not supported, or to set a property to a value that the
implementation does not support. SAX applications and extensions may use this
class for similar purposes.
diff --git a/Doc/library/xml.sax.utils.rst b/Doc/library/xml.sax.utils.rst
index c796ca8..e774b6b 100644
--- a/Doc/library/xml.sax.utils.rst
+++ b/Doc/library/xml.sax.utils.rst
@@ -59,7 +59,8 @@ or as base classes.
.. class:: XMLGenerator([out[, encoding]])
- This class implements the :class:`ContentHandler` interface by writing SAX
+ This class implements the :class:`~xml.sax.handler.ContentHandler` interface
+ by writing SAX
events back into an XML document. In other words, using an :class:`XMLGenerator`
as the content handler will reproduce the original document being parsed. *out*
should be a file-like object which will default to *sys.stdout*. *encoding* is
@@ -68,7 +69,8 @@ or as base classes.
.. class:: XMLFilterBase(base)
- This class is designed to sit between an :class:`XMLReader` and the client
+ This class is designed to sit between an
+ :class:`~xml.sax.xmlreader.XMLReader` and the client
application's event handlers. By default, it does nothing but pass requests up
to the reader and events on to the handlers unmodified, but subclasses can
override specific methods to modify the event stream or the configuration
@@ -77,9 +79,10 @@ or as base classes.
.. function:: prepare_input_source(source[, base])
- This function takes an input source and an optional base URL and returns a fully
- resolved :class:`InputSource` object ready for reading. The input source can be
- given as a string, a file-like object, or an :class:`InputSource` object;
- parsers will use this function to implement the polymorphic *source* argument to
- their :meth:`parse` method.
+ This function takes an input source and an optional base URL and returns a
+ fully resolved :class:`~xml.sax.xmlreader.InputSource` object ready for
+ reading. The input source can be given as a string, a file-like object, or
+ an :class:`~xml.sax.xmlreader.InputSource` object; parsers will use this
+ function to implement the polymorphic *source* argument to their
+ :meth:`parse` method.
diff --git a/Doc/library/xmlrpclib.rst b/Doc/library/xmlrpclib.rst
index 183be92..0e9ff4b 100644
--- a/Doc/library/xmlrpclib.rst
+++ b/Doc/library/xmlrpclib.rst
@@ -8,8 +8,8 @@
.. note::
The :mod:`xmlrpclib` module has been renamed to :mod:`xmlrpc.client` in
- Python 3.0. The :term:`2to3` tool will automatically adapt imports when
- converting your sources to 3.0.
+ Python 3. The :term:`2to3` tool will automatically adapt imports when
+ converting your sources to Python 3.
.. XXX Not everything is documented yet. It might be good to describe
@@ -28,6 +28,13 @@ supports writing XML-RPC client code; it handles all the details of translating
between conformable Python objects and XML on the wire.
+.. warning::
+
+ The :mod:`xmlrpclib` module is not secure against maliciously
+ constructed data. If you need to parse untrusted or unauthenticated data see
+ :ref:`xml-vulnerabilities`.
+
+
.. class:: ServerProxy(uri[, transport[, encoding[, verbose[, allow_none[, use_datetime]]]]])
A :class:`ServerProxy` instance is an object that manages communication with a
@@ -380,7 +387,7 @@ The client code for the preceding server::
proxy = xmlrpclib.ServerProxy("http://localhost:8000/")
try:
proxy.add(2, 5)
- except xmlrpclib.Fault, err:
+ except xmlrpclib.Fault as err:
print "A fault occurred"
print "Fault code: %d" % err.faultCode
print "Fault string: %s" % err.faultString
@@ -427,7 +434,7 @@ by providing an URI that doesn't point to an XMLRPC server::
try:
proxy.some_method()
- except xmlrpclib.ProtocolError, err:
+ except xmlrpclib.ProtocolError as err:
print "A protocol error occurred"
print "URL: %s" % err.url
print "HTTP/HTTPS headers: %s" % err.headers
@@ -545,7 +552,7 @@ Example of Client Usage
try:
print server.examples.getStateName(41)
- except Error, v:
+ except Error as v:
print "ERROR", v
To access an XML-RPC server through a proxy, you need to define a custom
diff --git a/Doc/library/zipfile.rst b/Doc/library/zipfile.rst
index 14e40c8..261747a 100644
--- a/Doc/library/zipfile.rst
+++ b/Doc/library/zipfile.rst
@@ -25,9 +25,6 @@ decryption of encrypted files in ZIP archives, but it currently cannot
create an encrypted file. Decryption is extremely slow as it is
implemented in native Python rather than C.
-For other archive formats, see the :mod:`bz2`, :mod:`gzip`, and
-:mod:`tarfile` modules.
-
The module defines the following items:
.. exception:: BadZipfile
@@ -56,7 +53,7 @@ The module defines the following items:
.. class:: ZipInfo([filename[, date_time]])
Class used to represent information about a member of an archive. Instances
- of this class are returned by the :meth:`getinfo` and :meth:`infolist`
+ of this class are returned by the :meth:`.getinfo` and :meth:`.infolist`
methods of :class:`ZipFile` objects. Most users of the :mod:`zipfile` module
will not need to create these, but only use those created by this
module. *filename* should be the full name of the archive member, and
@@ -81,7 +78,7 @@ The module defines the following items:
.. data:: ZIP_DEFLATED
The numeric constant for the usual ZIP compression method. This requires the
- zlib module. No other compression methods are currently supported.
+ :mod:`zlib` module. No other compression methods are currently supported.
.. seealso::
@@ -128,7 +125,7 @@ ZipFile Objects
.. versionchanged:: 2.7.1
If the file is created with mode ``'a'`` or ``'w'`` and then
- :meth:`close`\ d without adding any files to the archive, the appropriate
+ :meth:`closed <close>` without adding any files to the archive, the appropriate
ZIP structures for an empty archive will be written to the file.
ZipFile is also a context manager and therefore supports the
@@ -166,21 +163,26 @@ ZipFile Objects
Return a list of archive members by name.
+ .. index::
+ single: universal newlines; zipfile.ZipFile.open method
+
.. method:: ZipFile.open(name[, mode[, pwd]])
Extract a member from the archive as a file-like object (ZipExtFile). *name* is
the name of the file in the archive, or a :class:`ZipInfo` object. The *mode*
- parameter, if included, must be one of the following: ``'r'`` (the default),
- ``'U'``, or ``'rU'``. Choosing ``'U'`` or ``'rU'`` will enable universal newline
+ parameter, if included, must be one of the following: ``'r'`` (the default),
+ ``'U'``, or ``'rU'``. Choosing ``'U'`` or ``'rU'`` will enable
+ :term:`universal newline <universal newlines>`
support in the read-only object. *pwd* is the password used for encrypted files.
- Calling :meth:`open` on a closed ZipFile will raise a :exc:`RuntimeError`.
+ Calling :meth:`.open` on a closed ZipFile will raise a :exc:`RuntimeError`.
.. note::
The file-like object is read-only and provides the following methods:
- :meth:`!read`, :meth:`!readline`, :meth:`!readlines`, :meth:`!__iter__`,
- :meth:`!next`.
+ :meth:`~file.read`, :meth:`~file.readline`,
+ :meth:`~file.readlines`, :meth:`__iter__`,
+ :meth:`~object.next`.
.. note::
@@ -195,7 +197,7 @@ ZipFile Objects
.. note::
- The :meth:`open`, :meth:`read` and :meth:`extract` methods can take a filename
+ The :meth:`.open`, :meth:`read` and :meth:`extract` methods can take a filename
or a :class:`ZipInfo` object. You will appreciate this when trying to read a
ZIP file that contains members with duplicate names.
@@ -212,6 +214,16 @@ ZipFile Objects
.. versionadded:: 2.6
+ .. note::
+
+ If a member filename is an absolute path, a drive/UNC sharepoint and
+ leading (back)slashes will be stripped, e.g.: ``///foo/bar`` becomes
+ ``foo/bar`` on Unix, and ``C:\foo\bar`` becomes ``foo\bar`` on Windows.
+ And all ``".."`` components in a member filename will be removed, e.g.:
+ ``../../foo../../ba..r`` becomes ``foo../ba..r``. On Windows illegal
+ characters (``:``, ``<``, ``>``, ``|``, ``"``, ``?``, and ``*``) are
+ replaced by underscore (``_``).
+
.. method:: ZipFile.extractall([path[, members[, pwd]]])
@@ -227,6 +239,9 @@ ZipFile Objects
that have absolute filenames starting with ``"/"`` or filenames with two
dots ``".."``.
+ .. versionchanged:: 2.7.4
+ The zipfile module attempts to prevent that. See the :meth:`extract` note.
+
.. versionadded:: 2.6
@@ -312,7 +327,7 @@ ZipFile Objects
:class:`ZipInfo` constructor sets this member to :const:`ZIP_STORED`.
.. versionchanged:: 2.7
- The *compression_type* argument.
+ The *compress_type* argument.
The following data attributes are also available:
@@ -328,7 +343,7 @@ The following data attributes are also available:
The comment text associated with the ZIP file. If assigning a comment to a
:class:`ZipFile` instance created with mode 'a' or 'w', this should be a
string no longer than 65535 bytes. Comments longer than this will be
- truncated in the written archive when :meth:`ZipFile.close` is called.
+ truncated in the written archive when :meth:`.close` is called.
.. _pyzipfile-objects:
@@ -368,8 +383,8 @@ The :class:`PyZipFile` constructor takes the same parameters as the
ZipInfo Objects
---------------
-Instances of the :class:`ZipInfo` class are returned by the :meth:`getinfo` and
-:meth:`infolist` methods of :class:`ZipFile` objects. Each object stores
+Instances of the :class:`ZipInfo` class are returned by the :meth:`.getinfo` and
+:meth:`.infolist` methods of :class:`ZipFile` objects. Each object stores
information about a single member of the ZIP archive.
Instances have the following attributes:
diff --git a/Doc/library/zipimport.rst b/Doc/library/zipimport.rst
index af18d15..828e9fc 100644
--- a/Doc/library/zipimport.rst
+++ b/Doc/library/zipimport.rst
@@ -19,7 +19,7 @@ Typically, :data:`sys.path` is a list of directory names as strings. This modul
also allows an item of :data:`sys.path` to be a string naming a ZIP file archive.
The ZIP archive can contain a subdirectory structure to support package imports,
and a path within the archive can be specified to only import from a
-subdirectory. For example, the path :file:`/tmp/example.zip/lib/` would only
+subdirectory. For example, the path :file:`example.zip/lib/` would only
import from the :file:`lib/` subdirectory within the archive.
Any files may be present in the ZIP archive, but only files :file:`.py` and
@@ -115,7 +115,7 @@ zipimporter Objects
.. method:: is_package(fullname)
- Return True if the module specified by *fullname* is a package. Raise
+ Return ``True`` if the module specified by *fullname* is a package. Raise
:exc:`ZipImportError` if the module couldn't be found.
@@ -151,8 +151,8 @@ Examples
Here is an example that imports a module from a ZIP archive - note that the
:mod:`zipimport` module is not explicitly used. ::
- $ unzip -l /tmp/example.zip
- Archive: /tmp/example.zip
+ $ unzip -l example.zip
+ Archive: example.zip
Length Date Time Name
-------- ---- ---- ----
8467 11-26-02 22:30 jwzthreading.py
@@ -161,8 +161,8 @@ Here is an example that imports a module from a ZIP archive - note that the
$ ./python
Python 2.3 (#1, Aug 1 2003, 19:54:32)
>>> import sys
- >>> sys.path.insert(0, '/tmp/example.zip') # Add .zip file to front of path
+ >>> sys.path.insert(0, 'example.zip') # Add .zip file to front of path
>>> import jwzthreading
>>> jwzthreading.__file__
- '/tmp/example.zip/jwzthreading.py'
+ 'example.zip/jwzthreading.py'
diff --git a/Doc/library/zlib.rst b/Doc/library/zlib.rst
index 92a3197..192bd4d 100644
--- a/Doc/library/zlib.rst
+++ b/Doc/library/zlib.rst
@@ -19,9 +19,7 @@ order. This documentation doesn't attempt to cover all of the permutations;
consult the zlib manual at http://www.zlib.net/manual.html for authoritative
information.
-For reading and writing ``.gz`` files see the :mod:`gzip` module. For
-other archive formats, see the :mod:`bz2`, :mod:`zipfile`, and
-:mod:`tarfile` modules.
+For reading and writing ``.gz`` files see the :mod:`gzip` module.
The available exception and functions in this module are:
@@ -64,18 +62,34 @@ The available exception and functions in this module are:
.. function:: compress(string[, level])
Compresses the data in *string*, returning a string containing the compressed data.
- *level* is an integer from ``1`` to ``9`` controlling the level of compression;
+ *level* is an integer from ``0`` to ``9`` controlling the level of compression;
``1`` is fastest and produces the least compression, ``9`` is slowest and
- produces the most. The default value is ``6``. Raises the :exc:`error`
- exception if any error occurs.
+ produces the most. ``0`` is no compression. The default value is ``6``.
+ Raises the :exc:`error` exception if any error occurs.
-.. function:: compressobj([level])
+.. function:: compressobj([level[, method[, wbits[, memlevel[, strategy]]]]])
Returns a compression object, to be used for compressing data streams that won't
- fit into memory at once. *level* is an integer from ``1`` to ``9`` controlling
+ fit into memory at once. *level* is an integer from ``0`` to ``9`` controlling
the level of compression; ``1`` is fastest and produces the least compression,
- ``9`` is slowest and produces the most. The default value is ``6``.
+ ``9`` is slowest and produces the most. ``0`` is no compression. The default
+ value is ``6``.
+
+ *method* is the compression algorithm. Currently, the only supported value is
+ ``DEFLATED``.
+
+ *wbits* is the base two logarithm of the size of the window buffer. This
+ should be an integer from ``8`` to ``15``. Higher values give better
+ compression, but use more memory. The default is 15.
+
+ *memlevel* controls the amount of memory used for internal compression state.
+ Valid values range from ``1`` to ``9``. Higher values use more memory,
+ but are faster and produce smaller output. The default is 8.
+
+ *strategy* is used to tune the compression algorithm. Possible values are
+ ``Z_DEFAULT_STRATEGY``, ``Z_FILTERED``, and ``Z_HUFFMAN_ONLY``. The default
+ is ``Z_DEFAULT_STRATEGY``.
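
A small sketch showing the extra parameters in use (the values given are the
documented defaults)::

    import zlib

    co = zlib.compressobj(6, zlib.DEFLATED, 15, 8, zlib.Z_DEFAULT_STRATEGY)
    data = 'witch which has which witches wrist watch' * 100
    compressed = co.compress(data) + co.flush()
    print len(data), '->', len(compressed)
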
.. function:: crc32(data[, value])
diff --git a/Doc/license.rst b/Doc/license.rst
index 3b0c10e..5c5b269 100644
--- a/Doc/license.rst
+++ b/Doc/license.rst
@@ -50,59 +50,11 @@ been GPL-compatible; the table below summarizes the various releases.
+----------------+--------------+-----------+------------+-----------------+
| 2.1.1 | 2.1+2.0.1 | 2001 | PSF | yes |
+----------------+--------------+-----------+------------+-----------------+
-| 2.2 | 2.1.1 | 2001 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
| 2.1.2 | 2.1.1 | 2002 | PSF | yes |
+----------------+--------------+-----------+------------+-----------------+
| 2.1.3 | 2.1.2 | 2002 | PSF | yes |
+----------------+--------------+-----------+------------+-----------------+
-| 2.2.1 | 2.2 | 2002 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.2.2 | 2.2.1 | 2002 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.2.3 | 2.2.2 | 2002-2003 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.3 | 2.2.2 | 2002-2003 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.3.1 | 2.3 | 2002-2003 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.3.2 | 2.3.1 | 2003 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.3.3 | 2.3.2 | 2003 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.3.4 | 2.3.3 | 2004 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.3.5 | 2.3.4 | 2005 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.4 | 2.3 | 2004 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.4.1 | 2.4 | 2005 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.4.2 | 2.4.1 | 2005 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.4.3 | 2.4.2 | 2006 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.4.4 | 2.4.3 | 2006 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.5 | 2.4 | 2006 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.5.1 | 2.5 | 2007 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.5.2 | 2.5.1 | 2008 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.5.3 | 2.5.2 | 2008 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.6 | 2.5 | 2008 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.6.1 | 2.6 | 2008 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.6.2 | 2.6.1 | 2009 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.6.3 | 2.6.2 | 2009 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.6.4 | 2.6.3 | 2010 | PSF | yes |
-+----------------+--------------+-----------+------------+-----------------+
-| 2.7 | 2.6 | 2010 | PSF | yes |
+| 2.2 and above | 2.1.1 | 2001-now | PSF | yes |
+----------------+--------------+-----------+------------+-----------------+
.. note::
@@ -132,7 +84,7 @@ Terms and conditions for accessing or otherwise using Python
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python |release| alone or in any derivative
version, provided, however, that PSF's License Agreement and PSF's notice of
- copyright, i.e., "Copyright © 2001-2012 Python Software Foundation; All Rights
+ copyright, i.e., "Copyright © 2001-2014 Python Software Foundation; All Rights
Reserved" are retained in Python |release| alone or in any derivative version
prepared by Licensee.
@@ -309,7 +261,7 @@ Mersenne Twister
----------------
The :mod:`_random` module includes code based on a download from
-http://www.math.keio.ac.jp/~matumoto/MT2002/emt19937ar.html. The following are
+http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/emt19937ar.html. The following are
the verbatim comments from the original code::
A C-program for MT19937, with initialization improved 2002/1/26.
@@ -350,8 +302,8 @@ the verbatim comments from the original code::
Any feedback is very welcome.
- http://www.math.keio.ac.jp/matumoto/emt.html
- email: matumoto@math.keio.ac.jp
+ http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
+ email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
Sockets
@@ -794,8 +746,8 @@ OpenSSL license here::
*
*/
- Original SSLeay License
- -----------------------
+ Original SSLeay License
+ -----------------------
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
diff --git a/Doc/make.bat b/Doc/make.bat
index d234e77..d3f01b5 100644
--- a/Doc/make.bat
+++ b/Doc/make.bat
@@ -34,7 +34,7 @@ echo.
goto end
:checkout
-svn co %SVNROOT%/external/Sphinx-0.6.7/sphinx tools/sphinx
+svn co %SVNROOT%/external/Sphinx-1.0.7/sphinx tools/sphinx
svn co %SVNROOT%/external/docutils-0.6/docutils tools/docutils
svn co %SVNROOT%/external/Jinja-2.3.1/jinja2 tools/jinja2
svn co %SVNROOT%/external/Pygments-1.3.1/pygments tools/pygments
diff --git a/Doc/reference/compound_stmts.rst b/Doc/reference/compound_stmts.rst
index c99b65a..08230fa 100644
--- a/Doc/reference/compound_stmts.rst
+++ b/Doc/reference/compound_stmts.rst
@@ -238,10 +238,7 @@ present, must be last; it matches any exception. For an except clause with an
expression, that expression is evaluated, and the clause matches the exception
if the resulting object is "compatible" with the exception. An object is
compatible with an exception if it is the class or a base class of the exception
-object, a tuple containing an item compatible with the exception, or, in the
-(deprecated) case of string exceptions, is the raised string itself (note that
-the object identities must match, i.e. it must be the same string object, not
-just a string with the same value).
+object, or a tuple containing an item compatible with the exception.
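
A small illustrative example of matching a tuple of exception classes::

   >>> try:
   ...     1/0
   ... except (TypeError, ZeroDivisionError):
   ...     print 'matched'
   ...
   matched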
If no except clause matches the exception, the search for an exception handler
continues in the surrounding code and on the invocation stack. [#]_
@@ -297,8 +294,19 @@ not handled, the exception is temporarily saved. The :keyword:`finally` clause
is executed. If there is a saved exception, it is re-raised at the end of the
:keyword:`finally` clause. If the :keyword:`finally` clause raises another
exception or executes a :keyword:`return` or :keyword:`break` statement, the
-saved exception is lost. The exception information is not available to the
-program during execution of the :keyword:`finally` clause.
+saved exception is discarded::
+
+ >>> def f():
+ ... try:
+ ... 1/0
+ ... finally:
+ ... return 42
+ ...
+ >>> f()
+ 42
+
+The exception information is not available to the program during execution of
+the :keyword:`finally` clause.
.. index::
statement: return
@@ -312,6 +320,20 @@ statement, the :keyword:`finally` clause is also executed 'on the way out.' A
reason is a problem with the current implementation --- this restriction may be
lifted in the future).
+The return value of a function is determined by the last :keyword:`return`
+statement executed. Since the :keyword:`finally` clause always executes, a
+:keyword:`return` statement executed in the :keyword:`finally` clause will
+always be the last one executed::
+
+ >>> def foo():
+ ... try:
+ ... return 'try'
+ ... finally:
+ ... return 'finally'
+ ...
+ >>> foo()
+ 'finally'
+
Additional information on exceptions can be found in section :ref:`exceptions`,
and information on using the :keyword:`raise` statement to generate exceptions
may be found in section :ref:`raise`.
@@ -323,7 +345,9 @@ may be found in section :ref:`raise`.
The :keyword:`with` statement
=============================
-.. index:: statement: with
+.. index::
+ statement: with
+ single: as; with statement
.. versionadded:: 2.5
@@ -399,6 +423,9 @@ is equivalent to ::
statement.
+.. index::
+ single: parameter; function definition
+
.. _function:
.. _def:
@@ -423,7 +450,7 @@ A function definition defines a user-defined function object (see section
funcdef: "def" `funcname` "(" [`parameter_list`] ")" ":" `suite`
dotted_name: `identifier` ("." `identifier`)*
parameter_list: (`defparameter` ",")*
- : ( "*" `identifier` [, "**" `identifier`]
+ : ( "*" `identifier` ["," "**" `identifier`]
: | "**" `identifier`
: | `defparameter` [","] )
defparameter: `parameter` ["=" `expression`]
@@ -459,12 +486,15 @@ is equivalent to::
def func(): pass
func = f1(arg)(f2(func))
-.. index:: triple: default; parameter; value
-
-When one or more top-level parameters have the form *parameter* ``=``
-*expression*, the function is said to have "default parameter values." For a
-parameter with a default value, the corresponding argument may be omitted from a
-call, in which case the parameter's default value is substituted. If a
+.. index::
+ triple: default; parameter; value
+ single: argument; function definition
+
+When one or more top-level :term:`parameters <parameter>` have the form
+*parameter* ``=`` *expression*, the function is said to have "default parameter
+values." For a parameter with a default value, the corresponding
+:term:`argument` may be omitted from a call, in which
+case the parameter's default value is substituted. If a
parameter has a default value, all following parameters must also have a default
value --- this is a syntactic restriction that is not expressed by the grammar.
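
For illustration (the function and its arguments are invented), a parameter
with a default value whose argument may be omitted from the call::

   >>> def greet(name, greeting="Hello"):
   ...     return "%s, %s!" % (greeting, name)
   ...
   >>> greet("world")
   'Hello, world!'
   >>> greet("world", "Hi")
   'Hi, world!'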
@@ -495,14 +525,14 @@ receiving any excess positional parameters, defaulting to the empty tuple. If
the form "``**identifier``" is present, it is initialized to a new dictionary
receiving any excess keyword arguments, defaulting to a new empty dictionary.
-.. index:: pair: lambda; form
+.. index:: pair: lambda; expression
It is also possible to create anonymous functions (functions not bound to a
-name), for immediate use in expressions. This uses lambda forms, described in
-section :ref:`lambda`. Note that the lambda form is merely a shorthand for a
+name), for immediate use in expressions. This uses lambda expressions, described in
+section :ref:`lambda`. Note that the lambda expression is merely a shorthand for a
simplified function definition; a function defined in a ":keyword:`def`"
statement can be passed around or assigned to another name just like a function
-defined by a lambda form. The ":keyword:`def`" form is actually more powerful
+defined by a lambda expression. The ":keyword:`def`" form is actually more powerful
since it allows the execution of multiple statements.
**Programmer's note:** Functions are first-class objects. A "``def``" form
diff --git a/Doc/reference/datamodel.rst b/Doc/reference/datamodel.rst
index 0d87873..fdcd2fd 100644
--- a/Doc/reference/datamodel.rst
+++ b/Doc/reference/datamodel.rst
@@ -207,7 +207,7 @@ Ellipsis
single: True
These represent the truth values False and True. The two objects
- representing the values False and True are the only Boolean objects.
+ representing the values ``False`` and ``True`` are the only Boolean objects.
The Boolean type is a subtype of plain integers, and Boolean values
behave like the values 0 and 1, respectively, in almost all contexts,
the exception being that when converted to a string, the strings
@@ -356,8 +356,6 @@ Sequences
object: mutable sequence
object: mutable
pair: assignment; statement
- single: delete
- statement: del
single: subscription
single: slicing
@@ -411,7 +409,7 @@ Set types
These represent a mutable set. They are created by the built-in :func:`set`
constructor and can be modified afterwards by several methods, such as
- :meth:`add`.
+ :meth:`~set.add`.
Frozen sets
.. index:: object: frozenset
@@ -481,47 +479,44 @@ Callable types
Special attributes:
+ .. tabularcolumns:: |l|L|l|
+
+-----------------------+-------------------------------+-----------+
| Attribute | Meaning | |
+=======================+===============================+===========+
- | :attr:`func_doc` | The function's documentation | Writable |
- | | string, or ``None`` if | |
- | | unavailable | |
- +-----------------------+-------------------------------+-----------+
- | :attr:`__doc__` | Another way of spelling | Writable |
- | | :attr:`func_doc` | |
- +-----------------------+-------------------------------+-----------+
- | :attr:`func_name` | The function's name | Writable |
+ | :attr:`__doc__` | The function's documentation | Writable |
+ | :attr:`func_doc` | string, or ``None`` if | |
+ | | unavailable. | |
+-----------------------+-------------------------------+-----------+
- | :attr:`__name__` | Another way of spelling | Writable |
- | | :attr:`func_name` | |
+ | :attr:`__name__` | The function's name. | Writable |
+ | :attr:`func_name` | | |
+-----------------------+-------------------------------+-----------+
| :attr:`__module__` | The name of the module the | Writable |
| | function was defined in, or | |
| | ``None`` if unavailable. | |
+-----------------------+-------------------------------+-----------+
- | :attr:`func_defaults` | A tuple containing default | Writable |
- | | argument values for those | |
+ | :attr:`__defaults__` | A tuple containing default | Writable |
+ | :attr:`func_defaults` | argument values for those | |
| | arguments that have defaults, | |
| | or ``None`` if no arguments | |
- | | have a default value | |
+ | | have a default value. | |
+-----------------------+-------------------------------+-----------+
- | :attr:`func_code` | The code object representing | Writable |
- | | the compiled function body. | |
+ | :attr:`__code__` | The code object representing | Writable |
+ | :attr:`func_code` | the compiled function body. | |
+-----------------------+-------------------------------+-----------+
- | :attr:`func_globals` | A reference to the dictionary | Read-only |
- | | that holds the function's | |
+ | :attr:`__globals__` | A reference to the dictionary | Read-only |
+ | :attr:`func_globals` | that holds the function's | |
| | global variables --- the | |
| | global namespace of the | |
| | module in which the function | |
| | was defined. | |
+-----------------------+-------------------------------+-----------+
- | :attr:`func_dict` | The namespace supporting | Writable |
- | | arbitrary function | |
+ | :attr:`__dict__` | The namespace supporting | Writable |
+ | :attr:`func_dict` | arbitrary function | |
| | attributes. | |
+-----------------------+-------------------------------+-----------+
- | :attr:`func_closure` | ``None`` or a tuple of cells | Read-only |
- | | that contain bindings for the | |
+ | :attr:`__closure__` | ``None`` or a tuple of cells | Read-only |
+ | :attr:`func_closure` | that contain bindings for the | |
| | function's free variables. | |
+-----------------------+-------------------------------+-----------+
@@ -530,6 +525,12 @@ Callable types
.. versionchanged:: 2.4
``func_name`` is now writable.
+ .. versionchanged:: 2.6
+ The double-underscore attributes ``__closure__``, ``__code__``,
+ ``__defaults__``, and ``__globals__`` were introduced as aliases for
+ the corresponding ``func_*`` attributes for forwards compatibility
+ with Python 3.
+
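
A brief sketch of the aliasing described above (the function is invented)::

   >>> def f(a, b=2):
   ...     pass
   ...
   >>> f.__defaults__ is f.func_defaults
   True
   >>> f.__code__ is f.func_code
   True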
Function objects also support getting and setting arbitrary attributes, which
can be used, for example, to attach metadata to functions. Regular attribute
dot-notation is used to get and set such attributes. *Note that the current
@@ -540,16 +541,21 @@ Callable types
code object; see the description of internal types below.
.. index::
- single: func_doc (function attribute)
single: __doc__ (function attribute)
single: __name__ (function attribute)
single: __module__ (function attribute)
single: __dict__ (function attribute)
+ single: __defaults__ (function attribute)
+ single: __code__ (function attribute)
+ single: __globals__ (function attribute)
+ single: __closure__ (function attribute)
+ single: func_doc (function attribute)
+ single: func_name (function attribute)
+ single: func_dict (function attribute)
single: func_defaults (function attribute)
- single: func_closure (function attribute)
single: func_code (function attribute)
single: func_globals (function attribute)
- single: func_dict (function attribute)
+ single: func_closure (function attribute)
pair: global; namespace
User-defined methods
@@ -573,7 +579,7 @@ Callable types
:attr:`im_self` used to refer to the class that defined the method.
.. versionchanged:: 2.6
- For 3.0 forward-compatibility, :attr:`im_func` is also available as
+ For Python 3 forward-compatibility, :attr:`im_func` is also available as
:attr:`__func__`, and :attr:`im_self` as :attr:`__self__`.
.. index::
@@ -621,9 +627,8 @@ Callable types
single: im_self (method attribute)
When a user-defined method object is created by retrieving a class method object
- from a class or instance, its :attr:`im_self` attribute is the class itself (the
- same as the :attr:`im_class` attribute), and its :attr:`im_func` attribute is
- the function object underlying the class method.
+ from a class or instance, its :attr:`im_self` attribute is the class itself, and
+ its :attr:`im_func` attribute is the function object underlying the class method.
When an unbound user-defined method object is called, the underlying function
(:attr:`im_func`) is called, with the restriction that the first argument must
@@ -660,7 +665,8 @@ Callable types
:ref:`yield`) is called a :dfn:`generator
function`. Such a function, when called, always returns an iterator object
which can be used to execute the body of the function: calling the iterator's
- :meth:`next` method will cause the function to execute until it provides a value
+ :meth:`~iterator.next` method will cause the function to execute until
+ it provides a value
using the :keyword:`yield` statement. When the function executes a
:keyword:`return` statement or falls off the end, a :exc:`StopIteration`
exception is raised and the iterator will have reached the end of the set of
@@ -795,8 +801,8 @@ Classes
associated class is either :class:`C` or one of its base classes, it is
transformed into an unbound user-defined method object whose :attr:`im_class`
attribute is :class:`C`. When it would yield a class method object, it is
- transformed into a bound user-defined method object whose :attr:`im_class`
- and :attr:`im_self` attributes are both :class:`C`. When it would yield a
+ transformed into a bound user-defined method object whose
+ :attr:`im_self` attribute is :class:`C`. When it would yield a
static method object, it is transformed into the object wrapped by the static
method object. See section :ref:`descriptors` for another way in which
attributes retrieved from a class may differ from those actually contained in
@@ -820,10 +826,10 @@ Classes
Special attributes: :attr:`__name__` is the class name; :attr:`__module__` is
the module name in which the class was defined; :attr:`__dict__` is the
- dictionary containing the class's namespace; :attr:`__bases__` is a tuple
- (possibly empty or a singleton) containing the base classes, in the order of
- their occurrence in the base class list; :attr:`__doc__` is the class's
- documentation string, or None if undefined.
+ dictionary containing the class's namespace; :attr:`~class.__bases__` is a
+ tuple (possibly empty or a singleton) containing the base classes, in the
+ order of their occurrence in the base class list; :attr:`__doc__` is the
+ class's documentation string, or None if undefined.
Class instances
.. index::
@@ -868,8 +874,8 @@ Class instances
single: __dict__ (instance attribute)
single: __class__ (instance attribute)
- Special attributes: :attr:`__dict__` is the attribute dictionary;
- :attr:`__class__` is the instance's class.
+ Special attributes: :attr:`~object.__dict__` is the attribute dictionary;
+ :attr:`~instance.__class__` is the instance's class.
Files
.. index::
@@ -1068,9 +1074,9 @@ Internal types
single: stop (slice object attribute)
single: step (slice object attribute)
- Special read-only attributes: :attr:`start` is the lower bound; :attr:`stop` is
- the upper bound; :attr:`step` is the step value; each is ``None`` if omitted.
- These attributes can have any type.
+ Special read-only attributes: :attr:`~slice.start` is the lower bound;
+ :attr:`~slice.stop` is the upper bound; :attr:`~slice.step` is the step
+ value; each is ``None`` if omitted. These attributes can have any type.
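
A short example::

   >>> s = slice(1, 10, 2)
   >>> s.start, s.stop, s.step
   (1, 10, 2)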
Slice objects support one method:
@@ -1149,7 +1155,7 @@ sources of additional information.
single: class; classic
single: class; old-style
-Old-style classes are removed in Python 3.0, leaving only the semantics of
+Old-style classes are removed in Python 3, leaving only the semantics of
new-style classes.
@@ -1177,7 +1183,8 @@ When implementing a class that emulates any built-in type, it is important that
the emulation only be implemented to the degree that it makes sense for the
object being modelled. For example, some sequences may work well with retrieval
of individual elements, but extracting a slice may not make sense. (One example
-of this is the :class:`NodeList` interface in the W3C's Document Object Model.)
+of this is the :class:`~xml.dom.NodeList` interface in the W3C's Document
+Object Model.)
.. _customization:
@@ -1413,7 +1420,7 @@ Basic customization
User-defined classes have :meth:`__cmp__` and :meth:`__hash__` methods
by default; with them, all objects compare unequal (except with themselves)
- and ``x.__hash__()`` returns ``id(x)``.
+ and ``x.__hash__()`` returns a result derived from ``id(x)``.
Classes which inherit a :meth:`__hash__` method from a parent class but
change the meaning of :meth:`__cmp__` or :meth:`__eq__` such that the hash
@@ -1809,10 +1816,10 @@ case the instance is itself a class.
:pep:`3119` - Introducing Abstract Base Classes
Includes the specification for customizing :func:`isinstance` and
- :func:`issubclass` behavior through :meth:`__instancecheck__` and
- :meth:`__subclasscheck__`, with motivation for this functionality in the
- context of adding Abstract Base Classes (see the :mod:`abc` module) to the
- language.
+ :func:`issubclass` behavior through :meth:`~class.__instancecheck__` and
+ :meth:`~class.__subclasscheck__`, with motivation for this functionality
+ in the context of adding Abstract Base Classes (see the :mod:`abc`
+ module) to the language.
.. _callable-types:
@@ -1845,7 +1852,7 @@ range of items. (For backwards compatibility, the method :meth:`__getslice__`
is also recommended that mappings provide the methods :meth:`keys`,
:meth:`values`, :meth:`items`, :meth:`has_key`, :meth:`get`, :meth:`clear`,
:meth:`setdefault`, :meth:`iterkeys`, :meth:`itervalues`, :meth:`iteritems`,
-:meth:`pop`, :meth:`popitem`, :meth:`copy`, and :meth:`update` behaving similar
+:meth:`pop`, :meth:`popitem`, :meth:`!copy`, and :meth:`update` behaving similar
to those for Python's standard dictionary objects. The :mod:`UserDict` module
provides a :class:`DictMixin` class to help create those methods from a base set
of :meth:`__getitem__`, :meth:`__setitem__`, :meth:`__delitem__`, and
@@ -2235,7 +2242,7 @@ Coercion rules
This section used to document the rules for coercion. As the language has
evolved, the coercion rules have become hard to document precisely; documenting
what one version of one particular implementation does is undesirable. Instead,
-here are some informal guidelines regarding coercion. In Python 3.0, coercion
+here are some informal guidelines regarding coercion. In Python 3, coercion
will not be supported.
*
diff --git a/Doc/reference/expressions.rst b/Doc/reference/expressions.rst
index a7c66d3..d3dc111 100644
--- a/Doc/reference/expressions.rst
+++ b/Doc/reference/expressions.rst
@@ -96,14 +96,13 @@ exception.
definition begins with two or more underscore characters and does not end in two
or more underscores, it is considered a :dfn:`private name` of that class.
Private names are transformed to a longer form before code is generated for
-them. The transformation inserts the class name in front of the name, with
-leading underscores removed, and a single underscore inserted in front of the
-class name. For example, the identifier ``__spam`` occurring in a class named
-``Ham`` will be transformed to ``_Ham__spam``. This transformation is
-independent of the syntactical context in which the identifier is used. If the
-transformed name is extremely long (longer than 255 characters), implementation
-defined truncation may happen. If the class name consists only of underscores,
-no transformation is done.
+them. The transformation inserts the class name, with leading underscores
+removed and a single underscore inserted, in front of the name. For example,
+the identifier ``__spam`` occurring in a class named ``Ham`` will be transformed
+to ``_Ham__spam``. This transformation is independent of the syntactical
+context in which the identifier is used. If the transformed name is extremely
+long (longer than 255 characters), implementation defined truncation may happen.
+If the class name consists only of underscores, no transformation is done.
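
A minimal illustration (the class and attribute names are invented)::

   >>> class Ham(object):
   ...     __spam = 1          # stored under the mangled name _Ham__spam
   ...
   >>> Ham._Ham__spam
   1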
@@ -185,7 +184,7 @@ brackets:
list_comprehension: `expression` `list_for`
list_for: "for" `target_list` "in" `old_expression_list` [`list_iter`]
old_expression_list: `old_expression` [("," `old_expression`)+ [","]]
- old_expression: `or_test` | `old_lambda_form`
+ old_expression: `or_test` | `old_lambda_expr`
list_iter: `list_for` | `list_if`
list_if: "if" `old_expression` [`list_iter`]
@@ -421,10 +420,18 @@ transferred to the generator's caller.
.. index:: object: generator
-The following generator's methods can be used to control the execution of a
-generator function:
+
+Generator-iterator methods
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This subsection describes the methods of a generator iterator. They can
+be used to control the execution of a generator function.
+
+Note that calling any of the generator methods below when the generator
+is already executing raises a :exc:`ValueError` exception.
.. index:: exception: StopIteration
+.. class:: generator
.. method:: generator.next()
@@ -438,6 +445,7 @@ generator function:
exits without yielding another value, a :exc:`StopIteration` exception is
raised.
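
A short doctest-style sketch of this behaviour (the generator is invented)::

   >>> def gen():
   ...     yield 1
   ...     yield 2
   ...
   >>> g = gen()
   >>> g.next()
   1
   >>> g.next()
   2
   >>> g.next()
   Traceback (most recent call last):
     ...
   StopIteration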
+.. class:: .
.. method:: generator.send(value)
@@ -654,23 +662,24 @@ is a tuple containing the conversion of the slice items; otherwise, the
conversion of the lone slice item is the key. The conversion of a slice item
that is an expression is that expression. The conversion of an ellipsis slice
item is the built-in ``Ellipsis`` object. The conversion of a proper slice is a
-slice object (see section :ref:`types`) whose :attr:`start`, :attr:`stop` and
-:attr:`step` attributes are the values of the expressions given as lower bound,
-upper bound and stride, respectively, substituting ``None`` for missing
-expressions.
+slice object (see section :ref:`types`) whose :attr:`~slice.start`,
+:attr:`~slice.stop` and :attr:`~slice.step` attributes are the values of the
+expressions given as lower bound, upper bound and stride, respectively,
+substituting ``None`` for missing expressions.
+.. index::
+ object: callable
+ single: call
+ single: argument; call semantics
+
.. _calls:
Calls
-----
-.. index:: single: call
-
-.. index:: object: callable
-
-A call calls a callable object (e.g., a function) with a possibly empty series
-of arguments:
+A call calls a callable object (e.g., a :term:`function`) with a possibly empty
+series of :term:`arguments <argument>`:
.. productionlist::
call: `primary` "(" [`argument_list` [","]
@@ -689,12 +698,15 @@ of arguments:
A trailing comma may be present after the positional and keyword arguments but
does not affect the semantics.
+.. index::
+ single: parameter; call semantics
+
The primary must evaluate to a callable object (user-defined functions, built-in
functions, methods of built-in objects, class objects, methods of class
instances, and certain class instances themselves are callable; extensions may
define additional callable object types). All argument expressions are
evaluated before the call is attempted. Please refer to section :ref:`function`
-for the syntax of formal parameter lists.
+for the syntax of formal :term:`parameter` lists.
If keyword arguments are present, they are first converted to positional
arguments, as follows. First, a list of unfilled slots is created for the
@@ -1245,7 +1257,7 @@ Conditional Expressions
.. productionlist::
conditional_expression: `or_test` ["if" `or_test` "else" `expression`]
- expression: `conditional_expression` | `lambda_form`
+ expression: `conditional_expression` | `lambda_expr`
Conditional expressions (sometimes called a "ternary operator") have the lowest
priority of all Python operations.
@@ -1265,14 +1277,13 @@ Lambdas
.. index::
pair: lambda; expression
- pair: lambda; form
pair: anonymous; function
.. productionlist::
- lambda_form: "lambda" [`parameter_list`]: `expression`
- old_lambda_form: "lambda" [`parameter_list`]: `old_expression`
+ lambda_expr: "lambda" [`parameter_list`]: `expression`
+ old_lambda_expr: "lambda" [`parameter_list`]: `old_expression`
-Lambda forms (lambda expressions) have the same syntactic position as
+Lambda expressions (sometimes called lambda forms) have the same syntactic position as
expressions. They are a shorthand to create anonymous functions; the expression
``lambda arguments: expression`` yields a function object. The unnamed object
behaves like a function object defined with ::
@@ -1281,7 +1292,7 @@ behaves like a function object defined with ::
return expression
See section :ref:`function` for the syntax of parameter lists. Note that
-functions created with lambda forms cannot contain statements.
+functions created with lambda expressions cannot contain statements.
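
An illustrative lambda expression and an equivalent ":keyword:`def`" form::

   >>> add = lambda x, y: x + y
   >>> add(2, 3)
   5
   >>> def add(x, y):
   ...     return x + y
   ...
   >>> add(2, 3)
   5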
.. _exprlists:
@@ -1332,8 +1343,8 @@ their suffixes::
.. _operator-summary:
-Summary
-=======
+Operator precedence
+===================
.. index:: pair: operator; precedence
@@ -1356,10 +1367,10 @@ groups from right to left).
+-----------------------------------------------+-------------------------------------+
| :keyword:`and` | Boolean AND |
+-----------------------------------------------+-------------------------------------+
-| :keyword:`not` *x* | Boolean NOT |
+| :keyword:`not` ``x`` | Boolean NOT |
+-----------------------------------------------+-------------------------------------+
-| :keyword:`in`, :keyword:`not` :keyword:`in`, | Comparisons, including membership |
-| :keyword:`is`, :keyword:`is not`, ``<``, | tests and identity tests, |
+| :keyword:`in`, :keyword:`not in`, | Comparisons, including membership |
+| :keyword:`is`, :keyword:`is not`, ``<``, | tests and identity tests |
| ``<=``, ``>``, ``>=``, ``<>``, ``!=``, ``==`` | |
+-----------------------------------------------+-------------------------------------+
| ``|`` | Bitwise OR |
@@ -1384,7 +1395,7 @@ groups from right to left).
+-----------------------------------------------+-------------------------------------+
| ``(expressions...)``, | Binding or tuple display, |
| ``[expressions...]``, | list display, |
-| ``{key:datum...}``, | dictionary display, |
+| ``{key: value...}``, | dictionary display, |
| ```expressions...``` | string conversion |
+-----------------------------------------------+-------------------------------------+
@@ -1392,7 +1403,7 @@ groups from right to left).
.. [#] In Python 2.3 and later releases, a list comprehension "leaks" the control
variables of each ``for`` it contains into the containing scope. However, this
- behavior is deprecated, and relying on it will not work in Python 3.0
+ behavior is deprecated, and relying on it will not work in Python 3.
.. [#] While ``abs(x%y) < abs(y)`` is true mathematically, for floats it may not be
true numerically due to roundoff. For example, and assuming a platform on which
diff --git a/Doc/reference/index.rst b/Doc/reference/index.rst
index bd1a281..513e445 100644
--- a/Doc/reference/index.rst
+++ b/Doc/reference/index.rst
@@ -4,9 +4,6 @@
The Python Language Reference
#################################
-:Release: |version|
-:Date: |today|
-
This reference manual describes the syntax and "core semantics" of the
language. It is terse, but attempts to be exact and complete. The semantics of
non-essential built-in object types and of the built-in functions and modules
diff --git a/Doc/reference/lexical_analysis.rst b/Doc/reference/lexical_analysis.rst
index ea92b8e..52a09a8 100644
--- a/Doc/reference/lexical_analysis.rst
+++ b/Doc/reference/lexical_analysis.rst
@@ -104,9 +104,7 @@ are ignored by the syntax; they are not tokens.
Encoding declarations
---------------------
-.. index::
- single: source character set
- single: encodings
+.. index:: source character set, encoding declarations (source file)
If a comment in the first or second line of the Python script matches the
regular expression ``coding[=:]\s*([-\w.]+)``, this comment is processed as an
@@ -529,8 +527,7 @@ Notes:
(2)
Any Unicode character can be encoded this way, but characters outside the Basic
Multilingual Plane (BMP) will be encoded using a surrogate pair if Python is
- compiled to use 16-bit code units (the default). Individual code units which
- form parts of a surrogate pair can be encoded using this escape sequence.
+ compiled to use 16-bit code units (the default).
(3)
As in Standard C, up to three octal digits are accepted.
diff --git a/Doc/reference/simple_stmts.rst b/Doc/reference/simple_stmts.rst
index 05b78a0..d8e539d 100644
--- a/Doc/reference/simple_stmts.rst
+++ b/Doc/reference/simple_stmts.rst
@@ -72,6 +72,7 @@ Assignment statements
=====================
.. index::
+ single: =; assignment statement
pair: assignment; statement
pair: binding; name
pair: rebinding; name
@@ -241,6 +242,18 @@ Augmented assignment statements
.. index::
pair: augmented; assignment
single: statement; assignment, augmented
+ single: +=; augmented assignment
+ single: -=; augmented assignment
+ single: *=; augmented assignment
+ single: /=; augmented assignment
+ single: %=; augmented assignment
+ single: &=; augmented assignment
+ single: ^=; augmented assignment
+ single: |=; augmented assignment
+ single: **=; augmented assignment
+ single: //=; augmented assignment
+ single: >>=; augmented assignment
+ single: <<=; augmented assignment
Augmented assignment is the combination, in a single statement, of a binary
operation and an assignment statement:
@@ -511,6 +524,9 @@ reference count or by being garbage collected), the generator-iterator's
:meth:`close` method will be called, allowing any pending :keyword:`finally`
clauses to execute.
+For full details of :keyword:`yield` semantics, refer to the :ref:`yieldexpr`
+section.
+
.. note::
In Python 2.2, the :keyword:`yield` statement was only allowed when the
@@ -650,6 +666,7 @@ The :keyword:`import` statement
single: module; importing
pair: name; binding
keyword: from
+ single: as; import statement
.. productionlist::
import_stmt: "import" `module` ["as" `name`] ( "," `module` ["as" `name`] )*
@@ -737,7 +754,7 @@ can be found but the path exists then a value of ``None`` is
stored in :data:`sys.path_importer_cache` to signify that an implicit,
file-based finder that handles modules stored as individual files should be
used for that path. If the path does not exist then a finder which always
-returns `None`` is placed in the cache for the path.
+returns ``None`` is placed in the cache for the path.
.. index::
single: loader
@@ -978,21 +995,32 @@ The :keyword:`exec` statement
exec_stmt: "exec" `or_expr` ["in" `expression` ["," `expression`]]
This statement supports dynamic execution of Python code. The first expression
-should evaluate to either a string, an open file object, or a code object. If
-it is a string, the string is parsed as a suite of Python statements which is
-then executed (unless a syntax error occurs). [#]_ If it is an open file, the file
-is parsed until EOF and executed. If it is a code object, it is simply
-executed. In all cases, the code that's executed is expected to be valid as
-file input (see section :ref:`file-input`). Be aware that the
+should evaluate to either a Unicode string, a *Latin-1* encoded string, an open
+file object, a code object, or a tuple. If it is a string, the string is parsed
+as a suite of Python statements which is then executed (unless a syntax error
+occurs). [#]_ If it is an open file, the file is parsed until EOF and executed.
+If it is a code object, it is simply executed. For the interpretation of a
+tuple, see below. In all cases, the code that's executed is expected to be
+valid as file input (see section :ref:`file-input`). Be aware that the
:keyword:`return` and :keyword:`yield` statements may not be used outside of
function definitions even within the context of code passed to the
:keyword:`exec` statement.
In all cases, if the optional parts are omitted, the code is executed in the
-current scope. If only the first expression after :keyword:`in` is specified,
+current scope. If only the first expression after ``in`` is specified,
it should be a dictionary, which will be used for both the global and the local
variables. If two expressions are given, they are used for the global and local
variables, respectively. If provided, *locals* can be any mapping object.
+Remember that at module level, globals and locals are the same dictionary. If
+two separate objects are given as *globals* and *locals*, the code will be
+executed as if it were embedded in a class definition.
+
+The first expression may also be a tuple of length 2 or 3. In this case, the
+optional parts must be omitted. The form ``exec(expr, globals)`` is equivalent
+to ``exec expr in globals``, while the form ``exec(expr, globals, locals)`` is
+equivalent to ``exec expr in globals, locals``. The tuple form of ``exec``
+provides compatibility with Python 3, where ``exec`` is a function rather than
+a statement.
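
A hedged sketch of the tuple form (the code string and namespace are
illustrative)::

   >>> ns = {}
   >>> exec("x = 40 + 2", ns)      # same effect as: exec "x = 40 + 2" in ns
   >>> ns['x']
   42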
.. versionchanged:: 2.4
Formerly, *locals* was required to be a dictionary.
@@ -1021,5 +1049,5 @@ which may be useful to pass around for use by :keyword:`exec`.
.. rubric:: Footnotes
.. [#] Note that the parser only accepts the Unix-style end of line convention.
- If you are reading the code from a file, make sure to use universal
- newline mode to convert Windows or Mac-style newlines.
+ If you are reading the code from a file, make sure to use
+ :term:`universal newlines` mode to convert Windows or Mac-style newlines.
diff --git a/Doc/tools/dailybuild.py b/Doc/tools/dailybuild.py
index 1a471e6..786d5ab 100755
--- a/Doc/tools/dailybuild.py
+++ b/Doc/tools/dailybuild.py
@@ -5,12 +5,12 @@
#
# Usages:
#
-# dailybuild.py
+# dailybuild.py [-q]
#
# without any arguments builds docs for all branches configured in the global
-# BRANCHES value.
+# BRANCHES value. -q selects "quick build", which means to build only HTML.
#
-# dailybuild.py [-d] <checkout> <target>
+# dailybuild.py [-q] [-d] <checkout> <target>
#
# builds one version, where <checkout> is an SVN checkout directory of the
# Python branch to build docs for, and <target> is the directory where the
@@ -29,31 +29,79 @@ import getopt
BUILDROOT = '/home/gbrandl/docbuild'
+SPHINXBUILD = os.path.join(BUILDROOT, 'sphinx-env/bin/sphinx-build')
WWWROOT = '/data/ftp.python.org/pub/docs.python.org'
BRANCHES = [
# checkout, target, isdev
- (BUILDROOT + '/python33', WWWROOT + '/dev', True),
- (BUILDROOT + '/python27', WWWROOT, False),
- (BUILDROOT + '/python32', WWWROOT + '/py3k', False),
+ (BUILDROOT + '/python34', WWWROOT + '/3.4', False),
+ (BUILDROOT + '/python35', WWWROOT + '/3.5', True),
+ (BUILDROOT + '/python27', WWWROOT + '/2.7', False),
]
-def build_one(checkout, target, isdev):
+def _files_changed(old, new):
+ with open(old, 'rb') as fp1, open(new, 'rb') as fp2:
+ st1 = os.fstat(fp1.fileno())
+ st2 = os.fstat(fp2.fileno())
+ if st1.st_size != st2.st_size:
+ return False
+ if st1.st_mtime >= st2.st_mtime:
+ return True
+ while True:
+ one = fp1.read(4096)
+ two = fp2.read(4096)
+ if one != two:
+ return False
+ if one == '':
+ break
+ return True
+
+def build_one(checkout, target, isdev, quick):
print 'Doc autobuild started in %s' % checkout
os.chdir(checkout)
- print 'Running svn update'
- os.system('svn update')
+ print 'Running hg pull --update'
+ os.system('hg pull --update')
print 'Running make autobuild'
- if os.WEXITSTATUS(os.system(
- 'cd Doc; make autobuild-%s' % (isdev and 'dev' or 'stable'))) == 2:
+ maketarget = 'autobuild-' + ('html' if quick else
+ ('dev' if isdev else 'stable'))
+ if os.WEXITSTATUS(os.system('cd Doc; make SPHINXBUILD=%s %s' % (SPHINXBUILD, maketarget))) == 2:
print '*' * 80
return
- print 'Copying HTML files'
+ print('Computing changed files')
+ changed = []
+ for dirpath, dirnames, filenames in os.walk('Doc/build/html/'):
+ dir_rel = dirpath[len('Doc/build/html/'):]
+ for fn in filenames:
+ local_path = os.path.join(dirpath, fn)
+ rel_path = os.path.join(dir_rel, fn)
+ target_path = os.path.join(target, rel_path)
+ if (os.path.exists(target_path) and
+ not _files_changed(target_path, local_path)):
+ changed.append(rel_path)
+ print 'Copying HTML files to %s' % target
os.system('cp -a Doc/build/html/* %s' % target)
- print 'Copying dist files'
- os.system('mkdir -p %s/archives' % target)
- os.system('cp -a Doc/dist/* %s/archives' % target)
+ if not quick:
+ print 'Copying dist files'
+ os.system('mkdir -p %s/archives' % target)
+ os.system('cp -a Doc/dist/* %s/archives' % target)
+ changed.append('archives/')
+ for fn in os.listdir(os.path.join(target, 'archives')):
+ changed.append('archives/' + fn)
+ print '%s files changed' % len(changed)
+ if changed:
+ target_ino = os.stat(target).st_ino
+ targets_dir = os.path.dirname(target)
+ prefixes = []
+ for fn in os.listdir(targets_dir):
+ if os.stat(os.path.join(targets_dir, fn)).st_ino == target_ino:
+ prefixes.append(fn)
+ to_purge = []
+ for prefix in prefixes:
+ to_purge.extend(prefix + "/" + p for p in changed)
+ purge_cmd = 'curl -X PURGE "https://docs.python.org/{%s}"' % ','.join(to_purge)
+ print("Running CDN purge")
+ os.system(purge_cmd)
print 'Finished'
print '=' * 80
@@ -67,15 +115,21 @@ def usage():
if __name__ == '__main__':
try:
- opts, args = getopt.getopt(sys.argv[1:], 'd')
+ opts, args = getopt.getopt(sys.argv[1:], 'dq')
except getopt.error:
usage()
- if opts and not args:
+ quick = devel = False
+ for opt, _ in opts:
+ if opt == '-q':
+ quick = True
+ if opt == '-d':
+ devel = True
+ if devel and not args:
usage()
if args:
if len(args) != 2:
usage()
- build_one(args[0], args[1], bool(opts))
+ build_one(os.path.abspath(args[0]), os.path.abspath(args[1]), devel, quick)
else:
- for branch in BRANCHES:
- build_one(*branch)
+ for checkout, dest, devel in BRANCHES:
+ build_one(checkout, dest, devel, quick)
diff --git a/Doc/tools/sphinx-build.py b/Doc/tools/sphinx-build.py
index a0bd7fa..d3fe702 100644
--- a/Doc/tools/sphinx-build.py
+++ b/Doc/tools/sphinx-build.py
@@ -15,13 +15,13 @@ warnings.filterwarnings('ignore', category=UserWarning, module='jinja2')
if __name__ == '__main__':
- if sys.version_info[:3] < (2, 4, 0):
- print >>sys.stderr, """\
-Error: Sphinx needs to be executed with Python 2.4 or newer
+ if sys.version_info[:3] < (2, 4, 0) or sys.version_info[:3] > (3, 0, 0):
+ sys.stderr.write("""\
+Error: Sphinx needs to be executed with Python 2.4 or newer (not 3.x though).
(If you run this from the Makefile, you can set the PYTHON variable
to the path of an alternative interpreter executable, e.g.,
``make html PYTHON=python2.5``).
-"""
+""")
sys.exit(1)
from sphinx import main
diff --git a/Doc/tools/sphinxext/download.html b/Doc/tools/sphinxext/download.html
index 4fca138..3adf2e9 100644
--- a/Doc/tools/sphinxext/download.html
+++ b/Doc/tools/sphinxext/download.html
@@ -35,9 +35,13 @@ in the table are the size of the download files in megabytes.</p>
</tr>
</table>
-
<p>These archives contain all the content in the documentation.</p>
+<p>HTML Help (<tt>.chm</tt>) files are made available in the "Windows" section
+on the <a href="http://python.org/download/releases/{{ release[:5] }}/">Python
+download page</a>.</p>
+
+
<h2>Unpacking</h2>
<p>Unix users should download the .tar.bz2 archives; these are bzipped tar
diff --git a/Doc/tools/sphinxext/indexsidebar.html b/Doc/tools/sphinxext/indexsidebar.html
index 8d3feef..5c4c75d 100644
--- a/Doc/tools/sphinxext/indexsidebar.html
+++ b/Doc/tools/sphinxext/indexsidebar.html
@@ -1,23 +1,17 @@
- <h3>Download</h3>
- <p><a href="{{ pathto('download') }}">Download these documents</a></p>
- <h3>Docs for other versions</h3>
- <ul>
- <li><a href="http://docs.python.org/2.6/">Python 2.6 (stable)</a></li>
- <li><a href="http://docs.python.org/3.2/">Python 3.2 (stable)</a></li>
- <li><a href="http://docs.python.org/dev/">Python 3.3 (in development)</a></li>
- <li><a href="http://www.python.org/doc/versions/">Old versions</a></li>
- </ul>
+<h3>Download</h3>
+<p><a href="{{ pathto('download') }}">Download these documents</a></p>
+<h3>Docs for other versions</h3>
+<ul>
+ <li><a href="http://docs.python.org/3.4/">Python 3.4 (stable)</a></li>
+ <li><a href="http://docs.python.org/3.5/">Python 3.5 (in development)</a></li>
+ <li><a href="http://www.python.org/doc/versions/">Old versions</a></li>
+</ul>
- <h3>Other resources</h3>
- <ul>
- {# XXX: many of these should probably be merged in the main docs #}
- <li><a href="http://www.python.org/doc/faq/">FAQs</a></li>
- <li><a href="http://www.python.org/doc/essays/">Guido's Essays</a></li>
- <li><a href="http://www.python.org/doc/newstyle/">New-style Classes</a></li>
- <li><a href="http://www.python.org/dev/peps/">PEP Index</a></li>
- <li><a href="http://wiki.python.org/moin/BeginnersGuide">Beginner's Guide</a></li>
- <li><a href="http://wiki.python.org/moin/PythonBooks">Book List</a></li>
- <li><a href="http://www.python.org/doc/av/">Audio/Visual Talks</a></li>
- <li><a href="http://www.python.org/doc/other/">Other Doc Collections</a></li>
- <li><a href="{{ pathto('bugs') }}">Report a Bug</a></li>
- </ul>
+<h3>Other resources</h3>
+<ul>
+ {# XXX: many of these should probably be merged in the main docs #}
+ <li><a href="http://www.python.org/dev/peps/">PEP Index</a></li>
+ <li><a href="http://wiki.python.org/moin/BeginnersGuide">Beginner's Guide</a></li>
+ <li><a href="http://wiki.python.org/moin/PythonBooks">Book List</a></li>
+ <li><a href="http://www.python.org/doc/av/">Audio/Visual Talks</a></li>
+</ul>
diff --git a/Doc/tools/sphinxext/layout.html b/Doc/tools/sphinxext/layout.html
index d4bb105..f24dc8c 100644
--- a/Doc/tools/sphinxext/layout.html
+++ b/Doc/tools/sphinxext/layout.html
@@ -2,18 +2,35 @@
{% block rootrellink %}
<li><img src="{{ pathto('_static/py.png', 1) }}" alt=""
style="vertical-align: middle; margin-top: -1px"/></li>
- <li><a href="{{ pathto('index') }}">{{ shorttitle }}</a>{{ reldelim1 }}</li>
+ <li><a href="http://www.python.org/">Python</a>{{ reldelim1 }}</li>
+ <li>
+ {%- if versionswitcher is defined %}
+ <span class="version_switcher_placeholder">{{ release }}</span>
+ <a href="{{ pathto('index') }}">Documentation</a>{{ reldelim1 }}
+ {%- else %}
+ <a href="{{ pathto('index') }}">{{ shorttitle }}</a>{{ reldelim1 }}
+ {%- endif %}
+ </li>
{% endblock %}
+{% block relbar1 %} {% if builder != 'qthelp' %} {{ relbar() }} {% endif %} {% endblock %}
+{% block relbar2 %} {% if builder != 'qthelp' %} {{ relbar() }} {% endif %} {% endblock %}
{% block extrahead %}
<link rel="shortcut icon" type="image/png" href="{{ pathto('_static/py.png', 1) }}" />
{% if not embedded %}<script type="text/javascript" src="{{ pathto('_static/copybutton.js', 1) }}"></script>{% endif %}
+ {% if versionswitcher is defined and not embedded %}<script type="text/javascript" src="{{ pathto('_static/version_switch.js', 1) }}"></script>{% endif %}
{{ super() }}
+ {% if builder == 'qthelp' %}
+ <style type="text/css">
+ body { background-color: white; }
+ div.document { background-color: white; }
+ </style>
+ {% endif %}
{% endblock %}
{% block footer %}
<div class="footer">
&copy; <a href="{{ pathto('copyright') }}">Copyright</a> {{ copyright|e }}.
<br />
- The Python Software Foundation is a non-profit corporation.
+ The Python Software Foundation is a non-profit corporation.
<a href="http://www.python.org/psf/donations/">Please donate.</a>
<br />
Last updated on {{ last_updated|e }}.
diff --git a/Doc/tools/sphinxext/pyspecific.py b/Doc/tools/sphinxext/pyspecific.py
index 4b91253..b90969e 100644
--- a/Doc/tools/sphinxext/pyspecific.py
+++ b/Doc/tools/sphinxext/pyspecific.py
@@ -5,7 +5,7 @@
Sphinx extension with Python doc-specific markup.
- :copyright: 2008, 2009, 2010 by Georg Brandl.
+ :copyright: 2008-2013 by Georg Brandl.
:license: Python license.
"""
@@ -13,7 +13,12 @@ ISSUE_URI = 'http://bugs.python.org/issue%s'
SOURCE_URI = 'http://hg.python.org/cpython/file/2.7/%s'
from docutils import nodes, utils
+
+import sphinx
from sphinx.util.nodes import split_explicit_title
+from sphinx.writers.html import HTMLTranslator
+from sphinx.writers.latex import LaTeXTranslator
+from sphinx.locale import versionlabels
# monkey-patch reST parser to disable alphabetic and roman enumerated lists
from docutils.parsers.rst.states import Body
@@ -22,20 +27,45 @@ Body.enum.converters['loweralpha'] = \
Body.enum.converters['lowerroman'] = \
Body.enum.converters['upperroman'] = lambda x: None
-# monkey-patch HTML translator to give versionmodified paragraphs a class
-def new_visit_versionmodified(self, node):
- self.body.append(self.starttag(node, 'p', CLASS=node['type']))
- text = versionlabels[node['type']] % node['version']
- if len(node):
- text += ': '
- else:
- text += '.'
- self.body.append('<span class="versionmodified">%s</span>' % text)
-
-from sphinx.writers.html import HTMLTranslator
-from sphinx.locale import versionlabels
-HTMLTranslator.visit_versionmodified = new_visit_versionmodified
-
+if sphinx.__version__[:3] < '1.2':
+ # monkey-patch HTML translator to give versionmodified paragraphs a class
+ def new_visit_versionmodified(self, node):
+ self.body.append(self.starttag(node, 'p', CLASS=node['type']))
+ text = versionlabels[node['type']] % node['version']
+ if len(node):
+ text += ': '
+ else:
+ text += '.'
+ self.body.append('<span class="versionmodified">%s</span>' % text)
+ HTMLTranslator.visit_versionmodified = new_visit_versionmodified
+
+# monkey-patch HTML and LaTeX translators to keep doctest blocks in the
+# doctest docs themselves
+orig_visit_literal_block = HTMLTranslator.visit_literal_block
+def new_visit_literal_block(self, node):
+ meta = self.builder.env.metadata[self.builder.current_docname]
+ old_trim_doctest_flags = self.highlighter.trim_doctest_flags
+ if 'keepdoctest' in meta:
+ self.highlighter.trim_doctest_flags = False
+ try:
+ orig_visit_literal_block(self, node)
+ finally:
+ self.highlighter.trim_doctest_flags = old_trim_doctest_flags
+
+HTMLTranslator.visit_literal_block = new_visit_literal_block
+
+orig_depart_literal_block = LaTeXTranslator.depart_literal_block
+def new_depart_literal_block(self, node):
+ meta = self.builder.env.metadata[self.curfilestack[-1]]
+ old_trim_doctest_flags = self.highlighter.trim_doctest_flags
+ if 'keepdoctest' in meta:
+ self.highlighter.trim_doctest_flags = False
+ try:
+ orig_depart_literal_block(self, node)
+ finally:
+ self.highlighter.trim_doctest_flags = old_trim_doctest_flags
+
+LaTeXTranslator.depart_literal_block = new_depart_literal_block
# Support for marking up and linking to bugs.python.org issues
@@ -154,11 +184,11 @@ pydoc_topic_labels = [
'bltin-null-object', 'bltin-type-objects', 'booleans',
'break', 'callable-types', 'calls', 'class', 'comparisons', 'compound',
'context-managers', 'continue', 'conversions', 'customization', 'debugger',
- 'del', 'dict', 'dynamic-features', 'else', 'exceptions', 'execmodel',
+ 'del', 'dict', 'dynamic-features', 'else', 'exceptions', 'exec', 'execmodel',
'exprlists', 'floating', 'for', 'formatstrings', 'function', 'global',
'id-classes', 'identifiers', 'if', 'imaginary', 'import', 'in', 'integers',
- 'lambda', 'lists', 'naming', 'nonlocal', 'numbers', 'numeric-types',
- 'objects', 'operator-summary', 'pass', 'power', 'raise', 'return',
+ 'lambda', 'lists', 'naming', 'numbers', 'numeric-types',
+ 'objects', 'operator-summary', 'pass', 'power', 'print', 'raise', 'return',
'sequence-types', 'shifting', 'slicings', 'specialattrs', 'specialnames',
'string-methods', 'strings', 'subscriptions', 'truth', 'try', 'types',
'typesfunctions', 'typesmapping', 'typesmethods', 'typesmodules',
diff --git a/Doc/tools/sphinxext/static/basic.css b/Doc/tools/sphinxext/static/basic.css
index 2b47622..21c3db2 100644
--- a/Doc/tools/sphinxext/static/basic.css
+++ b/Doc/tools/sphinxext/static/basic.css
@@ -38,7 +38,10 @@ div.related li.right {
/* -- sidebar --------------------------------------------------------------- */
div.sphinxsidebarwrapper {
+ position: relative;
+ top: 0;
padding: 10px 5px 0 10px;
+ word-wrap: break-word;
}
div.sphinxsidebar {
@@ -253,8 +256,8 @@ table.docutils {
table.docutils td, table.docutils th {
padding: 2px 5px 2px 5px;
- border-left: 0;
- background-color: #eef;
+ border-left: 0;
+ background-color: #eef;
}
table.docutils td p.last, table.docutils th p.last {
@@ -270,7 +273,7 @@ table.footnote td, table.footnote th {
}
table.docutils th {
- border-top: 1px solid #cac;
+ border-top: 1px solid #cac;
background-color: #ede;
}
@@ -280,7 +283,7 @@ th {
}
th.head {
- text-align: center;
+ text-align: center;
}
/* -- other body styles ----------------------------------------------------- */
@@ -333,10 +336,14 @@ dl.glossary dt {
font-style: italic;
}
-p.deprecated {
+.deprecated {
background-color: #ffe4e4;
border: 1px solid #f66;
- padding: 7px
+ padding: 7px;
+}
+
+div.deprecated p {
+ margin-bottom: 0;
}
.system-message {
diff --git a/Doc/tools/sphinxext/static/sidebar.js b/Doc/tools/sphinxext/static/sidebar.js
new file mode 100644
index 0000000..1bdd829
--- /dev/null
+++ b/Doc/tools/sphinxext/static/sidebar.js
@@ -0,0 +1,186 @@
+/*
+ * sidebar.js
+ * ~~~~~~~~~~
+ *
+ * This script makes the Sphinx sidebar collapsible and implements
+ * intelligent scrolling.
+ *
+ * .sphinxsidebar contains .sphinxsidebarwrapper. This script adds
+ * in .sphinxsidebar, after .sphinxsidebarwrapper, the #sidebarbutton
+ * used to collapse and expand the sidebar.
+ *
+ * When the sidebar is collapsed the .sphinxsidebarwrapper is hidden
+ * and the width of the sidebar and the margin-left of the document
+ * are decreased. When the sidebar is expanded the opposite happens.
+ * This script saves a per-browser/per-session cookie used to
+ * remember the position of the sidebar among the pages.
+ * Once the browser is closed the cookie is deleted and the position
+ * reset to the default (expanded).
+ *
+ * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+$(function() {
+ // global elements used by the functions.
+ // the 'sidebarbutton' element is defined as global after its
+ // creation, in the add_sidebar_button function
+ var jwindow = $(window);
+ var jdocument = $(document);
+ var bodywrapper = $('.bodywrapper');
+ var sidebar = $('.sphinxsidebar');
+ var sidebarwrapper = $('.sphinxsidebarwrapper');
+
+ // original margin-left of the bodywrapper and width of the sidebar
+ // with the sidebar expanded
+ var bw_margin_expanded = bodywrapper.css('margin-left');
+ var ssb_width_expanded = sidebar.width();
+
+ // margin-left of the bodywrapper and width of the sidebar
+ // with the sidebar collapsed
+ var bw_margin_collapsed = '.8em';
+ var ssb_width_collapsed = '.8em';
+
+ // colors used by the current theme
+ var dark_color = $('.related').css('background-color');
+ var light_color = $('.document').css('background-color');
+
+ function get_viewport_height() {
+ if (window.innerHeight)
+ return window.innerHeight;
+ else
+ return jwindow.height();
+ }
+
+ function sidebar_is_collapsed() {
+ return sidebarwrapper.is(':not(:visible)');
+ }
+
+ function toggle_sidebar() {
+ if (sidebar_is_collapsed())
+ expand_sidebar();
+ else
+ collapse_sidebar();
+ // adjust the scrolling of the sidebar
+ scroll_sidebar();
+ }
+
+ function collapse_sidebar() {
+ sidebarwrapper.hide();
+ sidebar.css('width', ssb_width_collapsed);
+ bodywrapper.css('margin-left', bw_margin_collapsed);
+ sidebarbutton.css({
+ 'margin-left': '0',
+ 'height': bodywrapper.height()
+ });
+ sidebarbutton.find('span').text('»');
+ sidebarbutton.attr('title', _('Expand sidebar'));
+ document.cookie = 'sidebar=collapsed';
+ }
+
+ function expand_sidebar() {
+ bodywrapper.css('margin-left', bw_margin_expanded);
+ sidebar.css('width', ssb_width_expanded);
+ sidebarwrapper.show();
+ sidebarbutton.css({
+ 'margin-left': ssb_width_expanded-12,
+ 'height': bodywrapper.height()
+ });
+ sidebarbutton.find('span').text('«');
+ sidebarbutton.attr('title', _('Collapse sidebar'));
+ document.cookie = 'sidebar=expanded';
+ }
+
+ function add_sidebar_button() {
+ sidebarwrapper.css({
+ 'float': 'left',
+ 'margin-right': '0',
+ 'width': ssb_width_expanded - 28
+ });
+ // create the button
+ sidebar.append(
+ '<div id="sidebarbutton"><span>&laquo;</span></div>'
+ );
+ var sidebarbutton = $('#sidebarbutton');
+ light_color = sidebarbutton.css('background-color');
+ // find the height of the viewport to center the '<<' in the page
+ var viewport_height = get_viewport_height();
+ sidebarbutton.find('span').css({
+ 'display': 'block',
+ 'margin-top': (viewport_height - sidebar.position().top - 20) / 2
+ });
+
+ sidebarbutton.click(toggle_sidebar);
+ sidebarbutton.attr('title', _('Collapse sidebar'));
+ sidebarbutton.css({
+ 'color': '#FFFFFF',
+ 'border-left': '1px solid ' + dark_color,
+ 'font-size': '1.2em',
+ 'cursor': 'pointer',
+ 'height': bodywrapper.height(),
+ 'padding-top': '1px',
+ 'margin-left': ssb_width_expanded - 12
+ });
+
+ sidebarbutton.hover(
+ function () {
+ $(this).css('background-color', dark_color);
+ },
+ function () {
+ $(this).css('background-color', light_color);
+ }
+ );
+ }
+
+ function set_position_from_cookie() {
+ if (!document.cookie)
+ return;
+ var items = document.cookie.split(';');
+ for(var k=0; k<items.length; k++) {
+ var key_val = items[k].split('=');
+ var key = key_val[0];
+ if (key == 'sidebar') {
+ var value = key_val[1];
+ if ((value == 'collapsed') && (!sidebar_is_collapsed()))
+ collapse_sidebar();
+ else if ((value == 'expanded') && (sidebar_is_collapsed()))
+ expand_sidebar();
+ }
+ }
+ }
+
+ add_sidebar_button();
+ var sidebarbutton = $('#sidebarbutton');
+ set_position_from_cookie();
+
+
+ /* intelligent scrolling */
+ function scroll_sidebar() {
+ var sidebar_height = sidebarwrapper.height();
+ var viewport_height = get_viewport_height();
+ var offset = sidebar.position()['top'];
+ var wintop = jwindow.scrollTop();
+ var winbot = wintop + viewport_height;
+ var curtop = sidebarwrapper.position()['top'];
+ var curbot = curtop + sidebar_height;
+ // does sidebar fit in window?
+ if (sidebar_height < viewport_height) {
+ // yes: easy case -- always keep at the top
+ sidebarwrapper.css('top', $u.min([$u.max([0, wintop - offset - 10]),
+ jdocument.height() - sidebar_height - 200]));
+ }
+ else {
+ // no: only scroll if top/bottom edge of sidebar is at
+ // top/bottom edge of window
+ if (curtop > wintop && curbot > winbot) {
+ sidebarwrapper.css('top', $u.max([wintop - offset - 10, 0]));
+ }
+ else if (curtop < wintop && curbot < winbot) {
+ sidebarwrapper.css('top', $u.min([winbot - sidebar_height - offset - 20,
+ jdocument.height() - sidebar_height - 200]));
+ }
+ }
+ }
+ jwindow.scroll(scroll_sidebar);
+});
diff --git a/Doc/tools/sphinxext/static/version_switch.js b/Doc/tools/sphinxext/static/version_switch.js
new file mode 100644
index 0000000..e5528eb
--- /dev/null
+++ b/Doc/tools/sphinxext/static/version_switch.js
@@ -0,0 +1,67 @@
+(function() {
+ 'use strict';
+
+ var all_versions = {
+ '3.5': 'dev (3.5)',
+ '3.4': '3.4',
+ '3.3': '3.3',
+ '3.2': '3.2',
+ '2.7': '2.7',
+ '2.6': '2.6'
+ };
+
+ function build_select(current_version, current_release) {
+ var buf = ['<select>'];
+
+ $.each(all_versions, function(version, title) {
+ buf.push('<option value="' + version + '"');
+ if (version == current_version)
+ buf.push(' selected="selected">' + current_release + '</option>');
+ else
+ buf.push('>' + title + '</option>');
+ });
+
+ buf.push('</select>');
+ return buf.join('');
+ }
+
+ function patch_url(url, new_version) {
+ var url_re = /\.org\/(\d|py3k|dev|((release\/)?\d\.\d[\w\d\.]*))\//,
+ new_url = url.replace(url_re, '.org/' + new_version + '/');
+
+ if (new_url == url && !new_url.match(url_re)) {
+ // python 2 url without version?
+ new_url = url.replace(/\.org\//, '.org/' + new_version + '/');
+ }
+ return new_url;
+ }
+
+ function on_switch() {
+ var selected = $(this).children('option:selected').attr('value');
+
+ var url = window.location.href,
+ new_url = patch_url(url, selected);
+
+ if (new_url != url) {
+ // check beforehand if url exists, else redirect to version's start page
+ $.ajax({
+ url: new_url,
+ success: function() {
+ window.location.href = new_url;
+ },
+ error: function() {
+ window.location.href = 'http://docs.python.org/' + selected;
+ }
+ });
+ }
+ }
+
+ $(document).ready(function() {
+ var release = DOCUMENTATION_OPTIONS.VERSION;
+ var version = release.substr(0, 3);
+ var select = build_select(version, release);
+
+ $('.version_switcher_placeholder').html(select);
+ $('.version_switcher_placeholder select').bind('change', on_switch);
+ });
+})();
diff --git a/Doc/tools/sphinxext/susp-ignored.csv b/Doc/tools/sphinxext/susp-ignored.csv
index 3785e96..dc719e5 100644
--- a/Doc/tools/sphinxext/susp-ignored.csv
+++ b/Doc/tools/sphinxext/susp-ignored.csv
@@ -1,9 +1,6 @@
c-api/arg,,:ref,"PyArg_ParseTuple(args, ""O|O:ref"", &object, &callback)"
c-api/list,,:high,list[low:high]
-c-api/list,,:high,list[low:high] = itemlist
c-api/sequence,,:i2,o[i1:i2]
-c-api/sequence,,:i2,o[i1:i2] = v
-c-api/sequence,,:i2,del o[i1:i2]
c-api/unicode,,:end,str[start:end]
distutils/setupscript,,::,
extending/embedding,,:numargs,"if(!PyArg_ParseTuple(args, "":numargs""))"
@@ -11,7 +8,10 @@ extending/extending,,:set,"if (PyArg_ParseTuple(args, ""O:set_callback"", &temp)
extending/extending,,:myfunction,"PyArg_ParseTuple(args, ""D:myfunction"", &c);"
extending/newtypes,,:call,"if (!PyArg_ParseTuple(args, ""sss:call"", &arg1, &arg2, &arg3)) {"
extending/windows,,:initspam,/export:initspam
-howto/cporting,,:add,"if (!PyArg_ParseTuple(args, ""ii:add_ints"", &one, &two))"
+faq/programming,,:reduce,"print (lambda Ru,Ro,Iu,Io,IM,Sx,Sy:reduce(lambda x,y:x+y,map(lambda y,"
+faq/programming,,:reduce,"Sx=Sx,Sy=Sy:reduce(lambda x,y:x+y,map(lambda x,xc=Ru,yc=yc,Ru=Ru,Ro=Ro,"
+faq/programming,,:chr,">=4.0) or 1+f(xc,yc,x*x-y*y+xc,2.0*x*y+yc,k-1,f):f(xc,yc,x,y,k,f):chr("
+faq/programming,,::,for x in sequence[::-1]:
howto/cporting,,:encode,"if (!PyArg_ParseTuple(args, ""O:encode_object"", &myobj))"
howto/cporting,,:say,"if (!PyArg_ParseTuple(args, ""U:say_hello"", &name))"
howto/curses,,:black,"They are: 0:black, 1:red, 2:green, 3:yellow, 4:blue, 5:magenta, 6:cyan, and"
@@ -22,11 +22,36 @@ howto/curses,,:magenta,"They are: 0:black, 1:red, 2:green, 3:yellow, 4:blue, 5:m
howto/curses,,:red,"They are: 0:black, 1:red, 2:green, 3:yellow, 4:blue, 5:magenta, 6:cyan, and"
howto/curses,,:white,"7:white."
howto/curses,,:yellow,"They are: 0:black, 1:red, 2:green, 3:yellow, 4:blue, 5:magenta, 6:cyan, and"
+howto/logging,,:root,WARNING:root:Watch out!
+howto/logging,,:Watch,WARNING:root:Watch out!
+howto/logging,,:root,DEBUG:root:This message should go to the log file
+howto/logging,,:root,INFO:root:So should this
+howto/logging,,:So,INFO:root:So should this
+howto/logging,,:root,"WARNING:root:And this, too"
+howto/logging,,:And,"WARNING:root:And this, too"
+howto/logging,,:root,INFO:root:Started
+howto/logging,,:Started,INFO:root:Started
+howto/logging,,:root,INFO:root:Doing something
+howto/logging,,:Doing,INFO:root:Doing something
+howto/logging,,:root,INFO:root:Finished
+howto/logging,,:Finished,INFO:root:Finished
+howto/logging,,:root,WARNING:root:Look before you leap!
+howto/logging,,:Look,WARNING:root:Look before you leap!
+howto/logging,,:This,DEBUG:This message should appear on the console
+howto/logging,,:So,INFO:So should this
+howto/logging,,:And,"WARNING:And this, too"
+howto/logging,,:logger,severity:logger name:message
+howto/logging,,:message,severity:logger name:message
+howto/logging,,:This,DEBUG:root:This message should go to the log file
+howto/pyporting,,::,Programming Language :: Python :: 2
+howto/pyporting,,::,Programming Language :: Python :: 3
howto/regex,,::,
howto/regex,,:foo,(?:foo)
howto/urllib2,,:example,"for example ""joe@password:example.com"""
-howto/webservers,,.. image:,.. image:: http.png
library/audioop,,:ipos,"# factor = audioop.findfactor(in_test[ipos*2:ipos*2+len(out_test)],"
+library/bisect,,:hi,all(val >= x for val in a[i:hi])
+library/bisect,,:hi,all(val > x for val in a[i:hi])
+library/cookie,,`,!#$%&'*+-.^_`|~
library/datetime,,:MM,
library/datetime,,:SS,
library/decimal,,:optional,"trailneg:optional trailing minus indicator"
@@ -40,22 +65,18 @@ library/dis,,`,TOS = `TOS`
library/doctest,,`,``factorial`` from the ``example`` module:
library/doctest,,`,The ``example`` module
library/doctest,,`,Using ``factorial``
+library/exceptions,,:err,err.object[err.start:err.end]
library/functions,,:step,a[start:stop:step]
library/functions,,:stop,"a[start:stop, i]"
library/functions,,:stop,a[start:stop:step]
library/hotshot,,:lineno,"ncalls tottime percall cumtime percall filename:lineno(function)"
library/httplib,,:port,host:port
-library/imaplib,,:MM,"""DD-Mmm-YYYY HH:MM:SS +HHMM"""
-library/imaplib,,:SS,"""DD-Mmm-YYYY HH:MM:SS +HHMM"""
+library/imaplib,,:MM,"""DD-Mmm-YYYY HH:MM:SS"
+library/imaplib,,:SS,"""DD-Mmm-YYYY HH:MM:SS"
library/itertools,,:stop,elements from seq[start:stop:step]
library/itertools,,:step,elements from seq[start:stop:step]
library/linecache,,:sys,"sys:x:3:3:sys:/dev:/bin/sh"
-library/logging,,:And,
-library/logging,,:package1,
-library/logging,,:package2,
-library/logging,,:root,
-library/logging,,:This,
-library/logging,,:port,host:port
+library/logging.handlers,,:port,host:port
library/mmap,,:i2,obj[i1:i2]
library/multiprocessing,,:queue,">>> QueueManager.register('get_queue', callable=lambda:queue)"
library/multiprocessing,,`,">>> l._callmethod('__getitem__', (20,)) # equiv to `l[20]`"
@@ -65,31 +86,26 @@ library/multiprocessing,,`,# `Pool.imap()` (which will save on the amount of cod
library/multiprocessing,,`,# A test file for the `multiprocessing` package
library/multiprocessing,,`,# A test of `multiprocessing.Pool` class
library/multiprocessing,,`,# Add more tasks using `put()`
-library/multiprocessing,,`,# create server for a `HostManager` object
-library/multiprocessing,,`,# Depends on `multiprocessing` package -- tested with `processing-0.60`
library/multiprocessing,,`,# in the original order then consider using `Pool.map()` or
library/multiprocessing,,`,# Not sure if we should synchronize access to `socket.accept()` method by
library/multiprocessing,,`,# object. (We import `multiprocessing.reduction` to enable this pickling.)
library/multiprocessing,,`,# register the Foo class; make `f()` and `g()` accessible via proxy
library/multiprocessing,,`,# register the Foo class; make `g()` and `_h()` accessible via proxy
library/multiprocessing,,`,# register the generator function baz; use `GeneratorProxy` to make proxies
-library/multiprocessing,,`,`Cluster` is a subclass of `SyncManager` so it allows creation of
-library/multiprocessing,,`,`hostname` gives the name of the host. If hostname is not
-library/multiprocessing,,`,`slots` is used to specify the number of slots for processes on
library/optparse,,:len,"del parser.rargs[:len(value)]"
library/os.path,,:foo,c:foo
-library/parser,,`,"""Make a function that raises an argument to the exponent `exp`."""
+library/pdb,,:lineno,filename:lineno
library/posix,,`,"CFLAGS=""`getconf LFS_CFLAGS`"" OPT=""-g -O2 $CFLAGS"""
library/profile,,:lineno,ncalls tottime percall cumtime percall filename:lineno(function)
library/profile,,:lineno,filename:lineno(function)
library/pyexpat,,:elem1,<py:elem1 />
library/pyexpat,,:py,"xmlns:py = ""http://www.python.org/ns/"">"
-library/repr,,`,"return `obj`"
-library/smtplib,,:port,"as well as a regular host:port server."
+library/smtplib,,:port,method must support that as well as a regular host:port
library/socket,,::,'5aef:2b::8'
+library/socket,,::,"(10, 1, 6, '', ('2001:888:2000:d::a2', 80, 0, 0))]"
library/sqlite3,,:memory,
-library/sqlite3,,:age,"select name_last, age from people where name_last=:who and age=:age"
-library/sqlite3,,:who,"select name_last, age from people where name_last=:who and age=:age"
+library/sqlite3,,:who,"cur.execute(""select * from people where name_last=:who and age=:age"", {""who"": who, ""age"": age})"
+library/sqlite3,,:age,"cur.execute(""select * from people where name_last=:who and age=:age"", {""who"": who, ""age"": age})"
library/ssl,,:My,"Organization Name (eg, company) [Internet Widgits Pty Ltd]:My Organization, Inc."
library/ssl,,:My,"Organizational Unit Name (eg, section) []:My Group"
library/ssl,,:myserver,"Common Name (eg, YOUR name) []:myserver.mygroup.myorganization.com"
@@ -98,8 +114,7 @@ library/ssl,,:ops,Email Address []:ops@myserver.mygroup.myorganization.com
library/ssl,,:Some,"Locality Name (eg, city) []:Some City"
library/ssl,,:US,Country Name (2 letter code) [AU]:US
library/stdtypes,,:len,s[len(s):len(s)]
-library/stdtypes,,:len,s[len(s):len(s)]
-library/string,,:end,s[start:end]
+library/stdtypes,,:end,s[start:end]
library/string,,:end,s[start:end]
library/subprocess,,`,"output=`mycmd myarg`"
library/subprocess,,`,"output=`dmesg | grep hda`"
@@ -111,6 +126,7 @@ library/time,,:ss,
library/turtle,,::,Example::
library/urllib,,:port,:port
library/urllib2,,:password,"""joe:password@python.org"""
+library/urllib2,,:close,Connection:close
library/uuid,,:uuid,urn:uuid:12345678-1234-5678-1234-567812345678
library/xmlrpclib,,:pass,http://user:pass@host:port/path
library/xmlrpclib,,:pass,user:pass
@@ -118,14 +134,15 @@ library/xmlrpclib,,:port,http://user:pass@host:port/path
license,,`,THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
license,,:zooko,mailto:zooko@zooko.com
license,,`,THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+license,,`,* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+license,,`,* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+license,,`,"``Software''), to deal in the Software without restriction, including"
+license,,`,"THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,"
reference/datamodel,,:step,a[i:j:step]
reference/datamodel,,:max,
reference/expressions,,:index,x[index:index]
-reference/expressions,,:datum,{key:datum...}
reference/expressions,,`,`expressions...`
-reference/grammar,,:output,#diagram:output
-reference/grammar,,:rules,#diagram:rules
-reference/grammar,,:token,#diagram:token
+reference/expressions,,`,"""`"""
reference/grammar,,`,'`' testlist1 '`'
reference/lexical_analysis,,:fileencoding,# vim:fileencoding=<encoding-name>
reference/lexical_analysis,,`,", : . ` = ;"
@@ -148,8 +165,7 @@ using/cmdline,,:line,action:message:category:module:line
using/cmdline,,:message,action:message:category:module:line
using/cmdline,,:module,action:message:category:module:line
using/cmdline,,:errorhandler,:errorhandler
-using/windows,162,`,`` this fixes syntax highlighting errors in some editors due to the \\\\ hackery
-using/windows,170,`,``
+using/unix,,:Packaging,http://en.opensuse.org/Portal:Packaging
whatsnew/2.0,418,:len,
whatsnew/2.3,,::,
whatsnew/2.3,,:config,
@@ -163,36 +179,8 @@ whatsnew/2.4,,:System,
whatsnew/2.5,,:memory,:memory:
whatsnew/2.5,,:step,[start:stop:step]
whatsnew/2.5,,:stop,[start:stop:step]
-distutils/examples,267,`,This is the description of the ``foobar`` package.
-faq/programming,,:reduce,"print (lambda Ru,Ro,Iu,Io,IM,Sx,Sy:reduce(lambda x,y:x+y,map(lambda y,"
-faq/programming,,:reduce,"Sx=Sx,Sy=Sy:reduce(lambda x,y:x+y,map(lambda x,xc=Ru,yc=yc,Ru=Ru,Ro=Ro,"
-faq/programming,,:chr,">=4.0) or 1+f(xc,yc,x*x-y*y+xc,2.0*x*y+yc,k-1,f):f(xc,yc,x,y,k,f):chr("
-faq/programming,,::,for x in sequence[::-1]:
-faq/windows,229,:EOF,@setlocal enableextensions & python -x %~f0 %* & goto :EOF
-faq/windows,393,:REG,.py :REG_SZ: c:\<path to python>\python.exe -u %s %s
-library/bisect,,:hi,all(val >= x for val in a[i:hi])
-library/bisect,,:hi,all(val > x for val in a[i:hi])
-library/http.client,52,:port,host:port
-library/nntplib,,:bytes,:bytes
-library/nntplib,,:lines,:lines
-library/nntplib,,:lines,"['xref', 'from', ':lines', ':bytes', 'references', 'date', 'message-id', 'subject']"
-library/nntplib,,:bytes,"['xref', 'from', ':lines', ':bytes', 'references', 'date', 'message-id', 'subject']"
-library/pickle,567,:memory,"conn = sqlite3.connect("":memory:"")"
-library/profile,293,:lineno,"(sort by filename:lineno),"
-library/socket,,::,"(10, 1, 6, '', ('2001:888:2000:d::a2', 80, 0, 0))]"
-library/stdtypes,,:end,s[start:end]
-library/stdtypes,,:end,s[start:end]
-license,,`,* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
-license,,`,* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
-license,,`,"``Software''), to deal in the Software without restriction, including"
-license,,`,"THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,"
-reference/lexical_analysis,704,`,$ ? `
whatsnew/2.7,735,:Sunday,'2009:4:Sunday'
whatsnew/2.7,862,::,"export PYTHONWARNINGS=all,error:::Cookie:0"
whatsnew/2.7,862,:Cookie,"export PYTHONWARNINGS=all,error:::Cookie:0"
whatsnew/2.7,,::,>>> urlparse.urlparse('http://[1080::8:800:200C:417A]/foo')
whatsnew/2.7,,::,"ParseResult(scheme='http', netloc='[1080::8:800:200C:417A]',"
-howto/pyporting,75,::,# make sure to use :: Python *and* :: Python :: 3 so
-howto/pyporting,75,::,"'Programming Language :: Python',"
-howto/pyporting,75,::,'Programming Language :: Python :: 3'
-library/urllib2,67,:close,Connection:close
diff --git a/Doc/tools/sphinxext/suspicious.py b/Doc/tools/sphinxext/suspicious.py
index f15e931..e397560 100644
--- a/Doc/tools/sphinxext/suspicious.py
+++ b/Doc/tools/sphinxext/suspicious.py
@@ -66,6 +66,10 @@ class Rule:
# None -> don't care
self.issue = issue # the markup fragment that triggered this rule
self.line = line # text of the container element (single line only)
+ self.used = False
+
+ def __repr__(self):
+ return '{0.docname},,{0.issue},{0.line}'.format(self)
@@ -105,6 +109,12 @@ class CheckSuspiciousMarkupBuilder(Builder):
doctree.walk(visitor)
def finish(self):
+ unused_rules = [rule for rule in self.rules if not rule.used]
+ if unused_rules:
+ self.warn('Found %s/%s unused rules:' %
+ (len(unused_rules), len(self.rules)))
+ for rule in unused_rules:
+ self.info(repr(rule))
return
def check_issue(self, line, lineno, issue):
@@ -129,6 +139,7 @@ class CheckSuspiciousMarkupBuilder(Builder):
if (rule.lineno is not None) and \
abs(rule.lineno - lineno) > 5: continue
# if it came this far, the rule matched
+ rule.used = True
return True
return False
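As a rough sketch (the constructor signature here is assumed from the fields visible above, not copied from the patch), the new ``used`` flag and ``__repr__`` work together: ``finish()`` reports every rule that never matched, and each repr line mirrors a ``susp-ignored.csv`` row so the stale entry can be located in the CSV directly::

    class Rule(object):
        def __init__(self, docname, lineno, issue, line):
            self.docname = docname   # document the rule applies to
            self.lineno = lineno     # approximate line number, or None
            self.issue = issue       # the markup fragment that triggered this rule
            self.line = line         # text of the container element (single line only)
            self.used = False        # flipped to True the first time the rule matches

        def __repr__(self):
            # same layout as a susp-ignored.csv row: docname,,issue,line
            return '{0.docname},,{0.issue},{0.line}'.format(self)

    rule = Rule('library/xmlrpclib', None, ':pass', 'user:pass')
    print repr(rule)    # library/xmlrpclib,,:pass,user:pass
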
diff --git a/Doc/tutorial/classes.rst b/Doc/tutorial/classes.rst
index 9f115b0..60d382c 100644
--- a/Doc/tutorial/classes.rst
+++ b/Doc/tutorial/classes.rst
@@ -337,6 +337,77 @@ object and the argument list, and the function object is called with this new
argument list.
+.. _tut-class-and-instance-variables:
+
+Class and Instance Variables
+----------------------------
+
+Generally speaking, instance variables are for data unique to each instance
+and class variables are for attributes and methods shared by all instances
+of the class::
+
+ class Dog:
+
+ kind = 'canine' # class variable shared by all instances
+
+ def __init__(self, name):
+ self.name = name # instance variable unique to each instance
+
+ >>> d = Dog('Fido')
+ >>> e = Dog('Buddy')
+ >>> d.kind # shared by all dogs
+ 'canine'
+ >>> e.kind # shared by all dogs
+ 'canine'
+ >>> d.name # unique to d
+ 'Fido'
+ >>> e.name # unique to e
+ 'Buddy'
+
+As discussed in :ref:`tut-object`, shared data can have possibly surprising
+effects when it involves :term:`mutable` objects such as lists and dictionaries.
+For example, the *tricks* list in the following code should not be used as a
+class variable because just a single list would be shared by all *Dog*
+instances::
+
+ class Dog:
+
+ tricks = [] # mistaken use of a class variable
+
+ def __init__(self, name):
+ self.name = name
+
+ def add_trick(self, trick):
+ self.tricks.append(trick)
+
+ >>> d = Dog('Fido')
+ >>> e = Dog('Buddy')
+ >>> d.add_trick('roll over')
+ >>> e.add_trick('play dead')
+ >>> d.tricks # unexpectedly shared by all dogs
+ ['roll over', 'play dead']
+
+Correct design of the class should use an instance variable instead::
+
+ class Dog:
+
+ def __init__(self, name):
+ self.name = name
+ self.tricks = [] # creates a new empty list for each dog
+
+ def add_trick(self, trick):
+ self.tricks.append(trick)
+
+ >>> d = Dog('Fido')
+ >>> e = Dog('Buddy')
+ >>> d.add_trick('roll over')
+ >>> e.add_trick('play dead')
+ >>> d.tricks
+ ['roll over']
+ >>> e.tricks
+ ['play dead']
+
+
.. _tut-remarks:
Random Remarks
@@ -534,8 +605,8 @@ http://www.python.org/download/releases/2.3/mro/.
.. _tut-private:
-Private Variables
-=================
+Private Variables and Class-local References
+============================================
"Private" instance variables that cannot be accessed except from inside an
object don't exist in Python. However, there is a convention that is followed
@@ -609,7 +680,7 @@ will do nicely::
A piece of Python code that expects a particular abstract data type can often be
passed a class that emulates the methods of that data type instead. For
instance, if you have a function that formats some data from a file object, you
-can define a class with methods :meth:`read` and :meth:`readline` that get the
+can define a class with methods :meth:`read` and :meth:`!readline` that get the
data from a string buffer instead, and pass it as an argument.
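As an illustration only (the class and its name are invented here, not taken from the patch), such a stand-in for a file object could look roughly like this::

    class StringReader(object):
        """Emulates just the file methods the formatting code needs."""
        def __init__(self, text):
            self.lines = text.splitlines(True)   # keep the newline characters
            self.pos = 0

        def read(self):
            rest = ''.join(self.lines[self.pos:])
            self.pos = len(self.lines)
            return rest

        def readline(self):
            if self.pos == len(self.lines):
                return ''
            line = self.lines[self.pos]
            self.pos += 1
            return line

    reader = StringReader('first line\nsecond line\n')
    print reader.readline(),     # code expecting an open file works unchanged
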
.. (Unfortunately, this technique has its limitations: a class can't define
@@ -688,14 +759,15 @@ using a :keyword:`for` statement::
for char in "123":
print char
for line in open("myfile.txt"):
- print line
+ print line,
This style of access is clear, concise, and convenient. The use of iterators
pervades and unifies Python. Behind the scenes, the :keyword:`for` statement
calls :func:`iter` on the container object. The function returns an iterator
-object that defines the method :meth:`next` which accesses elements in the
-container one at a time. When there are no more elements, :meth:`next` raises a
-:exc:`StopIteration` exception which tells the :keyword:`for` loop to terminate.
+object that defines the method :meth:`~iterator.next` which accesses elements
+in the container one at a time. When there are no more elements,
+:meth:`~iterator.next` raises a :exc:`StopIteration` exception which tells the
+:keyword:`for` loop to terminate.
This example shows how it all works::
>>> s = 'abc'
diff --git a/Doc/tutorial/controlflow.rst b/Doc/tutorial/controlflow.rst
index 24c02eb..8ffaf3f 100644
--- a/Doc/tutorial/controlflow.rst
+++ b/Doc/tutorial/controlflow.rst
@@ -19,14 +19,14 @@ example::
>>> x = int(raw_input("Please enter an integer: "))
Please enter an integer: 42
>>> if x < 0:
- ... x = 0
- ... print 'Negative changed to zero'
+ ... x = 0
+ ... print 'Negative changed to zero'
... elif x == 0:
- ... print 'Zero'
+ ... print 'Zero'
... elif x == 1:
- ... print 'Single'
+ ... print 'Single'
... else:
- ... print 'More'
+ ... print 'More'
...
More
@@ -59,24 +59,24 @@ they appear in the sequence. For example (no pun intended):
::
>>> # Measure some strings:
- ... a = ['cat', 'window', 'defenestrate']
- >>> for x in a:
- ... print x, len(x)
+ ... words = ['cat', 'window', 'defenestrate']
+ >>> for w in words:
+ ... print w, len(w)
...
cat 3
window 6
defenestrate 12
-It is not safe to modify the sequence being iterated over in the loop (this can
-only happen for mutable sequence types, such as lists). If you need to modify
-the list you are iterating over (for example, to duplicate selected items) you
-must iterate over a copy. The slice notation makes this particularly
-convenient::
+If you need to modify the sequence you are iterating over while inside the loop
+(for example to duplicate selected items), it is recommended that you first
+make a copy. Iterating over a sequence does not implicitly make a copy. The
+slice notation makes this especially convenient::
- >>> for x in a[:]: # make a slice copy of the entire list
- ... if len(x) > 6: a.insert(0, x)
+ >>> for w in words[:]: # Loop over a slice copy of the entire list.
+ ... if len(w) > 6:
+ ... words.insert(0, w)
...
- >>> a
+ >>> words
['defenestrate', 'cat', 'window', 'defenestrate']
@@ -129,9 +129,6 @@ function, see :ref:`tut-loopidioms`.
The :keyword:`break` statement, like in C, breaks out of the smallest enclosing
:keyword:`for` or :keyword:`while` loop.
-The :keyword:`continue` statement, also borrowed from C, continues with the next
-iteration of the loop.
-
Loop statements may have an ``else`` clause; it is executed when the loop
terminates through exhaustion of the list (with :keyword:`for`) or when the
condition becomes false (with :keyword:`while`), but not when the loop is
@@ -159,6 +156,30 @@ following loop, which searches for prime numbers::
(Yes, this is the correct code. Look closely: the ``else`` clause belongs to
the :keyword:`for` loop, **not** the :keyword:`if` statement.)
+When used with a loop, the ``else`` clause has more in common with the
+``else`` clause of a :keyword:`try` statement than it does that of
+:keyword:`if` statements: a :keyword:`try` statement's ``else`` clause runs
+when no exception occurs, and a loop's ``else`` clause runs when no ``break``
+occurs. For more on the :keyword:`try` statement and exceptions, see
+:ref:`tut-handling`.
+
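A small illustration of that parallel (not part of the original text): the loop's ``else`` body runs only because no ``break`` was reached::

    >>> for n in [2, 4, 6]:
    ...     if n % 2:
    ...         print n, 'is odd; stopping early'
    ...         break
    ... else:
    ...     print 'no odd numbers found'
    ...
    no odd numbers found
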
+The :keyword:`continue` statement, also borrowed from C, continues with the next
+iteration of the loop::
+
+ >>> for num in range(2, 10):
+ ... if num % 2 == 0:
+ ... print "Found an even number", num
+ ... continue
+ ... print "Found a number", num
+ Found an even number 2
+ Found a number 3
+ Found an even number 4
+ Found a number 5
+ Found an even number 6
+ Found a number 7
+ Found an even number 8
+ Found a number 9
+
.. _tut-pass:
@@ -520,17 +541,16 @@ In the same fashion, dictionaries can deliver keyword arguments with the ``**``\
.. _tut-lambda:
-Lambda Forms
-------------
+Lambda Expressions
+------------------
-By popular demand, a few features commonly found in functional programming
-languages like Lisp have been added to Python. With the :keyword:`lambda`
-keyword, small anonymous functions can be created. Here's a function that
-returns the sum of its two arguments: ``lambda a, b: a+b``. Lambda forms can be
-used wherever function objects are required. They are syntactically restricted
-to a single expression. Semantically, they are just syntactic sugar for a
-normal function definition. Like nested function definitions, lambda forms can
-reference variables from the containing scope::
+Small anonymous functions can be created with the :keyword:`lambda` keyword.
+This function returns the sum of its two arguments: ``lambda a, b: a+b``.
+Lambda functions can be used wherever function objects are required. They are
+syntactically restricted to a single expression. Semantically, they are just
+syntactic sugar for a normal function definition. Like nested function
+definitions, lambda functions can reference variables from the containing
+scope::
>>> def make_incrementor(n):
... return lambda x: x + n
@@ -541,6 +561,14 @@ reference variables from the containing scope::
>>> f(1)
43
+The above example uses a lambda expression to return a function. Another use
+is to pass a small function as an argument::
+
+ >>> pairs = [(1, 'one'), (2, 'two'), (3, 'three'), (4, 'four')]
+ >>> pairs.sort(key=lambda pair: pair[1])
+ >>> pairs
+ [(4, 'four'), (1, 'one'), (3, 'three'), (2, 'two')]
+
.. _tut-docstrings:
diff --git a/Doc/tutorial/datastructures.rst b/Doc/tutorial/datastructures.rst
index 489a336..acc2cc1 100644
--- a/Doc/tutorial/datastructures.rst
+++ b/Doc/tutorial/datastructures.rst
@@ -68,10 +68,11 @@ objects:
Return the number of times *x* appears in the list.
-.. method:: list.sort()
+.. method:: list.sort(cmp=None, key=None, reverse=False)
:noindex:
- Sort the items of the list, in place.
+ Sort the items of the list in place (the arguments can be used for sort
+ customization, see :func:`sorted` for their explanation).
.. method:: list.reverse()
@@ -99,6 +100,15 @@ An example that uses most of the list methods::
>>> a.sort()
>>> a
[-1, 1, 66.25, 333, 333, 1234.5]
+ >>> a.pop()
+ 1234.5
+ >>> a
+ [-1, 1, 66.25, 333, 333]
+
+You might have noticed that methods like ``insert``, ``remove`` or ``sort`` that
+only modify the list have no return value printed -- they return the default
+``None``. [1]_ This is a design principle for all mutable data structures in
+Python.
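A quick, illustrative check of that convention::

    >>> a = [66.25, 333, 1, 1234.5]
    >>> result = a.sort()
    >>> print result
    None
    >>> a
    [1, 66.25, 333, 1234.5]
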
.. _tut-lists-as-stacks:
@@ -171,7 +181,7 @@ There are three built-in functions that are very useful when used with lists:
the sequence for which ``function(item)`` is true. If *sequence* is a
:class:`string` or :class:`tuple`, the result will be of the same type;
otherwise, it is always a :class:`list`. For example, to compute a sequence of
-numbers not divisible by 2 and 3::
+numbers not divisible by 2 or 3::
>>> def f(x): return x % 2 != 0 and x % 3 != 0
...
@@ -229,8 +239,7 @@ Don't use this example's definition of :func:`sum`: since summing numbers is
such a common need, a built-in function ``sum(sequence)`` is already provided,
and works exactly like this.
-.. versionadded:: 2.3
-
+.. _tut-listcomps:
List Comprehensions
-------------------
@@ -423,17 +432,31 @@ A tuple consists of a number of values separated by commas, for instance::
... u = t, (1, 2, 3, 4, 5)
>>> u
((12345, 54321, 'hello!'), (1, 2, 3, 4, 5))
+ >>> # Tuples are immutable:
+ ... t[0] = 88888
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ TypeError: 'tuple' object does not support item assignment
+ >>> # but they can contain mutable objects:
+ ... v = ([1, 2, 3], [3, 2, 1])
+ >>> v
+ ([1, 2, 3], [3, 2, 1])
+
As you see, on output tuples are always enclosed in parentheses, so that nested
tuples are interpreted correctly; they may be input with or without surrounding
parentheses, although often parentheses are necessary anyway (if the tuple is
-part of a larger expression).
-
-Tuples have many uses. For example: (x, y) coordinate pairs, employee records
-from a database, etc. Tuples, like strings, are immutable: it is not possible
-to assign to the individual items of a tuple (you can simulate much of the same
-effect with slicing and concatenation, though). It is also possible to create
-tuples which contain mutable objects, such as lists.
+part of a larger expression). It is not possible to assign to the individual
+items of a tuple; however, it is possible to create tuples which contain mutable
+objects, such as lists.
+
+Though tuples may seem similar to lists, they are often used in different
+situations and for different purposes.
+Tuples are :term:`immutable`, and usually contain a heterogeneous sequence of
+elements that are accessed via unpacking (see later in this section) or indexing
+(or even by attribute in the case of :func:`namedtuples <collections.namedtuple>`).
+Lists are :term:`mutable`, and their elements are usually homogeneous and are
+accessed by iterating over the list.
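For instance (an illustrative sketch, not part of the original text), a tuple is typically unpacked into named positions, while a list is walked element by element::

    >>> record = ('guido', 4127, 'developer')   # heterogeneous, fixed layout
    >>> name, number, role = record             # access by unpacking
    >>> name
    'guido'
    >>> numbers = [2, 4, 6, 8]                  # homogeneous, variable length
    >>> for n in numbers:                       # access by iteration
    ...     print n * n,
    ...
    4 16 36 64
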
A special problem is the construction of tuples containing 0 or 1 items: the
syntax has some extra quirks to accommodate these. Empty tuples are constructed
@@ -462,8 +485,6 @@ variables on the left to have the same number of elements as the length of the
sequence. Note that multiple assignment is really just a combination of tuple
packing and sequence unpacking.
-.. XXX Add a bit on the difference between tuples and lists.
-
.. _tut-sets:
@@ -475,6 +496,10 @@ with no duplicate elements. Basic uses include membership testing and
eliminating duplicate entries. Set objects also support mathematical operations
like union, intersection, difference, and symmetric difference.
+Curly braces or the :func:`set` function can be used to create sets. Note: to
+create an empty set you have to use ``set()``, not ``{}``; the latter creates an
+empty dictionary, a data structure that we discuss in the next section.
+
Here is a brief demonstration::
>>> basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']
@@ -501,6 +526,13 @@ Here is a brief demonstration::
>>> a ^ b # letters in a or b but not both
set(['r', 'd', 'b', 'm', 'z', 'l'])
+Similarly to :ref:`list comprehensions <tut-listcomps>`, set comprehensions
+are also supported::
+
+ >>> a = {x for x in 'abracadabra' if x not in 'abc'}
+ >>> a
+ set(['r', 'd'])
+
.. _tut-dictionaries:
@@ -552,18 +584,17 @@ Here is a small example using a dictionary::
>>> 'guido' in tel
True
-The :func:`dict` constructor builds dictionaries directly from lists of
-key-value pairs stored as tuples. When the pairs form a pattern, list
-comprehensions can compactly specify the key-value list. ::
+The :func:`dict` constructor builds dictionaries directly from sequences of
+key-value pairs::
>>> dict([('sape', 4139), ('guido', 4127), ('jack', 4098)])
{'sape': 4139, 'jack': 4098, 'guido': 4127}
- >>> dict([(x, x**2) for x in (2, 4, 6)]) # use a list comprehension
- {2: 4, 4: 16, 6: 36}
-Later in the tutorial, we will learn about Generator Expressions which are even
-better suited for the task of supplying key-values pairs to the :func:`dict`
-constructor.
+In addition, dict comprehensions can be used to create dictionaries from
+arbitrary key and value expressions::
+
+ >>> {x: x**2 for x in (2, 4, 6)}
+ {2: 4, 4: 16, 6: 36}
When the keys are simple strings, it is sometimes easier to specify pairs using
keyword arguments::
@@ -577,16 +608,6 @@ keyword arguments::
Looping Techniques
==================
-When looping through dictionaries, the key and corresponding value can be
-retrieved at the same time using the :meth:`iteritems` method. ::
-
- >>> knights = {'gallahad': 'the pure', 'robin': 'the brave'}
- >>> for k, v in knights.iteritems():
- ... print k, v
- ...
- gallahad the pure
- robin the brave
-
When looping through a sequence, the position index and corresponding value can
be retrieved at the same time using the :func:`enumerate` function. ::
@@ -633,6 +654,29 @@ returns a new sorted list while leaving the source unaltered. ::
orange
pear
+When looping through dictionaries, the key and corresponding value can be
+retrieved at the same time using the :meth:`iteritems` method. ::
+
+ >>> knights = {'gallahad': 'the pure', 'robin': 'the brave'}
+ >>> for k, v in knights.iteritems():
+ ... print k, v
+ ...
+ gallahad the pure
+ robin the brave
+
+To change a sequence you are iterating over while inside the loop (for
+example to duplicate certain items), it is recommended that you first make
+a copy. Looping over a sequence does not implicitly make a copy. The slice
+notation makes this especially convenient::
+
+ >>> words = ['cat', 'window', 'defenestrate']
+ >>> for w in words[:]: # Loop over a slice copy of the entire list.
+ ... if len(w) > 6:
+ ... words.insert(0, w)
+ ...
+ >>> words
+ ['defenestrate', 'cat', 'window', 'defenestrate']
+
.. _tut-conditions:
diff --git a/Doc/tutorial/errors.rst b/Doc/tutorial/errors.rst
index 1351957..6d14cb3 100644
--- a/Doc/tutorial/errors.rst
+++ b/Doc/tutorial/errors.rst
@@ -120,6 +120,14 @@ name multiple exceptions as a parenthesized tuple, for example::
... except (RuntimeError, TypeError, NameError):
... pass
+Note that the parentheses around this tuple are required, because
+``except ValueError, e:`` was the syntax used for what is normally
+written as ``except ValueError as e:`` in modern Python (described
+below). The old syntax is still supported for backwards compatibility.
+This means ``except RuntimeError, TypeError`` is not equivalent to
+``except (RuntimeError, TypeError):`` but to ``except RuntimeError as
+TypeError:``, which is not what you want.
+
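For illustration (this snippet is not part of the original text), compare the two forms under Python 2: the first catches either exception, while the old comma syntax catches only :exc:`RuntimeError` and rebinds the name ``TypeError`` to the caught instance::

    try:
        raise RuntimeError('boom')
    except (RuntimeError, TypeError):      # catches either exception type
        print 'caught'

    try:
        raise RuntimeError('boom')
    except RuntimeError, TypeError:        # old syntax: TypeError is now the instance!
        print 'TypeError is now bound to:', TypeError
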
The last except clause may omit the exception name(s), to serve as a wildcard.
Use this with extreme caution, since it is easy to mask a real programming error
in this way! It can also be used to print an error message and then re-raise
@@ -131,8 +139,8 @@ the exception (allowing a caller to handle the exception as well)::
f = open('myfile.txt')
s = f.readline()
i = int(s.strip())
- except IOError as (errno, strerror):
- print "I/O error({0}): {1}".format(errno, strerror)
+ except IOError as e:
+ print "I/O error({0}): {1}".format(e.errno, e.strerror)
except ValueError:
print "Could not convert data to an integer."
except:
@@ -177,7 +185,7 @@ attributes to it as desired. ::
... print type(inst) # the exception instance
... print inst.args # arguments stored in .args
... print inst # __str__ allows args to printed directly
- ... x, y = inst # __getitem__ allows args to be unpacked directly
+ ... x, y = inst.args
... print 'x =', x
... print 'y =', y
...
@@ -389,7 +397,7 @@ succeeded or failed. Look at the following example, which tries to open a file
and print its contents to the screen. ::
for line in open("myfile.txt"):
- print line
+ print line,
The problem with this code is that it leaves the file open for an indeterminate
amount of time after the code has finished executing. This is not an issue in
@@ -399,7 +407,7 @@ ensures they are always cleaned up promptly and correctly. ::
with open("myfile.txt") as f:
for line in f:
- print line
+ print line,
After the statement is executed, the file *f* is always closed, even if a
problem was encountered while processing the lines. Other objects which provide
diff --git a/Doc/tutorial/index.rst b/Doc/tutorial/index.rst
index 7d0bfc2..604cff8 100644
--- a/Doc/tutorial/index.rst
+++ b/Doc/tutorial/index.rst
@@ -4,9 +4,6 @@
The Python Tutorial
######################
-:Release: |version|
-:Date: |today|
-
Python is an easy to learn, powerful programming language. It has efficient
high-level data structures and a simple but effective approach to
object-oriented programming. Python's elegant syntax and dynamic typing,
diff --git a/Doc/tutorial/inputoutput.rst b/Doc/tutorial/inputoutput.rst
index daad58d..6fdc5f0 100644
--- a/Doc/tutorial/inputoutput.rst
+++ b/Doc/tutorial/inputoutput.rst
@@ -37,7 +37,7 @@ or :func:`str` functions.
The :func:`str` function is meant to return representations of values which are
fairly human-readable, while :func:`repr` is meant to generate representations
which can be read by the interpreter (or will force a :exc:`SyntaxError` if
-there is not equivalent syntax). For objects which don't have a particular
+there is no equivalent syntax). For objects which don't have a particular
representation for human consumption, :func:`str` will return the same value as
:func:`repr`. Many values, such as numbers or structures like lists and
dictionaries, have the same representation using either function. Strings and
@@ -215,10 +215,6 @@ operation. For example::
>>> print 'The value of PI is approximately %5.3f.' % math.pi
The value of PI is approximately 3.142.
-Since :meth:`str.format` is quite new, a lot of Python code still uses the ``%``
-operator. However, because this old style of formatting will eventually be
-removed from the language, :meth:`str.format` should generally be used.
-
More information can be found in the :ref:`string-formatting` section.
@@ -236,9 +232,9 @@ arguments: ``open(filename, mode)``.
::
- >>> f = open('/tmp/workfile', 'w')
+ >>> f = open('workfile', 'w')
>>> print f
- <open file '/tmp/workfile', mode 'w' at 80a0960>
+ <open file 'workfile', mode 'w' at 80a0960>
The first argument is a string containing the filename. The second argument is
another string containing a few characters describing the way in which the file
@@ -295,18 +291,8 @@ containing only a single newline. ::
>>> f.readline()
''
-``f.readlines()`` returns a list containing all the lines of data in the file.
-If given an optional parameter *sizehint*, it reads that many bytes from the
-file and enough more to complete a line, and returns the lines from that. This
-is often used to allow efficient reading of a large file by lines, but without
-having to load the entire file in memory. Only complete lines will be returned.
-::
-
- >>> f.readlines()
- ['This is the first line of the file.\n', 'Second line of the file\n']
-
-An alternative approach to reading lines is to loop over the file object. This is
-memory efficient, fast, and leads to simpler code::
+For reading lines from a file, you can loop over the file object. This is memory
+efficient, fast, and leads to simple code::
>>> for line in f:
print line,
@@ -314,9 +300,8 @@ memory efficient, fast, and leads to simpler code::
This is the first line of the file.
Second line of the file
-The alternative approach is simpler but does not provide as fine-grained
-control. Since the two approaches manage line buffering differently, they
-should not be mixed.
+If you want to read all the lines of a file in a list, you can also use
+``list(f)`` or ``f.readlines()``.
``f.write(string)`` writes the contents of *string* to the file, returning
``None``. ::
@@ -339,7 +324,7 @@ of the file, 1 uses the current file position, and 2 uses the end of the file as
the reference point. *from_what* can be omitted and defaults to 0, using the
beginning of the file as the reference point. ::
- >>> f = open('/tmp/workfile', 'r+')
+ >>> f = open('workfile', 'r+')
>>> f.write('0123456789abcdef')
>>> f.seek(5) # Go to the 6th byte in the file
>>> f.read(1)
@@ -363,7 +348,7 @@ objects. This has the advantage that the file is properly closed after its
suite finishes, even if an exception is raised on the way. It is also much
shorter than writing equivalent :keyword:`try`\ -\ :keyword:`finally` blocks::
- >>> with open('/tmp/workfile', 'r') as f:
+ >>> with open('workfile', 'r') as f:
... read_data = f.read()
>>> f.closed
True
@@ -373,47 +358,64 @@ File objects have some additional methods, such as :meth:`~file.isatty` and
Reference for a complete guide to file objects.
-.. _tut-pickle:
+.. _tut-json:
-The :mod:`pickle` Module
-------------------------
+Saving structured data with :mod:`json`
+---------------------------------------
-.. index:: module: pickle
+.. index:: module: json
-Strings can easily be written to and read from a file. Numbers take a bit more
+Strings can easily be written to and read from a file. Numbers take a bit more
effort, since the :meth:`read` method only returns strings, which will have to
be passed to a function like :func:`int`, which takes a string like ``'123'``
-and returns its numeric value 123. However, when you want to save more complex
-data types like lists, dictionaries, or class instances, things get a lot more
-complicated.
-
-Rather than have users be constantly writing and debugging code to save
-complicated data types, Python provides a standard module called :mod:`pickle`.
-This is an amazing module that can take almost any Python object (even some
-forms of Python code!), and convert it to a string representation; this process
-is called :dfn:`pickling`. Reconstructing the object from the string
-representation is called :dfn:`unpickling`. Between pickling and unpickling,
-the string representing the object may have been stored in a file or data, or
+and returns its numeric value 123. When you want to save more complex data
+types like nested lists and dictionaries, parsing and serializing by hand
+becomes complicated.
+
+Rather than having users constantly writing and debugging code to save
+complicated data types to files, Python allows you to use the popular data
+interchange format called `JSON (JavaScript Object Notation)
+<http://json.org>`_. The standard module called :mod:`json` can take Python
+data hierarchies, and convert them to string representations; this process is
+called :dfn:`serializing`. Reconstructing the data from the string representation
+is called :dfn:`deserializing`. Between serializing and deserializing, the
+string representing the object may have been stored in a file or data, or
sent over a network connection to some distant machine.
-If you have an object ``x``, and a file object ``f`` that's been opened for
-writing, the simplest way to pickle the object takes only one line of code::
+.. note::
+ The JSON format is commonly used by modern applications to allow for data
+ exchange. Many programmers are already familiar with it, which makes
+ it a good choice for interoperability.
+
+If you have an object ``x``, you can view its JSON string representation with a
+simple line of code::
+
+ >>> json.dumps([1, 'simple', 'list'])
+ '[1, "simple", "list"]'
+
+Another variant of the :func:`~json.dumps` function, called :func:`~json.dump`,
+simply serializes the object to a file. So if ``f`` is a :term:`file object`
+opened for writing, we can do this::
+
+ json.dump(x, f)
- pickle.dump(x, f)
+To decode the object again, if ``f`` is a :term:`file object` which has
+been opened for reading::
-To unpickle the object again, if ``f`` is a file object which has been opened
-for reading::
+ x = json.load(f)
- x = pickle.load(f)
+This simple serialization technique can handle lists and dictionaries, but
+serializing arbitrary class instances in JSON requires a bit of extra effort.
+The reference for the :mod:`json` module contains an explanation of this.
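One common way to supply that extra effort (shown here only as an assumed sketch, not as part of this change) is the ``default`` argument of :func:`json.dumps`, which converts otherwise unserializable objects before encoding::

    >>> import json
    >>> class Point(object):
    ...     def __init__(self, x, y):
    ...         self.x, self.y = x, y
    ...
    >>> json.dumps(Point(1, 2), default=lambda o: o.__dict__, sort_keys=True)
    '{"x": 1, "y": 2}'
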
-(There are other variants of this, used when pickling many objects or when you
-don't want to write the pickled data to a file; consult the complete
-documentation for :mod:`pickle` in the Python Library Reference.)
+.. seealso::
-:mod:`pickle` is the standard way to make Python objects which can be stored and
-reused by other programs or by a future invocation of the same program; the
-technical term for this is a :dfn:`persistent` object. Because :mod:`pickle` is
-so widely used, many authors who write Python extensions take care to ensure
-that new data types such as matrices can be properly pickled and unpickled.
+ :mod:`pickle` - the pickle module
+ Contrary to :ref:`JSON <tut-json>`, *pickle* is a protocol which allows
+ the serialization of arbitrarily complex Python objects. As such, it is
+ specific to Python and cannot be used to communicate with applications
+ written in other languages. It is also insecure by default:
+ deserializing pickle data coming from an untrusted source can execute
+ arbitrary code, if the data was crafted by a skilled attacker.
diff --git a/Doc/tutorial/interpreter.rst b/Doc/tutorial/interpreter.rst
index 6bdc1c5..5140b43 100644
--- a/Doc/tutorial/interpreter.rst
+++ b/Doc/tutorial/interpreter.rst
@@ -185,8 +185,8 @@ encodings can be found in the Python Library Reference, in the section on
For example, to write Unicode literals including the Euro currency symbol, the
ISO-8859-15 encoding can be used, with the Euro symbol having the ordinal value
-164. This script will print the value 8364 (the Unicode codepoint corresponding
-to the Euro symbol) and then exit::
+164. This script, when saved in the ISO-8859-15 encoding, will print the value
+8364 (the Unicode codepoint corresponding to the Euro symbol) and then exit::
# -*- coding: iso-8859-15 -*-
@@ -252,7 +252,7 @@ of your user site-packages directory. Start Python and run this code:
>>> import site
>>> site.getusersitepackages()
- '/home/user/.local/lib/python3.2/site-packages'
+ '/home/user/.local/lib/python2.7/site-packages'
Now you can create a file named :file:`usercustomize.py` in that directory and
put anything you want in it. It will affect every invocation of Python, unless
diff --git a/Doc/tutorial/introduction.rst b/Doc/tutorial/introduction.rst
index c99915f..1871dd1 100644
--- a/Doc/tutorial/introduction.rst
+++ b/Doc/tutorial/introduction.rst
@@ -82,8 +82,7 @@ A value can be assigned to several variables simultaneously::
Variables must be "defined" (assigned a value) before they can be used, or an
error will occur::
- >>> # try to access an undefined variable
- ... n
+ >>> n # try to access an undefined variable
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'n' is not defined
diff --git a/Doc/tutorial/modules.rst b/Doc/tutorial/modules.rst
index 62b9bb5..f5b8114 100644
--- a/Doc/tutorial/modules.rst
+++ b/Doc/tutorial/modules.rst
@@ -71,7 +71,8 @@ More on Modules
A module can contain executable statements as well as function definitions.
These statements are intended to initialize the module. They are executed only
-the *first* time the module is imported somewhere. [#]_
+the *first* time the module name is encountered in an import statement. [#]_
+(They are also run if the file is executed as a script.)
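As a hypothetical illustration (the module name and contents are invented), a statement at module level runs on the first import and is skipped on later imports in the same interpreter::

    # greetings.py
    print 'initializing the greetings module'   # module-level initialization

    def hello(name):
        return 'Hello, ' + name

Importing ``greetings`` twice in the same interpreter prints the message only once; running ``python greetings.py`` directly prints it as well.
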
Each module has its own private symbol table, which is used as the global symbol
table by all functions defined in the module. Thus, the author of a module can
@@ -242,7 +243,7 @@ modules are built into the interpreter; these provide access to operations that
are not part of the core of the language but are nevertheless built in, either
for efficiency or to provide access to operating system primitives such as
system calls. The set of such modules is a configuration option which also
-depends on the underlying platform For example, the :mod:`winreg` module is only
+depends on the underlying platform. For example, the :mod:`winreg` module is only
provided on Windows systems. One particular module deserves some attention:
:mod:`sys`, which is built into every Python interpreter. The variables
``sys.ps1`` and ``sys.ps2`` define the strings used as primary and secondary
@@ -282,16 +283,21 @@ defines. It returns a sorted list of strings::
>>> import fibo, sys
>>> dir(fibo)
['__name__', 'fib', 'fib2']
- >>> dir(sys)
- ['__displayhook__', '__doc__', '__excepthook__', '__name__', '__stderr__',
- '__stdin__', '__stdout__', '_getframe', 'api_version', 'argv',
- 'builtin_module_names', 'byteorder', 'callstats', 'copyright',
- 'displayhook', 'exc_clear', 'exc_info', 'exc_type', 'excepthook',
- 'exec_prefix', 'executable', 'exit', 'getdefaultencoding', 'getdlopenflags',
- 'getrecursionlimit', 'getrefcount', 'hexversion', 'maxint', 'maxunicode',
- 'meta_path', 'modules', 'path', 'path_hooks', 'path_importer_cache',
- 'platform', 'prefix', 'ps1', 'ps2', 'setcheckinterval', 'setdlopenflags',
- 'setprofile', 'setrecursionlimit', 'settrace', 'stderr', 'stdin', 'stdout',
+ >>> dir(sys) # doctest: +NORMALIZE_WHITESPACE
+ ['__displayhook__', '__doc__', '__excepthook__', '__name__', '__package__',
+ '__stderr__', '__stdin__', '__stdout__', '_clear_type_cache',
+ '_current_frames', '_getframe', '_mercurial', 'api_version', 'argv',
+ 'builtin_module_names', 'byteorder', 'call_tracing', 'callstats',
+ 'copyright', 'displayhook', 'dont_write_bytecode', 'exc_clear', 'exc_info',
+ 'exc_traceback', 'exc_type', 'exc_value', 'excepthook', 'exec_prefix',
+ 'executable', 'exit', 'flags', 'float_info', 'float_repr_style',
+ 'getcheckinterval', 'getdefaultencoding', 'getdlopenflags',
+ 'getfilesystemencoding', 'getobjects', 'getprofile', 'getrecursionlimit',
+ 'getrefcount', 'getsizeof', 'gettotalrefcount', 'gettrace', 'hexversion',
+ 'long_info', 'maxint', 'maxsize', 'maxunicode', 'meta_path', 'modules',
+ 'path', 'path_hooks', 'path_importer_cache', 'platform', 'prefix', 'ps1',
+ 'py3kwarning', 'setcheckinterval', 'setdlopenflags', 'setprofile',
+ 'setrecursionlimit', 'settrace', 'stderr', 'stdin', 'stdout', 'subversion',
'version', 'version_info', 'warnoptions']
Without arguments, :func:`dir` lists the names you have defined currently::
@@ -300,7 +306,7 @@ Without arguments, :func:`dir` lists the names you have defined currently::
>>> import fibo
>>> fib = fibo.fib
>>> dir()
- ['__builtins__', '__doc__', '__file__', '__name__', 'a', 'fib', 'fibo', 'sys']
+ ['__builtins__', '__name__', '__package__', 'a', 'fib', 'fibo', 'sys']
Note that it lists all types of names: variables, modules, functions, etc.
@@ -311,10 +317,11 @@ want a list of those, they are defined in the standard module
:mod:`__builtin__`::
>>> import __builtin__
- >>> dir(__builtin__)
- ['ArithmeticError', 'AssertionError', 'AttributeError', 'DeprecationWarning',
- 'EOFError', 'Ellipsis', 'EnvironmentError', 'Exception', 'False',
- 'FloatingPointError', 'FutureWarning', 'IOError', 'ImportError',
+ >>> dir(__builtin__) # doctest: +NORMALIZE_WHITESPACE
+ ['ArithmeticError', 'AssertionError', 'AttributeError', 'BaseException',
+ 'BufferError', 'BytesWarning', 'DeprecationWarning', 'EOFError',
+ 'Ellipsis', 'EnvironmentError', 'Exception', 'False', 'FloatingPointError',
+ 'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError', 'ImportWarning',
'IndentationError', 'IndexError', 'KeyError', 'KeyboardInterrupt',
'LookupError', 'MemoryError', 'NameError', 'None', 'NotImplemented',
'NotImplementedError', 'OSError', 'OverflowError',
@@ -323,18 +330,19 @@ want a list of those, they are defined in the standard module
'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError', 'True',
'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
- 'UserWarning', 'ValueError', 'Warning', 'WindowsError',
+ 'UnicodeWarning', 'UserWarning', 'ValueError', 'Warning',
'ZeroDivisionError', '_', '__debug__', '__doc__', '__import__',
- '__name__', 'abs', 'apply', 'basestring', 'bool', 'buffer',
- 'callable', 'chr', 'classmethod', 'cmp', 'coerce', 'compile',
- 'complex', 'copyright', 'credits', 'delattr', 'dict', 'dir', 'divmod',
- 'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float',
- 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'help', 'hex',
- 'id', 'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter',
- 'len', 'license', 'list', 'locals', 'long', 'map', 'max', 'memoryview',
- 'min', 'object', 'oct', 'open', 'ord', 'pow', 'property', 'quit', 'range',
- 'raw_input', 'reduce', 'reload', 'repr', 'reversed', 'round', 'set',
- 'setattr', 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super',
+ '__name__', '__package__', 'abs', 'all', 'any', 'apply', 'basestring',
+ 'bin', 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr',
+ 'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'copyright',
+ 'credits', 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval',
+ 'execfile', 'exit', 'file', 'filter', 'float', 'format', 'frozenset',
+ 'getattr', 'globals', 'hasattr', 'hash', 'help', 'hex', 'id', 'input',
+ 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len', 'license',
+ 'list', 'locals', 'long', 'map', 'max', 'memoryview', 'min', 'next',
+ 'object', 'oct', 'open', 'ord', 'pow', 'print', 'property', 'quit',
+ 'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed', 'round',
+ 'set', 'setattr', 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super',
'tuple', 'type', 'unichr', 'unicode', 'vars', 'xrange', 'zip']
@@ -360,7 +368,9 @@ There are also many different operations you might want to perform on sound data
(such as mixing, adding echo, applying an equalizer function, creating an
artificial stereo effect), so in addition you will be writing a never-ending
stream of modules to perform these operations. Here's a possible structure for
-your package (expressed in terms of a hierarchical filesystem)::
+your package (expressed in terms of a hierarchical filesystem):
+
+.. code-block:: text
sound/ Top-level package
__init__.py Initialize the sound package
@@ -457,7 +467,7 @@ list of module names that should be imported when ``from package import *`` is
encountered. It is up to the package author to keep this list up-to-date when a
new version of the package is released. Package authors may also decide not to
support it, if they don't see a use for importing \* from their package. For
-example, the file :file:`sounds/effects/__init__.py` could contain the following
+example, the file :file:`sound/effects/__init__.py` could contain the following
code::
__all__ = ["echo", "surround", "reverse"]
@@ -543,6 +553,6 @@ modules found in a package.
.. rubric:: Footnotes
.. [#] In fact function definitions are also 'statements' that are 'executed'; the
- execution of a module-level function enters the function name in the module's
- global symbol table.
+ execution of a module-level function definition enters the function name in
+ the module's global symbol table.
diff --git a/Doc/tutorial/stdlib.rst b/Doc/tutorial/stdlib.rst
index 844f8bc..f6239d6 100644
--- a/Doc/tutorial/stdlib.rst
+++ b/Doc/tutorial/stdlib.rst
@@ -145,7 +145,7 @@ Internet Access
===============
There are a number of modules for accessing the internet and processing internet
-protocols. Two of the simplest are :mod:`urllib2` for retrieving data from urls
+protocols. Two of the simplest are :mod:`urllib2` for retrieving data from URLs
and :mod:`smtplib` for sending mail::
>>> import urllib2
@@ -278,8 +278,10 @@ file::
def test_average(self):
self.assertEqual(average([20, 30, 70]), 40.0)
self.assertEqual(round(average([1, 5, 7]), 1), 4.3)
- self.assertRaises(ZeroDivisionError, average, [])
- self.assertRaises(TypeError, average, 20, 30, 70)
+ with self.assertRaises(ZeroDivisionError):
+ average([])
+ with self.assertRaises(TypeError):
+ average(20, 30, 70)
unittest.main() # Calling from the command line invokes all tests
diff --git a/Doc/tutorial/stdlib2.rst b/Doc/tutorial/stdlib2.rst
index 514d583..65fb093 100644
--- a/Doc/tutorial/stdlib2.rst
+++ b/Doc/tutorial/stdlib2.rst
@@ -71,9 +71,9 @@ formatting numbers with group separators::
Templating
==========
-The :mod:`string` module includes a versatile :class:`Template` class with a
-simplified syntax suitable for editing by end-users. This allows users to
-customize their applications without having to alter the application.
+The :mod:`string` module includes a versatile :class:`~string.Template` class
+with a simplified syntax suitable for editing by end-users. This allows users
+to customize their applications without having to alter the application.
The format uses placeholder names formed by ``$`` with valid Python identifiers
(alphanumeric characters and underscores). Surrounding the placeholder with
@@ -85,17 +85,17 @@ spaces. Writing ``$$`` creates a single escaped ``$``::
>>> t.substitute(village='Nottingham', cause='the ditch fund')
'Nottinghamfolk send $10 to the ditch fund.'
-The :meth:`substitute` method raises a :exc:`KeyError` when a placeholder is not
-supplied in a dictionary or a keyword argument. For mail-merge style
-applications, user supplied data may be incomplete and the
-:meth:`safe_substitute` method may be more appropriate --- it will leave
-placeholders unchanged if data is missing::
+The :meth:`~string.Template.substitute` method raises a :exc:`KeyError` when a
+placeholder is not supplied in a dictionary or a keyword argument. For
+mail-merge style applications, user supplied data may be incomplete and the
+:meth:`~string.Template.safe_substitute` method may be more appropriate ---
+it will leave placeholders unchanged if data is missing::
>>> t = Template('Return the $item to $owner.')
>>> d = dict(item='unladen swallow')
>>> t.substitute(d)
Traceback (most recent call last):
- . . .
+ ...
KeyError: 'owner'
>>> t.safe_substitute(d)
'Return the unladen swallow to $owner.'
@@ -132,8 +132,9 @@ templates for XML files, plain text reports, and HTML web reports.
Working with Binary Data Record Layouts
=======================================
-The :mod:`struct` module provides :func:`pack` and :func:`unpack` functions for
-working with variable length binary record formats. The following example shows
+The :mod:`struct` module provides :func:`~struct.pack` and
+:func:`~struct.unpack` functions for working with variable length binary
+record formats. The following example shows
how to loop through header information in a ZIP file without using the
:mod:`zipfile` module. Pack codes ``"H"`` and ``"I"`` represent two and four
byte unsigned numbers respectively. The ``"<"`` indicates that they are
@@ -218,7 +219,9 @@ At its simplest, log messages are sent to a file or to ``sys.stderr``::
logging.error('Error occurred')
logging.critical('Critical error -- shutting down')
-This produces the following output::
+This produces the following output:
+
+.. code-block:: none
WARNING:root:Warning:config file server.conf not found
ERROR:root:Error occurred
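As a hedged sketch of adjusting the default behaviour shown above, lowering the
threshold with :func:`logging.basicConfig` makes the suppressed levels visible
as well::

    import logging

    logging.basicConfig(level=logging.DEBUG)   # show DEBUG and INFO too
    logging.debug('Debugging information')
    logging.info('Informational message')
    logging.warning('Warning:config file %s not found', 'server.conf')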
@@ -227,8 +230,9 @@ This produces the following output::
By default, informational and debugging messages are suppressed and the output
is sent to standard error. Other output options include routing messages
through email, datagrams, sockets, or to an HTTP Server. New filters can select
-different routing based on message priority: :const:`DEBUG`, :const:`INFO`,
-:const:`WARNING`, :const:`ERROR`, and :const:`CRITICAL`.
+different routing based on message priority: :const:`~logging.DEBUG`,
+:const:`~logging.INFO`, :const:`~logging.WARNING`, :const:`~logging.ERROR`,
+and :const:`~logging.CRITICAL`.
The logging system can be configured directly from Python or can be loaded from
a user editable configuration file for customized logging without altering the
@@ -255,9 +259,9 @@ applications include caching objects that are expensive to create::
>>> import weakref, gc
>>> class A:
... def __init__(self, value):
- ... self.value = value
+ ... self.value = value
... def __repr__(self):
- ... return str(self.value)
+ ... return str(self.value)
...
>>> a = A(10) # create a reference
>>> d = weakref.WeakValueDictionary()
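>>> # A hedged continuation sketch of the caching idea above: the entry
>>> # disappears once the only regular reference is gone and gc has run.
>>> d['primary'] = a                # does not create a reference
>>> d['primary']                    # fetch the object if it is still alive
10
>>> del a                           # remove the one reference
>>> gc.collect()                    # run garbage collection right away
0
>>> d['primary']                    # entry was automatically removed
Traceback (most recent call last):
  ...
KeyError: 'primary'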
@@ -285,11 +289,11 @@ Many data structure needs can be met with the built-in list type. However,
sometimes there is a need for alternative implementations with different
performance trade-offs.
-The :mod:`array` module provides an :class:`array()` object that is like a list
-that stores only homogeneous data and stores it more compactly. The following
-example shows an array of numbers stored as two byte unsigned binary numbers
-(typecode ``"H"``) rather than the usual 16 bytes per entry for regular lists of
-Python int objects::
+The :mod:`array` module provides an :class:`~array.array()` object that is like
+a list that stores only homogeneous data and stores it more compactly. The
+following example shows an array of numbers stored as two byte unsigned binary
+numbers (typecode ``"H"``) rather than the usual 16 bytes per entry for regular
+lists of Python int objects::
>>> from array import array
>>> a = array('H', [4000, 10, 700, 22222])
@@ -298,10 +302,10 @@ Python int objects::
>>> a[1:3]
array('H', [10, 700])
-The :mod:`collections` module provides a :class:`deque()` object that is like a
-list with faster appends and pops from the left side but slower lookups in the
-middle. These objects are well suited for implementing queues and breadth first
-tree searches::
+The :mod:`collections` module provides a :class:`~collections.deque()` object
+that is like a list with faster appends and pops from the left side but slower
+lookups in the middle. These objects are well suited for implementing queues
+and breadth first tree searches::
>>> from collections import deque
>>> d = deque(["task1", "task2", "task3"])
@@ -309,6 +313,8 @@ tree searches::
>>> print "Handling", d.popleft()
Handling task1
+::
+
unsearched = deque([starting_node])
def breadth_first_search(unsearched):
node = unsearched.popleft()
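# A self-contained hedged sketch of the breadth-first idea above; the graph,
# the node names and the helper below are invented purely for illustration.
from collections import deque

graph = {'start': ['a', 'b'], 'a': ['c'], 'b': ['c'], 'c': []}

def breadth_first_order(start):
    seen, unsearched, order = set([start]), deque([start]), []
    while unsearched:
        node = unsearched.popleft()          # O(1) pop from the left side
        order.append(node)
        for neighbour in graph[node]:
            if neighbour not in seen:
                seen.add(neighbour)
                unsearched.append(neighbour)
    return order

print breadth_first_order('start')           # ['start', 'a', 'b', 'c']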
@@ -345,8 +351,8 @@ not want to run a full list sort::
Decimal Floating Point Arithmetic
=================================
-The :mod:`decimal` module offers a :class:`Decimal` datatype for decimal
-floating point arithmetic. Compared to the built-in :class:`float`
+The :mod:`decimal` module offers a :class:`~decimal.Decimal` datatype for
+decimal floating point arithmetic. Compared to the built-in :class:`float`
implementation of binary floating point, the class is especially helpful for
* financial applications and other uses which require exact decimal
@@ -370,13 +376,15 @@ becomes significant if the results are rounded to the nearest cent::
>>> round(.70 * 1.05, 2) # same calculation with floats
0.73
-The :class:`Decimal` result keeps a trailing zero, automatically inferring four
-place significance from multiplicands with two place significance. Decimal
-reproduces mathematics as done by hand and avoids issues that can arise when
-binary floating point cannot exactly represent decimal quantities.
+The :class:`~decimal.Decimal` result keeps a trailing zero, automatically
+inferring four place significance from multiplicands with two place
+significance. Decimal reproduces mathematics as done by hand and avoids
+issues that can arise when binary floating point cannot exactly represent
+decimal quantities.
-Exact representation enables the :class:`Decimal` class to perform modulo
-calculations and equality tests that are unsuitable for binary floating point::
+Exact representation enables the :class:`~decimal.Decimal` class to perform
+modulo calculations and equality tests that are unsuitable for binary floating
+point::
>>> Decimal('1.00') % Decimal('.10')
Decimal('0.00')
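>>> # A further hedged illustration of the exact-equality point above.
>>> sum([Decimal('0.1')] * 10) == Decimal('1.0')
True
>>> sum([0.1] * 10) == 1.0
False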
diff --git a/Doc/tutorial/whatnow.rst b/Doc/tutorial/whatnow.rst
index 157cc9f..48ff5c8 100644
--- a/Doc/tutorial/whatnow.rst
+++ b/Doc/tutorial/whatnow.rst
@@ -54,9 +54,8 @@ python-list@python.org. The newsgroup and mailing list are gatewayed, so
messages posted to one will automatically be forwarded to the other. There are
around 120 postings a day (with peaks up to several hundred), asking (and
answering) questions, suggesting new features, and announcing new modules.
-Before posting, be sure to check the list of `Frequently Asked Questions
-<http://www.python.org/doc/faq/>`_ (also called the FAQ), or look for it in the
-:file:`Misc/` directory of the Python source distribution. Mailing list
+Before posting, be sure to check the list of :ref:`Frequently Asked Questions
+<faq-index>` (also called the FAQ). Mailing list
archives are available at http://mail.python.org/pipermail/. The FAQ answers
many of the questions that come up again and again, and may already contain the
solution for your problem.
diff --git a/Doc/using/cmdline.rst b/Doc/using/cmdline.rst
index 4b2793f..842b266 100644
--- a/Doc/using/cmdline.rst
+++ b/Doc/using/cmdline.rst
@@ -397,18 +397,9 @@ Miscellaneous options
.. cmdoption:: -3
- Warn about Python 3.x incompatibilities which cannot be fixed trivially by
- :ref:`2to3 <2to3-reference>`. Among these are:
-
- * :meth:`dict.has_key`
- * :func:`apply`
- * :func:`callable`
- * :func:`coerce`
- * :func:`execfile`
- * :func:`reduce`
- * :func:`reload`
-
- Using these will emit a :exc:`DeprecationWarning`.
+ Warn about possible Python 3.x incompatibilities by emitting a
+ :exc:`DeprecationWarning` for features that are removed or significantly
+ changed in Python 3.
.. versionadded:: 2.6
@@ -442,7 +433,10 @@ Options you shouldn't use
Environment variables
---------------------
-These environment variables influence Python's behavior.
+These environment variables influence Python's behavior; they are processed
+before the command-line switches other than -E. It is customary that
+command-line switches override environment variables where there is a
+conflict.
.. envvar:: PYTHONHOME
@@ -541,7 +535,8 @@ These environment variables influence Python's behavior.
.. envvar:: PYTHONDONTWRITEBYTECODE
If this is set, Python won't try to write ``.pyc`` or ``.pyo`` files on the
- import of source modules.
+ import of source modules. This is equivalent to specifying the :option:`-B`
+ option.
.. versionadded:: 2.6
@@ -637,4 +632,3 @@ if Python was configured with the ``--with-pydebug`` build option.
If set, Python will print memory allocation statistics every time a new
object arena is created, and on shutdown.
-
diff --git a/Doc/using/mac.rst b/Doc/using/mac.rst
index c5f7f2d..5f29812 100644
--- a/Doc/using/mac.rst
+++ b/Doc/using/mac.rst
@@ -25,14 +25,14 @@ installers for the latest 2.3 release for Mac OS 9 and related documentation.
Getting and Installing MacPython
================================
-Mac OS X 10.5 comes with Python 2.5.1 pre-installed by Apple. If you wish, you
+Mac OS X 10.8 comes with Python 2.7 pre-installed by Apple. If you wish, you
are invited to install the most recent version of Python from the Python website
(http://www.python.org). A current "universal binary" build of Python, which
runs natively on the Mac's new Intel and legacy PPC CPUs, is available there.
What you get after installing is a number of things:
-* A :file:`MacPython 2.5` folder in your :file:`Applications` folder. In here
+* A :file:`MacPython 2.7` folder in your :file:`Applications` folder. In here
you find IDLE, the development environment that is a standard part of official
Python distributions; PythonLauncher, which handles double-clicking Python
scripts from the Finder; and the "Build Applet" tool, which allows you to
@@ -100,7 +100,7 @@ aware of: programs that talk to the Aqua window manager (in other words,
anything that has a GUI) need to be run in a special way. Use :program:`pythonw`
instead of :program:`python` to start such scripts.
-With Python 2.5, you can use either :program:`python` or :program:`pythonw`.
+With Python 2.7, you can use either :program:`python` or :program:`pythonw`.
Configuration
@@ -133,13 +133,11 @@ Installing Additional Python Packages
There are several methods to install additional Python packages:
-* http://pythonmac.org/packages/ contains selected compiled packages for Python
- 2.5, 2.4, and 2.3.
-
* Packages can be installed via the standard Python distutils mode (``python
setup.py install``).
-* Many packages can also be installed via the :program:`setuptools` extension.
+* Many packages can also be installed via the :program:`setuptools` extension
+ or :program:`pip` wrapper; see http://www.pip-installer.org/.
GUI Programming on the Mac
@@ -167,7 +165,7 @@ http://www.riverbankcomputing.co.uk/software/pyqt/intro.
Distributing Python Applications on the Mac
===========================================
-The "Build Applet" tool that is placed in the MacPython 2.5 folder is fine for
+The "Build Applet" tool that is placed in the MacPython 2.7 folder is fine for
packaging small Python scripts on your own machine to run as a standard Mac
application. This tool, however, is not robust enough to distribute Python
applications to other users.
@@ -177,20 +175,6 @@ The standard tool for deploying standalone Python applications on the Mac is
at http://undefined.org/python/#py2app.
-Application Scripting
-=====================
-
-Python can also be used to script other Mac applications via Apple's Open
-Scripting Architecture (OSA); see http://appscript.sourceforge.net. Appscript is
-a high-level, user-friendly Apple event bridge that allows you to control
-scriptable Mac OS X applications using ordinary Python scripts. Appscript makes
-Python a serious alternative to Apple's own *AppleScript* language for
-automating your Mac. A related package, *PyOSA*, is an OSA language component
-for the Python scripting language, allowing Python code to be executed by any
-OSA-enabled application (Script Editor, Mail, iTunes, etc.). PyOSA makes Python
-a full peer to AppleScript.
-
-
Other Resources
===============
diff --git a/Doc/using/unix.rst b/Doc/using/unix.rst
index 1539254..ec5ac6c 100644
--- a/Doc/using/unix.rst
+++ b/Doc/using/unix.rst
@@ -28,7 +28,7 @@ following links:
http://www.debian.org/doc/manuals/maint-guide/first.en.html
for Debian users
- http://linuxmafia.com/pub/linux/suse-linux-internals/chapter35.html
+ http://en.opensuse.org/Portal:Packaging
for OpenSuse users
http://docs.fedoraproject.org/en-US/Fedora_Draft_Documentation/0.1/html/RPM_Guide/ch-creating-rpms.html
for Fedora users
@@ -145,7 +145,7 @@ information on how to code in Python in these editors, look at:
* http://sourceforge.net/projects/python-mode
Geany is an excellent IDE with support for a lot of languages. For more
-information, read: http://geany.uvena.de/
+information, read: http://www.geany.org/
Komodo edit is another extremely good IDE. It also has support for a lot of
languages. For more information, read:
diff --git a/Doc/using/windows.rst b/Doc/using/windows.rst
index 978440c..b49ba7b 100644
--- a/Doc/using/windows.rst
+++ b/Doc/using/windows.rst
@@ -84,6 +84,8 @@ In order to run Python flawlessly, you might have to change certain environment
settings in Windows.
+.. _setting-envvars:
+
Excursus: Setting environment variables
---------------------------------------
diff --git a/Doc/whatsnew/2.2.rst b/Doc/whatsnew/2.2.rst
index 412c1d0..5f6fe11 100644
--- a/Doc/whatsnew/2.2.rst
+++ b/Doc/whatsnew/2.2.rst
@@ -450,9 +450,9 @@ signal that the iterator is done.
Python classes can define an :meth:`__iter__` method, which should create and
return a new iterator for the object; if the object is its own iterator, this
method can just return ``self``. In particular, iterators will usually be their
-own iterators. Extension types implemented in C can implement a :attr:`tp_iter`
+own iterators. Extension types implemented in C can implement a :c:member:`~PyTypeObject.tp_iter`
function in order to return an iterator, and extension types that want to behave
-as iterators can define a :attr:`tp_iternext` function.
+as iterators can define a :c:member:`~PyTypeObject.tp_iternext` function.
So, after all this, what do iterators actually do? They have one required
method, :meth:`next`, which takes no arguments and returns the next value. When
@@ -478,7 +478,7 @@ there are no more values to be returned, calling :meth:`next` should raise the
In 2.2, Python's :keyword:`for` statement no longer expects a sequence; it
expects something for which :func:`iter` will return an iterator. For backward
compatibility and convenience, an iterator is automatically constructed for
-sequences that don't implement :meth:`__iter__` or a :attr:`tp_iter` slot, so
+sequences that don't implement :meth:`__iter__` or a :c:member:`~PyTypeObject.tp_iter` slot, so
``for i in [1,2,3]`` will still work. Wherever the Python interpreter loops
over a sequence, it's been changed to use the iterator protocol. This means you
can do things like this::
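As an illustrative sketch of the protocol described above (the class below is
invented for the purpose, not taken from the original example), an object that
is its own iterator only needs :meth:`__iter__` and :meth:`next`::

    class Countdown:
        """A minimal 2.x-style iterator counting down from a starting value."""
        def __init__(self, start):
            self.remaining = start
        def __iter__(self):
            return self                 # the object is its own iterator
        def next(self):
            if self.remaining <= 0:
                raise StopIteration     # no more values
            self.remaining -= 1
            return self.remaining + 1

    for i in Countdown(3):
        print i                         # prints 3, then 2, then 1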
diff --git a/Doc/whatsnew/2.3.rst b/Doc/whatsnew/2.3.rst
index b034eb2..85fea68 100644
--- a/Doc/whatsnew/2.3.rst
+++ b/Doc/whatsnew/2.3.rst
@@ -366,6 +366,9 @@ Under MacOS, :func:`os.listdir` may now return Unicode filenames.
.. ======================================================================
+.. index::
+ single: universal newlines; What's new
+
PEP 278: Universal Newline Support
==================================
@@ -376,12 +379,12 @@ mark the ends of lines in text files. Unix uses the linefeed (ASCII character
10), MacOS uses the carriage return (ASCII character 13), and Windows uses a
two-character sequence of a carriage return plus a newline.
-Python's file objects can now support end of line conventions other than the one
-followed by the platform on which Python is running. Opening a file with the
-mode ``'U'`` or ``'rU'`` will open a file for reading in universal newline mode.
-All three line ending conventions will be translated to a ``'\n'`` in the
-strings returned by the various file methods such as :meth:`read` and
-:meth:`readline`.
+Python's file objects can now support end of line conventions other than the
+one followed by the platform on which Python is running. Opening a file with
+the mode ``'U'`` or ``'rU'`` will open a file for reading in :term:`universal
+newlines` mode. All three line ending conventions will be translated to a
+``'\n'`` in the strings returned by the various file methods such as
+:meth:`read` and :meth:`readline`.
Universal newline support is also used when importing modules and when executing
a file with the :func:`execfile` function. This means that Python modules can
diff --git a/Doc/whatsnew/2.4.rst b/Doc/whatsnew/2.4.rst
index 97a0293..fa6c124 100644
--- a/Doc/whatsnew/2.4.rst
+++ b/Doc/whatsnew/2.4.rst
@@ -411,6 +411,9 @@ error streams will be. You can provide a file object or a file descriptor, or
you can use the constant ``subprocess.PIPE`` to create a pipe between the
subprocess and the parent.
+.. index::
+ single: universal newlines; What's new
+
The constructor has a number of handy options:
* *close_fds* requests that all file descriptors be closed before running the
@@ -424,7 +427,7 @@ The constructor has a number of handy options:
* *preexec_fn* is a function that gets called before the child is started.
* *universal_newlines* opens the child's input and output using Python's
- universal newline feature.
+ :term:`universal newlines` feature.
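A hedged sketch pulling these options together (the ``echo`` command is only an
illustration and assumes a Unix-like system)::

    from subprocess import Popen, PIPE

    child = Popen(['echo', 'hello'], stdout=PIPE, universal_newlines=True)
    output, _ = child.communicate()     # waits for the child and collects output
    print output.rstrip()               # hello
    print child.returncode             # 0 once the child has exited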
Once you've created the :class:`Popen` instance, you can call its :meth:`wait`
method to pause until the subprocess has exited, :meth:`poll` to check if it's
@@ -843,7 +846,7 @@ Here are all of the changes that Python 2.4 makes to the core Python language.
['A', 'b', 'c', 'D']
Finally, the *reverse* parameter takes a Boolean value. If the value is true,
- the list will be sorted into reverse order. Instead of ``L.sort() ;
+ the list will be sorted into reverse order. Instead of ``L.sort();
L.reverse()``, you can now write ``L.sort(reverse=True)``.
The results of sorting are now guaranteed to be stable. This means that two
diff --git a/Doc/whatsnew/2.5.rst b/Doc/whatsnew/2.5.rst
index ff599c8..c420a19 100644
--- a/Doc/whatsnew/2.5.rst
+++ b/Doc/whatsnew/2.5.rst
@@ -286,7 +286,7 @@ Python's standard :mod:`string` module? There's no clean way to ignore
:mod:`pkg.string` and look for the standard module; generally you had to look at
the contents of ``sys.modules``, which is slightly unclean. Holger Krekel's
:mod:`py.std` package provides a tidier way to perform imports from the standard
-library, ``import py ; py.std.string.join()``, but that package isn't available
+library, ``import py; py.std.string.join()``, but that package isn't available
on all Python installations.
Reading code which relies on relative imports is also less clear, because a
@@ -1338,13 +1338,17 @@ complete list of changes, or look through the SVN logs for all the details.
.. XXX need to provide some more detail here
+ .. index::
+ single: universal newlines; What's new
+
* The :mod:`fileinput` module was made more flexible. Unicode filenames are now
supported, and a *mode* parameter that defaults to ``"r"`` was added to the
- :func:`input` function to allow opening files in binary or universal-newline
- mode. Another new parameter, *openhook*, lets you use a function other than
- :func:`open` to open the input files. Once you're iterating over the set of
- files, the :class:`FileInput` object's new :meth:`fileno` returns the file
- descriptor for the currently opened file. (Contributed by Georg Brandl.)
+ :func:`input` function to allow opening files in binary or :term:`universal
+ newlines` mode. Another new parameter, *openhook*, lets you use a function
+ other than :func:`open` to open the input files. Once you're iterating over
+ the set of files, the :class:`FileInput` object's new :meth:`fileno` returns
+ the file descriptor for the currently opened file. (Contributed by Georg
+ Brandl.)
* In the :mod:`gc` module, the new :func:`get_count` function returns a 3-tuple
containing the current collection counts for the three GC generations. This is
diff --git a/Doc/whatsnew/2.6.rst b/Doc/whatsnew/2.6.rst
index 796963a..cefdcaf 100644
--- a/Doc/whatsnew/2.6.rst
+++ b/Doc/whatsnew/2.6.rst
@@ -5,8 +5,6 @@
.. XXX add trademark info for Apple, Microsoft, SourceForge.
:Author: A.M. Kuchling (amk at amk.ca)
-:Release: |release|
-:Date: |today|
.. $Id$
Rules for maintenance:
@@ -1067,9 +1065,12 @@ the :mod:`io` module:
The :class:`BytesIO` class supports reading, writing, and seeking
over an in-memory buffer.
+ .. index::
+ single: universal newlines; What's new
+
* :class:`TextIOBase`: Provides functions for reading and writing
strings (remember, strings will be Unicode in Python 3.0),
- and supporting universal newlines. :class:`TextIOBase` defines
+ and supporting :term:`universal newlines`. :class:`TextIOBase` defines
the :meth:`readline` method and supports iteration upon
objects.
@@ -1886,7 +1887,7 @@ changes, or look through the Subversion logs for all the details.
>>> dq=deque(maxlen=3)
>>> dq
deque([], maxlen=3)
- >>> dq.append(1) ; dq.append(2) ; dq.append(3)
+ >>> dq.append(1); dq.append(2); dq.append(3)
>>> dq
deque([1, 2, 3], maxlen=3)
>>> dq.append(4)
@@ -2778,12 +2779,12 @@ http://www.json.org.
types. The following example encodes and decodes a dictionary::
>>> import json
- >>> data = {"spam" : "foo", "parrot" : 42}
+ >>> data = {"spam": "foo", "parrot": 42}
>>> in_json = json.dumps(data) # Encode the data
>>> in_json
'{"parrot": 42, "spam": "foo"}'
>>> json.loads(in_json) # Decode into a Python object
- {"spam" : "foo", "parrot" : 42}
+ {"spam": "foo", "parrot": 42}
It's also possible to write your own decoders and encoders to support
more types. Pretty-printing of the JSON strings is also supported.
diff --git a/Doc/whatsnew/2.7.rst b/Doc/whatsnew/2.7.rst
index 560e2f7..249f6d6 100644
--- a/Doc/whatsnew/2.7.rst
+++ b/Doc/whatsnew/2.7.rst
@@ -3,8 +3,6 @@
****************************
:Author: A.M. Kuchling (amk at amk.ca)
-:Release: |release|
-:Date: |today|
.. hyperlink all the methods & functions.
@@ -81,45 +79,91 @@ bug/patch item for each change.
The Future for Python 2.x
=========================
-Python 2.7 is intended to be the last major release in the 2.x series.
-The Python maintainers are planning to focus their future efforts on
-the Python 3.x series.
-
-This means that 2.7 will remain in place for a long time, running
-production systems that have not been ported to Python 3.x.
-Two consequences of the long-term significance of 2.7 are:
-
-* It's very likely the 2.7 release will have a longer period of
- maintenance compared to earlier 2.x versions. Python 2.7 will
- continue to be maintained while the transition to 3.x continues, and
- the developers are planning to support Python 2.7 with bug-fix
- releases beyond the typical two years.
-
-* A policy decision was made to silence warnings only of interest to
- developers. :exc:`DeprecationWarning` and its
- descendants are now ignored unless otherwise requested, preventing
- users from seeing warnings triggered by an application. This change
- was also made in the branch that will become Python 3.2. (Discussed
- on stdlib-sig and carried out in :issue:`7319`.)
-
- In previous releases, :exc:`DeprecationWarning` messages were
- enabled by default, providing Python developers with a clear
- indication of where their code may break in a future major version
- of Python.
-
- However, there are increasingly many users of Python-based
- applications who are not directly involved in the development of
- those applications. :exc:`DeprecationWarning` messages are
- irrelevant to such users, making them worry about an application
- that's actually working correctly and burdening application developers
- with responding to these concerns.
-
- You can re-enable display of :exc:`DeprecationWarning` messages by
- running Python with the :option:`-Wdefault <-W>` (short form:
- :option:`-Wd <-W>`) switch, or by setting the :envvar:`PYTHONWARNINGS`
- environment variable to ``"default"`` (or ``"d"``) before running
- Python. Python code can also re-enable them
- by calling ``warnings.simplefilter('default')``.
+Python 2.7 is the last major release in the 2.x series, as the Python
+maintainers have shifted the focus of their new feature development efforts
+to the Python 3.x series. This means that while Python 2 continues to
+receive bug fixes, and to be updated to build correctly on new hardware and
+versions of supported operating systems, there will be no new full feature
+releases for the language or standard library.
+
+However, while there is a large common subset between Python 2.7 and Python
+3, and many of the changes involved in migrating to that common subset, or
+directly to Python 3, can be safely automated, some other changes (notably
+those associated with Unicode handling) may require careful consideration,
+and preferably robust automated regression test suites, to migrate
+effectively.
+
+This means that Python 2.7 will remain in place for a long time, providing a
+stable and supported base platform for production systems that have not yet
+been ported to Python 3. The full expected lifecycle of the Python 2.7
+series is detailed in :pep:`373`.
+
+Some key consequences of the long-term significance of 2.7 are:
+
+* As noted above, the 2.7 release has a much longer period of maintenance
+ when compared to earlier 2.x versions. Python 2.7 is currently expected to
+ remain supported by the core development team (receiving security updates
+ and other bug fixes) until at least 2020 (10 years after its initial
+ release, compared to the more typical support period of 18-24 months).
+
+* As the Python 2.7 standard library ages, making effective use of the
+ Python Package Index (either directly or via a redistributor) becomes
+ more important for Python 2 users. In addition to a wide variety of third
+ party packages for various tasks, the available packages include backports
+ of new modules and features from the Python 3 standard library that are
+ compatible with Python 2, as well as various tools and libraries that can
+ make it easier to migrate to Python 3. The `Python Packaging User Guide
+ <https://packaging.python.org>`__ provides guidance on downloading and
+ installing software from the Python Package Index.
+
+* While the preferred approach to enhancing Python 2 is now the publication
+ of new packages on the Python Package Index, this approach doesn't
+ necessarily work in all cases, especially those related to network
+ security. In exceptional cases that cannot be handled adequately by
+ publishing new or updated packages on PyPI, the Python Enhancement
+ Proposal process may be used to make the case for adding new features
+ directly to the Python 2 standard library. Any such additions, and the
+ maintenance releases where they were added, will be noted in the
+ :ref:`py27-maintenance-enhancements` section below.
+
+For projects wishing to migrate from Python 2 to Python 3, or for library
+and framework developers wishing to support users on both Python 2 and
+Python 3, there are a variety of tools and guides available to help decide
+on a suitable approach and manage some of the technical details involved.
+The recommended starting point is the :ref:`pyporting-howto` HOWTO guide.
+
+
+Changes to the Handling of Deprecation Warnings
+===============================================
+
+For Python 2.7, a policy decision was made to silence warnings only of
+interest to developers by default. :exc:`DeprecationWarning` and its
+descendants are now ignored unless otherwise requested, preventing
+users from seeing warnings triggered by an application. This change
+was also made in the branch that became Python 3.2. (Discussed
+on stdlib-sig and carried out in :issue:`7319`.)
+
+In previous releases, :exc:`DeprecationWarning` messages were
+enabled by default, providing Python developers with a clear
+indication of where their code may break in a future major version
+of Python.
+
+However, there are increasingly many users of Python-based
+applications who are not directly involved in the development of
+those applications. :exc:`DeprecationWarning` messages are
+irrelevant to such users, making them worry about an application
+that's actually working correctly and burdening application developers
+with responding to these concerns.
+
+You can re-enable display of :exc:`DeprecationWarning` messages by
+running Python with the :option:`-Wdefault <-W>` (short form:
+:option:`-Wd <-W>`) switch, or by setting the :envvar:`PYTHONWARNINGS`
+environment variable to ``"default"`` (or ``"d"``) before running
+Python. Python code can also re-enable them
+by calling ``warnings.simplefilter('default')``.
+
+The ``unittest`` module also automatically reenables deprecation warnings
+when running tests.
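A small hedged sketch of re-enabling the warnings from within a program, as
described above::

    import warnings

    warnings.simplefilter('default')    # show DeprecationWarning again
    warnings.warn("old interface", DeprecationWarning)   # now printed to stderr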
Python 3.1 Features
@@ -2466,6 +2510,54 @@ For applications that embed Python:
.. ======================================================================
+.. _py27-maintenance-enhancements:
+
+New Features Added to Python 2.7 Maintenance Releases
+=====================================================
+
+New features may be added to Python 2.7 maintenance releases when the
+situation genuinely calls for it. Any such additions must go through
+the Python Enhancement Proposal process, and make a compelling case for why
+they can't be adequately addressed by either adding the new feature solely to
+Python 3, or else by publishing it on the Python Package Index.
+
+In addition to the specific proposals listed below, there is a general
+exemption allowing new ``-3`` warnings to be added in any Python 2.7
+maintenance release.
+
+
+PEP 434: IDLE Enhancement Exception for All Branches
+----------------------------------------------------
+
+:pep:`434` describes a general exemption for changes made to the IDLE
+development environment shipped along with Python. This exemption makes it
+possible for the IDLE developers to provide a more consistent user
+experience across all supported versions of Python 2 and 3.
+
+For details of any IDLE changes, refer to the NEWS file for the specific
+release.
+
+
+PEP 466: Network Security Enhancements for Python 2.7
+-----------------------------------------------------
+
+:pep:`466` describes a number of network security enhancement proposals
+that have been approved for inclusion in Python 2.7 maintenance releases,
+with the first of those changes appearing in the Python 2.7.7 release.
+
+:pep:`466` related features added in Python 2.7.7:
+
+* :func:`hmac.compare_digest` was added to make a timing attack resistant
+ comparison operation broadly available to Python 2 applications
+ (backported by Alex Gaynor in :issue:`21306`)
+
+* the version of OpenSSL linked with the prebuilt Windows installers
+ published on python.org was updated to 1.0.1g (contributed by
+ Zachary Ware in :issue:`21462`)
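A hedged usage sketch of the :func:`hmac.compare_digest` addition above; the
key, the message and the choice of SHA-256 via :mod:`hashlib` are illustrative
assumptions::

    import hashlib
    import hmac

    expected = hmac.new('secret-key', 'message', hashlib.sha256).hexdigest()
    received = expected                 # in practice this arrives with the request
    if hmac.compare_digest(received, expected):
        print "signature verified"      # constant-time comparison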
+
+
+.. ======================================================================
+
.. _acks27:
Acknowledgements
diff --git a/Include/abstract.h b/Include/abstract.h
index a377423..78a1825 100644
--- a/Include/abstract.h
+++ b/Include/abstract.h
@@ -1149,7 +1149,7 @@ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx*/
PyAPI_FUNC(PyObject *) PySequence_Fast(PyObject *o, const char* m);
/*
- Returns the sequence, o, as a tuple, unless it's already a
+ Return the sequence, o, as a list, unless it's already a
tuple or list. Use PySequence_Fast_GET_ITEM to access the
members of this list, and PySequence_Fast_GET_SIZE to get its length.
diff --git a/Include/cStringIO.h b/Include/cStringIO.h
index 6ca44a8..973a471 100644
--- a/Include/cStringIO.h
+++ b/Include/cStringIO.h
@@ -7,7 +7,7 @@ extern "C" {
This header provides access to cStringIO objects from C.
Functions are provided for calling cStringIO objects and
- macros are provided for testing whether you have cStringIO
+ macros are provided for testing whether you have cStringIO
objects.
Before calling any of the functions or macros, you must initialize
@@ -28,7 +28,7 @@ extern "C" {
/* Basic functions to manipulate cStringIO objects from C */
static struct PycStringIO_CAPI {
-
+
/* Read a string from an input object. If the last argument
is -1, the remainder will be read.
*/
diff --git a/Include/datetime.h b/Include/datetime.h
index 47abe5c..c0e7ffd 100644
--- a/Include/datetime.h
+++ b/Include/datetime.h
@@ -42,7 +42,7 @@ typedef struct
typedef struct
{
- PyObject_HEAD /* a pure abstract base clase */
+ PyObject_HEAD /* a pure abstract base class */
} PyDateTime_TZInfo;
diff --git a/Include/import.h b/Include/import.h
index 1b7fe0a..89f51b5 100644
--- a/Include/import.h
+++ b/Include/import.h
@@ -40,8 +40,8 @@ PyAPI_FUNC(struct filedescr *) _PyImport_FindModule(
PyAPI_FUNC(int) _PyImport_IsScript(struct filedescr *);
PyAPI_FUNC(void) _PyImport_ReInitLock(void);
-PyAPI_FUNC(PyObject *)_PyImport_FindExtension(char *, char *);
-PyAPI_FUNC(PyObject *)_PyImport_FixupExtension(char *, char *);
+PyAPI_FUNC(PyObject *) _PyImport_FindExtension(char *, char *);
+PyAPI_FUNC(PyObject *) _PyImport_FixupExtension(char *, char *);
struct _inittab {
char *name;
diff --git a/Include/intobject.h b/Include/intobject.h
index 78746a6..252eea9 100644
--- a/Include/intobject.h
+++ b/Include/intobject.h
@@ -40,6 +40,7 @@ PyAPI_FUNC(PyObject *) PyInt_FromSize_t(size_t);
PyAPI_FUNC(PyObject *) PyInt_FromSsize_t(Py_ssize_t);
PyAPI_FUNC(long) PyInt_AsLong(PyObject *);
PyAPI_FUNC(Py_ssize_t) PyInt_AsSsize_t(PyObject *);
+PyAPI_FUNC(int) _PyInt_AsInt(PyObject *);
PyAPI_FUNC(unsigned long) PyInt_AsUnsignedLongMask(PyObject *);
#ifdef HAVE_LONG_LONG
PyAPI_FUNC(unsigned PY_LONG_LONG) PyInt_AsUnsignedLongLongMask(PyObject *);
diff --git a/Include/listobject.h b/Include/listobject.h
index c445873..7cd616b 100644
--- a/Include/listobject.h
+++ b/Include/listobject.h
@@ -41,7 +41,7 @@ typedef struct {
PyAPI_DATA(PyTypeObject) PyList_Type;
#define PyList_Check(op) \
- PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_LIST_SUBCLASS)
+ PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_LIST_SUBCLASS)
#define PyList_CheckExact(op) (Py_TYPE(op) == &PyList_Type)
PyAPI_FUNC(PyObject *) PyList_New(Py_ssize_t size);
diff --git a/Include/longobject.h b/Include/longobject.h
index 2b40461..8be2345 100644
--- a/Include/longobject.h
+++ b/Include/longobject.h
@@ -25,6 +25,7 @@ PyAPI_FUNC(long) PyLong_AsLongAndOverflow(PyObject *, int *);
PyAPI_FUNC(unsigned long) PyLong_AsUnsignedLong(PyObject *);
PyAPI_FUNC(unsigned long) PyLong_AsUnsignedLongMask(PyObject *);
PyAPI_FUNC(Py_ssize_t) PyLong_AsSsize_t(PyObject *);
+PyAPI_FUNC(int) _PyLong_AsInt(PyObject *);
PyAPI_FUNC(PyObject *) PyLong_GetInfo(void);
/* For use by intobject.c only */
diff --git a/Include/node.h b/Include/node.h
index e23e709..9f6760c 100644
--- a/Include/node.h
+++ b/Include/node.h
@@ -20,6 +20,9 @@ PyAPI_FUNC(node *) PyNode_New(int type);
PyAPI_FUNC(int) PyNode_AddChild(node *n, int type,
char *str, int lineno, int col_offset);
PyAPI_FUNC(void) PyNode_Free(node *n);
+#ifndef Py_LIMITED_API
+Py_ssize_t _PyNode_SizeOf(node *n);
+#endif
/* Node access functions */
#define NCH(n) ((n)->n_nchildren)
diff --git a/Include/object.h b/Include/object.h
index edcd3fc..afbc68d 100644
--- a/Include/object.h
+++ b/Include/object.h
@@ -971,24 +971,39 @@ chain of N deallocations is broken into N / PyTrash_UNWIND_LEVEL pieces,
with the call stack never exceeding a depth of PyTrash_UNWIND_LEVEL.
*/
+/* This is the old private API, invoked by the macros before 2.7.4.
+ Kept for binary compatibility of extensions. */
PyAPI_FUNC(void) _PyTrash_deposit_object(PyObject*);
PyAPI_FUNC(void) _PyTrash_destroy_chain(void);
PyAPI_DATA(int) _PyTrash_delete_nesting;
PyAPI_DATA(PyObject *) _PyTrash_delete_later;
+/* The new thread-safe private API, invoked by the macros below. */
+PyAPI_FUNC(void) _PyTrash_thread_deposit_object(PyObject*);
+PyAPI_FUNC(void) _PyTrash_thread_destroy_chain(void);
+
#define PyTrash_UNWIND_LEVEL 50
+/* Note the workaround for when the thread state is NULL (issue #17703) */
#define Py_TRASHCAN_SAFE_BEGIN(op) \
- if (_PyTrash_delete_nesting < PyTrash_UNWIND_LEVEL) { \
- ++_PyTrash_delete_nesting;
- /* The body of the deallocator is here. */
+ do { \
+ PyThreadState *_tstate = PyThreadState_GET(); \
+ if (!_tstate || \
+ _tstate->trash_delete_nesting < PyTrash_UNWIND_LEVEL) { \
+ if (_tstate) \
+ ++_tstate->trash_delete_nesting;
+ /* The body of the deallocator is here. */
#define Py_TRASHCAN_SAFE_END(op) \
- --_PyTrash_delete_nesting; \
- if (_PyTrash_delete_later && _PyTrash_delete_nesting <= 0) \
- _PyTrash_destroy_chain(); \
- } \
- else \
- _PyTrash_deposit_object((PyObject*)op);
+ if (_tstate) { \
+ --_tstate->trash_delete_nesting; \
+ if (_tstate->trash_delete_later \
+ && _tstate->trash_delete_nesting <= 0) \
+ _PyTrash_thread_destroy_chain(); \
+ } \
+ } \
+ else \
+ _PyTrash_thread_deposit_object((PyObject*)op); \
+ } while (0);
#ifdef __cplusplus
}
diff --git a/Include/osdefs.h b/Include/osdefs.h
index 6937659..77af923 100644
--- a/Include/osdefs.h
+++ b/Include/osdefs.h
@@ -36,6 +36,14 @@ extern "C" {
#endif
/* Max pathname length */
+#ifdef __hpux
+#include <sys/param.h>
+#include <limits.h>
+#ifndef PATH_MAX
+#define PATH_MAX MAXPATHLEN
+#endif
+#endif
+
#ifndef MAXPATHLEN
#if defined(PATH_MAX) && PATH_MAX > 1024
#define MAXPATHLEN PATH_MAX
diff --git a/Include/patchlevel.h b/Include/patchlevel.h
index 0a05696..732b07e 100644
--- a/Include/patchlevel.h
+++ b/Include/patchlevel.h
@@ -6,7 +6,7 @@
defined(PY_MAJOR_VERSION).
When the major or minor version changes, the VERSION variable in
- configure.in must also be changed.
+ configure.ac must also be changed.
There is also (independent) API version information in modsupport.h.
*/
@@ -22,12 +22,12 @@
/*--start constants--*/
#define PY_MAJOR_VERSION 2
#define PY_MINOR_VERSION 7
-#define PY_MICRO_VERSION 3
+#define PY_MICRO_VERSION 8
#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL
#define PY_RELEASE_SERIAL 0
/* Version as a string */
-#define PY_VERSION "2.7.3"
+#define PY_VERSION "2.7.8"
/*--end constants--*/
/* Subversion Revision number of this file (not of the repository). Empty
diff --git a/Include/pyfpe.h b/Include/pyfpe.h
index 19110ab..e957119 100644
--- a/Include/pyfpe.h
+++ b/Include/pyfpe.h
@@ -4,8 +4,8 @@
extern "C" {
#endif
/*
- ---------------------------------------------------------------------
- / Copyright (c) 1996. \
+ ---------------------------------------------------------------------
+ / Copyright (c) 1996. \
| The Regents of the University of California. |
| All rights reserved. |
| |
@@ -37,8 +37,8 @@ extern "C" {
| opinions of authors expressed herein do not necessarily state or |
| reflect those of the United States Government or the University |
| of California, and shall not be used for advertising or product |
- \ endorsement purposes. /
- ---------------------------------------------------------------------
+ \ endorsement purposes. /
+ ---------------------------------------------------------------------
*/
/*
diff --git a/Include/pyport.h b/Include/pyport.h
index 1abc14c..85e852f 100644
--- a/Include/pyport.h
+++ b/Include/pyport.h
@@ -93,9 +93,12 @@ Used in: PY_LONG_LONG
* uint32_t to be such a type unless stdint.h or inttypes.h defines uint32_t.
* However, it doesn't set HAVE_UINT32_T, so we do that here.
*/
-#if (defined UINT32_MAX || defined uint32_t)
-#ifndef PY_UINT32_T
+#ifdef uint32_t
#define HAVE_UINT32_T 1
+#endif
+
+#ifdef HAVE_UINT32_T
+#ifndef PY_UINT32_T
#define PY_UINT32_T uint32_t
#endif
#endif
@@ -103,23 +106,33 @@ Used in: PY_LONG_LONG
/* Macros for a 64-bit unsigned integer type; used for type 'twodigits' in the
* long integer implementation, when 30-bit digits are enabled.
*/
-#if (defined UINT64_MAX || defined uint64_t)
-#ifndef PY_UINT64_T
+#ifdef uint64_t
#define HAVE_UINT64_T 1
+#endif
+
+#ifdef HAVE_UINT64_T
+#ifndef PY_UINT64_T
#define PY_UINT64_T uint64_t
#endif
#endif
/* Signed variants of the above */
-#if (defined INT32_MAX || defined int32_t)
-#ifndef PY_INT32_T
+#ifdef int32_t
#define HAVE_INT32_T 1
+#endif
+
+#ifdef HAVE_INT32_T
+#ifndef PY_INT32_T
#define PY_INT32_T int32_t
#endif
#endif
-#if (defined INT64_MAX || defined int64_t)
-#ifndef PY_INT64_T
+
+#ifdef int64_t
#define HAVE_INT64_T 1
+#endif
+
+#ifdef HAVE_INT64_T
+#ifndef PY_INT64_T
#define PY_INT64_T int64_t
#endif
#endif
@@ -549,6 +562,30 @@ extern "C" {
_Py_set_387controlword(old_387controlword)
#endif
+/* get and set x87 control word for VisualStudio/x86 */
+#if defined(_MSC_VER) && !defined(_WIN64) /* x87 not supported in 64-bit */
+#define HAVE_PY_SET_53BIT_PRECISION 1
+#define _Py_SET_53BIT_PRECISION_HEADER \
+ unsigned int old_387controlword, new_387controlword, out_387controlword
+/* We use the __control87_2 function to set only the x87 control word.
+ The SSE control word is unaffected. */
+#define _Py_SET_53BIT_PRECISION_START \
+ do { \
+ __control87_2(0, 0, &old_387controlword, NULL); \
+ new_387controlword = \
+ (old_387controlword & ~(_MCW_PC | _MCW_RC)) | (_PC_53 | _RC_NEAR); \
+ if (new_387controlword != old_387controlword) \
+ __control87_2(new_387controlword, _MCW_PC | _MCW_RC, \
+ &out_387controlword, NULL); \
+ } while (0)
+#define _Py_SET_53BIT_PRECISION_END \
+ do { \
+ if (new_387controlword != old_387controlword) \
+ __control87_2(old_387controlword, _MCW_PC | _MCW_RC, \
+ &out_387controlword, NULL); \
+ } while (0)
+#endif
+
/* default definitions are empty */
#ifndef HAVE_PY_SET_53BIT_PRECISION
#define _Py_SET_53BIT_PRECISION_HEADER
@@ -622,7 +659,7 @@ extern char * _getpty(int *, int, mode_t, int);
/* On QNX 6, struct termio must be declared by including sys/termio.h
if TCGETA, TCSETA, TCSETAW, or TCSETAF are used. sys/termio.h must
be included before termios.h or it will generate an error. */
-#ifdef HAVE_SYS_TERMIO_H
+#if defined(HAVE_SYS_TERMIO_H) && !defined(__hpux)
#include <sys/termio.h>
#endif
diff --git a/Include/pystate.h b/Include/pystate.h
index 8d74940..f2cfc30 100644
--- a/Include/pystate.h
+++ b/Include/pystate.h
@@ -95,6 +95,9 @@ typedef struct _ts {
PyObject *async_exc; /* Asynchronous exception to raise */
long thread_id; /* Thread id where this tstate was created */
+ int trash_delete_nesting;
+ PyObject *trash_delete_later;
+
/* XXX signal handlers should also be here */
} PyThreadState;
diff --git a/Include/pythonrun.h b/Include/pythonrun.h
index 6bfc175..c319c52 100644
--- a/Include/pythonrun.h
+++ b/Include/pythonrun.h
@@ -146,6 +146,8 @@ PyAPI_FUNC(void) PyFloat_Fini(void);
PyAPI_FUNC(void) PyOS_FiniInterrupts(void);
PyAPI_FUNC(void) PyByteArray_Fini(void);
+PyAPI_DATA(PyThreadState *) _Py_Finalizing;
+
/* Stuff with no proper home (yet) */
PyAPI_FUNC(char *) PyOS_Readline(FILE *, FILE *, char *);
PyAPI_DATA(int) (*PyOS_InputHook)(void);
diff --git a/Include/sysmodule.h b/Include/sysmodule.h
index 16af119..bf962b5 100644
--- a/Include/sysmodule.h
+++ b/Include/sysmodule.h
@@ -19,9 +19,6 @@ PyAPI_FUNC(void) PySys_WriteStdout(const char *format, ...)
PyAPI_FUNC(void) PySys_WriteStderr(const char *format, ...)
Py_GCC_ATTRIBUTE((format(printf, 1, 2)));
-PyAPI_DATA(PyObject *) _PySys_TraceFunc, *_PySys_ProfileFunc;
-PyAPI_DATA(int) _PySys_CheckInterval;
-
PyAPI_FUNC(void) PySys_ResetWarnOptions(void);
PyAPI_FUNC(void) PySys_AddWarnOption(char *);
PyAPI_FUNC(int) PySys_HasWarnOptions(void);
diff --git a/Include/timefuncs.h b/Include/timefuncs.h
index 553142d..a9d26a7 100644
--- a/Include/timefuncs.h
+++ b/Include/timefuncs.h
@@ -16,6 +16,9 @@ extern "C" {
*/
PyAPI_FUNC(time_t) _PyTime_DoubleToTimet(double x);
+/* Get the current time since the epoch in seconds */
+PyAPI_FUNC(double) _PyTime_FloatTime(void);
+
#ifdef __cplusplus
}
diff --git a/Include/weakrefobject.h b/Include/weakrefobject.h
index f15c9d9..e7c0eae 100644
--- a/Include/weakrefobject.h
+++ b/Include/weakrefobject.h
@@ -49,9 +49,6 @@ PyAPI_DATA(PyTypeObject) _PyWeakref_CallableProxyType;
((Py_TYPE(op) == &_PyWeakref_ProxyType) || \
(Py_TYPE(op) == &_PyWeakref_CallableProxyType))
-/* This macro calls PyWeakref_CheckRef() last since that can involve a
- function call; this makes it more likely that the function call
- will be avoided. */
#define PyWeakref_Check(op) \
(PyWeakref_CheckRef(op) || PyWeakref_CheckProxy(op))
@@ -66,7 +63,17 @@ PyAPI_FUNC(Py_ssize_t) _PyWeakref_GetWeakrefCount(PyWeakReference *head);
PyAPI_FUNC(void) _PyWeakref_ClearRef(PyWeakReference *self);
-#define PyWeakref_GET_OBJECT(ref) (((PyWeakReference *)(ref))->wr_object)
+/* Explanation for the Py_REFCNT() check: when a weakref's target is part
+ of a long chain of deallocations which triggers the trashcan mechanism,
+ clearing the weakrefs can be delayed long after the target's refcount
+ has dropped to zero. In the meantime, code accessing the weakref will
+ be able to "see" the target object even though it is supposed to be
+ unreachable. See issue #16602. */
+
+#define PyWeakref_GET_OBJECT(ref) \
+ (Py_REFCNT(((PyWeakReference *)(ref))->wr_object) > 0 \
+ ? ((PyWeakReference *)(ref))->wr_object \
+ : Py_None)
#ifdef __cplusplus
diff --git a/LICENSE b/LICENSE
index c7f2094..f9ca2c9 100644
--- a/LICENSE
+++ b/LICENSE
@@ -36,34 +36,9 @@ the various releases.
2.1 2.0+1.6.1 2001 PSF no
2.0.1 2.0+1.6.1 2001 PSF yes
2.1.1 2.1+2.0.1 2001 PSF yes
- 2.2 2.1.1 2001 PSF yes
2.1.2 2.1.1 2002 PSF yes
2.1.3 2.1.2 2002 PSF yes
- 2.2.1 2.2 2002 PSF yes
- 2.2.2 2.2.1 2002 PSF yes
- 2.2.3 2.2.2 2003 PSF yes
- 2.3 2.2.2 2002-2003 PSF yes
- 2.3.1 2.3 2002-2003 PSF yes
- 2.3.2 2.3.1 2002-2003 PSF yes
- 2.3.3 2.3.2 2002-2003 PSF yes
- 2.3.4 2.3.3 2004 PSF yes
- 2.3.5 2.3.4 2005 PSF yes
- 2.4 2.3 2004 PSF yes
- 2.4.1 2.4 2005 PSF yes
- 2.4.2 2.4.1 2005 PSF yes
- 2.4.3 2.4.2 2006 PSF yes
- 2.4.4 2.4.3 2006 PSF yes
- 2.5 2.4 2006 PSF yes
- 2.5.1 2.5 2007 PSF yes
- 2.5.2 2.5.1 2008 PSF yes
- 2.5.3 2.5.2 2008 PSF yes
- 2.6 2.5 2008 PSF yes
- 2.6.1 2.6 2008 PSF yes
- 2.6.2 2.6.1 2009 PSF yes
- 2.6.3 2.6.2 2009 PSF yes
- 2.6.4 2.6.3 2009 PSF yes
- 2.6.5 2.6.4 2010 PSF yes
- 2.7 2.6 2010 PSF yes
+ 2.2 and above 2.1.1 2001-now PSF yes
Footnotes:
@@ -99,8 +74,8 @@ analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python alone or in any derivative version,
provided, however, that PSF's License Agreement and PSF's notice of copyright,
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-2011, 2012 Python Software Foundation; All Rights Reserved" are retained in Python
-alone or in any derivative version prepared by Licensee.
+2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are retained
+in Python alone or in any derivative version prepared by Licensee.
3. In the event Licensee prepares a derivative work that is based on
or incorporates Python or any part thereof, and wants to make
diff --git a/Lib/BaseHTTPServer.py b/Lib/BaseHTTPServer.py
index 1a39485..deaf2f9 100644
--- a/Lib/BaseHTTPServer.py
+++ b/Lib/BaseHTTPServer.py
@@ -447,13 +447,13 @@ class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
specified as subsequent arguments (it's just like
printf!).
- The client host and current date/time are prefixed to
- every message.
+ The client IP address and current date/time are prefixed to every
+ message.
"""
sys.stderr.write("%s - - [%s] %s\n" %
- (self.address_string(),
+ (self.client_address[0],
self.log_date_time_string(),
format%args))
diff --git a/Lib/CGIHTTPServer.py b/Lib/CGIHTTPServer.py
index 13ca0b5..2acf913 100644
--- a/Lib/CGIHTTPServer.py
+++ b/Lib/CGIHTTPServer.py
@@ -84,9 +84,11 @@ class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
path begins with one of the strings in self.cgi_directories
(and the next character is a '/' or the end of the string).
"""
- splitpath = _url_collapse_path_split(self.path)
- if splitpath[0] in self.cgi_directories:
- self.cgi_info = splitpath
+ collapsed_path = _url_collapse_path(urllib.unquote(self.path))
+ dir_sep = collapsed_path.find('/', 1)
+ head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
+ if head in self.cgi_directories:
+ self.cgi_info = head, tail
return True
return False
@@ -103,18 +105,17 @@ class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def run_cgi(self):
"""Execute a CGI script."""
- path = self.path
dir, rest = self.cgi_info
- i = path.find('/', len(dir) + 1)
+ i = rest.find('/')
while i >= 0:
- nextdir = path[:i]
- nextrest = path[i+1:]
+ nextdir = rest[:i]
+ nextrest = rest[i+1:]
scriptdir = self.translate_path(nextdir)
if os.path.isdir(scriptdir):
dir, rest = nextdir, nextrest
- i = path.find('/', len(dir) + 1)
+ i = rest.find('/')
else:
break
@@ -298,44 +299,46 @@ class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
self.log_message("CGI script exited OK")
-# TODO(gregory.p.smith): Move this into an appropriate library.
-def _url_collapse_path_split(path):
+def _url_collapse_path(path):
"""
Given a URL path, remove extra '/'s and '.' path elements and collapse
- any '..' references.
+ any '..' references, returning the collapsed path.
Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.
+ The utility of this function is limited to the is_cgi method and helps
+ prevent some security attacks.
Returns: The reconstituted URL, which will always start with a '/'.
Raises: IndexError if too many '..' occur within the path.
+
"""
# Similar to os.path.split(os.path.normpath(path)) but specific to URL
# path semantics rather than local operating system semantics.
- path_parts = []
- for part in path.split('/'):
- if part == '.':
- path_parts.append('')
- else:
- path_parts.append(part)
- # Filter out blank non trailing parts before consuming the '..'.
- path_parts = [part for part in path_parts[:-1] if part] + path_parts[-1:]
+ path_parts = path.split('/')
+ head_parts = []
+ for part in path_parts[:-1]:
+ if part == '..':
+ head_parts.pop() # IndexError if more '..' than prior parts
+ elif part and part != '.':
+ head_parts.append(part)
if path_parts:
tail_part = path_parts.pop()
+ if tail_part:
+ if tail_part == '..':
+ head_parts.pop()
+ tail_part = ''
+ elif tail_part == '.':
+ tail_part = ''
else:
tail_part = ''
- head_parts = []
- for part in path_parts:
- if part == '..':
- head_parts.pop()
- else:
- head_parts.append(part)
- if tail_part and tail_part == '..':
- head_parts.pop()
- tail_part = ''
- return ('/' + '/'.join(head_parts), tail_part)
+
+ splitpath = ('/' + '/'.join(head_parts), tail_part)
+ collapsed_path = "/".join(splitpath)
+
+ return collapsed_path
nobody = None
diff --git a/Lib/Cookie.py b/Lib/Cookie.py
index 323450b..452d4e3 100644
--- a/Lib/Cookie.py
+++ b/Lib/Cookie.py
@@ -1,6 +1,3 @@
-#!/usr/bin/env python
-#
-
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
@@ -238,7 +235,7 @@ class CookieError(Exception):
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a forward-slash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
-# quoted with a preceeding '\' slash.
+# quoted with a preceding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
@@ -390,7 +387,7 @@ def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
- return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
+ return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
@@ -539,7 +536,7 @@ _CookiePattern = re.compile(
r"(?P<val>" # Start of group 'val'
r'"(?:[^\\"]|\\.)*"' # Any doublequoted string
r"|" # or
- r"\w{3},\s[\w\d-]{9,11}\s[\d:]{8}\sGMT" # Special case for "expires" attr
+ r"\w{3},\s[\s\w\d-]{9,11}\s[\d:]{8}\sGMT" # Special case for "expires" attr
r"|" # or
""+ _LegalCharsPatt +"*" # Any word or empty string
r")" # End of group 'val'
diff --git a/Lib/HTMLParser.py b/Lib/HTMLParser.py
index d4e14d4..3f97830 100644
--- a/Lib/HTMLParser.py
+++ b/Lib/HTMLParser.py
@@ -22,17 +22,20 @@ charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
-tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
+
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
+# note: if you change tagfind/attrfind remember to update locatestarttagend too
+tagfind = re.compile('([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*')
+# this regex is currently unused, but left for backward compatibility
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
attrfind = re.compile(
- r'[\s/]*((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
+ r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend = re.compile(r"""
- <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
+ <[a-zA-Z][^\t\n\r\f />\x00]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
@@ -192,9 +195,9 @@ class HTMLParser(markupbase.ParserBase):
i = self.updatepos(i, k)
continue
else:
- if ";" in rawdata[i:]: #bail by consuming &#
- self.handle_data(rawdata[0:2])
- i = self.updatepos(i, 2)
+ if ";" in rawdata[i:]: # bail by consuming '&#'
+ self.handle_data(rawdata[i:i+2])
+ i = self.updatepos(i, i+2)
break
elif startswith('&', i):
match = entityref.match(rawdata, i)
@@ -289,7 +292,7 @@ class HTMLParser(markupbase.ParserBase):
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
- self.lasttag = tag = rawdata[i+1:k].lower()
+ self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = attrfind.match(rawdata, k)
@@ -373,14 +376,14 @@ class HTMLParser(markupbase.ParserBase):
self.handle_data(rawdata[i:gtpos])
return gtpos
# find the name: w3.org/TR/html5/tokenization.html#tag-name-state
- namematch = tagfind_tolerant.match(rawdata, i+2)
+ namematch = tagfind.match(rawdata, i+2)
if not namematch:
# w3.org/TR/html5/tokenization.html#end-tag-open-state
if rawdata[i:i+3] == '</>':
return i+3
else:
return self.parse_bogus_comment(i)
- tagname = namematch.group().lower()
+ tagname = namematch.group(1).lower()
# consume and ignore other stuff between the name and the >
# Note: this is not 100% correct, since we might have things like
# </tag attr=">">, but looking for > after the name should cover
diff --git a/Lib/Queue.py b/Lib/Queue.py
index 2db8d76..00364b3 100644
--- a/Lib/Queue.py
+++ b/Lib/Queue.py
@@ -109,7 +109,7 @@ class Queue:
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
- a positive number, it blocks at most 'timeout' seconds and raises
+ a non-negative number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
@@ -125,7 +125,7 @@ class Queue:
while self._qsize() == self.maxsize:
self.not_full.wait()
elif timeout < 0:
- raise ValueError("'timeout' must be a positive number")
+ raise ValueError("'timeout' must be a non-negative number")
else:
endtime = _time() + timeout
while self._qsize() == self.maxsize:
@@ -152,7 +152,7 @@ class Queue:
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
- a positive number, it blocks at most 'timeout' seconds and raises
+ a non-negative number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
@@ -167,7 +167,7 @@ class Queue:
while not self._qsize():
self.not_empty.wait()
elif timeout < 0:
- raise ValueError("'timeout' must be a positive number")
+ raise ValueError("'timeout' must be a non-negative number")
else:
endtime = _time() + timeout
while not self._qsize():
diff --git a/Lib/SimpleHTTPServer.py b/Lib/SimpleHTTPServer.py
index 3e0334d..d497e1e 100644
--- a/Lib/SimpleHTTPServer.py
+++ b/Lib/SimpleHTTPServer.py
@@ -43,8 +43,10 @@ class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Serve a GET request."""
f = self.send_head()
if f:
- self.copyfile(f, self.wfile)
- f.close()
+ try:
+ self.copyfile(f, self.wfile)
+ finally:
+ f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
@@ -88,13 +90,17 @@ class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
except IOError:
self.send_error(404, "File not found")
return None
- self.send_response(200)
- self.send_header("Content-type", ctype)
- fs = os.fstat(f.fileno())
- self.send_header("Content-Length", str(fs[6]))
- self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
- self.end_headers()
- return f
+ try:
+ self.send_response(200)
+ self.send_header("Content-type", ctype)
+ fs = os.fstat(f.fileno())
+ self.send_header("Content-Length", str(fs[6]))
+ self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
+ self.end_headers()
+ return f
+ except:
+ f.close()
+ raise
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
@@ -149,6 +155,8 @@ class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
+ # Don't forget explicit trailing slash when normalizing. Issue17324
+ trailing_slash = path.rstrip().endswith('/')
path = posixpath.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words)
@@ -158,6 +166,8 @@ class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
+ if trailing_slash:
+ path += '/'
return path
def copyfile(self, source, outputfile):
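The translate_path change above remembers an explicit trailing slash before posixpath.normpath strips it (Issue17324). A standalone sketch of just that normalization step, not the full method; the helper name is hypothetical:

import posixpath
import urllib

def normalize(path):
    # drop query string and fragment, as translate_path does
    path = path.split('?', 1)[0]
    path = path.split('#', 1)[0]
    trailing_slash = path.rstrip().endswith('/')
    path = posixpath.normpath(urllib.unquote(path))
    if trailing_slash and not path.endswith('/'):
        path += '/'
    return path

print normalize('/foo/bar/?q=1')   # '/foo/bar/'  (trailing slash preserved)
print normalize('/foo//./baz')     # '/foo/baz'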
diff --git a/Lib/SimpleXMLRPCServer.py b/Lib/SimpleXMLRPCServer.py
index 4fefa5a..fcc3d2e 100644
--- a/Lib/SimpleXMLRPCServer.py
+++ b/Lib/SimpleXMLRPCServer.py
@@ -1,4 +1,4 @@
-"""Simple XML-RPC Server.
+r"""Simple XML-RPC Server.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
@@ -704,4 +704,5 @@ if __name__ == '__main__':
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
+ server.register_multicall_functions()
server.serve_forever()
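With register_multicall_functions() added to the __main__ example above, the demo server now answers system.multicall requests. A client-side sketch, assuming that server is running on localhost:8000:

import xmlrpclib

proxy = xmlrpclib.ServerProxy('http://localhost:8000')
multi = xmlrpclib.MultiCall(proxy)
multi.pow(2, 9)
multi.add(1, 2)
print list(multi())   # [512, 3], both calls sent in one system.multicall round trip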
diff --git a/Lib/SocketServer.py b/Lib/SocketServer.py
index 2ff3888..afb8686 100644
--- a/Lib/SocketServer.py
+++ b/Lib/SocketServer.py
@@ -133,6 +133,7 @@ import socket
import select
import sys
import os
+import errno
try:
import threading
except ImportError:
@@ -147,6 +148,15 @@ if hasattr(socket, "AF_UNIX"):
"ThreadingUnixStreamServer",
"ThreadingUnixDatagramServer"])
+def _eintr_retry(func, *args):
+ """restart a system call interrupted by EINTR"""
+ while True:
+ try:
+ return func(*args)
+ except (OSError, select.error) as e:
+ if e.args[0] != errno.EINTR:
+ raise
+
class BaseServer:
"""Base class for server classes.
@@ -222,7 +232,8 @@ class BaseServer:
# connecting to the socket to wake this up instead of
# polling. Polling reduces our responsiveness to a
# shutdown request and wastes cpu at all other times.
- r, w, e = select.select([self], [], [], poll_interval)
+ r, w, e = _eintr_retry(select.select, [self], [], [],
+ poll_interval)
if self in r:
self._handle_request_noblock()
finally:
@@ -262,7 +273,7 @@ class BaseServer:
timeout = self.timeout
elif self.timeout is not None:
timeout = min(timeout, self.timeout)
- fd_sets = select.select([self], [], [], timeout)
+ fd_sets = _eintr_retry(select.select, [self], [], [], timeout)
if not fd_sets[0]:
self.handle_timeout()
return
@@ -502,35 +513,37 @@ class ForkingMixIn:
def collect_children(self):
"""Internal routine to wait for children that have exited."""
- if self.active_children is None: return
+ if self.active_children is None:
+ return
+
+ # If we're above the max number of children, wait and reap them until
+ # we go back below threshold. Note that we use waitpid(-1) below to be
+ # able to collect children in size(<defunct children>) syscalls instead
+ # of size(<children>): the downside is that this might reap children
+ # which we didn't spawn, which is why we only resort to this when we're
+ # above max_children.
while len(self.active_children) >= self.max_children:
- # XXX: This will wait for any child process, not just ones
- # spawned by this library. This could confuse other
- # libraries that expect to be able to wait for their own
- # children.
- try:
- pid, status = os.waitpid(0, 0)
- except os.error:
- pid = None
- if pid not in self.active_children: continue
- self.active_children.remove(pid)
-
- # XXX: This loop runs more system calls than it ought
- # to. There should be a way to put the active_children into a
- # process group and then use os.waitpid(-pgid) to wait for any
- # of that set, but I couldn't find a way to allocate pgids
- # that couldn't collide.
- for child in self.active_children:
try:
- pid, status = os.waitpid(child, os.WNOHANG)
- except os.error:
- pid = None
- if not pid: continue
+ pid, _ = os.waitpid(-1, 0)
+ self.active_children.discard(pid)
+ except OSError as e:
+ if e.errno == errno.ECHILD:
+ # we don't have any children, we're done
+ self.active_children.clear()
+ elif e.errno != errno.EINTR:
+ break
+
+ # Now reap all defunct children.
+ for pid in self.active_children.copy():
try:
- self.active_children.remove(pid)
- except ValueError, e:
- raise ValueError('%s. x=%d and list=%r' % (e.message, pid,
- self.active_children))
+ pid, _ = os.waitpid(pid, os.WNOHANG)
+ # if the child hasn't exited yet, pid will be 0 and ignored by
+ # discard() below
+ self.active_children.discard(pid)
+ except OSError as e:
+ if e.errno == errno.ECHILD:
+ # someone else reaped it
+ self.active_children.discard(pid)
def handle_timeout(self):
"""Wait for zombies after self.timeout seconds of inactivity.
@@ -546,8 +559,8 @@ class ForkingMixIn:
if pid:
# Parent process
if self.active_children is None:
- self.active_children = []
- self.active_children.append(pid)
+ self.active_children = set()
+ self.active_children.add(pid)
self.close_request(request) #close handle in parent process
return
else:
@@ -690,7 +703,12 @@ class StreamRequestHandler(BaseRequestHandler):
def finish(self):
if not self.wfile.closed:
- self.wfile.flush()
+ try:
+ self.wfile.flush()
+ except socket.error:
+ # A final socket error may have occurred here, such as
+ # the local error ECONNABORTED.
+ pass
self.wfile.close()
self.rfile.close()
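The SocketServer hunks above introduce _eintr_retry so the select() calls in serve_forever()/handle_request() survive signal interruptions, and ForkingMixIn now tracks children in a set reaped via waitpid(-1). A self-contained sketch of the same retry pattern; the socket in the commented-out call is hypothetical:

import errno
import select

def eintr_retry(func, *args):
    """Retry func(*args) while it keeps failing with EINTR."""
    while True:
        try:
            return func(*args)
        except (OSError, select.error) as e:
            if e.args[0] != errno.EINTR:
                raise

# Example use (sock is hypothetical):
# readable, _, _ = eintr_retry(select.select, [sock], [], [], 0.5)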
diff --git a/Lib/StringIO.py b/Lib/StringIO.py
index f74a066..b63525b 100644
--- a/Lib/StringIO.py
+++ b/Lib/StringIO.py
@@ -158,7 +158,7 @@ class StringIO:
newpos = self.len
else:
newpos = i+1
- if length is not None and length > 0:
+ if length is not None and length >= 0:
if self.pos + length < newpos:
newpos = self.pos + length
r = self.buf[self.pos:newpos]
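The StringIO.readline change above honours a size argument of 0 instead of silently ignoring it. A quick sketch of the difference:

from StringIO import StringIO

s = StringIO('first line\nsecond line\n')
print repr(s.readline(0))   # '' after the >= 0 fix; previously the whole first line
print repr(s.readline())    # 'first line\n'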
diff --git a/Lib/_LWPCookieJar.py b/Lib/_LWPCookieJar.py
index 2a4fa7b..90cc633 100644
--- a/Lib/_LWPCookieJar.py
+++ b/Lib/_LWPCookieJar.py
@@ -48,7 +48,7 @@ def lwp_cookie_str(cookie):
class LWPCookieJar(FileCookieJar):
"""
- The LWPCookieJar saves a sequence of"Set-Cookie3" lines.
+ The LWPCookieJar saves a sequence of "Set-Cookie3" lines.
"Set-Cookie3" is the format used by the libwww-perl library, not known
to be compatible with any browser, but which is easy to read and
doesn't lose information about RFC 2965 cookies.
@@ -60,7 +60,7 @@ class LWPCookieJar(FileCookieJar):
"""
def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
- """Return cookies as a string of "\n"-separated "Set-Cookie3" headers.
+ """Return cookies as a string of "\\n"-separated "Set-Cookie3" headers.
ignore_discard and ignore_expires: see docstring for FileCookieJar.save
diff --git a/Lib/_MozillaCookieJar.py b/Lib/_MozillaCookieJar.py
index 00e8bcf..585bc17 100644
--- a/Lib/_MozillaCookieJar.py
+++ b/Lib/_MozillaCookieJar.py
@@ -39,7 +39,7 @@ class MozillaCookieJar(FileCookieJar):
magic_re = "#( Netscape)? HTTP Cookie File"
header = """\
# Netscape HTTP Cookie File
-# http://www.netscape.com/newsref/std/cookie_spec.html
+# http://curl.haxx.se/rfc/cookie_spec.html
# This is a generated file! Do not edit.
"""
diff --git a/Lib/__future__.py b/Lib/__future__.py
index 9156459..e0996eb 100644
--- a/Lib/__future__.py
+++ b/Lib/__future__.py
@@ -112,7 +112,7 @@ division = _Feature((2, 2, 0, "alpha", 2),
CO_FUTURE_DIVISION)
absolute_import = _Feature((2, 5, 0, "alpha", 1),
- (2, 7, 0, "alpha", 0),
+ (3, 0, 0, "alpha", 0),
CO_FUTURE_ABSOLUTE_IMPORT)
with_statement = _Feature((2, 5, 0, "alpha", 1),
diff --git a/Lib/_abcoll.py b/Lib/_abcoll.py
index e7376e4..a943263 100644
--- a/Lib/_abcoll.py
+++ b/Lib/_abcoll.py
@@ -74,6 +74,7 @@ class Iterator(Iterable):
@abstractmethod
def next(self):
+ 'Return the next item from the iterator. When exhausted, raise StopIteration'
raise StopIteration
def __iter__(self):
@@ -164,12 +165,17 @@ class Set(Sized, Iterable, Container):
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
- return other < self
+ return len(self) > len(other) and self.__ge__(other)
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
- return other <= self
+ if len(self) < len(other):
+ return False
+ for elem in other:
+ if elem not in self:
+ return False
+ return True
def __eq__(self, other):
if not isinstance(other, Set):
@@ -193,7 +199,10 @@ class Set(Sized, Iterable, Container):
return NotImplemented
return self._from_iterable(value for value in other if value in self)
+ __rand__ = __and__
+
def isdisjoint(self, other):
+ 'Return True if two sets have a null intersection.'
for value in other:
if value in self:
return False
@@ -205,6 +214,8 @@ class Set(Sized, Iterable, Container):
chain = (e for s in (self, other) for e in s)
return self._from_iterable(chain)
+ __ror__ = __or__
+
def __sub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
@@ -213,6 +224,14 @@ class Set(Sized, Iterable, Container):
return self._from_iterable(value for value in self
if value not in other)
+ def __rsub__(self, other):
+ if not isinstance(other, Set):
+ if not isinstance(other, Iterable):
+ return NotImplemented
+ other = self._from_iterable(other)
+ return self._from_iterable(value for value in other
+ if value not in self)
+
def __xor__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
@@ -220,6 +239,8 @@ class Set(Sized, Iterable, Container):
other = self._from_iterable(other)
return (self - other) | (other - self)
+ __rxor__ = __xor__
+
# Sets are not hashable by default, but subclasses can change this
__hash__ = None
@@ -259,6 +280,16 @@ Set.register(frozenset)
class MutableSet(Set):
+ """A mutable set is a finite, iterable container.
+
+ This class provides concrete generic implementations of all
+ methods except for __contains__, __iter__, __len__,
+ add(), and discard().
+
+ To override the comparisons (presumably for speed, as the
+ semantics are fixed), all you have to do is redefine __le__ and
+ then the other operations will automatically follow suit.
+ """
@abstractmethod
def add(self, value):
@@ -333,11 +364,20 @@ MutableSet.register(set)
class Mapping(Sized, Iterable, Container):
+ """A Mapping is a generic container for associating key/value
+ pairs.
+
+ This class provides concrete generic implementations of all
+ methods except for __getitem__, __iter__, and __len__.
+
+ """
+
@abstractmethod
def __getitem__(self, key):
raise KeyError
def get(self, key, default=None):
+ 'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.'
try:
return self[key]
except KeyError:
@@ -352,23 +392,29 @@ class Mapping(Sized, Iterable, Container):
return True
def iterkeys(self):
+ 'D.iterkeys() -> an iterator over the keys of D'
return iter(self)
def itervalues(self):
+ 'D.itervalues() -> an iterator over the values of D'
for key in self:
yield self[key]
def iteritems(self):
+ 'D.iteritems() -> an iterator over the (key, value) items of D'
for key in self:
yield (key, self[key])
def keys(self):
+ "D.keys() -> list of D's keys"
return list(self)
def items(self):
+ "D.items() -> list of D's (key, value) pairs, as 2-tuples"
return [(key, self[key]) for key in self]
def values(self):
+ "D.values() -> list of D's values"
return [self[key] for key in self]
# Mappings are not hashable by default, but subclasses can change this
@@ -443,6 +489,15 @@ class ValuesView(MappingView):
class MutableMapping(Mapping):
+ """A MutableMapping is a generic container for associating
+ key/value pairs.
+
+ This class provides concrete generic implementations of all
+ methods except for __getitem__, __setitem__, __delitem__,
+ __iter__, and __len__.
+
+ """
+
@abstractmethod
def __setitem__(self, key, value):
raise KeyError
@@ -454,6 +509,9 @@ class MutableMapping(Mapping):
__marker = object()
def pop(self, key, default=__marker):
+ '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+ '''
try:
value = self[key]
except KeyError:
@@ -465,6 +523,9 @@ class MutableMapping(Mapping):
return value
def popitem(self):
+ '''D.popitem() -> (k, v), remove and return some (key, value) pair
+ as a 2-tuple; but raise KeyError if D is empty.
+ '''
try:
key = next(iter(self))
except StopIteration:
@@ -474,6 +535,7 @@ class MutableMapping(Mapping):
return key, value
def clear(self):
+ 'D.clear() -> None. Remove all items from D.'
try:
while True:
self.popitem()
@@ -481,6 +543,11 @@ class MutableMapping(Mapping):
pass
def update(*args, **kwds):
+ ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F.
+ If E present and has a .keys() method, does: for k in E: D[k] = E[k]
+ If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
+ In either case, this is followed by: for k, v in F.items(): D[k] = v
+ '''
if len(args) > 2:
raise TypeError("update() takes at most 2 positional "
"arguments ({} given)".format(len(args)))
@@ -502,6 +569,7 @@ class MutableMapping(Mapping):
self[key] = value
def setdefault(self, key, default=None):
+ 'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'
try:
return self[key]
except KeyError:
@@ -546,12 +614,16 @@ class Sequence(Sized, Iterable, Container):
yield self[i]
def index(self, value):
+ '''S.index(value) -> integer -- return first index of value.
+ Raises ValueError if the value is not present.
+ '''
for i, v in enumerate(self):
if v == value:
return i
raise ValueError
def count(self, value):
+ 'S.count(value) -> integer -- return number of occurrences of value'
return sum(1 for v in self if v == value)
Sequence.register(tuple)
@@ -562,6 +634,13 @@ Sequence.register(xrange)
class MutableSequence(Sequence):
+ """All the operations on a read-only sequence.
+
+ Concrete subclasses must provide __new__ or __init__,
+ __getitem__, __setitem__, __delitem__, __len__, and insert().
+
+ """
+
@abstractmethod
def __setitem__(self, index, value):
raise IndexError
@@ -572,26 +651,36 @@ class MutableSequence(Sequence):
@abstractmethod
def insert(self, index, value):
+ 'S.insert(index, object) -- insert object before index'
raise IndexError
def append(self, value):
+ 'S.append(object) -- append object to the end of the sequence'
self.insert(len(self), value)
def reverse(self):
+ 'S.reverse() -- reverse *IN PLACE*'
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i]
def extend(self, values):
+ 'S.extend(iterable) -- extend sequence by appending elements from the iterable'
for v in values:
self.append(v)
def pop(self, index=-1):
+ '''S.pop([index]) -> item -- remove and return item at index (default last).
+ Raise IndexError if list is empty or index is out of range.
+ '''
v = self[index]
del self[index]
return v
def remove(self, value):
+ '''S.remove(value) -- remove first occurrence of value.
+ Raise ValueError if the value is not present.
+ '''
del self[self.index(value)]
def __iadd__(self, values):
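The _abcoll.Set hunks above add reflected operators (__rand__, __ror__, __rsub__, __rxor__) and make __gt__/__ge__ independent of the reversed comparison, so a built-in set on the left of a binary operator now cooperates with a Set ABC subclass on the right. A minimal sketch with a hypothetical ListSet:

from _abcoll import Set   # also exposed as collections.Set

class ListSet(Set):
    def __init__(self, iterable=()):
        self.elements = [e for e in iterable]
    def __contains__(self, value):
        return value in self.elements
    def __iter__(self):
        return iter(self.elements)
    def __len__(self):
        return len(self.elements)

ls = ListSet('abc')
print sorted(set('abd') - ls)   # ['d'], dispatched to ListSet.__rsub__
print sorted(set('ab') & ls)    # ['a', 'b'], dispatched to ListSet.__rand__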
diff --git a/Lib/_osx_support.py b/Lib/_osx_support.py
new file mode 100644
index 0000000..2cd1428
--- /dev/null
+++ b/Lib/_osx_support.py
@@ -0,0 +1,502 @@
+"""Shared OS X support functions."""
+
+import os
+import re
+import sys
+
+__all__ = [
+ 'compiler_fixup',
+ 'customize_config_vars',
+ 'customize_compiler',
+ 'get_platform_osx',
+]
+
+# configuration variables that may contain universal build flags,
+# like "-arch" or "-isysroot", that may need customization for
+# the user environment
+_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS',
+ 'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
+ 'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
+ 'PY_CORE_CFLAGS')
+
+# configuration variables that may contain compiler calls
+_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')
+
+# prefix added to original configuration variable names
+_INITPRE = '_OSX_SUPPORT_INITIAL_'
+
+
+def _find_executable(executable, path=None):
+ """Tries to find 'executable' in the directories listed in 'path'.
+
+ A string listing directories separated by 'os.pathsep'; defaults to
+ os.environ['PATH']. Returns the complete filename or None if not found.
+ """
+ if path is None:
+ path = os.environ['PATH']
+
+ paths = path.split(os.pathsep)
+ base, ext = os.path.splitext(executable)
+
+ if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'):
+ executable = executable + '.exe'
+
+ if not os.path.isfile(executable):
+ for p in paths:
+ f = os.path.join(p, executable)
+ if os.path.isfile(f):
+ # the file exists, we have a shot at spawn working
+ return f
+ return None
+ else:
+ return executable
+
+
+def _read_output(commandstring):
+ """Output from successful command execution or None"""
+ # Similar to os.popen(commandstring, "r").read(),
+ # but without actually using os.popen because that
+ # function is not usable during python bootstrap.
+ # tempfile is also not available then.
+ import contextlib
+ try:
+ import tempfile
+ fp = tempfile.NamedTemporaryFile()
+ except ImportError:
+ fp = open("/tmp/_osx_support.%s"%(
+ os.getpid(),), "w+b")
+
+ with contextlib.closing(fp) as fp:
+ cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
+ return fp.read().strip() if not os.system(cmd) else None
+
+
+def _find_build_tool(toolname):
+ """Find a build tool on current path or using xcrun"""
+ return (_find_executable(toolname)
+ or _read_output("/usr/bin/xcrun -find %s" % (toolname,))
+ or ''
+ )
+
+_SYSTEM_VERSION = None
+
+def _get_system_version():
+ """Return the OS X system version as a string"""
+ # Reading this plist is a documented way to get the system
+ # version (see the documentation for the Gestalt Manager)
+ # We avoid using platform.mac_ver to avoid possible bootstrap issues during
+ # the build of Python itself (distutils is used to build standard library
+ # extensions).
+
+ global _SYSTEM_VERSION
+
+ if _SYSTEM_VERSION is None:
+ _SYSTEM_VERSION = ''
+ try:
+ f = open('/System/Library/CoreServices/SystemVersion.plist')
+ except IOError:
+ # We're on a plain darwin box, fall back to the default
+ # behaviour.
+ pass
+ else:
+ try:
+ m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
+ r'<string>(.*?)</string>', f.read())
+ finally:
+ f.close()
+ if m is not None:
+ _SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2])
+ # else: fall back to the default behaviour
+
+ return _SYSTEM_VERSION
+
+def _remove_original_values(_config_vars):
+ """Remove original unmodified values for testing"""
+ # This is needed for higher-level cross-platform tests of get_platform.
+ for k in list(_config_vars):
+ if k.startswith(_INITPRE):
+ del _config_vars[k]
+
+def _save_modified_value(_config_vars, cv, newvalue):
+ """Save modified and original unmodified value of configuration var"""
+
+ oldvalue = _config_vars.get(cv, '')
+ if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars):
+ _config_vars[_INITPRE + cv] = oldvalue
+ _config_vars[cv] = newvalue
+
+def _supports_universal_builds():
+ """Returns True if universal builds are supported on this system"""
+ # As an approximation, we assume that if we are running on 10.4 or above,
+ # then we are running with an Xcode environment that supports universal
+ # builds, in particular -isysroot and -arch arguments to the compiler. This
+ # is in support of allowing 10.4 universal builds to run on 10.3.x systems.
+
+ osx_version = _get_system_version()
+ if osx_version:
+ try:
+ osx_version = tuple(int(i) for i in osx_version.split('.'))
+ except ValueError:
+ osx_version = ''
+ return bool(osx_version >= (10, 4)) if osx_version else False
+
+
+def _find_appropriate_compiler(_config_vars):
+ """Find appropriate C compiler for extension module builds"""
+
+ # Issue #13590:
+ # The OSX location for the compiler varies between OSX
+ # (or rather Xcode) releases. With older releases (up-to 10.5)
+ # the compiler is in /usr/bin, with newer releases the compiler
+ # can only be found inside Xcode.app if the "Command Line Tools"
+ # are not installed.
+ #
+ # Furthermore, the compiler that can be used varies between
+ # Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
+ # as the compiler, after that 'clang' should be used because
+ # gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
+ # miscompiles Python.
+
+ # skip checks if the compiler was overridden with a CC env variable
+ if 'CC' in os.environ:
+ return _config_vars
+
+ # The CC config var might contain additional arguments.
+ # Ignore them while searching.
+ cc = oldcc = _config_vars['CC'].split()[0]
+ if not _find_executable(cc):
+ # Compiler is not found on the shell search PATH.
+ # Now search for clang, first on PATH (if the Command Line
+ # Tools have been installed in / or if the user has provided
+ # another location via CC). If not found, try using xcrun
+ # to find an uninstalled clang (within a selected Xcode).
+
+ # NOTE: Cannot use subprocess here because of bootstrap
+ # issues when building Python itself (and os.popen is
+ # implemented on top of subprocess and is therefore not
+ # usable as well)
+
+ cc = _find_build_tool('clang')
+
+ elif os.path.basename(cc).startswith('gcc'):
+ # Compiler is GCC, check if it is LLVM-GCC
+ data = _read_output("'%s' --version"
+ % (cc.replace("'", "'\"'\"'"),))
+ if data and 'llvm-gcc' in data:
+ # Found LLVM-GCC, fall back to clang
+ cc = _find_build_tool('clang')
+
+ if not cc:
+ raise SystemError(
+ "Cannot locate working compiler")
+
+ if cc != oldcc:
+ # Found a replacement compiler.
+ # Modify config vars using new compiler, if not already explicitly
+ # overridden by an env variable, preserving additional arguments.
+ for cv in _COMPILER_CONFIG_VARS:
+ if cv in _config_vars and cv not in os.environ:
+ cv_split = _config_vars[cv].split()
+ cv_split[0] = cc if cv != 'CXX' else cc + '++'
+ _save_modified_value(_config_vars, cv, ' '.join(cv_split))
+
+ return _config_vars
+
+
+def _remove_universal_flags(_config_vars):
+ """Remove all universal build arguments from config vars"""
+
+ for cv in _UNIVERSAL_CONFIG_VARS:
+ # Do not alter a config var explicitly overridden by env var
+ if cv in _config_vars and cv not in os.environ:
+ flags = _config_vars[cv]
+ flags = re.sub('-arch\s+\w+\s', ' ', flags)
+ flags = re.sub('-isysroot [^ \t]*', ' ', flags)
+ _save_modified_value(_config_vars, cv, flags)
+
+ return _config_vars
+
+
+def _remove_unsupported_archs(_config_vars):
+ """Remove any unsupported archs from config vars"""
+ # Different Xcode releases support different sets for '-arch'
+ # flags. In particular, Xcode 4.x no longer supports the
+ # PPC architectures.
+ #
+ # This code automatically removes '-arch ppc' and '-arch ppc64'
+ # when these are not supported. That makes it possible to
+ # build extensions on OSX 10.7 and later with the prebuilt
+ # 32-bit installer on the python.org website.
+
+ # skip checks if the compiler was overridden with a CC env variable
+ if 'CC' in os.environ:
+ return _config_vars
+
+ if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None:
+ # NOTE: Cannot use subprocess here because of bootstrap
+ # issues when building Python itself
+ status = os.system(
+ """echo 'int main{};' | """
+ """'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null"""
+ %(_config_vars['CC'].replace("'", "'\"'\"'"),))
+ if status:
+ # The compile failed for some reason. Because of differences
+ # across Xcode and compiler versions, there is no reliable way
+ # to be sure why it failed. Assume here it was due to lack of
+ # PPC support and remove the related '-arch' flags from each
+ # config variables not explicitly overridden by an environment
+ # variable. If the error was for some other reason, we hope the
+ # failure will show up again when trying to compile an extension
+ # module.
+ for cv in _UNIVERSAL_CONFIG_VARS:
+ if cv in _config_vars and cv not in os.environ:
+ flags = _config_vars[cv]
+ flags = re.sub('-arch\s+ppc\w*\s', ' ', flags)
+ _save_modified_value(_config_vars, cv, flags)
+
+ return _config_vars
+
+
+def _override_all_archs(_config_vars):
+ """Allow override of all archs with ARCHFLAGS env var"""
+ # NOTE: This name was introduced by Apple in OSX 10.5 and
+ # is used by several scripting languages distributed with
+ # that OS release.
+ if 'ARCHFLAGS' in os.environ:
+ arch = os.environ['ARCHFLAGS']
+ for cv in _UNIVERSAL_CONFIG_VARS:
+ if cv in _config_vars and '-arch' in _config_vars[cv]:
+ flags = _config_vars[cv]
+ flags = re.sub('-arch\s+\w+\s', ' ', flags)
+ flags = flags + ' ' + arch
+ _save_modified_value(_config_vars, cv, flags)
+
+ return _config_vars
+
+
+def _check_for_unavailable_sdk(_config_vars):
+ """Remove references to any SDKs not available"""
+ # If we're on OSX 10.5 or later and the user tries to
+ # compile an extension using an SDK that is not present
+ # on the current machine it is better to not use an SDK
+ # than to fail. This is particularly important with
+ # the standalone Command Line Tools alternative to a
+ # full-blown Xcode install since the CLT packages do not
+ # provide SDKs. If the SDK is not present, it is assumed
+ # that the header files and dev libs have been installed
+ # to /usr and /System/Library by either a standalone CLT
+ # package or the CLT component within Xcode.
+ cflags = _config_vars.get('CFLAGS', '')
+ m = re.search(r'-isysroot\s+(\S+)', cflags)
+ if m is not None:
+ sdk = m.group(1)
+ if not os.path.exists(sdk):
+ for cv in _UNIVERSAL_CONFIG_VARS:
+ # Do not alter a config var explicitly overridden by env var
+ if cv in _config_vars and cv not in os.environ:
+ flags = _config_vars[cv]
+ flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags)
+ _save_modified_value(_config_vars, cv, flags)
+
+ return _config_vars
+
+
+def compiler_fixup(compiler_so, cc_args):
+ """
+ This function will strip '-isysroot PATH' and '-arch ARCH' from the
+ compile flags if the user has specified one of them in extra_compile_flags.
+
+ This is needed because '-arch ARCH' adds another architecture to the
+ build, without a way to remove an architecture. Furthermore GCC will
+ barf if multiple '-isysroot' arguments are present.
+ """
+ stripArch = stripSysroot = False
+
+ compiler_so = list(compiler_so)
+
+ if not _supports_universal_builds():
+ # OSX before 10.4.0, these don't support -arch and -isysroot at
+ # all.
+ stripArch = stripSysroot = True
+ else:
+ stripArch = '-arch' in cc_args
+ stripSysroot = '-isysroot' in cc_args
+
+ if stripArch or 'ARCHFLAGS' in os.environ:
+ while True:
+ try:
+ index = compiler_so.index('-arch')
+ # Strip this argument and the next one:
+ del compiler_so[index:index+2]
+ except ValueError:
+ break
+
+ if 'ARCHFLAGS' in os.environ and not stripArch:
+ # User specified different -arch flags in the environ,
+ # see also distutils.sysconfig
+ compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()
+
+ if stripSysroot:
+ while True:
+ try:
+ index = compiler_so.index('-isysroot')
+ # Strip this argument and the next one:
+ del compiler_so[index:index+2]
+ except ValueError:
+ break
+
+ # Check if the SDK that is used during compilation actually exists,
+ # the universal build requires the usage of a universal SDK and not all
+ # users have that installed by default.
+ sysroot = None
+ if '-isysroot' in cc_args:
+ idx = cc_args.index('-isysroot')
+ sysroot = cc_args[idx+1]
+ elif '-isysroot' in compiler_so:
+ idx = compiler_so.index('-isysroot')
+ sysroot = compiler_so[idx+1]
+
+ if sysroot and not os.path.isdir(sysroot):
+ from distutils import log
+ log.warn("Compiling with an SDK that doesn't seem to exist: %s",
+ sysroot)
+ log.warn("Please check your Xcode installation")
+
+ return compiler_so
+
+
+def customize_config_vars(_config_vars):
+ """Customize Python build configuration variables.
+
+ Called internally from sysconfig with a mutable mapping
+ containing name/value pairs parsed from the configured
+ makefile used to build this interpreter. Returns
+ the mapping updated as needed to reflect the environment
+ in which the interpreter is running; in the case of
+ a Python from a binary installer, the installed
+ environment may be very different from the build
+ environment, i.e. different OS levels, different
+ build tools, different available CPU architectures.
+
+ This customization is performed whenever
+ distutils.sysconfig.get_config_vars() is first
+ called. It may be used in environments where no
+ compilers are present, i.e. when installing pure
+ Python dists. Customization of compiler paths
+ and detection of unavailable archs is deferred
+ until the first extension module build is
+ requested (in distutils.sysconfig.customize_compiler).
+
+ Currently called from distutils.sysconfig
+ """
+
+ if not _supports_universal_builds():
+ # On Mac OS X before 10.4, check if -arch and -isysroot
+ # are in CFLAGS or LDFLAGS and remove them if they are.
+ # This is needed when building extensions on a 10.3 system
+ # using a universal build of python.
+ _remove_universal_flags(_config_vars)
+
+ # Allow user to override all archs with ARCHFLAGS env var
+ _override_all_archs(_config_vars)
+
+ # Remove references to sdks that are not found
+ _check_for_unavailable_sdk(_config_vars)
+
+ return _config_vars
+
+
+def customize_compiler(_config_vars):
+ """Customize compiler path and configuration variables.
+
+ This customization is performed when the first
+ extension module build is requested
+ (in distutils.sysconfig.customize_compiler).
+ """
+
+ # Find a compiler to use for extension module builds
+ _find_appropriate_compiler(_config_vars)
+
+ # Remove ppc arch flags if not supported here
+ _remove_unsupported_archs(_config_vars)
+
+ # Allow user to override all archs with ARCHFLAGS env var
+ _override_all_archs(_config_vars)
+
+ return _config_vars
+
+
+def get_platform_osx(_config_vars, osname, release, machine):
+ """Filter values for get_platform()"""
+ # called from get_platform() in sysconfig and distutils.util
+ #
+ # For our purposes, we'll assume that the system version from
+ # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
+ # to. This makes the compatibility story a bit more sane because the
+ # machine is going to compile and link as if it were
+ # MACOSX_DEPLOYMENT_TARGET.
+
+ macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '')
+ macrelease = _get_system_version() or macver
+ macver = macver or macrelease
+
+ if macver:
+ release = macver
+ osname = "macosx"
+
+ # Use the original CFLAGS value, if available, so that we
+ # return the same machine type for the platform string.
+ # Otherwise, distutils may consider this a cross-compiling
+ # case and disallow installs.
+ cflags = _config_vars.get(_INITPRE+'CFLAGS',
+ _config_vars.get('CFLAGS', ''))
+ if macrelease:
+ try:
+ macrelease = tuple(int(i) for i in macrelease.split('.')[0:2])
+ except ValueError:
+ macrelease = (10, 0)
+ else:
+ # assume no universal support
+ macrelease = (10, 0)
+
+ if (macrelease >= (10, 4)) and '-arch' in cflags.strip():
+ # The universal build will build fat binaries, but not on
+ # systems before 10.4
+
+ machine = 'fat'
+
+ archs = re.findall('-arch\s+(\S+)', cflags)
+ archs = tuple(sorted(set(archs)))
+
+ if len(archs) == 1:
+ machine = archs[0]
+ elif archs == ('i386', 'ppc'):
+ machine = 'fat'
+ elif archs == ('i386', 'x86_64'):
+ machine = 'intel'
+ elif archs == ('i386', 'ppc', 'x86_64'):
+ machine = 'fat3'
+ elif archs == ('ppc64', 'x86_64'):
+ machine = 'fat64'
+ elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
+ machine = 'universal'
+ else:
+ raise ValueError(
+ "Don't know machine value for archs=%r" % (archs,))
+
+ elif machine == 'i386':
+ # On OSX the machine type returned by uname is always the
+ # 32-bit variant, even if the executable architecture is
+ # the 64-bit variant
+ if sys.maxint >= 2**32:
+ machine = 'x86_64'
+
+ elif machine in ('PowerPC', 'Power_Macintosh'):
+ # Pick a sane name for the PPC architecture.
+ # See 'i386' case
+ if sys.maxint >= 2**32:
+ machine = 'ppc64'
+ else:
+ machine = 'ppc'
+
+ return (osname, release, machine)
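The new Lib/_osx_support.py above centralizes the Mac-specific build logic that sysconfig and distutils previously duplicated. A hedged usage sketch of the platform filter; the exact result depends on the interpreter's recorded CFLAGS and deployment target, and on non-Mac systems the inputs are returned unchanged:

import sysconfig
import _osx_support

cfg = dict(sysconfig.get_config_vars())
print _osx_support.get_platform_osx(cfg, 'darwin', '13.0', 'x86_64')
# e.g. ('macosx', '10.9', 'intel') for an Intel universal build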
diff --git a/Lib/_pyio.py b/Lib/_pyio.py
index 34d0c4c..3acbc65 100644
--- a/Lib/_pyio.py
+++ b/Lib/_pyio.py
@@ -192,38 +192,45 @@ def open(file, mode="r", buffering=-1,
(appending and "a" or "") +
(updating and "+" or ""),
closefd)
- line_buffering = False
- if buffering == 1 or buffering < 0 and raw.isatty():
- buffering = -1
- line_buffering = True
- if buffering < 0:
- buffering = DEFAULT_BUFFER_SIZE
- try:
- bs = os.fstat(raw.fileno()).st_blksize
- except (os.error, AttributeError):
- pass
+ result = raw
+ try:
+ line_buffering = False
+ if buffering == 1 or buffering < 0 and raw.isatty():
+ buffering = -1
+ line_buffering = True
+ if buffering < 0:
+ buffering = DEFAULT_BUFFER_SIZE
+ try:
+ bs = os.fstat(raw.fileno()).st_blksize
+ except (os.error, AttributeError):
+ pass
+ else:
+ if bs > 1:
+ buffering = bs
+ if buffering < 0:
+ raise ValueError("invalid buffering size")
+ if buffering == 0:
+ if binary:
+ return result
+ raise ValueError("can't have unbuffered text I/O")
+ if updating:
+ buffer = BufferedRandom(raw, buffering)
+ elif writing or appending:
+ buffer = BufferedWriter(raw, buffering)
+ elif reading:
+ buffer = BufferedReader(raw, buffering)
else:
- if bs > 1:
- buffering = bs
- if buffering < 0:
- raise ValueError("invalid buffering size")
- if buffering == 0:
+ raise ValueError("unknown mode: %r" % mode)
+ result = buffer
if binary:
- return raw
- raise ValueError("can't have unbuffered text I/O")
- if updating:
- buffer = BufferedRandom(raw, buffering)
- elif writing or appending:
- buffer = BufferedWriter(raw, buffering)
- elif reading:
- buffer = BufferedReader(raw, buffering)
- else:
- raise ValueError("unknown mode: %r" % mode)
- if binary:
- return buffer
- text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
- text.mode = mode
- return text
+ return result
+ text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
+ result = text
+ text.mode = mode
+ return result
+ except:
+ result.close()
+ raise
class DocDescriptor:
@@ -298,7 +305,7 @@ class IOBase:
def seek(self, pos, whence=0):
"""Change stream position.
- Change the stream position to byte offset offset. offset is
+ Change the stream position to byte offset pos. Argument pos is
interpreted relative to the position indicated by whence. Values
for whence are:
@@ -340,8 +347,10 @@ class IOBase:
This method has no effect if the file is already closed.
"""
if not self.__closed:
- self.flush()
- self.__closed = True
+ try:
+ self.flush()
+ finally:
+ self.__closed = True
def __del__(self):
"""Destructor. Calls close()."""
@@ -883,12 +892,18 @@ class BytesIO(BufferedIOBase):
return pos
def readable(self):
+ if self.closed:
+ raise ValueError("I/O operation on closed file.")
return True
def writable(self):
+ if self.closed:
+ raise ValueError("I/O operation on closed file.")
return True
def seekable(self):
+ if self.closed:
+ raise ValueError("I/O operation on closed file.")
return True
@@ -1451,7 +1466,7 @@ class TextIOWrapper(TextIOBase):
enabled. With this enabled, on input, the lines endings '\n', '\r',
or '\r\n' are translated to '\n' before being returned to the
caller. Conversely, on output, '\n' is translated to the system
- default line seperator, os.linesep. If newline is any other of its
+ default line separator, os.linesep. If newline is any other of its
legal values, that newline becomes the newline when the file is read
and it is returned untranslated. On output, '\n' is converted to the
newline.
@@ -1546,6 +1561,8 @@ class TextIOWrapper(TextIOBase):
return self._buffer
def seekable(self):
+ if self.closed:
+ raise ValueError("I/O operation on closed file.")
return self._seekable
def readable(self):
@@ -1560,8 +1577,10 @@ class TextIOWrapper(TextIOBase):
def close(self):
if self.buffer is not None and not self.closed:
- self.flush()
- self.buffer.close()
+ try:
+ self.flush()
+ finally:
+ self.buffer.close()
@property
def closed(self):
@@ -1985,7 +2004,13 @@ class StringIO(TextIOWrapper):
def getvalue(self):
self.flush()
- return self.buffer.getvalue().decode(self._encoding, self._errors)
+ decoder = self._decoder or self._get_decoder()
+ old_state = decoder.getstate()
+ decoder.reset()
+ try:
+ return decoder.decode(self.buffer.getvalue(), final=True)
+ finally:
+ decoder.setstate(old_state)
def __repr__(self):
# TextIOWrapper tells the encoding in its repr. In StringIO,
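Among the _pyio changes above, readable()/writable()/seekable() now raise on a closed object instead of answering True, and open() closes the partially built stack if wrapping fails. A short sketch of the stricter closed-file check, using the pure-Python implementation directly:

import _pyio

b = _pyio.BytesIO(b'data')
print b.readable()        # True
b.close()
try:
    b.readable()          # raises instead of returning True on a closed object
except ValueError as e:
    print e               # I/O operation on closed file.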
diff --git a/Lib/_strptime.py b/Lib/_strptime.py
index d9563b9..042db6f 100644
--- a/Lib/_strptime.py
+++ b/Lib/_strptime.py
@@ -222,7 +222,7 @@ class TimeRE(dict):
"""Convert a list to a regex string for matching a directive.
Want possible matching values to be from longest to shortest. This
- prevents the possibility of a match occuring for a value that also
+ prevents the possibility of a match occurring for a value that is also
a substring of a larger value that should have matched (e.g., 'abc'
matching when 'abcdef' should have been the match).
@@ -326,7 +326,8 @@ def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
if len(data_string) != found.end():
raise ValueError("unconverted data remains: %s" %
data_string[found.end():])
- year = 1900
+
+ year = None
month = day = 1
hour = minute = second = fraction = 0
tz = -1
@@ -425,6 +426,12 @@ def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
else:
tz = value
break
+ leap_year_fix = False
+ if year is None and month == 2 and day == 29:
+ year = 1904 # 1904 is first leap year of 20th century
+ leap_year_fix = True
+ elif year is None:
+ year = 1900
# If we know the week of the year and what day of that week, we can figure
# out the Julian day of the year.
if julian == -1 and week_of_year != -1 and weekday != -1:
@@ -446,6 +453,12 @@ def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
day = datetime_result.day
if weekday == -1:
weekday = datetime_date(year, month, day).weekday()
+ if leap_year_fix:
+ # the caller didn't supply a year but asked for Feb 29th. We couldn't
+ # use the default of 1900 for computations. We set it back to ensure
+ # that February 29th is smaller than March 1st.
+ year = 1900
+
return (time.struct_time((year, month, day,
hour, minute, second,
weekday, julian, tz)), fraction)
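The _strptime hunk above defers the default year so that a bare "Feb 29" can be parsed: 1904 (a leap year) is used for the intermediate date arithmetic, and the result is then reported with the traditional default year 1900. Sketch:

import time

# Raised "day is out of range for month" before this change (1900 is not a leap year).
print time.strptime('Feb 29', '%b %d')[:3]   # (1900, 2, 29)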
diff --git a/Lib/_weakrefset.py b/Lib/_weakrefset.py
index ffa5e31..627959b 100644
--- a/Lib/_weakrefset.py
+++ b/Lib/_weakrefset.py
@@ -60,10 +60,12 @@ class WeakSet(object):
for itemref in self.data:
item = itemref()
if item is not None:
+ # Caveat: the iterator will keep a strong reference to
+ # `item` until it is resumed or closed.
yield item
def __len__(self):
- return sum(x() is not None for x in self.data)
+ return len(self.data) - len(self._pending_removals)
def __contains__(self, item):
try:
@@ -116,36 +118,21 @@ class WeakSet(object):
def update(self, other):
if self._pending_removals:
self._commit_removals()
- if isinstance(other, self.__class__):
- self.data.update(other.data)
- else:
- for element in other:
- self.add(element)
+ for element in other:
+ self.add(element)
def __ior__(self, other):
self.update(other)
return self
- # Helper functions for simple delegating methods.
- def _apply(self, other, method):
- if not isinstance(other, self.__class__):
- other = self.__class__(other)
- newdata = method(other.data)
- newset = self.__class__()
- newset.data = newdata
- return newset
-
def difference(self, other):
- return self._apply(other, self.data.difference)
+ newset = self.copy()
+ newset.difference_update(other)
+ return newset
__sub__ = difference
def difference_update(self, other):
- if self._pending_removals:
- self._commit_removals()
- if self is other:
- self.data.clear()
- else:
- self.data.difference_update(ref(item) for item in other)
+ self.__isub__(other)
def __isub__(self, other):
if self._pending_removals:
self._commit_removals()
@@ -156,13 +143,11 @@ class WeakSet(object):
return self
def intersection(self, other):
- return self._apply(other, self.data.intersection)
+ return self.__class__(item for item in other if item in self)
__and__ = intersection
def intersection_update(self, other):
- if self._pending_removals:
- self._commit_removals()
- self.data.intersection_update(ref(item) for item in other)
+ self.__iand__(other)
def __iand__(self, other):
if self._pending_removals:
self._commit_removals()
@@ -171,45 +156,48 @@ class WeakSet(object):
def issubset(self, other):
return self.data.issubset(ref(item) for item in other)
- __lt__ = issubset
+ __le__ = issubset
- def __le__(self, other):
- return self.data <= set(ref(item) for item in other)
+ def __lt__(self, other):
+ return self.data < set(ref(item) for item in other)
def issuperset(self, other):
return self.data.issuperset(ref(item) for item in other)
- __gt__ = issuperset
+ __ge__ = issuperset
- def __ge__(self, other):
- return self.data >= set(ref(item) for item in other)
+ def __gt__(self, other):
+ return self.data > set(ref(item) for item in other)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.data == set(ref(item) for item in other)
+ def __ne__(self, other):
+ opposite = self.__eq__(other)
+ if opposite is NotImplemented:
+ return NotImplemented
+ return not opposite
+
def symmetric_difference(self, other):
- return self._apply(other, self.data.symmetric_difference)
+ newset = self.copy()
+ newset.symmetric_difference_update(other)
+ return newset
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
- if self._pending_removals:
- self._commit_removals()
- if self is other:
- self.data.clear()
- else:
- self.data.symmetric_difference_update(ref(item) for item in other)
+ self.__ixor__(other)
def __ixor__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
- self.data.symmetric_difference_update(ref(item) for item in other)
+ self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
return self
def union(self, other):
- return self._apply(other, self.data.union)
+ return self.__class__(e for s in (self, other) for e in s)
__or__ = union
def isdisjoint(self, other):
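The _weakrefset hunks above fix the comparison aliases (issubset now maps to __le__, issuperset to __ge__) and add __ne__, so WeakSet ordering matches built-in set semantics. A brief sketch; the Obj class only exists to provide weak-referenceable items:

from weakref import WeakSet   # implemented in Lib/_weakrefset.py

class Obj(object):
    pass

a, b = Obj(), Obj()
ws = WeakSet([a, b])
print ws <= WeakSet([a, b])   # True  (this used to be the strict-subset test)
print ws <  WeakSet([a, b])   # False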
diff --git a/Lib/aifc.py b/Lib/aifc.py
index b8adc85..9ac710f 100644
--- a/Lib/aifc.py
+++ b/Lib/aifc.py
@@ -123,7 +123,7 @@ It is best to first set all parameters, perhaps possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
-Marks can be added anytime. If there are any marks, ypu must call
+Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
@@ -480,31 +480,30 @@ class Aifc_read:
pass
else:
self._convert = self._adpcm2lin
- self._framesize = self._framesize // 4
+ self._sampwidth = 2
return
# for ULAW and ALAW try Compression Library
try:
import cl
except ImportError:
- if self._comptype == 'ULAW':
+ if self._comptype in ('ULAW', 'ulaw'):
try:
import audioop
self._convert = self._ulaw2lin
- self._framesize = self._framesize // 2
+ self._sampwidth = 2
return
except ImportError:
pass
raise Error, 'cannot read compressed AIFF-C files'
- if self._comptype == 'ULAW':
+ if self._comptype in ('ULAW', 'ulaw'):
scheme = cl.G711_ULAW
- self._framesize = self._framesize // 2
- elif self._comptype == 'ALAW':
+ elif self._comptype in ('ALAW', 'alaw'):
scheme = cl.G711_ALAW
- self._framesize = self._framesize // 2
else:
raise Error, 'unsupported compression type'
self._decomp = cl.OpenDecompressor(scheme)
self._convert = self._decomp_data
+ self._sampwidth = 2
else:
self._comptype = 'NONE'
self._compname = 'not compressed'
@@ -655,7 +654,7 @@ class Aifc_write:
def setcomptype(self, comptype, compname):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
- if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
+ if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'):
raise Error, 'unsupported compression type'
self._comptype = comptype
self._compname = compname
@@ -675,7 +674,7 @@ class Aifc_write:
nchannels, sampwidth, framerate, nframes, comptype, compname = info
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
- if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
+ if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'):
raise Error, 'unsupported compression type'
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
@@ -732,22 +731,28 @@ class Aifc_write:
self._patchheader()
def close(self):
- self._ensure_header_written(0)
- if self._datawritten & 1:
- # quick pad to even size
- self._file.write(chr(0))
- self._datawritten = self._datawritten + 1
- self._writemarkers()
- if self._nframeswritten != self._nframes or \
- self._datalength != self._datawritten or \
- self._marklength:
- self._patchheader()
- if self._comp:
- self._comp.CloseCompressor()
- self._comp = None
- # Prevent ref cycles
- self._convert = None
- self._file.close()
+ if self._file is None:
+ return
+ try:
+ self._ensure_header_written(0)
+ if self._datawritten & 1:
+ # quick pad to even size
+ self._file.write(chr(0))
+ self._datawritten = self._datawritten + 1
+ self._writemarkers()
+ if self._nframeswritten != self._nframes or \
+ self._datalength != self._datawritten or \
+ self._marklength:
+ self._patchheader()
+ if self._comp:
+ self._comp.CloseCompressor()
+ self._comp = None
+ finally:
+ # Prevent ref cycles
+ self._convert = None
+ f = self._file
+ self._file = None
+ f.close()
#
# Internal methods.
@@ -773,7 +778,7 @@ class Aifc_write:
def _ensure_header_written(self, datasize):
if not self._nframeswritten:
- if self._comptype in ('ULAW', 'ALAW'):
+ if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'):
if not self._sampwidth:
self._sampwidth = 2
if self._sampwidth != 2:
@@ -798,7 +803,7 @@ class Aifc_write:
try:
import cl
except ImportError:
- if self._comptype == 'ULAW':
+ if self._comptype in ('ULAW', 'ulaw'):
try:
import audioop
self._convert = self._lin2ulaw
@@ -806,9 +811,9 @@ class Aifc_write:
except ImportError:
pass
raise Error, 'cannot write compressed AIFF-C files'
- if self._comptype == 'ULAW':
+ if self._comptype in ('ULAW', 'ulaw'):
scheme = cl.G711_ULAW
- elif self._comptype == 'ALAW':
+ elif self._comptype in ('ALAW', 'alaw'):
scheme = cl.G711_ALAW
else:
raise Error, 'unsupported compression type'
@@ -839,7 +844,7 @@ class Aifc_write:
if self._datalength & 1:
self._datalength = self._datalength + 1
if self._aifc:
- if self._comptype in ('ULAW', 'ALAW'):
+ if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'):
self._datalength = self._datalength // 2
if self._datalength & 1:
self._datalength = self._datalength + 1
@@ -847,7 +852,10 @@ class Aifc_write:
self._datalength = (self._datalength + 3) // 4
if self._datalength & 1:
self._datalength = self._datalength + 1
- self._form_length_pos = self._file.tell()
+ try:
+ self._form_length_pos = self._file.tell()
+ except (AttributeError, IOError):
+ self._form_length_pos = None
commlength = self._write_form_length(self._datalength)
if self._aifc:
self._file.write('AIFC')
@@ -859,15 +867,20 @@ class Aifc_write:
self._file.write('COMM')
_write_ulong(self._file, commlength)
_write_short(self._file, self._nchannels)
- self._nframes_pos = self._file.tell()
+ if self._form_length_pos is not None:
+ self._nframes_pos = self._file.tell()
_write_ulong(self._file, self._nframes)
- _write_short(self._file, self._sampwidth * 8)
+ if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'):
+ _write_short(self._file, 8)
+ else:
+ _write_short(self._file, self._sampwidth * 8)
_write_float(self._file, self._framerate)
if self._aifc:
self._file.write(self._comptype)
_write_string(self._file, self._compname)
self._file.write('SSND')
- self._ssnd_length_pos = self._file.tell()
+ if self._form_length_pos is not None:
+ self._ssnd_length_pos = self._file.tell()
_write_ulong(self._file, self._datalength + 8)
_write_ulong(self._file, 0)
_write_ulong(self._file, 0)
@@ -947,23 +960,27 @@ if __name__ == '__main__':
sys.argv.append('/usr/demos/data/audio/bach.aiff')
fn = sys.argv[1]
f = open(fn, 'r')
- print "Reading", fn
- print "nchannels =", f.getnchannels()
- print "nframes =", f.getnframes()
- print "sampwidth =", f.getsampwidth()
- print "framerate =", f.getframerate()
- print "comptype =", f.getcomptype()
- print "compname =", f.getcompname()
- if sys.argv[2:]:
- gn = sys.argv[2]
- print "Writing", gn
- g = open(gn, 'w')
- g.setparams(f.getparams())
- while 1:
- data = f.readframes(1024)
- if not data:
- break
- g.writeframes(data)
- g.close()
+ try:
+ print "Reading", fn
+ print "nchannels =", f.getnchannels()
+ print "nframes =", f.getnframes()
+ print "sampwidth =", f.getsampwidth()
+ print "framerate =", f.getframerate()
+ print "comptype =", f.getcomptype()
+ print "compname =", f.getcompname()
+ if sys.argv[2:]:
+ gn = sys.argv[2]
+ print "Writing", gn
+ g = open(gn, 'w')
+ try:
+ g.setparams(f.getparams())
+ while 1:
+ data = f.readframes(1024)
+ if not data:
+ break
+ g.writeframes(data)
+ finally:
+ g.close()
+ print "Done."
+ finally:
f.close()
- print "Done."
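The aifc hunks above accept lowercase 'ulaw'/'alaw' compression names, fix the sample-width bookkeeping for compressed reads, and make close() release the file even if header patching fails. A hedged write-side sketch; the output path is hypothetical and audioop must be importable for the u-law conversion:

import aifc

out = aifc.open('/tmp/example.aifc', 'w')
out.setnchannels(1)
out.setsampwidth(2)
out.setframerate(8000)
out.setcomptype('ulaw', 'CCITT G.711 u-law')   # lowercase name rejected before this change
out.writeframes('\x00\x00' * 8000)             # one second of 16-bit mono silence
out.close()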
diff --git a/Lib/argparse.py b/Lib/argparse.py
index a9129de..b5bb19a 100644
--- a/Lib/argparse.py
+++ b/Lib/argparse.py
@@ -168,6 +168,8 @@ class HelpFormatter(object):
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = max_help_position
+ self._max_help_position = min(max_help_position,
+ max(width - 20, indent_increment * 2))
self._width = width
self._current_indent = 0
@@ -339,7 +341,7 @@ class HelpFormatter(object):
else:
line_len = len(indent) - 1
for part in parts:
- if line_len + 1 + len(part) > text_width:
+ if line_len + 1 + len(part) > text_width and line:
lines.append(indent + ' '.join(line))
line = []
line_len = len(indent) - 1
@@ -478,7 +480,7 @@ class HelpFormatter(object):
def _format_text(self, text):
if '%(prog)' in text:
text = text % dict(prog=self._prog)
- text_width = self._width - self._current_indent
+ text_width = max(self._width - self._current_indent, 11)
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
@@ -486,7 +488,7 @@ class HelpFormatter(object):
# determine the required width and the entry label
help_position = min(self._action_max_length + 2,
self._max_help_position)
- help_width = self._width - help_position
+ help_width = max(self._width - help_position, 11)
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
@@ -740,10 +742,10 @@ class Action(_AttributeHolder):
- default -- The value to be produced if the option is not specified.
- - type -- The type which the command-line arguments should be converted
- to, should be one of 'string', 'int', 'float', 'complex' or a
- callable object that accepts a single string argument. If None,
- 'string' is assumed.
+ - type -- A callable that accepts a single string argument, and
+ returns the converted value. The standard Python types str, int,
+ float, and complex are useful examples of such callables. If None,
+ str is used.
- choices -- A container of values that should be allowed. If not None,
after a command-line argument has been converted to the appropriate
@@ -1155,9 +1157,13 @@ class Namespace(_AttributeHolder):
__hash__ = None
def __eq__(self, other):
+ if not isinstance(other, Namespace):
+ return NotImplemented
return vars(self) == vars(other)
def __ne__(self, other):
+ if not isinstance(other, Namespace):
+ return NotImplemented
return not (self == other)
def __contains__(self, key):
@@ -1692,9 +1698,12 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer):
return args
def parse_known_args(self, args=None, namespace=None):
- # args default to the system args
if args is None:
+ # args default to the system args
args = _sys.argv[1:]
+ else:
+ # make sure that args are mutable
+ args = list(args)
# default Namespace built from parser defaults
if namespace is None:
@@ -1705,10 +1714,7 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer):
if action.dest is not SUPPRESS:
if not hasattr(namespace, action.dest):
if action.default is not SUPPRESS:
- default = action.default
- if isinstance(action.default, basestring):
- default = self._get_value(action, default)
- setattr(namespace, action.dest, default)
+ setattr(namespace, action.dest, action.default)
# add any parser defaults that aren't present
for dest in self._defaults:
@@ -1936,12 +1942,23 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer):
if positionals:
self.error(_('too few arguments'))
- # make sure all required actions were present
+ # make sure all required actions were present, and convert defaults.
for action in self._actions:
- if action.required:
- if action not in seen_actions:
+ if action not in seen_actions:
+ if action.required:
name = _get_action_name(action)
self.error(_('argument %s is required') % name)
+ else:
+ # Convert action default now instead of doing it before
+ # parsing arguments to avoid calling convert functions
+ # twice (which may fail) if the argument was given, but
+ # only if it was defined already in the namespace
+ if (action.default is not None and
+ isinstance(action.default, basestring) and
+ hasattr(namespace, action.dest) and
+ action.default is getattr(namespace, action.dest)):
+ setattr(namespace, action.dest,
+ self._get_value(action, action.default))
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
@@ -1967,7 +1984,7 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer):
for arg_string in arg_strings:
# for regular arguments, just add them back into the list
- if arg_string[0] not in self.fromfile_prefix_chars:
+ if not arg_string or arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
# replace arguments referencing files with the file content
@@ -2174,9 +2191,12 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer):
# Value conversion methods
# ========================
def _get_values(self, action, arg_strings):
- # for everything but PARSER args, strip out '--'
+ # for everything but PARSER, REMAINDER args, strip out first '--'
if action.nargs not in [PARSER, REMAINDER]:
- arg_strings = [s for s in arg_strings if s != '--']
+ try:
+ arg_strings.remove('--')
+ except ValueError:
+ pass
# optional argument produces a default when not present
if not arg_strings and action.nargs == OPTIONAL:
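The argparse hunks above defer conversion of string defaults, make parse_known_args() copy its args list, and strip only the first '--' separator so later ones survive as ordinary values. Sketch of the '--' behaviour:

import argparse

p = argparse.ArgumentParser()
p.add_argument('values', nargs='*')
print p.parse_args(['--', 'a', '--', 'b']).values   # ['a', '--', 'b']; every '--' was dropped before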
diff --git a/Lib/asyncore.py b/Lib/asyncore.py
index 35db387..29099bd 100644
--- a/Lib/asyncore.py
+++ b/Lib/asyncore.py
@@ -225,6 +225,7 @@ class dispatcher:
debug = False
connected = False
accepting = False
+ connecting = False
closing = False
addr = None
ignore_log_types = frozenset(['warning'])
@@ -248,7 +249,7 @@ class dispatcher:
try:
self.addr = sock.getpeername()
except socket.error, err:
- if err.args[0] == ENOTCONN:
+ if err.args[0] in (ENOTCONN, EINVAL):
# To handle the case where we got an unconnected
# socket.
self.connected = False
@@ -342,9 +343,11 @@ class dispatcher:
def connect(self, address):
self.connected = False
+ self.connecting = True
err = self.socket.connect_ex(address)
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
or err == EINVAL and os.name in ('nt', 'ce'):
+ self.addr = address
return
if err in (0, EISCONN):
self.addr = address
@@ -390,7 +393,7 @@ class dispatcher:
else:
return data
except socket.error, why:
- # winsock sometimes throws ENOTCONN
+ # winsock sometimes raises ENOTCONN
if why.args[0] in _DISCONNECTED:
self.handle_close()
return ''
@@ -400,6 +403,7 @@ class dispatcher:
def close(self):
self.connected = False
self.accepting = False
+ self.connecting = False
self.del_channel()
try:
self.socket.close()
@@ -438,7 +442,8 @@ class dispatcher:
# sockets that are connected
self.handle_accept()
elif not self.connected:
- self.handle_connect_event()
+ if self.connecting:
+ self.handle_connect_event()
self.handle_read()
else:
self.handle_read()
@@ -449,6 +454,7 @@ class dispatcher:
raise socket.error(err, _strerror(err))
self.handle_connect()
self.connected = True
+ self.connecting = False
def handle_write_event(self):
if self.accepting:
@@ -457,12 +463,8 @@ class dispatcher:
return
if not self.connected:
- #check for errors
- err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
- if err != 0:
- raise socket.error(err, _strerror(err))
-
- self.handle_connect_event()
+ if self.connecting:
+ self.handle_connect_event()
self.handle_write()
def handle_expt_event(self):
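The asyncore hunks add a connecting flag so that handle_connect_event() fires only on sockets for which connect() was actually initiated, tolerate EINVAL from getpeername() on an unconnected socket, and clear the flag again in close(). A self-contained sketch of a client that relies on the patched semantics; the loopback address and port 8907 are arbitrary choices for the example.

    # Sketch: dispatcher connect/accept round trip under the patched asyncore.
    import asyncore
    import socket

    class EchoServer(asyncore.dispatcher):
        # minimal listener so the client below has something to connect to
        def __init__(self, host, port):
            asyncore.dispatcher.__init__(self)
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            self.set_reuse_addr()
            self.bind((host, port))
            self.listen(1)

        def handle_accept(self):
            pair = self.accept()
            if pair is not None:
                sock, addr = pair
                sock.close()

    class ProbeClient(asyncore.dispatcher):
        def __init__(self, host, port):
            asyncore.dispatcher.__init__(self)
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            self.connect((host, port))      # sets the new connecting flag

        def handle_connect(self):
            # only reached because connect() was initiated on this socket
            print 'client connected'
            self.close()                    # close() also resets connecting

        def handle_read(self):
            self.recv(4096)

    if __name__ == '__main__':
        EchoServer('127.0.0.1', 8907)
        ProbeClient('127.0.0.1', 8907)
        asyncore.loop(timeout=1, count=10)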
diff --git a/Lib/bdb.py b/Lib/bdb.py
index 6aad979..59440a9 100644
--- a/Lib/bdb.py
+++ b/Lib/bdb.py
@@ -24,6 +24,7 @@ class Bdb:
self.skip = set(skip) if skip else None
self.breaks = {}
self.fncache = {}
+ self.frame_returning = None
def canonic(self, filename):
if filename == "<" + filename[1:-1] + ">":
@@ -82,7 +83,11 @@ class Bdb:
def dispatch_return(self, frame, arg):
if self.stop_here(frame) or frame == self.returnframe:
- self.user_return(frame, arg)
+ try:
+ self.frame_returning = frame
+ self.user_return(frame, arg)
+ finally:
+ self.frame_returning = None
if self.quitting: raise BdbQuit
return self.trace_dispatch
@@ -186,6 +191,14 @@ class Bdb:
def set_step(self):
"""Stop after one line of code."""
+ # Issue #13183: pdb skips frames after hitting a breakpoint and running
+ # step commands.
+ # Restore the trace function in the caller (that may not have been set
+ # for performance reasons) when returning from the current frame.
+ if self.frame_returning:
+ caller_frame = self.frame_returning.f_back
+ if caller_frame and not caller_frame.f_trace:
+ caller_frame.f_trace = self.trace_dispatch
self._set_stopinfo(None, None)
def set_next(self, frame):
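The bdb change remembers which frame is being returned from so that set_step() can re-arm tracing in the caller, which pdb may have left without an f_trace for performance reasons (Issue #13183). A rough structural sketch of the same pattern using a plain sys.settrace tracer rather than pdb; MiniTracer is illustrative, not the patched class.

    # Sketch: record the returning frame and restore the caller's local trace.
    import sys

    class MiniTracer(object):
        def __init__(self):
            self.frame_returning = None

        def trace_dispatch(self, frame, event, arg):
            if event == 'return':
                try:
                    self.frame_returning = frame
                    print 'returning from', frame.f_code.co_name
                finally:
                    self.frame_returning = None
            return self.trace_dispatch

        def set_step(self):
            # mirrors the patched Bdb.set_step(): if the caller has no trace
            # function, give it ours so stepping continues after the return
            if self.frame_returning:
                caller = self.frame_returning.f_back
                if caller and not caller.f_trace:
                    caller.f_trace = self.trace_dispatch

    def inner():
        return 1

    def outer():
        return inner()

    tracer = MiniTracer()
    sys.settrace(tracer.trace_dispatch)
    outer()
    sys.settrace(None)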
diff --git a/Lib/bsddb/__init__.py b/Lib/bsddb/__init__.py
index ed4deea..13c9c27 100644
--- a/Lib/bsddb/__init__.py
+++ b/Lib/bsddb/__init__.py
@@ -33,7 +33,7 @@
#----------------------------------------------------------------------
-"""Support for Berkeley DB 4.1 through 4.8 with a simple interface.
+"""Support for Berkeley DB 4.3 through 5.3 with a simple interface.
For the full featured object oriented interface use the bsddb.db module
instead. It mirrors the Oracle Berkeley DB C API.
@@ -138,7 +138,7 @@ class _iter_mixin(MutableMapping):
except _bsddb.DBCursorClosedError:
# the database was modified during iteration. abort.
pass
-# When Python 2.3 not supported in bsddb3, we can change this to "finally"
+# When Python 2.4 not supported in bsddb3, we can change this to "finally"
except :
self._in_iter -= 1
raise
@@ -181,7 +181,7 @@ class _iter_mixin(MutableMapping):
except _bsddb.DBCursorClosedError:
# the database was modified during iteration. abort.
pass
-# When Python 2.3 not supported in bsddb3, we can change this to "finally"
+# When Python 2.4 not supported in bsddb3, we can change this to "finally"
except :
self._in_iter -= 1
raise
diff --git a/Lib/bsddb/dbobj.py b/Lib/bsddb/dbobj.py
index c7c7322..1400fe1 100644
--- a/Lib/bsddb/dbobj.py
+++ b/Lib/bsddb/dbobj.py
@@ -30,12 +30,7 @@ else :
import db
if sys.version_info < (2, 6) :
- try:
- from UserDict import DictMixin
- except ImportError:
- # DictMixin is new in Python 2.3
- class DictMixin: pass
- MutableMapping = DictMixin
+ from UserDict import DictMixin as MutableMapping
else :
import collections
MutableMapping = collections.MutableMapping
@@ -196,6 +191,8 @@ class DB(MutableMapping):
return self._cobj.set_bt_compare(*args, **kwargs)
def set_cachesize(self, *args, **kwargs):
return self._cobj.set_cachesize(*args, **kwargs)
+ def set_dup_compare(self, *args, **kwargs) :
+ return self._cobj.set_dup_compare(*args, **kwargs)
def set_flags(self, *args, **kwargs):
return self._cobj.set_flags(*args, **kwargs)
def set_h_ffactor(self, *args, **kwargs):
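The dbobj wrapper gains set_dup_compare(), forwarding to the underlying DB handle so that sorted duplicates can use a Python comparison function (exercised by the new DuplicateCompareTestCase further below). A hedged usage sketch; it needs the bsddb extension, and the file name dups.db is only an example.

    # Sketch: ordering duplicate values of a single key with a Python comparator.
    from bsddb import db

    def reverse_cmp(left, right):
        # must return 0 for equal values; here: reverse lexical order
        return -cmp(left, right)

    d = db.DB()
    d.set_flags(db.DB_DUPSORT)          # duplicates are kept sorted...
    d.set_dup_compare(reverse_cmp)      # ...using our comparator
    d.open('dups.db', dbtype=db.DB_BTREE, flags=db.DB_CREATE)
    for value in ('a', 'c', 'b'):
        d.put('key', value)
    cursor = d.cursor()
    rec = cursor.first()
    while rec:
        print rec                       # ('key', 'c'), ('key', 'b'), ('key', 'a')
        rec = cursor.next()
    cursor.close()
    d.close()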
diff --git a/Lib/bsddb/dbshelve.py b/Lib/bsddb/dbshelve.py
index e3f6d4c..7d0daa2 100644
--- a/Lib/bsddb/dbshelve.py
+++ b/Lib/bsddb/dbshelve.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#------------------------------------------------------------------------
# Copyright (c) 1997-2001 by Total Control Software
# All Rights Reserved
@@ -43,7 +42,7 @@ else :
if sys.version_info < (2, 6) :
import cPickle
else :
- # When we drop support for python 2.3 and 2.4
+ # When we drop support for python 2.4
# we could use: (in 2.5 we need a __future__ statement)
#
# with warnings.catch_warnings():
@@ -51,7 +50,7 @@ else :
# ...
#
# We can not use "with" as is, because it would be invalid syntax
- # in python 2.3, 2.4 and (with no __future__) 2.5.
+ # in python 2.4 and (with no __future__) 2.5.
# Here we simulate "with" following PEP 343 :
import warnings
w = warnings.catch_warnings()
@@ -65,32 +64,12 @@ else :
w.__exit__()
del w
-#At version 2.3 cPickle switched to using protocol instead of bin
-if sys.version_info >= (2, 3):
- HIGHEST_PROTOCOL = cPickle.HIGHEST_PROTOCOL
-# In python 2.3.*, "cPickle.dumps" accepts no
-# named parameters. "pickle.dumps" accepts them,
-# so this seems a bug.
- if sys.version_info < (2, 4):
- def _dumps(object, protocol):
- return cPickle.dumps(object, protocol)
- else :
- def _dumps(object, protocol):
- return cPickle.dumps(object, protocol=protocol)
-
-else:
- HIGHEST_PROTOCOL = None
- def _dumps(object, protocol):
- return cPickle.dumps(object, bin=protocol)
-
+HIGHEST_PROTOCOL = cPickle.HIGHEST_PROTOCOL
+def _dumps(object, protocol):
+ return cPickle.dumps(object, protocol=protocol)
if sys.version_info < (2, 6) :
- try:
- from UserDict import DictMixin
- except ImportError:
- # DictMixin is new in Python 2.3
- class DictMixin: pass
- MutableMapping = DictMixin
+ from UserDict import DictMixin as MutableMapping
else :
import collections
MutableMapping = collections.MutableMapping
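With Python 2.3 support dropped, dbshelve always pickles with cPickle.HIGHEST_PROTOCOL and passes the protocol as a keyword argument; only one code path remains. The equivalent call in isolation, using nothing beyond cPickle:

    # Sketch: the single pickling path dbshelve now uses.
    import cPickle

    HIGHEST_PROTOCOL = cPickle.HIGHEST_PROTOCOL     # protocol 2 on Python 2.7

    def _dumps(obj, protocol):
        return cPickle.dumps(obj, protocol=protocol)

    data = _dumps({'answer': 42}, HIGHEST_PROTOCOL)
    assert cPickle.loads(data) == {'answer': 42}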
diff --git a/Lib/bsddb/dbtables.py b/Lib/bsddb/dbtables.py
index 3ebc68d..e8acdd0 100644
--- a/Lib/bsddb/dbtables.py
+++ b/Lib/bsddb/dbtables.py
@@ -30,7 +30,7 @@ else :
if sys.version_info < (2, 6) :
import cPickle as pickle
else :
- # When we drop support for python 2.3 and 2.4
+ # When we drop support for python 2.4
# we could use: (in 2.5 we need a __future__ statement)
#
# with warnings.catch_warnings():
@@ -38,7 +38,7 @@ else :
# ...
#
# We can not use "with" as is, because it would be invalid syntax
- # in python 2.3, 2.4 and (with no __future__) 2.5.
+ # in python 2.4 and (with no __future__) 2.5.
# Here we simulate "with" following PEP 343 :
import warnings
w = warnings.catch_warnings()
diff --git a/Lib/bsddb/test/test_all.py b/Lib/bsddb/test/test_all.py
index e9fe618..caef1ac 100644
--- a/Lib/bsddb/test/test_all.py
+++ b/Lib/bsddb/test/test_all.py
@@ -392,10 +392,8 @@ if sys.version_info[0] >= 3 :
return self._dbenv.get_tmp_dir().decode(charset)
def get_data_dirs(self) :
- # Have to use a list comprehension and not
- # generators, because we are supporting Python 2.3.
return tuple(
- [i.decode(charset) for i in self._dbenv.get_data_dirs()])
+ (i.decode(charset) for i in self._dbenv.get_data_dirs()))
class DBSequence_py3k(object) :
def __init__(self, db, *args, **kwargs) :
@@ -484,6 +482,8 @@ def print_versions():
print '-=' * 38
print db.DB_VERSION_STRING
print 'bsddb.db.version(): %s' % (db.version(), )
+ if db.version() >= (5, 0) :
+ print 'bsddb.db.full_version(): %s' %repr(db.full_version())
print 'bsddb.db.__version__: %s' % db.__version__
print 'bsddb.db.cvsid: %s' % db.cvsid
@@ -528,7 +528,8 @@ def get_new_database_path() :
# This path can be overriden via "set_test_path_prefix()".
import os, os.path
-get_new_path.prefix=os.path.join(os.sep,"tmp","z-Berkeley_DB")
+get_new_path.prefix=os.path.join(os.environ.get("TMPDIR",
+ os.path.join(os.sep,"tmp")), "z-Berkeley_DB")
get_new_path.num=0
def get_test_path_prefix() :
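The test helper now respects the TMPDIR environment variable instead of hard-coding /tmp. The same fallback pattern in isolation; the z-Berkeley_DB suffix is simply the name the test suite uses.

    # Sketch: prefer $TMPDIR, fall back to /tmp.
    import os
    import os.path

    prefix = os.path.join(os.environ.get("TMPDIR", os.path.join(os.sep, "tmp")),
                          "z-Berkeley_DB")
    print prefix        # /tmp/z-Berkeley_DB unless TMPDIR points elsewhere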
diff --git a/Lib/bsddb/test/test_basics.py b/Lib/bsddb/test/test_basics.py
index 4d43390..3c57be4 100644
--- a/Lib/bsddb/test/test_basics.py
+++ b/Lib/bsddb/test/test_basics.py
@@ -9,6 +9,7 @@ import string
from pprint import pprint
import unittest
import time
+import sys
from test_all import db, test_support, verbose, get_new_environment_path, \
get_new_database_path
@@ -44,13 +45,6 @@ class BasicTestCase(unittest.TestCase):
_numKeys = 1002 # PRIVATE. NOTE: must be an even value
- import sys
- if sys.version_info < (2, 4):
- def assertTrue(self, expr, msg=None):
- self.failUnless(expr,msg=msg)
- def assertFalse(self, expr, msg=None):
- self.failIf(expr,msg=msg)
-
def setUp(self):
if self.useEnv:
self.homeDir=get_new_environment_path()
@@ -74,14 +68,13 @@ class BasicTestCase(unittest.TestCase):
# create and open the DB
self.d = db.DB(self.env)
if not self.useEnv :
- if db.version() >= (4, 2) :
- self.d.set_cachesize(*self.cachesize)
- cachesize = self.d.get_cachesize()
- self.assertEqual(cachesize[0], self.cachesize[0])
- self.assertEqual(cachesize[2], self.cachesize[2])
- # Berkeley DB expands the cache 25% accounting overhead,
- # if the cache is small.
- self.assertEqual(125, int(100.0*cachesize[1]/self.cachesize[1]))
+ self.d.set_cachesize(*self.cachesize)
+ cachesize = self.d.get_cachesize()
+ self.assertEqual(cachesize[0], self.cachesize[0])
+ self.assertEqual(cachesize[2], self.cachesize[2])
+ # Berkeley DB expands the cache 25% accounting overhead,
+ # if the cache is small.
+ self.assertEqual(125, int(100.0*cachesize[1]/self.cachesize[1]))
self.d.set_flags(self.dbsetflags)
if self.dbname:
self.d.open(self.filename, self.dbname, self.dbtype,
@@ -161,7 +154,6 @@ class BasicTestCase(unittest.TestCase):
try:
d.delete('abcd')
except db.DBNotFoundError, val:
- import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
@@ -184,7 +176,6 @@ class BasicTestCase(unittest.TestCase):
try:
d.put('abcd', 'this should fail', flags=db.DB_NOOVERWRITE)
except db.DBKeyExistError, val:
- import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], db.DB_KEYEXIST)
else :
@@ -338,7 +329,6 @@ class BasicTestCase(unittest.TestCase):
rec = c.next()
except db.DBNotFoundError, val:
if get_raises_error:
- import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
@@ -363,7 +353,6 @@ class BasicTestCase(unittest.TestCase):
rec = c.prev()
except db.DBNotFoundError, val:
if get_raises_error:
- import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
@@ -390,7 +379,6 @@ class BasicTestCase(unittest.TestCase):
try:
n = c.set('bad key')
except db.DBNotFoundError, val:
- import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
@@ -408,7 +396,6 @@ class BasicTestCase(unittest.TestCase):
try:
n = c.get_both('0404', 'bad data')
except db.DBNotFoundError, val:
- import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
@@ -441,7 +428,6 @@ class BasicTestCase(unittest.TestCase):
rec = c.current()
except db.DBKeyEmptyError, val:
if get_raises_error:
- import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], db.DB_KEYEMPTY)
else :
@@ -490,7 +476,6 @@ class BasicTestCase(unittest.TestCase):
# a bug may cause a NULL pointer dereference...
getattr(c, method)(*args)
except db.DBError, val:
- import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], 0)
else :
@@ -712,11 +697,6 @@ class BasicHashWithEnvTestCase(BasicWithEnvTestCase):
#----------------------------------------------------------------------
class BasicTransactionTestCase(BasicTestCase):
- import sys
- if sys.version_info < (2, 4):
- def assertTrue(self, expr, msg=None):
- return self.failUnless(expr,msg=msg)
-
if (sys.version_info < (2, 7)) or ((sys.version_info >= (3, 0)) and
(sys.version_info < (3, 2))) :
def assertIn(self, a, b, msg=None) :
@@ -792,7 +772,6 @@ class BasicTransactionTestCase(BasicTestCase):
for log in logs:
if verbose:
print 'log file: ' + log
- if db.version() >= (4,2):
logs = self.env.log_archive(db.DB_ARCH_REMOVE)
self.assertTrue(not logs)
@@ -875,7 +854,6 @@ class BasicTransactionTestCase(BasicTestCase):
#----------------------------------------
- if db.version() >= (4, 2) :
def test_get_tx_max(self) :
self.assertEqual(self.env.get_tx_max(), 30)
@@ -1098,11 +1076,6 @@ class HashMultiDBTestCase(BasicMultiDBTestCase):
class PrivateObject(unittest.TestCase) :
- import sys
- if sys.version_info < (2, 4):
- def assertTrue(self, expr, msg=None):
- self.failUnless(expr,msg=msg)
-
def tearDown(self) :
del self.obj
@@ -1116,7 +1089,6 @@ class PrivateObject(unittest.TestCase) :
self.assertTrue(a is b) # Object identity
def test03_leak_assignment(self) :
- import sys
a = "example of private object"
refcount = sys.getrefcount(a)
self.obj.set_private(a)
@@ -1125,7 +1097,6 @@ class PrivateObject(unittest.TestCase) :
self.assertEqual(refcount, sys.getrefcount(a))
def test04_leak_GC(self) :
- import sys
a = "example of private object"
refcount = sys.getrefcount(a)
self.obj.set_private(a)
@@ -1141,11 +1112,6 @@ class DBPrivateObject(PrivateObject) :
self.obj = db.DB()
class CrashAndBurn(unittest.TestCase) :
- import sys
- if sys.version_info < (2, 4):
- def assertTrue(self, expr, msg=None):
- self.failUnless(expr,msg=msg)
-
#def test01_OpenCrash(self) :
# # See http://bugs.python.org/issue3307
# self.assertRaises(db.DBInvalidArgError, db.DB, None, 65535)
diff --git a/Lib/bsddb/test/test_compare.py b/Lib/bsddb/test/test_compare.py
index db9cd74..cb3b463 100644
--- a/Lib/bsddb/test/test_compare.py
+++ b/Lib/bsddb/test/test_compare.py
@@ -1,5 +1,5 @@
"""
-TestCases for python DB Btree key comparison function.
+TestCases for python DB duplicate and Btree key comparison function.
"""
import sys, os, re
@@ -20,31 +20,24 @@ def cmp(a, b) :
lexical_cmp = cmp
-def lowercase_cmp(left, right):
- return cmp (left.lower(), right.lower())
+def lowercase_cmp(left, right) :
+ return cmp(left.lower(), right.lower())
-def make_reverse_comparator (cmp):
- def reverse (left, right, delegate=cmp):
- return - delegate (left, right)
+def make_reverse_comparator(cmp) :
+ def reverse(left, right, delegate=cmp) :
+ return - delegate(left, right)
return reverse
_expected_lexical_test_data = ['', 'CCCP', 'a', 'aaa', 'b', 'c', 'cccce', 'ccccf']
_expected_lowercase_test_data = ['', 'a', 'aaa', 'b', 'c', 'CC', 'cccce', 'ccccf', 'CCCP']
-class ComparatorTests (unittest.TestCase):
- if sys.version_info < (2, 4) :
- def assertTrue(self, expr, msg=None) :
- return self.failUnless(expr,msg=msg)
-
- def comparator_test_helper (self, comparator, expected_data):
+class ComparatorTests(unittest.TestCase) :
+ def comparator_test_helper(self, comparator, expected_data) :
data = expected_data[:]
import sys
if sys.version_info < (2, 6) :
- if sys.version_info < (2, 4) :
- data.sort(comparator)
- else :
- data.sort(cmp=comparator)
+ data.sort(cmp=comparator)
else : # Insertion Sort. Please, improve
data2 = []
for i in data :
@@ -60,143 +53,139 @@ class ComparatorTests (unittest.TestCase):
self.assertEqual(data, expected_data,
"comparator `%s' is not right: %s vs. %s"
% (comparator, expected_data, data))
- def test_lexical_comparator (self):
- self.comparator_test_helper (lexical_cmp, _expected_lexical_test_data)
- def test_reverse_lexical_comparator (self):
+ def test_lexical_comparator(self) :
+ self.comparator_test_helper(lexical_cmp, _expected_lexical_test_data)
+ def test_reverse_lexical_comparator(self) :
rev = _expected_lexical_test_data[:]
- rev.reverse ()
- self.comparator_test_helper (make_reverse_comparator (lexical_cmp),
+ rev.reverse()
+ self.comparator_test_helper(make_reverse_comparator(lexical_cmp),
rev)
- def test_lowercase_comparator (self):
- self.comparator_test_helper (lowercase_cmp,
+ def test_lowercase_comparator(self) :
+ self.comparator_test_helper(lowercase_cmp,
_expected_lowercase_test_data)
-class AbstractBtreeKeyCompareTestCase (unittest.TestCase):
+class AbstractBtreeKeyCompareTestCase(unittest.TestCase) :
env = None
db = None
- if sys.version_info < (2, 4) :
- def assertTrue(self, expr, msg=None):
- self.failUnless(expr,msg=msg)
-
if (sys.version_info < (2, 7)) or ((sys.version_info >= (3,0)) and
(sys.version_info < (3, 2))) :
def assertLess(self, a, b, msg=None) :
return self.assertTrue(a<b, msg=msg)
- def setUp (self):
+ def setUp(self) :
self.filename = self.__class__.__name__ + '.db'
self.homeDir = get_new_environment_path()
env = db.DBEnv()
- env.open (self.homeDir,
+ env.open(self.homeDir,
db.DB_CREATE | db.DB_INIT_MPOOL
| db.DB_INIT_LOCK | db.DB_THREAD)
self.env = env
- def tearDown (self):
+ def tearDown(self) :
self.closeDB()
if self.env is not None:
self.env.close()
self.env = None
test_support.rmtree(self.homeDir)
- def addDataToDB (self, data):
+ def addDataToDB(self, data) :
i = 0
for item in data:
- self.db.put (item, str (i))
+ self.db.put(item, str(i))
i = i + 1
- def createDB (self, key_comparator):
- self.db = db.DB (self.env)
- self.setupDB (key_comparator)
- self.db.open (self.filename, "test", db.DB_BTREE, db.DB_CREATE)
+ def createDB(self, key_comparator) :
+ self.db = db.DB(self.env)
+ self.setupDB(key_comparator)
+ self.db.open(self.filename, "test", db.DB_BTREE, db.DB_CREATE)
- def setupDB (self, key_comparator):
- self.db.set_bt_compare (key_comparator)
+ def setupDB(self, key_comparator) :
+ self.db.set_bt_compare(key_comparator)
- def closeDB (self):
+ def closeDB(self) :
if self.db is not None:
- self.db.close ()
+ self.db.close()
self.db = None
- def startTest (self):
+ def startTest(self) :
pass
- def finishTest (self, expected = None):
+ def finishTest(self, expected = None) :
if expected is not None:
- self.check_results (expected)
- self.closeDB ()
+ self.check_results(expected)
+ self.closeDB()
- def check_results (self, expected):
- curs = self.db.cursor ()
+ def check_results(self, expected) :
+ curs = self.db.cursor()
try:
index = 0
- rec = curs.first ()
+ rec = curs.first()
while rec:
key, ignore = rec
- self.assertLess(index, len (expected),
+ self.assertLess(index, len(expected),
"to many values returned from cursor")
self.assertEqual(expected[index], key,
"expected value `%s' at %d but got `%s'"
% (expected[index], index, key))
index = index + 1
- rec = curs.next ()
- self.assertEqual(index, len (expected),
+ rec = curs.next()
+ self.assertEqual(index, len(expected),
"not enough values returned from cursor")
finally:
- curs.close ()
+ curs.close()
-class BtreeKeyCompareTestCase (AbstractBtreeKeyCompareTestCase):
- def runCompareTest (self, comparator, data):
- self.startTest ()
- self.createDB (comparator)
- self.addDataToDB (data)
- self.finishTest (data)
+class BtreeKeyCompareTestCase(AbstractBtreeKeyCompareTestCase) :
+ def runCompareTest(self, comparator, data) :
+ self.startTest()
+ self.createDB(comparator)
+ self.addDataToDB(data)
+ self.finishTest(data)
- def test_lexical_ordering (self):
- self.runCompareTest (lexical_cmp, _expected_lexical_test_data)
+ def test_lexical_ordering(self) :
+ self.runCompareTest(lexical_cmp, _expected_lexical_test_data)
- def test_reverse_lexical_ordering (self):
+ def test_reverse_lexical_ordering(self) :
expected_rev_data = _expected_lexical_test_data[:]
- expected_rev_data.reverse ()
- self.runCompareTest (make_reverse_comparator (lexical_cmp),
+ expected_rev_data.reverse()
+ self.runCompareTest(make_reverse_comparator(lexical_cmp),
expected_rev_data)
- def test_compare_function_useless (self):
- self.startTest ()
- def socialist_comparator (l, r):
+ def test_compare_function_useless(self) :
+ self.startTest()
+ def socialist_comparator(l, r) :
return 0
- self.createDB (socialist_comparator)
- self.addDataToDB (['b', 'a', 'd'])
+ self.createDB(socialist_comparator)
+ self.addDataToDB(['b', 'a', 'd'])
# all things being equal the first key will be the only key
# in the database... (with the last key's value fwiw)
- self.finishTest (['b'])
+ self.finishTest(['b'])
-class BtreeExceptionsTestCase (AbstractBtreeKeyCompareTestCase):
- def test_raises_non_callable (self):
- self.startTest ()
- self.assertRaises (TypeError, self.createDB, 'abc')
- self.assertRaises (TypeError, self.createDB, None)
- self.finishTest ()
+class BtreeExceptionsTestCase(AbstractBtreeKeyCompareTestCase) :
+ def test_raises_non_callable(self) :
+ self.startTest()
+ self.assertRaises(TypeError, self.createDB, 'abc')
+ self.assertRaises(TypeError, self.createDB, None)
+ self.finishTest()
- def test_set_bt_compare_with_function (self):
- self.startTest ()
- self.createDB (lexical_cmp)
- self.finishTest ()
+ def test_set_bt_compare_with_function(self) :
+ self.startTest()
+ self.createDB(lexical_cmp)
+ self.finishTest()
- def check_results (self, results):
+ def check_results(self, results) :
pass
- def test_compare_function_incorrect (self):
- self.startTest ()
- def bad_comparator (l, r):
+ def test_compare_function_incorrect(self) :
+ self.startTest()
+ def bad_comparator(l, r) :
return 1
# verify that set_bt_compare checks that comparator('', '') == 0
- self.assertRaises (TypeError, self.createDB, bad_comparator)
- self.finishTest ()
+ self.assertRaises(TypeError, self.createDB, bad_comparator)
+ self.finishTest()
- def verifyStderr(self, method, successRe):
+ def verifyStderr(self, method, successRe) :
"""
Call method() while capturing sys.stderr output internally and
call self.fail() if successRe.search() does not match the stderr
@@ -210,64 +199,249 @@ class BtreeExceptionsTestCase (AbstractBtreeKeyCompareTestCase):
temp = sys.stderr
sys.stderr = stdErr
errorOut = temp.getvalue()
- if not successRe.search(errorOut):
+ if not successRe.search(errorOut) :
self.fail("unexpected stderr output:\n"+errorOut)
if sys.version_info < (3, 0) : # XXX: How to do this in Py3k ???
sys.exc_traceback = sys.last_traceback = None
- def _test_compare_function_exception (self):
- self.startTest ()
- def bad_comparator (l, r):
+ def _test_compare_function_exception(self) :
+ self.startTest()
+ def bad_comparator(l, r) :
if l == r:
# pass the set_bt_compare test
return 0
raise RuntimeError, "i'm a naughty comparison function"
- self.createDB (bad_comparator)
+ self.createDB(bad_comparator)
#print "\n*** test should print 2 uncatchable tracebacks ***"
- self.addDataToDB (['a', 'b', 'c']) # this should raise, but...
- self.finishTest ()
+ self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
+ self.finishTest()
- def test_compare_function_exception(self):
+ def test_compare_function_exception(self) :
self.verifyStderr(
self._test_compare_function_exception,
re.compile('(^RuntimeError:.* naughty.*){2}', re.M|re.S)
)
- def _test_compare_function_bad_return (self):
- self.startTest ()
- def bad_comparator (l, r):
+ def _test_compare_function_bad_return(self) :
+ self.startTest()
+ def bad_comparator(l, r) :
if l == r:
# pass the set_bt_compare test
return 0
return l
- self.createDB (bad_comparator)
+ self.createDB(bad_comparator)
+ #print "\n*** test should print 2 errors about returning an int ***"
+ self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
+ self.finishTest()
+
+ def test_compare_function_bad_return(self) :
+ self.verifyStderr(
+ self._test_compare_function_bad_return,
+ re.compile('(^TypeError:.* return an int.*){2}', re.M|re.S)
+ )
+
+
+ def test_cannot_assign_twice(self) :
+
+ def my_compare(a, b) :
+ return 0
+
+ self.startTest()
+ self.createDB(my_compare)
+ self.assertRaises(RuntimeError, self.db.set_bt_compare, my_compare)
+
+class AbstractDuplicateCompareTestCase(unittest.TestCase) :
+ env = None
+ db = None
+
+ if (sys.version_info < (2, 7)) or ((sys.version_info >= (3,0)) and
+ (sys.version_info < (3, 2))) :
+ def assertLess(self, a, b, msg=None) :
+ return self.assertTrue(a<b, msg=msg)
+
+ def setUp(self) :
+ self.filename = self.__class__.__name__ + '.db'
+ self.homeDir = get_new_environment_path()
+ env = db.DBEnv()
+ env.open(self.homeDir,
+ db.DB_CREATE | db.DB_INIT_MPOOL
+ | db.DB_INIT_LOCK | db.DB_THREAD)
+ self.env = env
+
+ def tearDown(self) :
+ self.closeDB()
+ if self.env is not None:
+ self.env.close()
+ self.env = None
+ test_support.rmtree(self.homeDir)
+
+ def addDataToDB(self, data) :
+ for item in data:
+ self.db.put("key", item)
+
+ def createDB(self, dup_comparator) :
+ self.db = db.DB(self.env)
+ self.setupDB(dup_comparator)
+ self.db.open(self.filename, "test", db.DB_BTREE, db.DB_CREATE)
+
+ def setupDB(self, dup_comparator) :
+ self.db.set_flags(db.DB_DUPSORT)
+ self.db.set_dup_compare(dup_comparator)
+
+ def closeDB(self) :
+ if self.db is not None:
+ self.db.close()
+ self.db = None
+
+ def startTest(self) :
+ pass
+
+ def finishTest(self, expected = None) :
+ if expected is not None:
+ self.check_results(expected)
+ self.closeDB()
+
+ def check_results(self, expected) :
+ curs = self.db.cursor()
+ try:
+ index = 0
+ rec = curs.first()
+ while rec:
+ ignore, data = rec
+ self.assertLess(index, len(expected),
+ "to many values returned from cursor")
+ self.assertEqual(expected[index], data,
+ "expected value `%s' at %d but got `%s'"
+ % (expected[index], index, data))
+ index = index + 1
+ rec = curs.next()
+ self.assertEqual(index, len(expected),
+ "not enough values returned from cursor")
+ finally:
+ curs.close()
+
+class DuplicateCompareTestCase(AbstractDuplicateCompareTestCase) :
+ def runCompareTest(self, comparator, data) :
+ self.startTest()
+ self.createDB(comparator)
+ self.addDataToDB(data)
+ self.finishTest(data)
+
+ def test_lexical_ordering(self) :
+ self.runCompareTest(lexical_cmp, _expected_lexical_test_data)
+
+ def test_reverse_lexical_ordering(self) :
+ expected_rev_data = _expected_lexical_test_data[:]
+ expected_rev_data.reverse()
+ self.runCompareTest(make_reverse_comparator(lexical_cmp),
+ expected_rev_data)
+
+class DuplicateExceptionsTestCase(AbstractDuplicateCompareTestCase) :
+ def test_raises_non_callable(self) :
+ self.startTest()
+ self.assertRaises(TypeError, self.createDB, 'abc')
+ self.assertRaises(TypeError, self.createDB, None)
+ self.finishTest()
+
+ def test_set_dup_compare_with_function(self) :
+ self.startTest()
+ self.createDB(lexical_cmp)
+ self.finishTest()
+
+ def check_results(self, results) :
+ pass
+
+ def test_compare_function_incorrect(self) :
+ self.startTest()
+ def bad_comparator(l, r) :
+ return 1
+ # verify that set_dup_compare checks that comparator('', '') == 0
+ self.assertRaises(TypeError, self.createDB, bad_comparator)
+ self.finishTest()
+
+ def test_compare_function_useless(self) :
+ self.startTest()
+ def socialist_comparator(l, r) :
+ return 0
+ self.createDB(socialist_comparator)
+ # DUPSORT does not allow "duplicate duplicates"
+ self.assertRaises(db.DBKeyExistError, self.addDataToDB, ['b', 'a', 'd'])
+ self.finishTest()
+
+ def verifyStderr(self, method, successRe) :
+ """
+ Call method() while capturing sys.stderr output internally and
+ call self.fail() if successRe.search() does not match the stderr
+ output. This is used to test for uncatchable exceptions.
+ """
+ stdErr = sys.stderr
+ sys.stderr = StringIO()
+ try:
+ method()
+ finally:
+ temp = sys.stderr
+ sys.stderr = stdErr
+ errorOut = temp.getvalue()
+ if not successRe.search(errorOut) :
+ self.fail("unexpected stderr output:\n"+errorOut)
+ if sys.version_info < (3, 0) : # XXX: How to do this in Py3k ???
+ sys.exc_traceback = sys.last_traceback = None
+
+ def _test_compare_function_exception(self) :
+ self.startTest()
+ def bad_comparator(l, r) :
+ if l == r:
+ # pass the set_dup_compare test
+ return 0
+ raise RuntimeError, "i'm a naughty comparison function"
+ self.createDB(bad_comparator)
+ #print "\n*** test should print 2 uncatchable tracebacks ***"
+ self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
+ self.finishTest()
+
+ def test_compare_function_exception(self) :
+ self.verifyStderr(
+ self._test_compare_function_exception,
+ re.compile('(^RuntimeError:.* naughty.*){2}', re.M|re.S)
+ )
+
+ def _test_compare_function_bad_return(self) :
+ self.startTest()
+ def bad_comparator(l, r) :
+ if l == r:
+ # pass the set_dup_compare test
+ return 0
+ return l
+ self.createDB(bad_comparator)
#print "\n*** test should print 2 errors about returning an int ***"
- self.addDataToDB (['a', 'b', 'c']) # this should raise, but...
- self.finishTest ()
+ self.addDataToDB(['a', 'b', 'c']) # this should raise, but...
+ self.finishTest()
- def test_compare_function_bad_return(self):
+ def test_compare_function_bad_return(self) :
self.verifyStderr(
self._test_compare_function_bad_return,
re.compile('(^TypeError:.* return an int.*){2}', re.M|re.S)
)
- def test_cannot_assign_twice (self):
+ def test_cannot_assign_twice(self) :
- def my_compare (a, b):
+ def my_compare(a, b) :
return 0
self.startTest()
self.createDB(my_compare)
- self.assertRaises (RuntimeError, self.db.set_bt_compare, my_compare)
+ self.assertRaises(RuntimeError, self.db.set_dup_compare, my_compare)
-def test_suite ():
- res = unittest.TestSuite ()
+def test_suite() :
+ res = unittest.TestSuite()
- res.addTest (unittest.makeSuite (ComparatorTests))
- res.addTest (unittest.makeSuite (BtreeExceptionsTestCase))
- res.addTest (unittest.makeSuite (BtreeKeyCompareTestCase))
+ res.addTest(unittest.makeSuite(ComparatorTests))
+ res.addTest(unittest.makeSuite(BtreeExceptionsTestCase))
+ res.addTest(unittest.makeSuite(BtreeKeyCompareTestCase))
+ res.addTest(unittest.makeSuite(DuplicateExceptionsTestCase))
+ res.addTest(unittest.makeSuite(DuplicateCompareTestCase))
return res
if __name__ == '__main__':
- unittest.main (defaultTest = 'suite')
+ unittest.main(defaultTest = 'suite')
diff --git a/Lib/bsddb/test/test_db.py b/Lib/bsddb/test/test_db.py
index 8d3a32b..2bc109f 100644
--- a/Lib/bsddb/test/test_db.py
+++ b/Lib/bsddb/test/test_db.py
@@ -7,11 +7,6 @@ from test_all import db, test_support, get_new_environment_path, \
#----------------------------------------------------------------------
class DB(unittest.TestCase):
- import sys
- if sys.version_info < (2, 4) :
- def assertTrue(self, expr, msg=None):
- self.failUnless(expr,msg=msg)
-
def setUp(self):
self.path = get_new_database_path()
self.db = db.DB()
@@ -19,10 +14,28 @@ class DB(unittest.TestCase):
def tearDown(self):
self.db.close()
del self.db
- test_support.rmtree(self.path)
+ test_support.unlink(self.path)
class DB_general(DB) :
- if db.version() >= (4, 2) :
+ def test_get_open_flags(self) :
+ self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE)
+ self.assertEqual(db.DB_CREATE, self.db.get_open_flags())
+
+ def test_get_open_flags2(self) :
+ self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE |
+ db.DB_THREAD)
+ self.assertEqual(db.DB_CREATE | db.DB_THREAD, self.db.get_open_flags())
+
+ def test_get_dbname_filename(self) :
+ self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE)
+ self.assertEqual((self.path, None), self.db.get_dbname())
+
+ def test_get_dbname_filename_database(self) :
+ name = "jcea-random-name"
+ self.db.open(self.path, dbname=name, dbtype=db.DB_HASH,
+ flags = db.DB_CREATE)
+ self.assertEqual((self.path, name), self.db.get_dbname())
+
def test_bt_minkey(self) :
for i in [17, 108, 1030] :
self.db.set_bt_minkey(i)
@@ -44,30 +57,34 @@ class DB_general(DB) :
self.db.set_priority(flag)
self.assertEqual(flag, self.db.get_priority())
+ def test_get_transactional(self) :
+ self.assertFalse(self.db.get_transactional())
+ self.db.open(self.path, dbtype=db.DB_HASH, flags = db.DB_CREATE)
+ self.assertFalse(self.db.get_transactional())
+
class DB_hash(DB) :
- if db.version() >= (4, 2) :
- def test_h_ffactor(self) :
- for ffactor in [4, 16, 256] :
- self.db.set_h_ffactor(ffactor)
- self.assertEqual(ffactor, self.db.get_h_ffactor())
-
- def test_h_nelem(self) :
- for nelem in [1, 2, 4] :
- nelem = nelem*1024*1024 # Millions
- self.db.set_h_nelem(nelem)
- self.assertEqual(nelem, self.db.get_h_nelem())
-
- def test_pagesize(self) :
- for i in xrange(9, 17) : # From 512 to 65536
- i = 1<<i
- self.db.set_pagesize(i)
- self.assertEqual(i, self.db.get_pagesize())
-
- # The valid values goes from 512 to 65536
- # Test 131072 bytes...
- self.assertRaises(db.DBInvalidArgError, self.db.set_pagesize, 1<<17)
- # Test 256 bytes...
- self.assertRaises(db.DBInvalidArgError, self.db.set_pagesize, 1<<8)
+ def test_h_ffactor(self) :
+ for ffactor in [4, 16, 256] :
+ self.db.set_h_ffactor(ffactor)
+ self.assertEqual(ffactor, self.db.get_h_ffactor())
+
+ def test_h_nelem(self) :
+ for nelem in [1, 2, 4] :
+ nelem = nelem*1024*1024 # Millions
+ self.db.set_h_nelem(nelem)
+ self.assertEqual(nelem, self.db.get_h_nelem())
+
+ def test_pagesize(self) :
+ for i in xrange(9, 17) : # From 512 to 65536
+ i = 1<<i
+ self.db.set_pagesize(i)
+ self.assertEqual(i, self.db.get_pagesize())
+
+ # The valid values go from 512 to 65536

+ # Test 131072 bytes...
+ self.assertRaises(db.DBInvalidArgError, self.db.set_pagesize, 1<<17)
+ # Test 256 bytes...
+ self.assertRaises(db.DBInvalidArgError, self.db.set_pagesize, 1<<8)
class DB_txn(DB) :
def setUp(self) :
@@ -84,7 +101,6 @@ class DB_txn(DB) :
del self.env
test_support.rmtree(self.homeDir)
- if db.version() >= (4, 2) :
def test_flags(self) :
self.db.set_flags(db.DB_CHKSUM)
self.assertEqual(db.DB_CHKSUM, self.db.get_flags())
@@ -92,40 +108,45 @@ class DB_txn(DB) :
self.assertEqual(db.DB_TXN_NOT_DURABLE | db.DB_CHKSUM,
self.db.get_flags())
+ def test_get_transactional(self) :
+ self.assertFalse(self.db.get_transactional())
+ # DB_AUTO_COMMIT = Implicit transaction
+ self.db.open("XXX", dbtype=db.DB_HASH,
+ flags = db.DB_CREATE | db.DB_AUTO_COMMIT)
+ self.assertTrue(self.db.get_transactional())
+
class DB_recno(DB) :
- if db.version() >= (4, 2) :
- def test_re_pad(self) :
- for i in [' ', '*'] : # Check chars
- self.db.set_re_pad(i)
- self.assertEqual(ord(i), self.db.get_re_pad())
- for i in [97, 65] : # Check integers
- self.db.set_re_pad(i)
- self.assertEqual(i, self.db.get_re_pad())
-
- def test_re_delim(self) :
- for i in [' ', '*'] : # Check chars
- self.db.set_re_delim(i)
- self.assertEqual(ord(i), self.db.get_re_delim())
- for i in [97, 65] : # Check integers
- self.db.set_re_delim(i)
- self.assertEqual(i, self.db.get_re_delim())
-
- def test_re_source(self) :
- for i in ["test", "test2", "test3"] :
- self.db.set_re_source(i)
- self.assertEqual(i, self.db.get_re_source())
+ def test_re_pad(self) :
+ for i in [' ', '*'] : # Check chars
+ self.db.set_re_pad(i)
+ self.assertEqual(ord(i), self.db.get_re_pad())
+ for i in [97, 65] : # Check integers
+ self.db.set_re_pad(i)
+ self.assertEqual(i, self.db.get_re_pad())
+
+ def test_re_delim(self) :
+ for i in [' ', '*'] : # Check chars
+ self.db.set_re_delim(i)
+ self.assertEqual(ord(i), self.db.get_re_delim())
+ for i in [97, 65] : # Check integers
+ self.db.set_re_delim(i)
+ self.assertEqual(i, self.db.get_re_delim())
+
+ def test_re_source(self) :
+ for i in ["test", "test2", "test3"] :
+ self.db.set_re_source(i)
+ self.assertEqual(i, self.db.get_re_source())
class DB_queue(DB) :
- if db.version() >= (4, 2) :
- def test_re_len(self) :
- for i in [33, 65, 300, 2000] :
- self.db.set_re_len(i)
- self.assertEqual(i, self.db.get_re_len())
-
- def test_q_extentsize(self) :
- for i in [1, 60, 100] :
- self.db.set_q_extentsize(i)
- self.assertEqual(i, self.db.get_q_extentsize())
+ def test_re_len(self) :
+ for i in [33, 65, 300, 2000] :
+ self.db.set_re_len(i)
+ self.assertEqual(i, self.db.get_re_len())
+
+ def test_q_extentsize(self) :
+ for i in [1, 60, 100] :
+ self.db.set_q_extentsize(i)
+ self.assertEqual(i, self.db.get_q_extentsize())
def test_suite():
suite = unittest.TestSuite()
diff --git a/Lib/bsddb/test/test_dbenv.py b/Lib/bsddb/test/test_dbenv.py
index 37281df..76ef7db 100644
--- a/Lib/bsddb/test/test_dbenv.py
+++ b/Lib/bsddb/test/test_dbenv.py
@@ -7,14 +7,6 @@ from test_all import db, test_support, get_new_environment_path, \
#----------------------------------------------------------------------
class DBEnv(unittest.TestCase):
- import sys
- if sys.version_info < (2, 4) :
- def assertTrue(self, expr, msg=None):
- self.failUnless(expr,msg=msg)
-
- def assertFalse(self, expr, msg=None):
- self.failIf(expr,msg=msg)
-
def setUp(self):
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
@@ -25,12 +17,31 @@ class DBEnv(unittest.TestCase):
test_support.rmtree(self.homeDir)
class DBEnv_general(DBEnv) :
+ def test_get_open_flags(self) :
+ flags = db.DB_CREATE | db.DB_INIT_MPOOL
+ self.env.open(self.homeDir, flags)
+ self.assertEqual(flags, self.env.get_open_flags())
+
+ def test_get_open_flags2(self) :
+ flags = db.DB_CREATE | db.DB_INIT_MPOOL | \
+ db.DB_INIT_LOCK | db.DB_THREAD
+ self.env.open(self.homeDir, flags)
+ self.assertEqual(flags, self.env.get_open_flags())
+
if db.version() >= (4, 7) :
def test_lk_partitions(self) :
for i in [10, 20, 40] :
self.env.set_lk_partitions(i)
self.assertEqual(i, self.env.get_lk_partitions())
+ def test_getset_intermediate_dir_mode(self) :
+ self.assertEqual(None, self.env.get_intermediate_dir_mode())
+ for mode in ["rwx------", "rw-rw-rw-", "rw-r--r--"] :
+ self.env.set_intermediate_dir_mode(mode)
+ self.assertEqual(mode, self.env.get_intermediate_dir_mode())
+ self.assertRaises(db.DBInvalidArgError,
+ self.env.set_intermediate_dir_mode, "abcde")
+
if db.version() >= (4, 6) :
def test_thread(self) :
for i in [16, 100, 1000] :
@@ -58,21 +69,19 @@ class DBEnv_general(DBEnv) :
self.env.set_lg_filemode(i)
self.assertEqual(i, self.env.get_lg_filemode())
- if db.version() >= (4, 3) :
- def test_mp_max_openfd(self) :
- for i in [17, 31, 42] :
- self.env.set_mp_max_openfd(i)
- self.assertEqual(i, self.env.get_mp_max_openfd())
-
- def test_mp_max_write(self) :
- for i in [100, 200, 300] :
- for j in [1, 2, 3] :
- j *= 1000000
- self.env.set_mp_max_write(i, j)
- v=self.env.get_mp_max_write()
- self.assertEqual((i, j), v)
-
- if db.version() >= (4, 2) :
+ def test_mp_max_openfd(self) :
+ for i in [17, 31, 42] :
+ self.env.set_mp_max_openfd(i)
+ self.assertEqual(i, self.env.get_mp_max_openfd())
+
+ def test_mp_max_write(self) :
+ for i in [100, 200, 300] :
+ for j in [1, 2, 3] :
+ j *= 1000000
+ self.env.set_mp_max_write(i, j)
+ v=self.env.get_mp_max_write()
+ self.assertEqual((i, j), v)
+
def test_invalid_txn(self) :
# This environment doesn't support transactions
self.assertRaises(db.DBInvalidArgError, self.env.txn_begin)
@@ -115,7 +124,7 @@ class DBEnv_general(DBEnv) :
self.assertEqual(i, self.env.get_lk_max_lockers())
def test_lg_regionmax(self) :
- for i in [128, 256, 1024] :
+ for i in [128, 256, 1000] :
i = i*1024*1024
self.env.set_lg_regionmax(i)
j = self.env.get_lg_regionmax()
@@ -127,8 +136,7 @@ class DBEnv_general(DBEnv) :
db.DB_LOCK_MINLOCKS, db.DB_LOCK_MINWRITE,
db.DB_LOCK_OLDEST, db.DB_LOCK_RANDOM, db.DB_LOCK_YOUNGEST]
- if db.version() >= (4, 3) :
- flags.append(db.DB_LOCK_MAXWRITE)
+ flags.append(db.DB_LOCK_MAXWRITE)
for i in flags :
self.env.set_lk_detect(i)
@@ -172,8 +180,12 @@ class DBEnv_general(DBEnv) :
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
cachesize = (0, 2*1024*1024, 1)
self.assertRaises(db.DBInvalidArgError,
- self.env.set_cachesize, *cachesize)
- self.assertEqual(cachesize2, self.env.get_cachesize())
+ self.env.set_cachesize, *cachesize)
+ cachesize3 = self.env.get_cachesize()
+ self.assertEqual(cachesize2[0], cachesize3[0])
+ self.assertEqual(cachesize2[2], cachesize3[2])
+ # In Berkeley DB 5.1, the cachesize can change when opening the Env
+ self.assertTrue(cachesize2[1] <= cachesize3[1])
def test_set_cachesize_dbenv_db(self) :
# You can not configure the cachesize using
@@ -305,7 +317,7 @@ class DBEnv_log_txn(DBEnv) :
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
db.DB_INIT_LOG | db.DB_INIT_TXN)
- if db.version() >= (4, 5) :
+ if (db.version() >= (4, 5)) and (db.version() < (5, 2)) :
def test_tx_max(self) :
txns=[]
def tx() :
diff --git a/Lib/bsddb/test/test_dbshelve.py b/Lib/bsddb/test/test_dbshelve.py
index c0135fe..c3701e1 100644
--- a/Lib/bsddb/test/test_dbshelve.py
+++ b/Lib/bsddb/test/test_dbshelve.py
@@ -12,9 +12,6 @@ from test_all import db, dbshelve, test_support, verbose, \
-if sys.version_info < (2, 4) :
- from sets import Set as set
-
#----------------------------------------------------------------------
@@ -33,10 +30,6 @@ class DataClass:
class DBShelveTestCase(unittest.TestCase):
- if sys.version_info < (2, 4):
- def assertTrue(self, expr, msg=None):
- return self.failUnless(expr,msg=msg)
-
if (sys.version_info < (2, 7)) or ((sys.version_info >= (3, 0)) and
(sys.version_info < (3, 2))) :
def assertIn(self, a, b, msg=None) :
diff --git a/Lib/bsddb/test/test_dbtables.py b/Lib/bsddb/test/test_dbtables.py
index bb31381..250c492 100644
--- a/Lib/bsddb/test/test_dbtables.py
+++ b/Lib/bsddb/test/test_dbtables.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
#-----------------------------------------------------------------------
# A test suite for the table interface built on bsddb.db
#-----------------------------------------------------------------------
diff --git a/Lib/bsddb/test/test_distributed_transactions.py b/Lib/bsddb/test/test_distributed_transactions.py
index 1711fc5..9058575 100644
--- a/Lib/bsddb/test/test_distributed_transactions.py
+++ b/Lib/bsddb/test/test_distributed_transactions.py
@@ -7,13 +7,6 @@ import unittest
from test_all import db, test_support, get_new_environment_path, \
get_new_database_path
-try :
- a=set()
-except : # Python 2.3
- from sets import Set as set
-else :
- del a
-
from test_all import verbose
#----------------------------------------------------------------------
@@ -37,15 +30,11 @@ class DBTxn_distributed(unittest.TestCase):
self.db = db.DB(self.dbenv)
self.db.set_re_len(db.DB_GID_SIZE)
if must_open_db :
- if db.version() >= (4,2) :
- txn=self.dbenv.txn_begin()
- self.db.open(self.filename,
- db.DB_QUEUE, db.DB_CREATE | db.DB_THREAD, 0666,
- txn=txn)
- txn.commit()
- else :
- self.db.open(self.filename,
- db.DB_QUEUE, db.DB_CREATE | db.DB_THREAD, 0666)
+ txn=self.dbenv.txn_begin()
+ self.db.open(self.filename,
+ db.DB_QUEUE, db.DB_CREATE | db.DB_THREAD, 0666,
+ txn=txn)
+ txn.commit()
def setUp(self) :
self.homeDir = get_new_environment_path()
diff --git a/Lib/bsddb/test/test_early_close.py b/Lib/bsddb/test/test_early_close.py
index 86b86dc..e925279 100644
--- a/Lib/bsddb/test/test_early_close.py
+++ b/Lib/bsddb/test/test_early_close.py
@@ -174,7 +174,7 @@ class DBEnvClosedEarlyCrash(unittest.TestCase):
txn.commit()
warnings.resetwarnings()
else :
- # When we drop support for python 2.3 and 2.4
+ # When we drop support for python 2.4
# we could use: (in 2.5 we need a __future__ statement)
#
# with warnings.catch_warnings():
@@ -182,7 +182,7 @@ class DBEnvClosedEarlyCrash(unittest.TestCase):
# txn.commit()
#
# We can not use "with" as is, because it would be invalid syntax
- # in python 2.3, 2.4 and (with no __future__) 2.5.
+ # in python 2.4 and (with no __future__) 2.5.
# Here we simulate "with" following PEP 343 :
w = warnings.catch_warnings()
w.__enter__()
@@ -194,15 +194,14 @@ class DBEnvClosedEarlyCrash(unittest.TestCase):
self.assertRaises(db.DBCursorClosedError, c2.first)
- if db.version() > (4,3,0) :
- def test07_close_db_before_sequence(self):
- import os.path
- path=os.path.join(self.homeDir,self.filename)
- d = db.DB()
- d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
- dbs=db.DBSequence(d)
- d.close() # This "close" should close the child DBSequence also
- dbs.close() # If not closed, core dump (in Berkeley DB 4.6.*)
+ def test07_close_db_before_sequence(self):
+ import os.path
+ path=os.path.join(self.homeDir,self.filename)
+ d = db.DB()
+ d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
+ dbs=db.DBSequence(d)
+ d.close() # This "close" should close the child DBSequence also
+ dbs.close() # If not closed, core dump (in Berkeley DB 4.6.*)
#----------------------------------------------------------------------
diff --git a/Lib/bsddb/test/test_lock.py b/Lib/bsddb/test/test_lock.py
index 25260fc..10ca8d6 100644
--- a/Lib/bsddb/test/test_lock.py
+++ b/Lib/bsddb/test/test_lock.py
@@ -19,12 +19,6 @@ if have_threads :
#----------------------------------------------------------------------
class LockingTestCase(unittest.TestCase):
- import sys
- if sys.version_info < (2, 4) :
- def assertTrue(self, expr, msg=None):
- self.failUnless(expr,msg=msg)
-
-
def setUp(self):
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
@@ -89,7 +83,6 @@ class LockingTestCase(unittest.TestCase):
for t in threads:
t.join()
- if db.version() >= (4, 2) :
def test03_lock_timeout(self):
self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 0)
diff --git a/Lib/bsddb/test/test_misc.py b/Lib/bsddb/test/test_misc.py
index 8dda296..b1e928f 100644
--- a/Lib/bsddb/test/test_misc.py
+++ b/Lib/bsddb/test/test_misc.py
@@ -9,13 +9,6 @@ from test_all import db, dbshelve, hashopen, test_support, get_new_environment_p
#----------------------------------------------------------------------
class MiscTestCase(unittest.TestCase):
- if sys.version_info < (2, 4) :
- def assertTrue(self, expr, msg=None):
- self.failUnless(expr, msg=msg)
-
- def assertFalse(self, expr, msg=None):
- self.failIf(expr, msg=msg)
-
def setUp(self):
self.filename = get_new_database_path()
self.homeDir = get_new_environment_path()
@@ -97,10 +90,6 @@ class MiscTestCase(unittest.TestCase):
test_support.unlink(self.filename)
def test07_DB_set_flags_persists(self):
- if db.version() < (4,2):
- # The get_flags API required for this to work is only available
- # in Berkeley DB >= 4.2
- return
try:
db1 = db.DB()
db1.set_flags(db.DB_DUPSORT)
diff --git a/Lib/bsddb/test/test_queue.py b/Lib/bsddb/test/test_queue.py
index 251a8cf..c213ff4 100644
--- a/Lib/bsddb/test/test_queue.py
+++ b/Lib/bsddb/test/test_queue.py
@@ -99,11 +99,6 @@ class SimpleQueueTestCase(unittest.TestCase):
print '\n', '-=' * 30
print "Running %s.test02_basicPost32..." % self.__class__.__name__
- if db.version() < (3, 2, 0):
- if verbose:
- print "Test not run, DB not new enough..."
- return
-
d = db.DB()
d.set_re_len(40) # Queues must be fixed length
d.open(self.filename, db.DB_QUEUE, db.DB_CREATE)
diff --git a/Lib/bsddb/test/test_recno.py b/Lib/bsddb/test/test_recno.py
index 3191fdc..fb6956a 100644
--- a/Lib/bsddb/test/test_recno.py
+++ b/Lib/bsddb/test/test_recno.py
@@ -14,12 +14,6 @@ letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
#----------------------------------------------------------------------
class SimpleRecnoTestCase(unittest.TestCase):
- if sys.version_info < (2, 4) :
- def assertFalse(self, expr, msg=None) :
- return self.failIf(expr,msg=msg)
- def assertTrue(self, expr, msg=None) :
- return self.assertTrue(expr, msg=msg)
-
if (sys.version_info < (2, 7)) or ((sys.version_info >= (3, 0)) and
(sys.version_info < (3, 2))) :
def assertIsInstance(self, obj, datatype, msg=None) :
@@ -236,7 +230,9 @@ class SimpleRecnoTestCase(unittest.TestCase):
d.close()
# get the text from the backing source
- text = open(source, 'r').read()
+ f = open(source, 'r')
+ text = f.read()
+ f.close()
text = text.strip()
if verbose:
print text
@@ -256,7 +252,9 @@ class SimpleRecnoTestCase(unittest.TestCase):
d.sync()
d.close()
- text = open(source, 'r').read()
+ f = open(source, 'r')
+ text = f.read()
+ f.close()
text = text.strip()
if verbose:
print text
@@ -298,6 +296,18 @@ class SimpleRecnoTestCase(unittest.TestCase):
c.close()
d.close()
+ def test04_get_size_empty(self) :
+ d = db.DB()
+ d.open(self.filename, dbtype=db.DB_RECNO, flags=db.DB_CREATE)
+
+ row_id = d.append(' ')
+ self.assertEqual(1, d.get_size(key=row_id))
+ row_id = d.append('')
+ self.assertEqual(0, d.get_size(key=row_id))
+
+
+
+
#----------------------------------------------------------------------
diff --git a/Lib/bsddb/test/test_replication.py b/Lib/bsddb/test/test_replication.py
index f3e974f..12ab2dd 100644
--- a/Lib/bsddb/test/test_replication.py
+++ b/Lib/bsddb/test/test_replication.py
@@ -12,11 +12,6 @@ from test_all import db, test_support, have_threads, verbose, \
#----------------------------------------------------------------------
class DBReplication(unittest.TestCase) :
- import sys
- if sys.version_info < (2, 4) :
- def assertTrue(self, expr, msg=None):
- self.failUnless(expr,msg=msg)
-
def setUp(self) :
self.homeDirMaster = get_new_environment_path()
self.homeDirClient = get_new_environment_path()
@@ -76,13 +71,57 @@ class DBReplication(unittest.TestCase) :
class DBReplicationManager(DBReplication) :
def test01_basic_replication(self) :
master_port = test_support.find_unused_port()
- self.dbenvMaster.repmgr_set_local_site("127.0.0.1", master_port)
client_port = test_support.find_unused_port()
- self.dbenvClient.repmgr_set_local_site("127.0.0.1", client_port)
- self.dbenvMaster.repmgr_add_remote_site("127.0.0.1", client_port)
- self.dbenvClient.repmgr_add_remote_site("127.0.0.1", master_port)
- self.dbenvMaster.rep_set_nsites(2)
- self.dbenvClient.rep_set_nsites(2)
+ if db.version() >= (5, 2) :
+ self.site = self.dbenvMaster.repmgr_site("127.0.0.1", master_port)
+ self.site.set_config(db.DB_GROUP_CREATOR, True)
+ self.site.set_config(db.DB_LOCAL_SITE, True)
+ self.site2 = self.dbenvMaster.repmgr_site("127.0.0.1", client_port)
+
+ self.site3 = self.dbenvClient.repmgr_site("127.0.0.1", master_port)
+ self.site3.set_config(db.DB_BOOTSTRAP_HELPER, True)
+ self.site4 = self.dbenvClient.repmgr_site("127.0.0.1", client_port)
+ self.site4.set_config(db.DB_LOCAL_SITE, True)
+
+ d = {
+ db.DB_BOOTSTRAP_HELPER: [False, False, True, False],
+ db.DB_GROUP_CREATOR: [True, False, False, False],
+ db.DB_LEGACY: [False, False, False, False],
+ db.DB_LOCAL_SITE: [True, False, False, True],
+ db.DB_REPMGR_PEER: [False, False, False, False ],
+ }
+
+ for i, j in d.items() :
+ for k, v in \
+ zip([self.site, self.site2, self.site3, self.site4], j) :
+ if v :
+ self.assertTrue(k.get_config(i))
+ else :
+ self.assertFalse(k.get_config(i))
+
+ self.assertNotEqual(self.site.get_eid(), self.site2.get_eid())
+ self.assertNotEqual(self.site3.get_eid(), self.site4.get_eid())
+
+ for i, j in zip([self.site, self.site2, self.site3, self.site4], \
+ [master_port, client_port, master_port, client_port]) :
+ addr = i.get_address()
+ self.assertEqual(addr, ("127.0.0.1", j))
+
+ for i in [self.site, self.site2] :
+ self.assertEqual(i.get_address(),
+ self.dbenvMaster.repmgr_site_by_eid(i.get_eid()).get_address())
+ for i in [self.site3, self.site4] :
+ self.assertEqual(i.get_address(),
+ self.dbenvClient.repmgr_site_by_eid(i.get_eid()).get_address())
+ else :
+ self.dbenvMaster.repmgr_set_local_site("127.0.0.1", master_port)
+ self.dbenvClient.repmgr_set_local_site("127.0.0.1", client_port)
+ self.dbenvMaster.repmgr_add_remote_site("127.0.0.1", client_port)
+ self.dbenvClient.repmgr_add_remote_site("127.0.0.1", master_port)
+
+ self.dbenvMaster.rep_set_nsites(2)
+ self.dbenvClient.rep_set_nsites(2)
+
self.dbenvMaster.rep_set_priority(10)
self.dbenvClient.rep_set_priority(0)
@@ -144,17 +183,19 @@ class DBReplicationManager(DBReplication) :
d = self.dbenvMaster.repmgr_site_list()
self.assertEqual(len(d), 1)
- self.assertEqual(d[0][0], "127.0.0.1")
- self.assertEqual(d[0][1], client_port)
- self.assertTrue((d[0][2]==db.DB_REPMGR_CONNECTED) or \
- (d[0][2]==db.DB_REPMGR_DISCONNECTED))
+ d = d.values()[0] # There is only one
+ self.assertEqual(d[0], "127.0.0.1")
+ self.assertEqual(d[1], client_port)
+ self.assertTrue((d[2]==db.DB_REPMGR_CONNECTED) or \
+ (d[2]==db.DB_REPMGR_DISCONNECTED))
d = self.dbenvClient.repmgr_site_list()
self.assertEqual(len(d), 1)
- self.assertEqual(d[0][0], "127.0.0.1")
- self.assertEqual(d[0][1], master_port)
- self.assertTrue((d[0][2]==db.DB_REPMGR_CONNECTED) or \
- (d[0][2]==db.DB_REPMGR_DISCONNECTED))
+ d = d.values()[0] # There is only one
+ self.assertEqual(d[0], "127.0.0.1")
+ self.assertEqual(d[1], master_port)
+ self.assertTrue((d[2]==db.DB_REPMGR_CONNECTED) or \
+ (d[2]==db.DB_REPMGR_DISCONNECTED))
if db.version() >= (4,6) :
d = self.dbenvMaster.repmgr_stat(flags=db.DB_STAT_CLEAR);
@@ -461,6 +502,13 @@ class DBBaseReplication(DBReplication) :
self.assertTrue(self.confirmed_master)
+ # Race condition showed up after upgrading to Solaris 10 Update 10
+ # https://forums.oracle.com/forums/thread.jspa?messageID=9902860
+ # jcea@jcea.es: See private email from Paula Bingham (Oracle),
+ # in 20110929.
+ while not (self.dbenvClient.rep_stat()["startup_complete"]) :
+ pass
+
if db.version() >= (4,7) :
def test04_test_clockskew(self) :
fast, slow = 1234, 1230
diff --git a/Lib/bsddb/test/test_sequence.py b/Lib/bsddb/test/test_sequence.py
index 3d3ee29..f0aa12a 100644
--- a/Lib/bsddb/test/test_sequence.py
+++ b/Lib/bsddb/test/test_sequence.py
@@ -5,11 +5,6 @@ from test_all import db, test_support, get_new_environment_path, get_new_databas
class DBSequenceTest(unittest.TestCase):
- import sys
- if sys.version_info < (2, 4) :
- def assertTrue(self, expr, msg=None):
- self.failUnless(expr,msg=msg)
-
def setUp(self):
self.int_32_max = 0x100000000
self.homeDir = get_new_environment_path()
@@ -133,8 +128,7 @@ class DBSequenceTest(unittest.TestCase):
def test_suite():
suite = unittest.TestSuite()
- if db.version() >= (4,3):
- suite.addTest(unittest.makeSuite(DBSequenceTest))
+ suite.addTest(unittest.makeSuite(DBSequenceTest))
return suite
diff --git a/Lib/bsddb/test/test_thread.py b/Lib/bsddb/test/test_thread.py
index 91002b8..42212e9 100644
--- a/Lib/bsddb/test/test_thread.py
+++ b/Lib/bsddb/test/test_thread.py
@@ -35,10 +35,6 @@ class BaseThreadedTestCase(unittest.TestCase):
dbsetflags = 0
envflags = 0
- if sys.version_info < (2, 4) :
- def assertTrue(self, expr, msg=None):
- self.failUnless(expr,msg=msg)
-
def setUp(self):
if verbose:
dbutils._deadlock_VerboseFile = sys.stdout
diff --git a/Lib/calendar.py b/Lib/calendar.py
index 3106ef2..d3bd236 100644
--- a/Lib/calendar.py
+++ b/Lib/calendar.py
@@ -161,7 +161,11 @@ class Calendar(object):
oneday = datetime.timedelta(days=1)
while True:
yield date
- date += oneday
+ try:
+ date += oneday
+ except OverflowError:
+ # Adding one day could fail after datetime.MAXYEAR
+ break
if date.month != month and date.weekday() == self.firstweekday:
break
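Calendar.itermonthdates() walks forward one day at a time, so iterating the last month of datetime.MAXYEAR used to raise OverflowError when stepping past 9999-12-31; the patch ends the generator instead. A short check of that boundary, assuming only the standard calendar and datetime modules:

    # Sketch: the final month of MAXYEAR can now be iterated to the end.
    import calendar
    import datetime

    cal = calendar.Calendar()
    dates = list(cal.itermonthdates(datetime.MAXYEAR, 12))
    print dates[-1]     # 9999-12-31: iteration stops instead of overflowing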
@@ -216,7 +220,7 @@ class Calendar(object):
def yeardatescalendar(self, year, width=3):
"""
Return the data for the specified year ready for formatting. The return
- value is a list of month rows. Each month row contains upto width months.
+ value is a list of month rows. Each month row contains up to width months.
Each month contains between 4 and 6 weeks and each week contains 1-7
days. Days are datetime.date objects.
"""
@@ -488,6 +492,7 @@ class TimeEncoding:
def __enter__(self):
self.oldlocale = _locale.getlocale(_locale.LC_TIME)
_locale.setlocale(_locale.LC_TIME, self.locale)
+ return _locale.getlocale(_locale.LC_TIME)[1]
def __exit__(self, *args):
_locale.setlocale(_locale.LC_TIME, self.oldlocale)
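TimeEncoding.__enter__ now returns the LC_TIME encoding so callers can decode locale-dependent month and day names. A hedged sketch of that contract; the "C" locale is chosen because it is always available, and it reports no encoding, which is why the result is guarded.

    # Sketch: use the encoding handed back by the context manager, if any.
    import calendar

    with calendar.TimeEncoding("C") as encoding:
        name = calendar.month_name[1]
        if encoding is not None:
            name = name.decode(encoding)
        print name      # January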
diff --git a/Lib/cgi.py b/Lib/cgi.py
index e7cd778..64ba6d1 100755
--- a/Lib/cgi.py
+++ b/Lib/cgi.py
@@ -37,7 +37,6 @@ __version__ = "2.6"
from operator import attrgetter
import sys
import os
-import urllib
import UserDict
import urlparse
@@ -698,6 +697,9 @@ class FieldStorage:
if not line:
self.done = -1
break
+ if delim == "\r":
+ line = delim + line
+ delim = ""
if line[:2] == "--" and last_line_lfend:
strippedline = line.strip()
if strippedline == next:
@@ -714,6 +716,12 @@ class FieldStorage:
delim = "\n"
line = line[:-1]
last_line_lfend = True
+ elif line[-1] == "\r":
+ # We may interrupt \r\n sequences if they span the 2**16
+ # byte boundary
+ delim = "\r"
+ line = line[:-1]
+ last_line_lfend = False
else:
delim = ""
last_line_lfend = False
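The cgi fix handles a multipart body whose \r\n terminator is split across two readline(1<<16) chunks: a trailing bare \r is remembered in delim and glued onto the next chunk. A standalone sketch of just that carry-over rule; it models only the delimiter bookkeeping, not the rest of FieldStorage.

    # Sketch: reassemble lines from chunks, carrying a split '\r\n' across reads.
    def reassemble(chunks):
        delim = ""
        text = ""
        for line in chunks:
            if delim == "\r":           # the '\r\n' pair was split here
                line = delim + line
                delim = ""
            odelim = delim              # delimiter that ended the previous line
            if line[-2:] == "\r\n":
                delim = "\r\n"
                line = line[:-2]
            elif line[-1:] == "\n":
                delim = "\n"
                line = line[:-1]
            elif line[-1:] == "\r":     # keep the bare '\r' for the next pass
                delim = "\r"
                line = line[:-1]
            else:
                delim = ""
            text += odelim + line
        return text + delim

    print repr(reassemble(["first line\r", "\nsecond line\r\n"]))
    # -> 'first line\r\nsecond line\r\n'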
diff --git a/Lib/cgitb.py b/Lib/cgitb.py
index 5becdf3..8acc4b7 100644
--- a/Lib/cgitb.py
+++ b/Lib/cgitb.py
@@ -295,14 +295,19 @@ class Hook:
if self.logdir is not None:
suffix = ['.txt', '.html'][self.format=="html"]
(fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir)
+
try:
file = os.fdopen(fd, 'w')
file.write(doc)
file.close()
- msg = '<p> %s contains the description of this error.' % path
+ msg = '%s contains the description of this error.' % path
except:
- msg = '<p> Tried to save traceback to %s, but failed.' % path
- self.file.write(msg + '\n')
+ msg = 'Tried to save traceback to %s, but failed.' % path
+
+ if self.format == 'html':
+ self.file.write('<p>%s</p>\n' % msg)
+ else:
+ self.file.write(msg + '\n')
try:
self.file.flush()
except: pass
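The cgitb hook now wraps the "saved to file" notice in <p> only when the handler was created with format="html", so plain-text reports stay plain text. A hedged usage sketch; the temporary log directory is an arbitrary choice for the example.

    # Sketch: text-format cgitb logging; the notice line is plain text as well.
    import cgitb
    import tempfile

    logdir = tempfile.mkdtemp()
    cgitb.enable(display=0, logdir=logdir, format='text')

    def faulty():
        return 1 / 0        # an uncaught call would write a .txt report to logdir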
diff --git a/Lib/cmd.py b/Lib/cmd.py
index ae7f12e..05ba7e3 100644
--- a/Lib/cmd.py
+++ b/Lib/cmd.py
@@ -294,6 +294,7 @@ class Cmd:
return list(commands | topics)
def do_help(self, arg):
+ 'List available commands with "help" or detailed help with "help cmd".'
if arg:
# XXX check arg syntax
try:
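Giving do_help its own docstring means a bare "help" listing now documents the help command itself, like any other command. A minimal interpreter sketch showing where such docstrings surface; the greet command is illustrative.

    # Sketch: 'help' lists commands, 'help greet' prints the docstring below.
    import cmd

    class Shell(cmd.Cmd):
        prompt = '(demo) '

        def do_greet(self, arg):
            'Greet the named person: greet NAME'
            print 'Hello, %s!' % (arg or 'world')

        def do_EOF(self, arg):
            'Exit the shell.'
            return True

    if __name__ == '__main__':
        Shell().cmdloop()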
diff --git a/Lib/codecs.py b/Lib/codecs.py
index f4cd60a..93c16c3 100644
--- a/Lib/codecs.py
+++ b/Lib/codecs.py
@@ -456,16 +456,13 @@ class StreamReader(Codec):
# read until we get the required number of characters (if available)
while True:
- # can the request can be satisfied from the character buffer?
- if chars < 0:
- if size < 0:
- if self.charbuffer:
- break
- elif len(self.charbuffer) >= size:
- break
- else:
+ # can the request be satisfied from the character buffer?
+ if chars >= 0:
if len(self.charbuffer) >= chars:
break
+ elif size >= 0:
+ if len(self.charbuffer) >= size:
+ break
# we need more data
if size < 0:
newdata = self.stream.read()
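
The rewritten condition in StreamReader.read() above replaces a double negative with its positive form: with `chars >= 0` the loop stops once that many decoded characters are buffered, otherwise a non-negative `size` acts as the stopping hint, and with both negative the loop keeps reading to EOF. An illustrative restatement of just that predicate (helper name is assumed, not part of the patch):

    def have_enough(charbuffer, chars, size):
        """Mirror of the buffer check in StreamReader.read() after this patch."""
        if chars >= 0:
            # an exact character count was requested
            return len(charbuffer) >= chars
        if size >= 0:
            # otherwise fall back to the size hint
            return len(charbuffer) >= size
        # chars < 0 and size < 0: read everything; never stop on the buffer alone
        return False

    assert have_enough("abc", chars=2, size=-1)
    assert not have_enough("a", chars=-1, size=3)
    assert not have_enough("abc", chars=-1, size=-1)
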
diff --git a/Lib/collections.py b/Lib/collections.py
index 958e523..8831f0b 100644
--- a/Lib/collections.py
+++ b/Lib/collections.py
@@ -6,11 +6,12 @@ import _abcoll
__all__ += _abcoll.__all__
from _collections import deque, defaultdict
-from operator import itemgetter as _itemgetter
+from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
+from itertools import imap as _imap
try:
from thread import get_ident as _get_ident
@@ -50,49 +51,45 @@ class OrderedDict(dict):
self.__map = {}
self.__update(*args, **kwds)
- def __setitem__(self, key, value, PREV=0, NEXT=1, dict_setitem=dict.__setitem__):
+ def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
- last = root[PREV]
- last[NEXT] = root[PREV] = self.__map[key] = [last, root, key]
- dict_setitem(self, key, value)
+ last = root[0]
+ last[1] = root[0] = self.__map[key] = [last, root, key]
+ return dict_setitem(self, key, value)
- def __delitem__(self, key, PREV=0, NEXT=1, dict_delitem=dict.__delitem__):
+ def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
- link_prev, link_next, key = self.__map.pop(key)
- link_prev[NEXT] = link_next
- link_next[PREV] = link_prev
+ link_prev, link_next, _ = self.__map.pop(key)
+ link_prev[1] = link_next # update link_prev[NEXT]
+ link_next[0] = link_prev # update link_next[PREV]
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
- NEXT, KEY = 1, 2
root = self.__root
- curr = root[NEXT]
+ curr = root[1] # start at the first node
while curr is not root:
- yield curr[KEY]
- curr = curr[NEXT]
+ yield curr[2] # yield the curr[KEY]
+ curr = curr[1] # move to next node
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
- PREV, KEY = 0, 2
root = self.__root
- curr = root[PREV]
+ curr = root[0] # start at the last node
while curr is not root:
- yield curr[KEY]
- curr = curr[PREV]
+ yield curr[2] # yield the curr[KEY]
+ curr = curr[0] # move to previous node
def clear(self):
'od.clear() -> None. Remove all items from od.'
- for node in self.__map.itervalues():
- del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
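
The hunk above inlines the former PREV/NEXT/KEY default arguments: each entry of OrderedDict's circular doubly linked list is a plain three-element list [prev, next, key] with self.__root as the sentinel. A small standalone model of that structure (illustrative, not the stdlib code):

    PREV, NEXT, KEY = 0, 1, 2

    root = []                      # sentinel node of the circular list
    root[:] = [root, root, None]   # an empty list points back at itself
    node_map = {}

    def link_last(key):
        """Append a key at the end, as OrderedDict.__setitem__ does."""
        last = root[PREV]
        last[NEXT] = root[PREV] = node_map[key] = [last, root, key]

    def unlink(key):
        """Remove a key's node, as OrderedDict.__delitem__ does."""
        prev_node, next_node, _ = node_map.pop(key)
        prev_node[NEXT] = next_node
        next_node[PREV] = prev_node

    def iter_keys():
        curr = root[NEXT]
        while curr is not root:
            yield curr[KEY]
            curr = curr[NEXT]

    for k in "abc":
        link_last(k)
    unlink("b")
    print(list(iter_keys()))   # -> ['a', 'c']
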
@@ -208,7 +205,7 @@ class OrderedDict(dict):
'''
if isinstance(other, OrderedDict):
- return len(self)==len(other) and self.items() == other.items()
+ return dict.__eq__(self, other) and all(_imap(_eq, self, other))
return dict.__eq__(self, other)
def __ne__(self, other):
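
The new __eq__ keeps the order-sensitive semantics without materialising two item lists: dict.__eq__ compares keys and values, and all(imap(eq, self, other)) then verifies the keys appear in the same order. The observable behaviour, for reference:

    from collections import OrderedDict

    a = OrderedDict([("x", 1), ("y", 2)])
    b = OrderedDict([("y", 2), ("x", 1)])

    print(a == dict(b))   # True: against a plain dict, order is ignored
    print(a == b)         # False: between OrderedDicts, order matters
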
@@ -234,10 +231,64 @@ class OrderedDict(dict):
### namedtuple
################################################################################
+_class_template = '''\
+class {typename}(tuple):
+ '{typename}({arg_list})'
+
+ __slots__ = ()
+
+ _fields = {field_names!r}
+
+ def __new__(_cls, {arg_list}):
+ 'Create new instance of {typename}({arg_list})'
+ return _tuple.__new__(_cls, ({arg_list}))
+
+ @classmethod
+ def _make(cls, iterable, new=tuple.__new__, len=len):
+ 'Make a new {typename} object from a sequence or iterable'
+ result = new(cls, iterable)
+ if len(result) != {num_fields:d}:
+ raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
+ return result
+
+ def __repr__(self):
+ 'Return a nicely formatted representation string'
+ return '{typename}({repr_fmt})' % self
+
+ def _asdict(self):
+ 'Return a new OrderedDict which maps field names to their values'
+ return OrderedDict(zip(self._fields, self))
+
+ def _replace(_self, **kwds):
+ 'Return a new {typename} object replacing specified fields with new values'
+ result = _self._make(map(kwds.pop, {field_names!r}, _self))
+ if kwds:
+ raise ValueError('Got unexpected field names: %r' % kwds.keys())
+ return result
+
+ def __getnewargs__(self):
+ 'Return self as a plain tuple. Used by copy and pickle.'
+ return tuple(self)
+
+ __dict__ = _property(_asdict)
+
+ def __getstate__(self):
+ 'Exclude the OrderedDict from pickling'
+ pass
+
+{field_defs}
+'''
+
+_repr_template = '{name}=%r'
+
+_field_template = '''\
+ {name} = _property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
+'''
+
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
- >>> Point = namedtuple('Point', 'x y')
+ >>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
@@ -258,87 +309,70 @@ def namedtuple(typename, field_names, verbose=False, rename=False):
"""
- # Parse and validate the field names. Validation serves two purposes,
- # generating informative error messages and preventing template injection attacks.
+ # Validate the field names. At the user's option, either generate an error
+ # message or automatically replace the field name with a valid name.
if isinstance(field_names, basestring):
- field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
- field_names = tuple(map(str, field_names))
+ field_names = field_names.replace(',', ' ').split()
+ field_names = map(str, field_names)
+ typename = str(typename)
if rename:
- names = list(field_names)
seen = set()
- for i, name in enumerate(names):
- if (not all(c.isalnum() or c=='_' for c in name) or _iskeyword(name)
- or not name or name[0].isdigit() or name.startswith('_')
+ for index, name in enumerate(field_names):
+ if (not all(c.isalnum() or c=='_' for c in name)
+ or _iskeyword(name)
+ or not name
+ or name[0].isdigit()
+ or name.startswith('_')
or name in seen):
- names[i] = '_%d' % i
+ field_names[index] = '_%d' % index
seen.add(name)
- field_names = tuple(names)
- for name in (typename,) + field_names:
+ for name in [typename] + field_names:
+ if type(name) != str:
+ raise TypeError('Type names and field names must be strings')
if not all(c.isalnum() or c=='_' for c in name):
- raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
+ raise ValueError('Type names and field names can only contain '
+ 'alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
- raise ValueError('Type names and field names cannot be a keyword: %r' % name)
+ raise ValueError('Type names and field names cannot be a '
+ 'keyword: %r' % name)
if name[0].isdigit():
- raise ValueError('Type names and field names cannot start with a number: %r' % name)
- seen_names = set()
+ raise ValueError('Type names and field names cannot start with '
+ 'a number: %r' % name)
+ seen = set()
for name in field_names:
if name.startswith('_') and not rename:
- raise ValueError('Field names cannot start with an underscore: %r' % name)
- if name in seen_names:
+ raise ValueError('Field names cannot start with an underscore: '
+ '%r' % name)
+ if name in seen:
raise ValueError('Encountered duplicate field name: %r' % name)
- seen_names.add(name)
-
- # Create and fill-in the class template
- numfields = len(field_names)
- argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
- reprtxt = ', '.join('%s=%%r' % name for name in field_names)
- template = '''class %(typename)s(tuple):
- '%(typename)s(%(argtxt)s)' \n
- __slots__ = () \n
- _fields = %(field_names)r \n
- def __new__(_cls, %(argtxt)s):
- 'Create new instance of %(typename)s(%(argtxt)s)'
- return _tuple.__new__(_cls, (%(argtxt)s)) \n
- @classmethod
- def _make(cls, iterable, new=tuple.__new__, len=len):
- 'Make a new %(typename)s object from a sequence or iterable'
- result = new(cls, iterable)
- if len(result) != %(numfields)d:
- raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
- return result \n
- def __repr__(self):
- 'Return a nicely formatted representation string'
- return '%(typename)s(%(reprtxt)s)' %% self \n
- def _asdict(self):
- 'Return a new OrderedDict which maps field names to their values'
- return OrderedDict(zip(self._fields, self)) \n
- __dict__ = property(_asdict) \n
- def _replace(_self, **kwds):
- 'Return a new %(typename)s object replacing specified fields with new values'
- result = _self._make(map(kwds.pop, %(field_names)r, _self))
- if kwds:
- raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
- return result \n
- def __getnewargs__(self):
- 'Return self as a plain tuple. Used by copy and pickle.'
- return tuple(self) \n\n''' % locals()
- for i, name in enumerate(field_names):
- template += " %s = _property(_itemgetter(%d), doc='Alias for field number %d')\n" % (name, i, i)
+ seen.add(name)
+
+ # Fill-in the class template
+ class_definition = _class_template.format(
+ typename = typename,
+ field_names = tuple(field_names),
+ num_fields = len(field_names),
+ arg_list = repr(tuple(field_names)).replace("'", "")[1:-1],
+ repr_fmt = ', '.join(_repr_template.format(name=name)
+ for name in field_names),
+ field_defs = '\n'.join(_field_template.format(index=index, name=name)
+ for index, name in enumerate(field_names))
+ )
if verbose:
- print template
+ print class_definition
- # Execute the template string in a temporary namespace and
- # support tracing utilities by setting a value for frame.f_globals['__name__']
+ # Execute the template string in a temporary namespace and support
+ # tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
OrderedDict=OrderedDict, _property=property, _tuple=tuple)
try:
- exec template in namespace
- except SyntaxError, e:
- raise SyntaxError(e.message + ':\n' + template)
+ exec class_definition in namespace
+ except SyntaxError as e:
+ raise SyntaxError(e.message + ':\n' + class_definition)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
- # where the named tuple is created. Bypass this step in enviroments where
+ # where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
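
The namedtuple rewrite above moves the generated source into the module-level _class_template, fills it with str.format(), and exec's the result; the public API is unchanged. A quick usage sketch of the generated class:

    from collections import namedtuple

    Point = namedtuple('Point', ['x', 'y'])
    # namedtuple('Point', ['x', 'y'], verbose=True) would print the generated source

    p = Point(11, y=22)
    print(p)                    # Point(x=11, y=22)
    print(p.x + p.y)            # 33
    print(p._asdict())          # OrderedDict([('x', 11), ('y', 22)])
    print(p._replace(x=100))    # Point(x=100, y=22)
    print(Point._make([1, 2]))  # Point(x=1, y=2)
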
diff --git a/Lib/compiler/consts.py b/Lib/compiler/consts.py
index 022f6da..c60b1d0 100644
--- a/Lib/compiler/consts.py
+++ b/Lib/compiler/consts.py
@@ -5,7 +5,7 @@ OP_APPLY = 'OP_APPLY'
SC_LOCAL = 1
SC_GLOBAL_IMPLICIT = 2
-SC_GLOBAL_EXPLICT = 3
+SC_GLOBAL_EXPLICIT = 3
SC_FREE = 4
SC_CELL = 5
SC_UNKNOWN = 6
diff --git a/Lib/compiler/pyassem.py b/Lib/compiler/pyassem.py
index 286be0c..f52f7d0 100644
--- a/Lib/compiler/pyassem.py
+++ b/Lib/compiler/pyassem.py
@@ -125,7 +125,7 @@ def order_blocks(start_block, exit_block):
# Make sure every block appears in dominators, even if no
# other block must precede it.
dominators.setdefault(b, set())
- # preceeding blocks dominate following blocks
+ # preceding blocks dominate following blocks
for c in b.get_followers():
while 1:
dominators.setdefault(c, set()).add(b)
diff --git a/Lib/compiler/pycodegen.py b/Lib/compiler/pycodegen.py
index 4f2ecf2..6515945 100644
--- a/Lib/compiler/pycodegen.py
+++ b/Lib/compiler/pycodegen.py
@@ -7,7 +7,7 @@ from cStringIO import StringIO
from compiler import ast, parse, walk, syntax
from compiler import pyassem, misc, future, symbols
-from compiler.consts import SC_LOCAL, SC_GLOBAL_IMPLICIT, SC_GLOBAL_EXPLICT, \
+from compiler.consts import SC_LOCAL, SC_GLOBAL_IMPLICIT, SC_GLOBAL_EXPLICIT, \
SC_FREE, SC_CELL
from compiler.consts import (CO_VARARGS, CO_VARKEYWORDS, CO_NEWLOCALS,
CO_NESTED, CO_GENERATOR, CO_FUTURE_DIVISION,
@@ -283,7 +283,7 @@ class CodeGenerator:
self.emit(prefix + '_NAME', name)
else:
self.emit(prefix + '_FAST', name)
- elif scope == SC_GLOBAL_EXPLICT:
+ elif scope == SC_GLOBAL_EXPLICIT:
self.emit(prefix + '_GLOBAL', name)
elif scope == SC_GLOBAL_IMPLICIT:
if not self.optimized:
diff --git a/Lib/compiler/symbols.py b/Lib/compiler/symbols.py
index 0bbdc71..afeec50 100644
--- a/Lib/compiler/symbols.py
+++ b/Lib/compiler/symbols.py
@@ -1,7 +1,7 @@
"""Module symbol-table generator"""
from compiler import ast
-from compiler.consts import SC_LOCAL, SC_GLOBAL_IMPLICIT, SC_GLOBAL_EXPLICT, \
+from compiler.consts import SC_LOCAL, SC_GLOBAL_IMPLICIT, SC_GLOBAL_EXPLICIT, \
SC_FREE, SC_CELL, SC_UNKNOWN
from compiler.misc import mangle
import types
@@ -90,7 +90,7 @@ class Scope:
The scope of a name could be LOCAL, GLOBAL, FREE, or CELL.
"""
if name in self.globals:
- return SC_GLOBAL_EXPLICT
+ return SC_GLOBAL_EXPLICIT
if name in self.cells:
return SC_CELL
if name in self.defs:
diff --git a/Lib/cookielib.py b/Lib/cookielib.py
index 65214df..f9c8d2f 100644
--- a/Lib/cookielib.py
+++ b/Lib/cookielib.py
@@ -1,4 +1,4 @@
-"""HTTP cookie handling for web clients.
+r"""HTTP cookie handling for web clients.
This module has (now fairly distant) origins in Gisle Aas' Perl module
HTTP::Cookies, from the libwww-perl library.
diff --git a/Lib/csv.py b/Lib/csv.py
index 984ed7e..c155ada 100644
--- a/Lib/csv.py
+++ b/Lib/csv.py
@@ -93,6 +93,10 @@ class DictReader:
self.line_num = self.reader.line_num
return self._fieldnames
+ # Issue 20004: Because DictReader is a classic class, this setter is
+ # ignored. At this point in 2.7's lifecycle, it is too late to change the
+ # base class for fear of breaking working code. If you want to change
+ # fieldnames without overwriting the getter, set _fieldnames directly.
@fieldnames.setter
def fieldnames(self, value):
self._fieldnames = value
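
The comment added above records the Issue 20004 limitation: DictReader is a classic class in 2.7, so assigning to reader.fieldnames bypasses the setter and merely shadows the property. A short sketch of the workaround the comment names (setting _fieldnames directly):

    import csv

    reader = csv.DictReader(["1,2", "3,4"])     # any iterable of lines works
    # reader.fieldnames = ["a", "b"]   # would shadow the property; setter never runs
    reader._fieldnames = ["a", "b"]    # the workaround named in the comment above
    print([row for row in reader])
    # -> two rows keyed by the injected field names, e.g. {'a': '1', 'b': '2'}
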
@@ -140,8 +144,8 @@ class DictWriter:
if self.extrasaction == "raise":
wrong_fields = [k for k in rowdict if k not in self.fieldnames]
if wrong_fields:
- raise ValueError("dict contains fields not in fieldnames: " +
- ", ".join(wrong_fields))
+ raise ValueError("dict contains fields not in fieldnames: "
+ + ", ".join([repr(x) for x in wrong_fields]))
return [rowdict.get(key, self.restval) for key in self.fieldnames]
def writerow(self, rowdict):
@@ -261,8 +265,9 @@ class Sniffer:
# if we see an extra quote between delimiters, we've got a
# double quoted format
- dq_regexp = re.compile(r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
- {'delim':delim, 'quote':quotechar}, re.MULTILINE)
+ dq_regexp = re.compile(
+ r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
+ {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)
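
The Sniffer change passes the candidate delimiter through re.escape() before interpolating it into dq_regexp, so a delimiter that happens to be a regex metacharacter cannot corrupt or break the pattern. A minimal illustration with a pathological delimiter (characters like '.' or '+' would not raise but would silently change the pattern's meaning):

    import re

    delim = "("            # pathological delimiter a sample text could suggest
    try:
        re.compile("((%s)|^)" % delim)
    except re.error as exc:
        print("unescaped delimiter breaks the pattern: %s" % exc)

    re.compile("((%s)|^)" % re.escape(delim))   # compiles fine
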
diff --git a/Lib/ctypes/test/__init__.py b/Lib/ctypes/test/__init__.py
index 5223092..f2cc143 100644
--- a/Lib/ctypes/test/__init__.py
+++ b/Lib/ctypes/test/__init__.py
@@ -2,7 +2,15 @@ import os, sys, unittest, getopt, time
use_resources = []
-class ResourceDenied(Exception):
+import ctypes
+ctypes_symbols = dir(ctypes)
+
+def need_symbol(name):
+ return unittest.skipUnless(name in ctypes_symbols,
+ '{!r} is required'.format(name))
+
+
+class ResourceDenied(unittest.SkipTest):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
@@ -62,7 +70,7 @@ def get_tests(package, mask, verbosity, exclude=()):
continue
try:
mod = __import__(modname, globals(), locals(), ['*'])
- except ResourceDenied, detail:
+ except (ResourceDenied, unittest.SkipTest) as detail:
skipped.append(modname)
if verbosity > 1:
print >> sys.stderr, "Skipped %s: %s" % (modname, detail)
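
need_symbol() above packages the old `try: <name> / except NameError` idiom as a reusable unittest skip decorator; the rest of the patch applies it across the ctypes test suite. A sketch of how a test module would use it (hypothetical test case):

    import unittest
    from ctypes.test import need_symbol   # available once this patch is applied

    class WideCharTests(unittest.TestCase):
        @need_symbol('c_wchar')           # skipped on builds without wchar_t support
        def test_wchar_roundtrip(self):
            from ctypes import c_wchar
            self.assertEqual(c_wchar(u"x").value, u"x")

    if __name__ == '__main__':
        unittest.main()
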
diff --git a/Lib/ctypes/test/runtests.py b/Lib/ctypes/test/runtests.py
index ec31fc8..b7a2b26 100644
--- a/Lib/ctypes/test/runtests.py
+++ b/Lib/ctypes/test/runtests.py
@@ -2,7 +2,7 @@
Run all tests found in this directory, and print a summary of the results.
Command line flags:
- -q quiet mode: don't prnt anything while the tests are running
+ -q quiet mode: don't print anything while the tests are running
-r run tests repeatedly, look for refcount leaks
-u<resources>
Add resources to the lits of allowed resources. '*' allows all
diff --git a/Lib/ctypes/test/test_arrays.py b/Lib/ctypes/test/test_arrays.py
index 925f8bf..47e95ee 100644
--- a/Lib/ctypes/test/test_arrays.py
+++ b/Lib/ctypes/test/test_arrays.py
@@ -1,6 +1,8 @@
import unittest
from ctypes import *
+from ctypes.test import need_symbol
+
formats = "bBhHiIlLqQfd"
formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \
@@ -87,8 +89,8 @@ class ArrayTestCase(unittest.TestCase):
self.assertEqual(values, [1, 2, 3, 4, 5])
def test_classcache(self):
- self.assertTrue(not ARRAY(c_int, 3) is ARRAY(c_int, 4))
- self.assertTrue(ARRAY(c_int, 3) is ARRAY(c_int, 3))
+ self.assertIsNot(ARRAY(c_int, 3), ARRAY(c_int, 4))
+ self.assertIs(ARRAY(c_int, 3), ARRAY(c_int, 3))
def test_from_address(self):
# Failed with 0.9.8, reported by JUrner
@@ -101,20 +103,16 @@ class ArrayTestCase(unittest.TestCase):
self.assertEqual(sz[1:4:2], "o")
self.assertEqual(sz.value, "foo")
- try:
- create_unicode_buffer
- except NameError:
- pass
- else:
- def test_from_addressW(self):
- p = create_unicode_buffer("foo")
- sz = (c_wchar * 3).from_address(addressof(p))
- self.assertEqual(sz[:], "foo")
- self.assertEqual(sz[::], "foo")
- self.assertEqual(sz[::-1], "oof")
- self.assertEqual(sz[::3], "f")
- self.assertEqual(sz[1:4:2], "o")
- self.assertEqual(sz.value, "foo")
+ @need_symbol('create_unicode_buffer')
+ def test_from_addressW(self):
+ p = create_unicode_buffer("foo")
+ sz = (c_wchar * 3).from_address(addressof(p))
+ self.assertEqual(sz[:], "foo")
+ self.assertEqual(sz[::], "foo")
+ self.assertEqual(sz[::-1], "oof")
+ self.assertEqual(sz[::3], "f")
+ self.assertEqual(sz[1:4:2], "o")
+ self.assertEqual(sz.value, "foo")
def test_cache(self):
# Array types are cached internally in the _ctypes extension,
@@ -128,7 +126,7 @@ class ArrayTestCase(unittest.TestCase):
# Create a new array type based on it:
t1 = my_int * 1
t2 = my_int * 1
- self.assertTrue(t1 is t2)
+ self.assertIs(t1, t2)
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/ctypes/test/test_as_parameter.py b/Lib/ctypes/test/test_as_parameter.py
index a603a64..f2fe10a 100644
--- a/Lib/ctypes/test/test_as_parameter.py
+++ b/Lib/ctypes/test/test_as_parameter.py
@@ -1,5 +1,6 @@
import unittest
from ctypes import *
+from ctypes.test import need_symbol
import _ctypes_test
dll = CDLL(_ctypes_test.__file__)
@@ -17,11 +18,8 @@ class BasicWrapTestCase(unittest.TestCase):
def wrap(self, param):
return param
+ @need_symbol('c_wchar')
def test_wchar_parm(self):
- try:
- c_wchar
- except NameError:
- return
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
result = f(self.wrap(1), self.wrap(u"x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0))
@@ -134,7 +132,7 @@ class BasicWrapTestCase(unittest.TestCase):
f.argtypes = [c_longlong, MyCallback]
def callback(value):
- self.assertTrue(isinstance(value, (int, long)))
+ self.assertIsInstance(value, (int, long))
return value & 0x7FFFFFFF
cb = MyCallback(callback)
diff --git a/Lib/ctypes/test/test_bitfields.py b/Lib/ctypes/test/test_bitfields.py
index e9c46c2..991dbe8 100644
--- a/Lib/ctypes/test/test_bitfields.py
+++ b/Lib/ctypes/test/test_bitfields.py
@@ -1,4 +1,5 @@
from ctypes import *
+from ctypes.test import need_symbol
import unittest
import os
@@ -127,20 +128,18 @@ class BitFieldTest(unittest.TestCase):
result = self.fail_fields(("a", c_char, 1))
self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_char'))
- try:
- c_wchar
- except NameError:
- pass
- else:
- result = self.fail_fields(("a", c_wchar, 1))
- self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_wchar'))
-
class Dummy(Structure):
_fields_ = []
result = self.fail_fields(("a", Dummy, 1))
self.assertEqual(result, (TypeError, 'bit fields not allowed for type Dummy'))
+ @need_symbol('c_wchar')
+ def test_c_wchar(self):
+ result = self.fail_fields(("a", c_wchar, 1))
+ self.assertEqual(result,
+ (TypeError, 'bit fields not allowed for type c_wchar'))
+
def test_single_bitfield_size(self):
for c_typ in int_types:
result = self.fail_fields(("a", c_typ, -1))
@@ -207,7 +206,7 @@ class BitFieldTest(unittest.TestCase):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_int, 32)]
- self.assertEqual(sizeof(X), sizeof(c_int)*2)
+ self.assertEqual(sizeof(X), alignment(c_int)+sizeof(c_int))
def test_mixed_3(self):
class X(Structure):
@@ -240,5 +239,25 @@ class BitFieldTest(unittest.TestCase):
_anonymous_ = ["_"]
_fields_ = [("_", X)]
+ @need_symbol('c_uint32')
+ def test_uint32(self):
+ class X(Structure):
+ _fields_ = [("a", c_uint32, 32)]
+ x = X()
+ x.a = 10
+ self.assertEqual(x.a, 10)
+ x.a = 0xFDCBA987
+ self.assertEqual(x.a, 0xFDCBA987)
+
+ @need_symbol('c_uint64')
+ def test_uint64(self):
+ class X(Structure):
+ _fields_ = [("a", c_uint64, 64)]
+ x = X()
+ x.a = 10
+ self.assertEqual(x.a, 10)
+ x.a = 0xFEDCBA9876543211
+ self.assertEqual(x.a, 0xFEDCBA9876543211)
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/ctypes/test/test_buffers.py b/Lib/ctypes/test/test_buffers.py
index 12945ed..88d87e9 100644
--- a/Lib/ctypes/test/test_buffers.py
+++ b/Lib/ctypes/test/test_buffers.py
@@ -1,4 +1,5 @@
from ctypes import *
+from ctypes.test import need_symbol
import unittest
class StringBufferTestCase(unittest.TestCase):
@@ -7,12 +8,12 @@ class StringBufferTestCase(unittest.TestCase):
b = create_string_buffer(32)
self.assertEqual(len(b), 32)
self.assertEqual(sizeof(b), 32 * sizeof(c_char))
- self.assertTrue(type(b[0]) is str)
+ self.assertIs(type(b[0]), str)
b = create_string_buffer("abc")
self.assertEqual(len(b), 4) # trailing nul char
self.assertEqual(sizeof(b), 4 * sizeof(c_char))
- self.assertTrue(type(b[0]) is str)
+ self.assertIs(type(b[0]), str)
self.assertEqual(b[0], "a")
self.assertEqual(b[:], "abc\0")
self.assertEqual(b[::], "abc\0")
@@ -36,39 +37,36 @@ class StringBufferTestCase(unittest.TestCase):
self.assertEqual(b[::2], "ac")
self.assertEqual(b[::5], "a")
- try:
- c_wchar
- except NameError:
- pass
- else:
- def test_unicode_buffer(self):
- b = create_unicode_buffer(32)
- self.assertEqual(len(b), 32)
- self.assertEqual(sizeof(b), 32 * sizeof(c_wchar))
- self.assertTrue(type(b[0]) is unicode)
+ @need_symbol('c_wchar')
+ def test_unicode_buffer(self):
+ b = create_unicode_buffer(32)
+ self.assertEqual(len(b), 32)
+ self.assertEqual(sizeof(b), 32 * sizeof(c_wchar))
+ self.assertIs(type(b[0]), unicode)
- b = create_unicode_buffer(u"abc")
- self.assertEqual(len(b), 4) # trailing nul char
- self.assertEqual(sizeof(b), 4 * sizeof(c_wchar))
- self.assertTrue(type(b[0]) is unicode)
- self.assertEqual(b[0], u"a")
- self.assertEqual(b[:], "abc\0")
- self.assertEqual(b[::], "abc\0")
- self.assertEqual(b[::-1], "\0cba")
- self.assertEqual(b[::2], "ac")
- self.assertEqual(b[::5], "a")
+ b = create_unicode_buffer(u"abc")
+ self.assertEqual(len(b), 4) # trailing nul char
+ self.assertEqual(sizeof(b), 4 * sizeof(c_wchar))
+ self.assertIs(type(b[0]), unicode)
+ self.assertEqual(b[0], u"a")
+ self.assertEqual(b[:], "abc\0")
+ self.assertEqual(b[::], "abc\0")
+ self.assertEqual(b[::-1], "\0cba")
+ self.assertEqual(b[::2], "ac")
+ self.assertEqual(b[::5], "a")
- def test_unicode_conversion(self):
- b = create_unicode_buffer("abc")
- self.assertEqual(len(b), 4) # trailing nul char
- self.assertEqual(sizeof(b), 4 * sizeof(c_wchar))
- self.assertTrue(type(b[0]) is unicode)
- self.assertEqual(b[0], u"a")
- self.assertEqual(b[:], "abc\0")
- self.assertEqual(b[::], "abc\0")
- self.assertEqual(b[::-1], "\0cba")
- self.assertEqual(b[::2], "ac")
- self.assertEqual(b[::5], "a")
+ @need_symbol('c_wchar')
+ def test_unicode_conversion(self):
+ b = create_unicode_buffer("abc")
+ self.assertEqual(len(b), 4) # trailing nul char
+ self.assertEqual(sizeof(b), 4 * sizeof(c_wchar))
+ self.assertIs(type(b[0]), unicode)
+ self.assertEqual(b[0], u"a")
+ self.assertEqual(b[:], "abc\0")
+ self.assertEqual(b[::], "abc\0")
+ self.assertEqual(b[::-1], "\0cba")
+ self.assertEqual(b[::2], "ac")
+ self.assertEqual(b[::5], "a")
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/ctypes/test/test_byteswap.py b/Lib/ctypes/test/test_byteswap.py
index 4bd75cd..d36402b 100644
--- a/Lib/ctypes/test/test_byteswap.py
+++ b/Lib/ctypes/test/test_byteswap.py
@@ -14,7 +14,8 @@ def bin(s):
# For Structures and Unions, these types are created on demand.
class Test(unittest.TestCase):
- def X_test(self):
+ @unittest.skip('test disabled')
+ def test_X(self):
print >> sys.stderr, sys.byteorder
for i in range(32):
bits = BITS()
@@ -23,11 +24,11 @@ class Test(unittest.TestCase):
def test_endian_short(self):
if sys.byteorder == "little":
- self.assertTrue(c_short.__ctype_le__ is c_short)
- self.assertTrue(c_short.__ctype_be__.__ctype_le__ is c_short)
+ self.assertIs(c_short.__ctype_le__, c_short)
+ self.assertIs(c_short.__ctype_be__.__ctype_le__, c_short)
else:
- self.assertTrue(c_short.__ctype_be__ is c_short)
- self.assertTrue(c_short.__ctype_le__.__ctype_be__ is c_short)
+ self.assertIs(c_short.__ctype_be__, c_short)
+ self.assertIs(c_short.__ctype_le__.__ctype_be__, c_short)
s = c_short.__ctype_be__(0x1234)
self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234")
self.assertEqual(bin(s), "1234")
@@ -50,11 +51,11 @@ class Test(unittest.TestCase):
def test_endian_int(self):
if sys.byteorder == "little":
- self.assertTrue(c_int.__ctype_le__ is c_int)
- self.assertTrue(c_int.__ctype_be__.__ctype_le__ is c_int)
+ self.assertIs(c_int.__ctype_le__, c_int)
+ self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int)
else:
- self.assertTrue(c_int.__ctype_be__ is c_int)
- self.assertTrue(c_int.__ctype_le__.__ctype_be__ is c_int)
+ self.assertIs(c_int.__ctype_be__, c_int)
+ self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int)
s = c_int.__ctype_be__(0x12345678)
self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678")
@@ -78,11 +79,11 @@ class Test(unittest.TestCase):
def test_endian_longlong(self):
if sys.byteorder == "little":
- self.assertTrue(c_longlong.__ctype_le__ is c_longlong)
- self.assertTrue(c_longlong.__ctype_be__.__ctype_le__ is c_longlong)
+ self.assertIs(c_longlong.__ctype_le__, c_longlong)
+ self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong)
else:
- self.assertTrue(c_longlong.__ctype_be__ is c_longlong)
- self.assertTrue(c_longlong.__ctype_le__.__ctype_be__ is c_longlong)
+ self.assertIs(c_longlong.__ctype_be__, c_longlong)
+ self.assertIs(c_longlong.__ctype_le__.__ctype_be__, c_longlong)
s = c_longlong.__ctype_be__(0x1234567890ABCDEF)
self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF")
@@ -106,11 +107,11 @@ class Test(unittest.TestCase):
def test_endian_float(self):
if sys.byteorder == "little":
- self.assertTrue(c_float.__ctype_le__ is c_float)
- self.assertTrue(c_float.__ctype_be__.__ctype_le__ is c_float)
+ self.assertIs(c_float.__ctype_le__, c_float)
+ self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float)
else:
- self.assertTrue(c_float.__ctype_be__ is c_float)
- self.assertTrue(c_float.__ctype_le__.__ctype_be__ is c_float)
+ self.assertIs(c_float.__ctype_be__, c_float)
+ self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float)
s = c_float(math.pi)
self.assertEqual(bin(struct.pack("f", math.pi)), bin(s))
# Hm, what's the precision of a float compared to a double?
@@ -124,11 +125,11 @@ class Test(unittest.TestCase):
def test_endian_double(self):
if sys.byteorder == "little":
- self.assertTrue(c_double.__ctype_le__ is c_double)
- self.assertTrue(c_double.__ctype_be__.__ctype_le__ is c_double)
+ self.assertIs(c_double.__ctype_le__, c_double)
+ self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double)
else:
- self.assertTrue(c_double.__ctype_be__ is c_double)
- self.assertTrue(c_double.__ctype_le__.__ctype_be__ is c_double)
+ self.assertIs(c_double.__ctype_be__, c_double)
+ self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double)
s = c_double(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("d", math.pi)), bin(s))
@@ -140,14 +141,14 @@ class Test(unittest.TestCase):
self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s))
def test_endian_other(self):
- self.assertTrue(c_byte.__ctype_le__ is c_byte)
- self.assertTrue(c_byte.__ctype_be__ is c_byte)
+ self.assertIs(c_byte.__ctype_le__, c_byte)
+ self.assertIs(c_byte.__ctype_be__, c_byte)
- self.assertTrue(c_ubyte.__ctype_le__ is c_ubyte)
- self.assertTrue(c_ubyte.__ctype_be__ is c_ubyte)
+ self.assertIs(c_ubyte.__ctype_le__, c_ubyte)
+ self.assertIs(c_ubyte.__ctype_be__, c_ubyte)
- self.assertTrue(c_char.__ctype_le__ is c_char)
- self.assertTrue(c_char.__ctype_be__ is c_char)
+ self.assertIs(c_char.__ctype_le__, c_char)
+ self.assertIs(c_char.__ctype_be__, c_char)
def test_struct_fields_1(self):
if sys.byteorder == "little":
diff --git a/Lib/ctypes/test/test_callbacks.py b/Lib/ctypes/test/test_callbacks.py
index be0c17e..c2e953b 100644
--- a/Lib/ctypes/test/test_callbacks.py
+++ b/Lib/ctypes/test/test_callbacks.py
@@ -1,5 +1,6 @@
import unittest
from ctypes import *
+from ctypes.test import need_symbol
import _ctypes_test
class Callbacks(unittest.TestCase):
@@ -94,9 +95,10 @@ class Callbacks(unittest.TestCase):
# disabled: would now (correctly) raise a RuntimeWarning about
# a memory leak. A callback function cannot return a non-integral
# C type without causing a memory leak.
-## def test_char_p(self):
-## self.check_type(c_char_p, "abc")
-## self.check_type(c_char_p, "def")
+ @unittest.skip('test disabled')
+ def test_char_p(self):
+ self.check_type(c_char_p, "abc")
+ self.check_type(c_char_p, "def")
def test_pyobject(self):
o = ()
@@ -148,13 +150,12 @@ class Callbacks(unittest.TestCase):
CFUNCTYPE(None)(lambda x=Nasty(): None)
-try:
- WINFUNCTYPE
-except NameError:
- pass
-else:
- class StdcallCallbacks(Callbacks):
+@need_symbol('WINFUNCTYPE')
+class StdcallCallbacks(Callbacks):
+ try:
functype = WINFUNCTYPE
+ except NameError:
+ pass
################################################################
@@ -184,7 +185,7 @@ class SampleCallbacksTestCase(unittest.TestCase):
from ctypes.util import find_library
libc_path = find_library("c")
if not libc_path:
- return # cannot test
+ self.skipTest('could not find libc')
libc = CDLL(libc_path)
@CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int))
@@ -196,23 +197,19 @@ class SampleCallbacksTestCase(unittest.TestCase):
libc.qsort(array, len(array), sizeof(c_int), cmp_func)
self.assertEqual(array[:], [1, 5, 7, 33, 99])
- try:
- WINFUNCTYPE
- except NameError:
- pass
- else:
- def test_issue_8959_b(self):
- from ctypes.wintypes import BOOL, HWND, LPARAM
- global windowCount
- windowCount = 0
+ @need_symbol('WINFUNCTYPE')
+ def test_issue_8959_b(self):
+ from ctypes.wintypes import BOOL, HWND, LPARAM
+ global windowCount
+ windowCount = 0
- @WINFUNCTYPE(BOOL, HWND, LPARAM)
- def EnumWindowsCallbackFunc(hwnd, lParam):
- global windowCount
- windowCount += 1
- return True #Allow windows to keep enumerating
+ @WINFUNCTYPE(BOOL, HWND, LPARAM)
+ def EnumWindowsCallbackFunc(hwnd, lParam):
+ global windowCount
+ windowCount += 1
+ return True #Allow windows to keep enumerating
- windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0)
+ windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0)
def test_callback_register_int(self):
# Issue #8275: buggy handling of callback args under Win64
diff --git a/Lib/ctypes/test/test_cast.py b/Lib/ctypes/test/test_cast.py
index 906fffc..d24e0b5 100644
--- a/Lib/ctypes/test/test_cast.py
+++ b/Lib/ctypes/test/test_cast.py
@@ -1,4 +1,5 @@
from ctypes import *
+from ctypes.test import need_symbol
import unittest
import sys
@@ -38,14 +39,14 @@ class Test(unittest.TestCase):
p = cast(array, POINTER(c_char_p))
# array and p share a common _objects attribute
- self.assertTrue(p._objects is array._objects)
+ self.assertIs(p._objects, array._objects)
self.assertEqual(array._objects, {'0': "foo bar", id(array): array})
p[0] = "spam spam"
self.assertEqual(p._objects, {'0': "spam spam", id(array): array})
- self.assertTrue(array._objects is p._objects)
+ self.assertIs(array._objects, p._objects)
p[1] = "foo bar"
self.assertEqual(p._objects, {'1': 'foo bar', '0': "spam spam", id(array): array})
- self.assertTrue(array._objects is p._objects)
+ self.assertIs(array._objects, p._objects)
def test_other(self):
p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int))
@@ -75,15 +76,11 @@ class Test(unittest.TestCase):
self.assertEqual(cast(cast(s, c_void_p), c_char_p).value,
"hiho")
- try:
- c_wchar_p
- except NameError:
- pass
- else:
- def test_wchar_p(self):
- s = c_wchar_p("hiho")
- self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value,
- "hiho")
+ @need_symbol('c_wchar_p')
+ def test_wchar_p(self):
+ s = c_wchar_p("hiho")
+ self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value,
+ "hiho")
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/ctypes/test/test_cfuncs.py b/Lib/ctypes/test/test_cfuncs.py
index 493cbe9..765408c 100644
--- a/Lib/ctypes/test/test_cfuncs.py
+++ b/Lib/ctypes/test/test_cfuncs.py
@@ -3,6 +3,7 @@
import unittest
from ctypes import *
+from ctypes.test import need_symbol
import _ctypes_test
@@ -188,12 +189,12 @@ class CFunctions(unittest.TestCase):
self.assertEqual(self._dll.tv_i(-42), None)
self.assertEqual(self.S(), -42)
-# The following repeates the above tests with stdcall functions (where
+# The following repeats the above tests with stdcall functions (where
# they are available)
try:
WinDLL
except NameError:
- pass
+ def stdcall_dll(*_): pass
else:
class stdcall_dll(WinDLL):
def __getattr__(self, name):
@@ -203,9 +204,9 @@ else:
setattr(self, name, func)
return func
- class stdcallCFunctions(CFunctions):
- _dll = stdcall_dll(_ctypes_test.__file__)
- pass
+@need_symbol('WinDLL')
+class stdcallCFunctions(CFunctions):
+ _dll = stdcall_dll(_ctypes_test.__file__)
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/ctypes/test/test_checkretval.py b/Lib/ctypes/test/test_checkretval.py
index 01ccc57..a0dc534 100644
--- a/Lib/ctypes/test/test_checkretval.py
+++ b/Lib/ctypes/test/test_checkretval.py
@@ -1,6 +1,7 @@
import unittest
from ctypes import *
+from ctypes.test import need_symbol
class CHECKED(c_int):
def _check_retval_(value):
@@ -25,15 +26,11 @@ class Test(unittest.TestCase):
del dll._testfunc_p_p.restype
self.assertEqual(42, dll._testfunc_p_p(42))
- try:
- oledll
- except NameError:
- pass
- else:
- def test_oledll(self):
- self.assertRaises(WindowsError,
- oledll.oleaut32.CreateTypeLib2,
- 0, None, None)
+ @need_symbol('oledll')
+ def test_oledll(self):
+ self.assertRaises(WindowsError,
+ oledll.oleaut32.CreateTypeLib2,
+ 0, None, None)
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/ctypes/test/test_errcheck.py b/Lib/ctypes/test/test_errcheck.py
deleted file mode 100644
index a4913f9..0000000
--- a/Lib/ctypes/test/test_errcheck.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import sys
-from ctypes import *
-
-##class HMODULE(Structure):
-## _fields_ = [("value", c_void_p)]
-
-## def __repr__(self):
-## return "<HMODULE %s>" % self.value
-
-##windll.kernel32.GetModuleHandleA.restype = HMODULE
-
-##print windll.kernel32.GetModuleHandleA("python23.dll")
-##print hex(sys.dllhandle)
-
-##def nonzero(handle):
-## return (GetLastError(), handle)
-
-##windll.kernel32.GetModuleHandleA.errcheck = nonzero
-##print windll.kernel32.GetModuleHandleA("spam")
diff --git a/Lib/ctypes/test/test_find.py b/Lib/ctypes/test/test_find.py
index 77e789f..c5f6ace 100644
--- a/Lib/ctypes/test/test_find.py
+++ b/Lib/ctypes/test/test_find.py
@@ -1,4 +1,5 @@
import unittest
+import os
import sys
from ctypes import *
from ctypes.util import find_library
@@ -40,43 +41,43 @@ class Test_OpenGL_libs(unittest.TestCase):
except OSError:
pass
- if lib_gl:
- def test_gl(self):
- if self.gl:
- self.gl.glClearIndex
+ @unittest.skipUnless(lib_gl, 'lib_gl not available')
+ def test_gl(self):
+ if self.gl:
+ self.gl.glClearIndex
- if lib_glu:
- def test_glu(self):
- if self.glu:
- self.glu.gluBeginCurve
+ @unittest.skipUnless(lib_glu, 'lib_glu not available')
+ def test_glu(self):
+ if self.glu:
+ self.glu.gluBeginCurve
- if lib_gle:
- def test_gle(self):
- if self.gle:
- self.gle.gleGetJoinStyle
+ @unittest.skipUnless(lib_gle, 'lib_gle not available')
+ def test_gle(self):
+ if self.gle:
+ self.gle.gleGetJoinStyle
-##if os.name == "posix" and sys.platform != "darwin":
-
-## # On platforms where the default shared library suffix is '.so',
-## # at least some libraries can be loaded as attributes of the cdll
-## # object, since ctypes now tries loading the lib again
-## # with '.so' appended of the first try fails.
-## #
-## # Won't work for libc, unfortunately. OTOH, it isn't
-## # needed for libc since this is already mapped into the current
-## # process (?)
-## #
-## # On MAC OSX, it won't work either, because dlopen() needs a full path,
-## # and the default suffix is either none or '.dylib'.
-
-## class LoadLibs(unittest.TestCase):
-## def test_libm(self):
-## import math
-## libm = cdll.libm
-## sqrt = libm.sqrt
-## sqrt.argtypes = (c_double,)
-## sqrt.restype = c_double
-## self.assertEqual(sqrt(2), math.sqrt(2))
+# On platforms where the default shared library suffix is '.so',
+# at least some libraries can be loaded as attributes of the cdll
+# object, since ctypes now tries loading the lib again
+# with '.so' appended if the first try fails.
+#
+# Won't work for libc, unfortunately. OTOH, it isn't
+# needed for libc since this is already mapped into the current
+# process (?)
+#
+# On MAC OSX, it won't work either, because dlopen() needs a full path,
+# and the default suffix is either none or '.dylib'.
+@unittest.skip('test disabled')
+@unittest.skipUnless(os.name=="posix" and sys.platform != "darwin",
+ 'test not suitable for this platform')
+class LoadLibs(unittest.TestCase):
+ def test_libm(self):
+ import math
+ libm = cdll.libm
+ sqrt = libm.sqrt
+ sqrt.argtypes = (c_double,)
+ sqrt.restype = c_double
+ self.assertEqual(sqrt(2), math.sqrt(2))
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/ctypes/test/test_frombuffer.py b/Lib/ctypes/test/test_frombuffer.py
index 624fb70..9f4bb28 100644
--- a/Lib/ctypes/test/test_frombuffer.py
+++ b/Lib/ctypes/test/test_frombuffer.py
@@ -23,7 +23,7 @@ class Test(unittest.TestCase):
a[0], a[-1] = 200, -200
self.assertEqual(x[:], a.tolist())
- self.assertTrue(a in x._objects.values())
+ self.assertIn(a, x._objects.values())
self.assertRaises(ValueError,
c_int.from_buffer, a, -1)
diff --git a/Lib/ctypes/test/test_funcptr.py b/Lib/ctypes/test/test_funcptr.py
index 99af5ed..58cbb47 100644
--- a/Lib/ctypes/test/test_funcptr.py
+++ b/Lib/ctypes/test/test_funcptr.py
@@ -75,7 +75,7 @@ class CFuncPtrTestCase(unittest.TestCase):
## "lpfnWndProc", WNDPROC_2(wndproc))
# instead:
- self.assertTrue(WNDPROC is WNDPROC_2)
+ self.assertIs(WNDPROC, WNDPROC_2)
# 'wndclass.lpfnWndProc' leaks 94 references. Why?
self.assertEqual(wndclass.lpfnWndProc(1, 2, 3, 4), 10)
diff --git a/Lib/ctypes/test/test_functions.py b/Lib/ctypes/test/test_functions.py
index 8e898a8..a374415 100644
--- a/Lib/ctypes/test/test_functions.py
+++ b/Lib/ctypes/test/test_functions.py
@@ -6,6 +6,7 @@ Later...
"""
from ctypes import *
+from ctypes.test import need_symbol
import sys, unittest
try:
@@ -63,22 +64,16 @@ class FunctionTestCase(unittest.TestCase):
pass
+ @need_symbol('c_wchar')
def test_wchar_parm(self):
- try:
- c_wchar
- except NameError:
- return
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
result = f(1, u"x", 3, 4, 5.0, 6.0)
self.assertEqual(result, 139)
self.assertEqual(type(result), int)
+ @need_symbol('c_wchar')
def test_wchar_result(self):
- try:
- c_wchar
- except NameError:
- return
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
f.restype = c_wchar
@@ -155,11 +150,8 @@ class FunctionTestCase(unittest.TestCase):
self.assertEqual(result, -21)
self.assertEqual(type(result), float)
+ @need_symbol('c_longlong')
def test_longlongresult(self):
- try:
- c_longlong
- except NameError:
- return
f = dll._testfunc_q_bhilfd
f.restype = c_longlong
f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double]
@@ -296,6 +288,7 @@ class FunctionTestCase(unittest.TestCase):
result = f(-10, cb)
self.assertEqual(result, -18)
+ @need_symbol('c_longlong')
def test_longlong_callbacks(self):
f = dll._testfunc_callback_q_qf
@@ -306,7 +299,7 @@ class FunctionTestCase(unittest.TestCase):
f.argtypes = [c_longlong, MyCallback]
def callback(value):
- self.assertTrue(isinstance(value, (int, long)))
+ self.assertIsInstance(value, (int, long))
return value & 0x7FFFFFFF
cb = MyCallback(callback)
@@ -348,16 +341,16 @@ class FunctionTestCase(unittest.TestCase):
s2h = dll.ret_2h_func(inp)
self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))
- if sys.platform == "win32":
- def test_struct_return_2H_stdcall(self):
- class S2H(Structure):
- _fields_ = [("x", c_short),
- ("y", c_short)]
+ @unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
+ def test_struct_return_2H_stdcall(self):
+ class S2H(Structure):
+ _fields_ = [("x", c_short),
+ ("y", c_short)]
- windll.s_ret_2h_func.restype = S2H
- windll.s_ret_2h_func.argtypes = [S2H]
- s2h = windll.s_ret_2h_func(S2H(99, 88))
- self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))
+ windll.s_ret_2h_func.restype = S2H
+ windll.s_ret_2h_func.argtypes = [S2H]
+ s2h = windll.s_ret_2h_func(S2H(99, 88))
+ self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))
def test_struct_return_8H(self):
class S8I(Structure):
@@ -376,23 +369,24 @@ class FunctionTestCase(unittest.TestCase):
self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
- if sys.platform == "win32":
- def test_struct_return_8H_stdcall(self):
- class S8I(Structure):
- _fields_ = [("a", c_int),
- ("b", c_int),
- ("c", c_int),
- ("d", c_int),
- ("e", c_int),
- ("f", c_int),
- ("g", c_int),
- ("h", c_int)]
- windll.s_ret_8i_func.restype = S8I
- windll.s_ret_8i_func.argtypes = [S8I]
- inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
- s8i = windll.s_ret_8i_func(inp)
- self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
- (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
+ @unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
+ def test_struct_return_8H_stdcall(self):
+ class S8I(Structure):
+ _fields_ = [("a", c_int),
+ ("b", c_int),
+ ("c", c_int),
+ ("d", c_int),
+ ("e", c_int),
+ ("f", c_int),
+ ("g", c_int),
+ ("h", c_int)]
+ windll.s_ret_8i_func.restype = S8I
+ windll.s_ret_8i_func.argtypes = [S8I]
+ inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
+ s8i = windll.s_ret_8i_func(inp)
+ self.assertEqual(
+ (s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
+ (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
def test_sf1651235(self):
# see http://www.python.org/sf/1651235
diff --git a/Lib/ctypes/test/test_integers.py b/Lib/ctypes/test/test_integers.py
deleted file mode 100644
index 5b6453a..0000000
--- a/Lib/ctypes/test/test_integers.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# superseeded by test_numbers.py
-import unittest
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Lib/ctypes/test/test_keeprefs.py b/Lib/ctypes/test/test_keeprefs.py
index b504c0c..b2a50ab 100644
--- a/Lib/ctypes/test/test_keeprefs.py
+++ b/Lib/ctypes/test/test_keeprefs.py
@@ -94,7 +94,8 @@ class PointerTestCase(unittest.TestCase):
self.assertEqual(x._objects, {'1': i})
class DeletePointerTestCase(unittest.TestCase):
- def X_test(self):
+ @unittest.skip('test disabled')
+ def test_X(self):
class X(Structure):
_fields_ = [("p", POINTER(c_char_p))]
x = X()
diff --git a/Lib/ctypes/test/test_loading.py b/Lib/ctypes/test/test_loading.py
index 26683d8..81a27e3 100644
--- a/Lib/ctypes/test/test_loading.py
+++ b/Lib/ctypes/test/test_loading.py
@@ -21,18 +21,21 @@ class LoaderTest(unittest.TestCase):
unknowndll = "xxrandomnamexx"
- if libc_name is not None:
- def test_load(self):
- CDLL(libc_name)
- CDLL(os.path.basename(libc_name))
- self.assertRaises(OSError, CDLL, self.unknowndll)
+ @unittest.skipUnless(libc_name is not None, 'could not find libc')
+ def test_load(self):
+ CDLL(libc_name)
+ CDLL(os.path.basename(libc_name))
+ self.assertRaises(OSError, CDLL, self.unknowndll)
- if libc_name is not None and os.path.basename(libc_name) == "libc.so.6":
- def test_load_version(self):
- cdll.LoadLibrary("libc.so.6")
- # linux uses version, libc 9 should not exist
- self.assertRaises(OSError, cdll.LoadLibrary, "libc.so.9")
- self.assertRaises(OSError, cdll.LoadLibrary, self.unknowndll)
+ @unittest.skipUnless(libc_name is not None, 'could not find libc')
+ @unittest.skipUnless(libc_name is not None and
+ os.path.basename(libc_name) == "libc.so.6",
+ 'wrong libc path for test')
+ def test_load_version(self):
+ cdll.LoadLibrary("libc.so.6")
+ # linux uses version, libc 9 should not exist
+ self.assertRaises(OSError, cdll.LoadLibrary, "libc.so.9")
+ self.assertRaises(OSError, cdll.LoadLibrary, self.unknowndll)
def test_find(self):
for name in ("c", "m"):
@@ -41,66 +44,71 @@ class LoaderTest(unittest.TestCase):
cdll.LoadLibrary(lib)
CDLL(lib)
- if os.name in ("nt", "ce"):
- def test_load_library(self):
- self.assertFalse(libc_name is None)
- if is_resource_enabled("printing"):
- print find_library("kernel32")
- print find_library("user32")
+ @unittest.skipUnless(os.name in ("nt", "ce"),
+ 'test specific to Windows (NT/CE)')
+ def test_load_library(self):
+ self.assertIsNotNone(libc_name)
+ if is_resource_enabled("printing"):
+ print find_library("kernel32")
+ print find_library("user32")
- if os.name == "nt":
- windll.kernel32.GetModuleHandleW
- windll["kernel32"].GetModuleHandleW
- windll.LoadLibrary("kernel32").GetModuleHandleW
- WinDLL("kernel32").GetModuleHandleW
- elif os.name == "ce":
- windll.coredll.GetModuleHandleW
- windll["coredll"].GetModuleHandleW
- windll.LoadLibrary("coredll").GetModuleHandleW
- WinDLL("coredll").GetModuleHandleW
+ if os.name == "nt":
+ windll.kernel32.GetModuleHandleW
+ windll["kernel32"].GetModuleHandleW
+ windll.LoadLibrary("kernel32").GetModuleHandleW
+ WinDLL("kernel32").GetModuleHandleW
+ elif os.name == "ce":
+ windll.coredll.GetModuleHandleW
+ windll["coredll"].GetModuleHandleW
+ windll.LoadLibrary("coredll").GetModuleHandleW
+ WinDLL("coredll").GetModuleHandleW
- def test_load_ordinal_functions(self):
- import _ctypes_test
- dll = WinDLL(_ctypes_test.__file__)
- # We load the same function both via ordinal and name
- func_ord = dll[2]
- func_name = dll.GetString
- # addressof gets the address where the function pointer is stored
- a_ord = addressof(func_ord)
- a_name = addressof(func_name)
- f_ord_addr = c_void_p.from_address(a_ord).value
- f_name_addr = c_void_p.from_address(a_name).value
- self.assertEqual(hex(f_ord_addr), hex(f_name_addr))
+ @unittest.skipUnless(os.name in ("nt", "ce"),
+ 'test specific to Windows (NT/CE)')
+ def test_load_ordinal_functions(self):
+ import _ctypes_test
+ dll = WinDLL(_ctypes_test.__file__)
+ # We load the same function both via ordinal and name
+ func_ord = dll[2]
+ func_name = dll.GetString
+ # addressof gets the address where the function pointer is stored
+ a_ord = addressof(func_ord)
+ a_name = addressof(func_name)
+ f_ord_addr = c_void_p.from_address(a_ord).value
+ f_name_addr = c_void_p.from_address(a_name).value
+ self.assertEqual(hex(f_ord_addr), hex(f_name_addr))
- self.assertRaises(AttributeError, dll.__getitem__, 1234)
+ self.assertRaises(AttributeError, dll.__getitem__, 1234)
- if os.name == "nt":
- def test_1703286_A(self):
- from _ctypes import LoadLibrary, FreeLibrary
- # On winXP 64-bit, advapi32 loads at an address that does
- # NOT fit into a 32-bit integer. FreeLibrary must be able
- # to accept this address.
+ @unittest.skipUnless(os.name == "nt", 'Windows-specific test')
+ def test_1703286_A(self):
+ from _ctypes import LoadLibrary, FreeLibrary
+ # On winXP 64-bit, advapi32 loads at an address that does
+ # NOT fit into a 32-bit integer. FreeLibrary must be able
+ # to accept this address.
- # These are tests for http://www.python.org/sf/1703286
- handle = LoadLibrary("advapi32")
- FreeLibrary(handle)
+ # These are tests for http://www.python.org/sf/1703286
+ handle = LoadLibrary("advapi32")
+ FreeLibrary(handle)
- def test_1703286_B(self):
- # Since on winXP 64-bit advapi32 loads like described
- # above, the (arbitrarily selected) CloseEventLog function
- # also has a high address. 'call_function' should accept
- # addresses so large.
- from _ctypes import call_function
- advapi32 = windll.advapi32
- # Calling CloseEventLog with a NULL argument should fail,
- # but the call should not segfault or so.
- self.assertEqual(0, advapi32.CloseEventLog(None))
- windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p
- windll.kernel32.GetProcAddress.restype = c_void_p
- proc = windll.kernel32.GetProcAddress(advapi32._handle, "CloseEventLog")
- self.assertTrue(proc)
- # This is the real test: call the function via 'call_function'
- self.assertEqual(0, call_function(proc, (None,)))
+ @unittest.skipUnless(os.name == "nt", 'Windows-specific test')
+ def test_1703286_B(self):
+ # Since on winXP 64-bit advapi32 loads like described
+ # above, the (arbitrarily selected) CloseEventLog function
+ # also has a high address. 'call_function' should accept
+ # addresses so large.
+ from _ctypes import call_function
+ advapi32 = windll.advapi32
+ # Calling CloseEventLog with a NULL argument should fail,
+ # but the call should not segfault or so.
+ self.assertEqual(0, advapi32.CloseEventLog(None))
+ windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p
+ windll.kernel32.GetProcAddress.restype = c_void_p
+ proc = windll.kernel32.GetProcAddress(advapi32._handle,
+ "CloseEventLog")
+ self.assertTrue(proc)
+ # This is the real test: call the function via 'call_function'
+ self.assertEqual(0, call_function(proc, (None,)))
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/ctypes/test/test_macholib.py b/Lib/ctypes/test/test_macholib.py
index eda846d..9779b2f 100644
--- a/Lib/ctypes/test/test_macholib.py
+++ b/Lib/ctypes/test/test_macholib.py
@@ -45,18 +45,21 @@ def find_lib(name):
raise ValueError("%s not found" % (name,))
class MachOTest(unittest.TestCase):
- if sys.platform == "darwin":
- def test_find(self):
+ @unittest.skipUnless(sys.platform == "darwin", 'OSX-specific test')
+ def test_find(self):
- self.assertEqual(find_lib('pthread'),
- '/usr/lib/libSystem.B.dylib')
+ self.assertEqual(find_lib('pthread'),
+ '/usr/lib/libSystem.B.dylib')
- result = find_lib('z')
- self.assertTrue(result.startswith('/usr/lib/libz.1'))
- self.assertTrue(result.endswith('.dylib'))
+ result = find_lib('z')
+ # Issue #21093: dyld default search path includes $HOME/lib and
+ # /usr/local/lib before /usr/lib, which caused test failures if
+ # a local copy of libz exists in one of them. Now ignore the head
+ # of the path.
+ self.assertRegexpMatches(result, r".*/lib/libz\..*.*\.dylib")
- self.assertEqual(find_lib('IOKit'),
- '/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit')
+ self.assertEqual(find_lib('IOKit'),
+ '/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit')
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/ctypes/test/test_memfunctions.py b/Lib/ctypes/test/test_memfunctions.py
index d072603..ba9eea6 100644
--- a/Lib/ctypes/test/test_memfunctions.py
+++ b/Lib/ctypes/test/test_memfunctions.py
@@ -1,17 +1,19 @@
import sys
import unittest
from ctypes import *
+from ctypes.test import need_symbol
class MemFunctionsTest(unittest.TestCase):
-## def test_overflow(self):
-## # string_at and wstring_at must use the Python calling
-## # convention (which acquires the GIL and checks the Python
-## # error flag). Provoke an error and catch it; see also issue
-## # #3554: <http://bugs.python.org/issue3554>
-## self.assertRaises((OverflowError, MemoryError, SystemError),
-## lambda: wstring_at(u"foo", sys.maxint - 1))
-## self.assertRaises((OverflowError, MemoryError, SystemError),
-## lambda: string_at("foo", sys.maxint - 1))
+ @unittest.skip('test disabled')
+ def test_overflow(self):
+ # string_at and wstring_at must use the Python calling
+ # convention (which acquires the GIL and checks the Python
+ # error flag). Provoke an error and catch it; see also issue
+ # #3554: <http://bugs.python.org/issue3554>
+ self.assertRaises((OverflowError, MemoryError, SystemError),
+ lambda: wstring_at(u"foo", sys.maxint - 1))
+ self.assertRaises((OverflowError, MemoryError, SystemError),
+ lambda: string_at("foo", sys.maxint - 1))
def test_memmove(self):
# large buffers apparently increase the chance that the memory
@@ -59,21 +61,17 @@ class MemFunctionsTest(unittest.TestCase):
self.assertEqual(string_at("foo bar", 8), "foo bar\0")
self.assertEqual(string_at("foo bar", 3), "foo")
- try:
- create_unicode_buffer
- except NameError:
- pass
- else:
- def test_wstring_at(self):
- p = create_unicode_buffer("Hello, World")
- a = create_unicode_buffer(1000000)
- result = memmove(a, p, len(p) * sizeof(c_wchar))
- self.assertEqual(a.value, "Hello, World")
+ @need_symbol('create_unicode_buffer')
+ def test_wstring_at(self):
+ p = create_unicode_buffer("Hello, World")
+ a = create_unicode_buffer(1000000)
+ result = memmove(a, p, len(p) * sizeof(c_wchar))
+ self.assertEqual(a.value, "Hello, World")
- self.assertEqual(wstring_at(a), "Hello, World")
- self.assertEqual(wstring_at(a, 5), "Hello")
- self.assertEqual(wstring_at(a, 16), "Hello, World\0\0\0\0")
- self.assertEqual(wstring_at(a, 0), "")
+ self.assertEqual(wstring_at(a), "Hello, World")
+ self.assertEqual(wstring_at(a, 5), "Hello")
+ self.assertEqual(wstring_at(a, 16), "Hello, World\0\0\0\0")
+ self.assertEqual(wstring_at(a, 0), "")
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/ctypes/test/test_numbers.py b/Lib/ctypes/test/test_numbers.py
index 2267693..d623465 100644
--- a/Lib/ctypes/test/test_numbers.py
+++ b/Lib/ctypes/test/test_numbers.py
@@ -82,12 +82,13 @@ class NumberTestCase(unittest.TestCase):
self.assertRaises(TypeError, t, "")
self.assertRaises(TypeError, t, None)
-## def test_valid_ranges(self):
-## # invalid values of the correct type
-## # raise ValueError (not OverflowError)
-## for t, (l, h) in zip(unsigned_types, unsigned_ranges):
-## self.assertRaises(ValueError, t, l-1)
-## self.assertRaises(ValueError, t, h+1)
+ @unittest.skip('test disabled')
+ def test_valid_ranges(self):
+ # invalid values of the correct type
+ # raise ValueError (not OverflowError)
+ for t, (l, h) in zip(unsigned_types, unsigned_ranges):
+ self.assertRaises(ValueError, t, l-1)
+ self.assertRaises(ValueError, t, h+1)
def test_from_param(self):
# the from_param class method attribute always
@@ -181,10 +182,10 @@ class NumberTestCase(unittest.TestCase):
a = array(t._type_, [3.14])
v = t.from_address(a.buffer_info()[0])
self.assertEqual(v.value, a[0])
- self.assertTrue(type(v) is t)
+ self.assertIs(type(v), t)
a[0] = 2.3456e17
self.assertEqual(v.value, a[0])
- self.assertTrue(type(v) is t)
+ self.assertIs(type(v), t)
def test_char_from_address(self):
from ctypes import c_char
@@ -193,31 +194,43 @@ class NumberTestCase(unittest.TestCase):
a = array('c', 'x')
v = c_char.from_address(a.buffer_info()[0])
self.assertEqual(v.value, a[0])
- self.assertTrue(type(v) is c_char)
+ self.assertIs(type(v), c_char)
a[0] = '?'
self.assertEqual(v.value, a[0])
# array does not support c_bool / 't'
- # def test_bool_from_address(self):
- # from ctypes import c_bool
- # from array import array
- # a = array(c_bool._type_, [True])
- # v = t.from_address(a.buffer_info()[0])
- # self.assertEqual(v.value, a[0])
- # self.assertEqual(type(v) is t)
- # a[0] = False
- # self.assertEqual(v.value, a[0])
- # self.assertEqual(type(v) is t)
+ @unittest.skip('test disabled')
+ def test_bool_from_address(self):
+ from ctypes import c_bool
+ from array import array
+ a = array(c_bool._type_, [True])
+ v = t.from_address(a.buffer_info()[0])
+ self.assertEqual(v.value, a[0])
+ self.assertEqual(type(v) is t)
+ a[0] = False
+ self.assertEqual(v.value, a[0])
+ self.assertEqual(type(v) is t)
def test_init(self):
# c_int() can be initialized from Python's int, and c_int.
- # Not from c_long or so, which seems strange, abd should
+ # Not from c_long or so, which seems strange, and should
# probably be changed:
self.assertRaises(TypeError, c_int, c_long(42))
-## def test_perf(self):
-## check_perf()
+ def test_float_overflow(self):
+ import sys
+ big_int = int(sys.float_info.max) * 2
+ for t in float_types + [c_longdouble]:
+ self.assertRaises(OverflowError, t, big_int)
+ if (hasattr(t, "__ctype_be__")):
+ self.assertRaises(OverflowError, t.__ctype_be__, big_int)
+ if (hasattr(t, "__ctype_le__")):
+ self.assertRaises(OverflowError, t.__ctype_le__, big_int)
+
+ @unittest.skip('test disabled')
+ def test_perf(self):
+ check_perf()
from ctypes import _SimpleCData
class c_int_S(_SimpleCData):
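
The new test_float_overflow above checks that constructing a ctypes floating-point instance from an integer too large for a C double raises OverflowError. A minimal standalone sketch of that behaviour (assuming a 2.7.8-level ctypes; c_longdouble may simply alias c_double on some platforms):

import sys
from ctypes import c_float, c_double, c_longdouble

big_int = int(sys.float_info.max) * 2
for t in (c_float, c_double, c_longdouble):
    try:
        t(big_int)
    except OverflowError:
        pass  # expected: the value does not fit in a C floating-point type
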
diff --git a/Lib/ctypes/test/test_objects.py b/Lib/ctypes/test/test_objects.py
index 4d921d2..a7c5247 100644
--- a/Lib/ctypes/test/test_objects.py
+++ b/Lib/ctypes/test/test_objects.py
@@ -59,12 +59,9 @@ import unittest, doctest, sys
import ctypes.test.test_objects
class TestCase(unittest.TestCase):
- if sys.hexversion > 0x02040000:
- # Python 2.3 has no ELLIPSIS flag, so we don't test with this
- # version:
- def test(self):
- doctest.testmod(ctypes.test.test_objects)
+ def test(self):
+ failures, tests = doctest.testmod(ctypes.test.test_objects)
+ self.assertFalse(failures, 'doctests failed, see output above')
if __name__ == '__main__':
- if sys.hexversion > 0x02040000:
- doctest.testmod(ctypes.test.test_objects)
+ doctest.testmod(ctypes.test.test_objects)
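
doctest.testmod() returns a (failed, attempted) pair, which is what the rewritten test above unpacks and asserts on. A self-contained sketch of the same pattern, with a small doctest of its own for illustration:

import doctest
import unittest

def double(x):
    """Return twice x.

    >>> double(3)
    6
    """
    return 2 * x

class DocTestWrapper(unittest.TestCase):
    def test_docs(self):
        # testmod() with no argument runs the doctests of __main__
        failures, tests = doctest.testmod()
        self.assertFalse(failures, 'doctests failed, see output above')

if __name__ == '__main__':
    unittest.main()
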
diff --git a/Lib/ctypes/test/test_parameters.py b/Lib/ctypes/test/test_parameters.py
index 82704d5..192d9ed 100644
--- a/Lib/ctypes/test/test_parameters.py
+++ b/Lib/ctypes/test/test_parameters.py
@@ -1,4 +1,5 @@
import unittest, sys
+from ctypes.test import need_symbol
class SimpleTypesTestCase(unittest.TestCase):
@@ -36,10 +37,9 @@ class SimpleTypesTestCase(unittest.TestCase):
self.assertEqual(CVOIDP.from_param("abc"), "abcabc")
self.assertEqual(CCHARP.from_param("abc"), "abcabcabcabc")
- try:
- from ctypes import c_wchar_p
- except ImportError:
- return
+ @need_symbol('c_wchar_p')
+ def test_subclasses_c_wchar_p(self):
+ from ctypes import c_wchar_p
class CWCHARP(c_wchar_p):
def from_param(cls, value):
@@ -55,7 +55,7 @@ class SimpleTypesTestCase(unittest.TestCase):
# c_char_p.from_param on a Python String packs the string
# into a cparam object
s = "123"
- self.assertTrue(c_char_p.from_param(s)._obj is s)
+ self.assertIs(c_char_p.from_param(s)._obj, s)
# new in 0.9.1: convert (encode) unicode to ascii
self.assertEqual(c_char_p.from_param(u"123")._obj, "123")
@@ -66,15 +66,11 @@ class SimpleTypesTestCase(unittest.TestCase):
# calling c_char_p.from_param with a c_char_p instance
# returns the argument itself:
a = c_char_p("123")
- self.assertTrue(c_char_p.from_param(a) is a)
+ self.assertIs(c_char_p.from_param(a), a)
+ @need_symbol('c_wchar_p')
def test_cw_strings(self):
- from ctypes import byref
- try:
- from ctypes import c_wchar_p
- except ImportError:
-## print "(No c_wchar_p)"
- return
+ from ctypes import byref, c_wchar_p
s = u"123"
if sys.platform == "win32":
self.assertTrue(c_wchar_p.from_param(s)._obj is s)
@@ -144,9 +140,6 @@ class SimpleTypesTestCase(unittest.TestCase):
self.assertRaises(TypeError, LPINT.from_param, c_long*3)
self.assertRaises(TypeError, LPINT.from_param, c_uint*3)
-## def test_performance(self):
-## check_perf()
-
def test_noctypes_argtype(self):
import _ctypes_test
from ctypes import CDLL, c_void_p, ArgumentError
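
Several of the rewritten tests rely on the need_symbol helper imported from ctypes.test. Its exact wording lives in Lib/ctypes/test/__init__.py; a rough sketch (an assumption, not copied from this patch) of what it amounts to:

import unittest
import ctypes

def need_symbol(name):
    # Skip the decorated test or TestCase unless ctypes exposes the symbol,
    # e.g. c_wchar_p is missing on builds without wide-character support.
    return unittest.skipUnless(hasattr(ctypes, name),
                               '%r is required' % (name,))

@need_symbol('c_wchar_p')
class WideCharTests(unittest.TestCase):
    def test_roundtrip(self):
        self.assertEqual(ctypes.c_wchar_p(u"abc").value, u"abc")
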
diff --git a/Lib/ctypes/test/test_pep3118.py b/Lib/ctypes/test/test_pep3118.py
index 976473c..3e007e1 100644
--- a/Lib/ctypes/test/test_pep3118.py
+++ b/Lib/ctypes/test/test_pep3118.py
@@ -92,6 +92,10 @@ class EmptyStruct(Structure):
class aUnion(Union):
_fields_ = [("a", c_int)]
+class StructWithArrays(Structure):
+ _fields_ = [("x", c_long * 3 * 2), ("y", Point * 4)]
+
+
class Incomplete(Structure):
pass
@@ -141,10 +145,10 @@ native_types = [
## arrays and pointers
- (c_double * 4, "(4)<d", (4,), c_double),
- (c_float * 4 * 3 * 2, "(2,3,4)<f", (2,3,4), c_float),
- (POINTER(c_short) * 2, "(2)&<h", (2,), POINTER(c_short)),
- (POINTER(c_short) * 2 * 3, "(3,2)&<h", (3,2,), POINTER(c_short)),
+ (c_double * 4, "<d", (4,), c_double),
+ (c_float * 4 * 3 * 2, "<f", (2,3,4), c_float),
+ (POINTER(c_short) * 2, "&<h", (2,), POINTER(c_short)),
+ (POINTER(c_short) * 2 * 3, "&<h", (3,2,), POINTER(c_short)),
(POINTER(c_short * 2), "&(2)<h", None, POINTER(c_short)),
## structures and unions
@@ -156,6 +160,9 @@ native_types = [
(EmptyStruct, "T{}", None, EmptyStruct),
# the pep doesn't support unions
(aUnion, "B", None, aUnion),
+ # structure with sub-arrays
+ (StructWithArrays, "T{(2,3)<l:x:(4)T{<l:x:<l:y:}:y:}", None, StructWithArrays),
+ (StructWithArrays * 3, "T{(2,3)<l:x:(4)T{<l:x:<l:y:}:y:}", (3,), StructWithArrays),
## pointer to incomplete structure
(Incomplete, "B", None, Incomplete),
diff --git a/Lib/ctypes/test/test_pointers.py b/Lib/ctypes/test/test_pointers.py
index 92b8ce6..9531158 100644
--- a/Lib/ctypes/test/test_pointers.py
+++ b/Lib/ctypes/test/test_pointers.py
@@ -78,7 +78,7 @@ class PointersTestCase(unittest.TestCase):
## i = c_int(42)
## callback(byref(i))
-## self.assertTrue(i.value == 84)
+## self.assertEqual(i.value, 84)
doit(callback)
## print self.result
@@ -91,11 +91,11 @@ class PointersTestCase(unittest.TestCase):
i = ct(42)
p = pointer(i)
## print type(p.contents), ct
- self.assertTrue(type(p.contents) is ct)
+ self.assertIs(type(p.contents), ct)
# p.contents is the same as p[0]
## print p.contents
-## self.assertTrue(p.contents == 42)
-## self.assertTrue(p[0] == 42)
+## self.assertEqual(p.contents, 42)
+## self.assertEqual(p[0], 42)
self.assertRaises(TypeError, delitem, p, 0)
diff --git a/Lib/ctypes/test/test_prototypes.py b/Lib/ctypes/test/test_prototypes.py
index 09ba655..a10317b 100644
--- a/Lib/ctypes/test/test_prototypes.py
+++ b/Lib/ctypes/test/test_prototypes.py
@@ -1,4 +1,5 @@
from ctypes import *
+from ctypes.test import need_symbol
import unittest
# IMPORTANT INFO:
@@ -135,13 +136,14 @@ class CharPointersTestCase(unittest.TestCase):
func(pointer(c_int()))
func((c_int * 3)())
- try:
- func.restype = c_wchar_p
- except NameError:
- pass
- else:
- self.assertEqual(None, func(c_wchar_p(None)))
- self.assertEqual(u"123", func(c_wchar_p(u"123")))
+ @need_symbol('c_wchar_p')
+ def test_c_void_p_arg_with_c_wchar_p(self):
+ func = testdll._testfunc_p_p
+ func.restype = c_wchar_p
+ func.argtypes = c_void_p,
+
+ self.assertEqual(None, func(c_wchar_p(None)))
+ self.assertEqual(u"123", func(c_wchar_p(u"123")))
def test_instance(self):
func = testdll._testfunc_p_p
@@ -156,51 +158,47 @@ class CharPointersTestCase(unittest.TestCase):
func.argtypes = None
self.assertEqual(None, func(X()))
-try:
- c_wchar
-except NameError:
- pass
-else:
- class WCharPointersTestCase(unittest.TestCase):
-
- def setUp(self):
- func = testdll._testfunc_p_p
- func.restype = c_int
- func.argtypes = None
-
-
- def test_POINTER_c_wchar_arg(self):
- func = testdll._testfunc_p_p
- func.restype = c_wchar_p
- func.argtypes = POINTER(c_wchar),
-
- self.assertEqual(None, func(None))
- self.assertEqual(u"123", func(u"123"))
- self.assertEqual(None, func(c_wchar_p(None)))
- self.assertEqual(u"123", func(c_wchar_p(u"123")))
-
- self.assertEqual(u"123", func(c_wbuffer(u"123")))
- ca = c_wchar("a")
- self.assertEqual(u"a", func(pointer(ca))[0])
- self.assertEqual(u"a", func(byref(ca))[0])
-
- def test_c_wchar_p_arg(self):
- func = testdll._testfunc_p_p
- func.restype = c_wchar_p
- func.argtypes = c_wchar_p,
-
- c_wchar_p.from_param(u"123")
-
- self.assertEqual(None, func(None))
- self.assertEqual("123", func(u"123"))
- self.assertEqual(None, func(c_wchar_p(None)))
- self.assertEqual("123", func(c_wchar_p("123")))
-
- # XXX Currently, these raise TypeErrors, although they shouldn't:
- self.assertEqual("123", func(c_wbuffer("123")))
- ca = c_wchar("a")
- self.assertEqual("a", func(pointer(ca))[0])
- self.assertEqual("a", func(byref(ca))[0])
+@need_symbol('c_wchar')
+class WCharPointersTestCase(unittest.TestCase):
+
+ def setUp(self):
+ func = testdll._testfunc_p_p
+ func.restype = c_int
+ func.argtypes = None
+
+
+ def test_POINTER_c_wchar_arg(self):
+ func = testdll._testfunc_p_p
+ func.restype = c_wchar_p
+ func.argtypes = POINTER(c_wchar),
+
+ self.assertEqual(None, func(None))
+ self.assertEqual(u"123", func(u"123"))
+ self.assertEqual(None, func(c_wchar_p(None)))
+ self.assertEqual(u"123", func(c_wchar_p(u"123")))
+
+ self.assertEqual(u"123", func(c_wbuffer(u"123")))
+ ca = c_wchar("a")
+ self.assertEqual(u"a", func(pointer(ca))[0])
+ self.assertEqual(u"a", func(byref(ca))[0])
+
+ def test_c_wchar_p_arg(self):
+ func = testdll._testfunc_p_p
+ func.restype = c_wchar_p
+ func.argtypes = c_wchar_p,
+
+ c_wchar_p.from_param(u"123")
+
+ self.assertEqual(None, func(None))
+ self.assertEqual("123", func(u"123"))
+ self.assertEqual(None, func(c_wchar_p(None)))
+ self.assertEqual("123", func(c_wchar_p("123")))
+
+ # XXX Currently, these raise TypeErrors, although they shouldn't:
+ self.assertEqual("123", func(c_wbuffer("123")))
+ ca = c_wchar("a")
+ self.assertEqual("a", func(pointer(ca))[0])
+ self.assertEqual("a", func(byref(ca))[0])
class ArrayTest(unittest.TestCase):
def test(self):
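
The converted tests above all follow the same foreign-function declaration pattern: set restype and argtypes on the function object before calling it, so ctypes performs the right conversions. A generic sketch of that pattern against the C library (assumes a Unix-like system where find_library("c") resolves):

from ctypes import CDLL, c_char_p, c_size_t
from ctypes.util import find_library

libc = CDLL(find_library("c"))
libc.strlen.restype = c_size_t
libc.strlen.argtypes = [c_char_p]
assert libc.strlen("hello") == 5
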
diff --git a/Lib/ctypes/test/test_python_api.py b/Lib/ctypes/test/test_python_api.py
index 698170f..62728dd 100644
--- a/Lib/ctypes/test/test_python_api.py
+++ b/Lib/ctypes/test/test_python_api.py
@@ -1,6 +1,6 @@
from ctypes import *
import unittest, sys
-from ctypes.test import is_resource_enabled
+from ctypes.test import requires
################################################################
# This section should be moved into ctypes\__init__.py, when it's ready.
@@ -37,31 +37,31 @@ class PythonAPITestCase(unittest.TestCase):
del pyob
self.assertEqual(grc(s), refcnt)
- if is_resource_enabled("refcount"):
- # This test is unreliable, because it is possible that code in
- # unittest changes the refcount of the '42' integer. So, it
- # is disabled by default.
- def test_PyInt_Long(self):
- ref42 = grc(42)
- pythonapi.PyInt_FromLong.restype = py_object
- self.assertEqual(pythonapi.PyInt_FromLong(42), 42)
+ # This test is unreliable, because it is possible that code in
+ # unittest changes the refcount of the '42' integer. So, it
+ # is disabled by default.
+ @requires("refcount")
+ def test_PyInt_Long(self):
+ ref42 = grc(42)
+ pythonapi.PyInt_FromLong.restype = py_object
+ self.assertEqual(pythonapi.PyInt_FromLong(42), 42)
- self.assertEqual(grc(42), ref42)
+ self.assertEqual(grc(42), ref42)
- pythonapi.PyInt_AsLong.argtypes = (py_object,)
- pythonapi.PyInt_AsLong.restype = c_long
+ pythonapi.PyInt_AsLong.argtypes = (py_object,)
+ pythonapi.PyInt_AsLong.restype = c_long
- res = pythonapi.PyInt_AsLong(42)
- self.assertEqual(grc(res), ref42 + 1)
- del res
- self.assertEqual(grc(42), ref42)
+ res = pythonapi.PyInt_AsLong(42)
+ self.assertEqual(grc(res), ref42 + 1)
+ del res
+ self.assertEqual(grc(42), ref42)
def test_PyObj_FromPtr(self):
s = "abc def ghi jkl"
ref = grc(s)
# id(python-object) is the address
pyobj = PyObj_FromPtr(id(s))
- self.assertTrue(s is pyobj)
+ self.assertIs(s, pyobj)
self.assertEqual(grc(s), ref + 1)
del pyobj
diff --git a/Lib/ctypes/test/test_random_things.py b/Lib/ctypes/test/test_random_things.py
index 1c217c3..0caffe3 100644
--- a/Lib/ctypes/test/test_random_things.py
+++ b/Lib/ctypes/test/test_random_things.py
@@ -5,23 +5,22 @@ def callback_func(arg):
42 // arg
raise ValueError(arg)
-if sys.platform == "win32":
+@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
+class call_function_TestCase(unittest.TestCase):
+ # _ctypes.call_function is deprecated and private, but used by
+ # Gary Bishop's readline module. If we have it, we must test it as well.
- class call_function_TestCase(unittest.TestCase):
- # _ctypes.call_function is deprecated and private, but used by
- # Gary Bishp's readline module. If we have it, we must test it as well.
+ def test(self):
+ from _ctypes import call_function
+ windll.kernel32.LoadLibraryA.restype = c_void_p
+ windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p
+ windll.kernel32.GetProcAddress.restype = c_void_p
- def test(self):
- from _ctypes import call_function
- windll.kernel32.LoadLibraryA.restype = c_void_p
- windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p
- windll.kernel32.GetProcAddress.restype = c_void_p
+ hdll = windll.kernel32.LoadLibraryA("kernel32")
+ funcaddr = windll.kernel32.GetProcAddress(hdll, "GetModuleHandleA")
- hdll = windll.kernel32.LoadLibraryA("kernel32")
- funcaddr = windll.kernel32.GetProcAddress(hdll, "GetModuleHandleA")
-
- self.assertEqual(call_function(funcaddr, (None,)),
- windll.kernel32.GetModuleHandleA(None))
+ self.assertEqual(call_function(funcaddr, (None,)),
+ windll.kernel32.GetModuleHandleA(None))
class CallbackTracbackTestCase(unittest.TestCase):
# When an exception is raised in a ctypes callback function, the C
diff --git a/Lib/ctypes/test/test_refcounts.py b/Lib/ctypes/test/test_refcounts.py
index 35a81aa..2031346 100644
--- a/Lib/ctypes/test/test_refcounts.py
+++ b/Lib/ctypes/test/test_refcounts.py
@@ -24,7 +24,7 @@ class RefcountTestCase(unittest.TestCase):
self.assertEqual(grc(callback), 2)
cb = MyCallback(callback)
- self.assertTrue(grc(callback) > 2)
+ self.assertGreater(grc(callback), 2)
result = f(-10, cb)
self.assertEqual(result, -18)
cb = None
@@ -41,29 +41,29 @@ class RefcountTestCase(unittest.TestCase):
# this is the standard refcount for func
self.assertEqual(grc(func), 2)
- # the CFuncPtr instance holds atr least one refcount on func:
+ # the CFuncPtr instance holds at least one refcount on func:
f = OtherCallback(func)
- self.assertTrue(grc(func) > 2)
+ self.assertGreater(grc(func), 2)
# and may release it again
del f
- self.assertTrue(grc(func) >= 2)
+ self.assertGreaterEqual(grc(func), 2)
# but now it must be gone
gc.collect()
- self.assertTrue(grc(func) == 2)
+ self.assertEqual(grc(func), 2)
class X(ctypes.Structure):
_fields_ = [("a", OtherCallback)]
x = X()
x.a = OtherCallback(func)
- # the CFuncPtr instance holds atr least one refcount on func:
- self.assertTrue(grc(func) > 2)
+ # the CFuncPtr instance holds at least one refcount on func:
+ self.assertGreater(grc(func), 2)
# and may release it again
del x
- self.assertTrue(grc(func) >= 2)
+ self.assertGreaterEqual(grc(func), 2)
# and now it must be gone again
gc.collect()
@@ -71,8 +71,8 @@ class RefcountTestCase(unittest.TestCase):
f = OtherCallback(func)
- # the CFuncPtr instance holds atr least one refcount on func:
- self.assertTrue(grc(func) > 2)
+ # the CFuncPtr instance holds at least one refcount on func:
+ self.assertGreater(grc(func), 2)
# create a cycle
f.cycle = f
diff --git a/Lib/ctypes/test/test_returnfuncptrs.py b/Lib/ctypes/test/test_returnfuncptrs.py
index f766189..0827f0a 100644
--- a/Lib/ctypes/test/test_returnfuncptrs.py
+++ b/Lib/ctypes/test/test_returnfuncptrs.py
@@ -1,5 +1,6 @@
import unittest
from ctypes import *
+import os
import _ctypes_test
@@ -31,5 +32,34 @@ class ReturnFuncPtrTestCase(unittest.TestCase):
self.assertRaises(ArgumentError, strchr, "abcdef", 3)
self.assertRaises(TypeError, strchr, "abcdef")
+ def test_from_dll(self):
+ dll = CDLL(_ctypes_test.__file__)
+ # _CFuncPtr instances are now callable with a tuple argument
+ # which denotes a function name and a dll:
+ strchr = CFUNCTYPE(c_char_p, c_char_p, c_char)(("my_strchr", dll))
+ self.assertTrue(strchr(b"abcdef", b"b"), "bcdef")
+ self.assertEqual(strchr(b"abcdef", b"x"), None)
+ self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0)
+ self.assertRaises(TypeError, strchr, b"abcdef")
+
+ # Issue 6083: Reference counting bug
+ def test_from_dll_refcount(self):
+ class BadSequence(tuple):
+ def __getitem__(self, key):
+ if key == 0:
+ return "my_strchr"
+ if key == 1:
+ return CDLL(_ctypes_test.__file__)
+ raise IndexError
+
+ # _CFuncPtr instances are now callable with a tuple argument
+ # which denotes a function name and a dll:
+ strchr = CFUNCTYPE(c_char_p, c_char_p, c_char)(
+ BadSequence(("my_strchr", CDLL(_ctypes_test.__file__))))
+ self.assertTrue(strchr(b"abcdef", b"b"), "bcdef")
+ self.assertEqual(strchr(b"abcdef", b"x"), None)
+ self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0)
+ self.assertRaises(TypeError, strchr, b"abcdef")
+
if __name__ == "__main__":
unittest.main()
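
test_from_dll and test_from_dll_refcount above exercise calling a CFUNCTYPE prototype with a (name, dll) tuple, which looks the symbol up in the library and returns a typed function pointer. A sketch of the same mechanism against the real C library (assumes a Unix-like libc that exports strchr):

from ctypes import CDLL, CFUNCTYPE, c_char_p, c_int
from ctypes.util import find_library

libc = CDLL(find_library("c"))
# prototype(name, dll) resolves the symbol and applies the declared types
strchr = CFUNCTYPE(c_char_p, c_char_p, c_int)(("strchr", libc))
assert strchr("abcdef", ord("d")) == "def"
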
diff --git a/Lib/ctypes/test/test_slicing.py b/Lib/ctypes/test/test_slicing.py
index a5632ec..a9ee3a8 100644
--- a/Lib/ctypes/test/test_slicing.py
+++ b/Lib/ctypes/test/test_slicing.py
@@ -1,5 +1,6 @@
import unittest
from ctypes import *
+from ctypes.test import need_symbol
import _ctypes_test
@@ -131,46 +132,42 @@ class SlicesTestCase(unittest.TestCase):
self.assertEqual(p[2:5:-3], s[2:5:-3])
- try:
- c_wchar
- except NameError:
- pass
- else:
- def test_wchar_ptr(self):
- s = u"abcdefghijklmnopqrstuvwxyz\0"
-
- dll = CDLL(_ctypes_test.__file__)
- dll.my_wcsdup.restype = POINTER(c_wchar)
- dll.my_wcsdup.argtypes = POINTER(c_wchar),
- dll.my_free.restype = None
- res = dll.my_wcsdup(s)
- self.assertEqual(res[:len(s)], s)
- self.assertEqual(res[:len(s):], s)
- self.assertEqual(res[len(s)-1:-1:-1], s[::-1])
- self.assertEqual(res[len(s)-1:5:-7], s[:5:-7])
-
- import operator
- self.assertRaises(TypeError, operator.setslice,
- res, 0, 5, u"abcde")
- self.assertRaises(TypeError, operator.setitem,
- res, slice(0, 5), u"abcde")
- dll.my_free(res)
-
- if sizeof(c_wchar) == sizeof(c_short):
- dll.my_wcsdup.restype = POINTER(c_short)
- elif sizeof(c_wchar) == sizeof(c_int):
- dll.my_wcsdup.restype = POINTER(c_int)
- elif sizeof(c_wchar) == sizeof(c_long):
- dll.my_wcsdup.restype = POINTER(c_long)
- else:
- return
- res = dll.my_wcsdup(s)
- tmpl = range(ord("a"), ord("z")+1)
- self.assertEqual(res[:len(s)-1], tmpl)
- self.assertEqual(res[:len(s)-1:], tmpl)
- self.assertEqual(res[len(s)-2:-1:-1], tmpl[::-1])
- self.assertEqual(res[len(s)-2:5:-7], tmpl[:5:-7])
- dll.my_free(res)
+ @need_symbol('c_wchar')
+ def test_wchar_ptr(self):
+ s = u"abcdefghijklmnopqrstuvwxyz\0"
+
+ dll = CDLL(_ctypes_test.__file__)
+ dll.my_wcsdup.restype = POINTER(c_wchar)
+ dll.my_wcsdup.argtypes = POINTER(c_wchar),
+ dll.my_free.restype = None
+ res = dll.my_wcsdup(s)
+ self.assertEqual(res[:len(s)], s)
+ self.assertEqual(res[:len(s):], s)
+ self.assertEqual(res[len(s)-1:-1:-1], s[::-1])
+ self.assertEqual(res[len(s)-1:5:-7], s[:5:-7])
+
+ import operator
+ self.assertRaises(TypeError, operator.setslice,
+ res, 0, 5, u"abcde")
+ self.assertRaises(TypeError, operator.setitem,
+ res, slice(0, 5), u"abcde")
+ dll.my_free(res)
+
+ if sizeof(c_wchar) == sizeof(c_short):
+ dll.my_wcsdup.restype = POINTER(c_short)
+ elif sizeof(c_wchar) == sizeof(c_int):
+ dll.my_wcsdup.restype = POINTER(c_int)
+ elif sizeof(c_wchar) == sizeof(c_long):
+ dll.my_wcsdup.restype = POINTER(c_long)
+ else:
+ self.skipTest('Pointers to c_wchar are not supported')
+ res = dll.my_wcsdup(s)
+ tmpl = range(ord("a"), ord("z")+1)
+ self.assertEqual(res[:len(s)-1], tmpl)
+ self.assertEqual(res[:len(s)-1:], tmpl)
+ self.assertEqual(res[len(s)-2:-1:-1], tmpl[::-1])
+ self.assertEqual(res[len(s)-2:5:-7], tmpl[:5:-7])
+ dll.my_free(res)
################################################################
diff --git a/Lib/ctypes/test/test_strings.py b/Lib/ctypes/test/test_strings.py
index 8945d0c..4b58e7c 100644
--- a/Lib/ctypes/test/test_strings.py
+++ b/Lib/ctypes/test/test_strings.py
@@ -1,5 +1,6 @@
import unittest
from ctypes import *
+from ctypes.test import need_symbol
from test import test_support
class StringArrayTestCase(unittest.TestCase):
@@ -60,29 +61,26 @@ class StringArrayTestCase(unittest.TestCase):
## print BUF.from_param(c_char_p("python"))
## print BUF.from_param(BUF(*"pyth"))
-try:
- c_wchar
-except NameError:
- pass
-else:
- class WStringArrayTestCase(unittest.TestCase):
- def test(self):
- BUF = c_wchar * 4
+@need_symbol('c_wchar')
+class WStringArrayTestCase(unittest.TestCase):
+ def test(self):
+ BUF = c_wchar * 4
- buf = BUF(u"a", u"b", u"c")
- self.assertEqual(buf.value, u"abc")
+ buf = BUF(u"a", u"b", u"c")
+ self.assertEqual(buf.value, u"abc")
- buf.value = u"ABCD"
- self.assertEqual(buf.value, u"ABCD")
+ buf.value = u"ABCD"
+ self.assertEqual(buf.value, u"ABCD")
- buf.value = u"x"
- self.assertEqual(buf.value, u"x")
+ buf.value = u"x"
+ self.assertEqual(buf.value, u"x")
- buf[1] = u"Z"
- self.assertEqual(buf.value, u"xZCD")
+ buf[1] = u"Z"
+ self.assertEqual(buf.value, u"xZCD")
class StringTestCase(unittest.TestCase):
- def XX_test_basic_strings(self):
+ @unittest.skip('test disabled')
+ def test_basic_strings(self):
cs = c_string("abcdef")
# Cannot call len on a c_string any longer
@@ -108,33 +106,36 @@ class StringTestCase(unittest.TestCase):
self.assertRaises(TypeError, c_string, u"123")
- def XX_test_sized_strings(self):
+ @unittest.skip('test disabled')
+ def test_sized_strings(self):
# New in releases later than 0.4.0:
self.assertRaises(TypeError, c_string, None)
# New in releases later than 0.4.0:
# c_string(number) returns an empty string of size number
- self.assertTrue(len(c_string(32).raw) == 32)
+ self.assertEqual(len(c_string(32).raw), 32)
self.assertRaises(ValueError, c_string, -1)
self.assertRaises(ValueError, c_string, 0)
# These tests fail, because it is no longer initialized
-## self.assertTrue(c_string(2).value == "")
-## self.assertTrue(c_string(2).raw == "\000\000")
- self.assertTrue(c_string(2).raw[-1] == "\000")
- self.assertTrue(len(c_string(2).raw) == 2)
-
- def XX_test_initialized_strings(self):
-
- self.assertTrue(c_string("ab", 4).raw[:2] == "ab")
- self.assertTrue(c_string("ab", 4).raw[:2:] == "ab")
- self.assertTrue(c_string("ab", 4).raw[:2:-1] == "ba")
- self.assertTrue(c_string("ab", 4).raw[:2:2] == "a")
- self.assertTrue(c_string("ab", 4).raw[-1] == "\000")
- self.assertTrue(c_string("ab", 2).raw == "a\000")
-
- def XX_test_toolong(self):
+## self.assertEqual(c_string(2).value, "")
+## self.assertEqual(c_string(2).raw, "\000\000")
+ self.assertEqual(c_string(2).raw[-1], "\000")
+ self.assertEqual(len(c_string(2).raw), 2)
+
+ @unittest.skip('test disabled')
+ def test_initialized_strings(self):
+
+ self.assertEqual(c_string("ab", 4).raw[:2], "ab")
+ self.assertEqual(c_string("ab", 4).raw[:2:], "ab")
+ self.assertEqual(c_string("ab", 4).raw[:2:-1], "ba")
+ self.assertEqual(c_string("ab", 4).raw[:2:2], "a")
+ self.assertEqual(c_string("ab", 4).raw[-1], "\000")
+ self.assertEqual(c_string("ab", 2).raw, "a\000")
+
+ @unittest.skip('test disabled')
+ def test_toolong(self):
cs = c_string("abcdef")
# Much too long string:
self.assertRaises(ValueError, setattr, cs, "value", "123456789012345")
@@ -142,54 +143,53 @@ class StringTestCase(unittest.TestCase):
# One char too long values:
self.assertRaises(ValueError, setattr, cs, "value", "1234567")
-## def test_perf(self):
-## check_perf()
+ @unittest.skip('test disabled')
+ def test_perf(self):
+ check_perf()
-try:
- c_wchar
-except NameError:
- pass
-else:
- class WStringTestCase(unittest.TestCase):
- def test_wchar(self):
- c_wchar(u"x")
- repr(byref(c_wchar(u"x")))
- c_wchar("x")
+@need_symbol('c_wchar')
+class WStringTestCase(unittest.TestCase):
+ def test_wchar(self):
+ c_wchar(u"x")
+ repr(byref(c_wchar(u"x")))
+ c_wchar("x")
- def X_test_basic_wstrings(self):
- cs = c_wstring(u"abcdef")
+ @unittest.skip('test disabled')
+ def test_basic_wstrings(self):
+ cs = c_wstring(u"abcdef")
- # XXX This behaviour is about to change:
- # len returns the size of the internal buffer in bytes.
- # This includes the terminating NUL character.
- self.assertTrue(sizeof(cs) == 14)
+ # XXX This behaviour is about to change:
+ # len returns the size of the internal buffer in bytes.
+ # This includes the terminating NUL character.
+ self.assertEqual(sizeof(cs), 14)
- # The value property is the string up to the first terminating NUL.
- self.assertTrue(cs.value == u"abcdef")
- self.assertTrue(c_wstring(u"abc\000def").value == u"abc")
+ # The value property is the string up to the first terminating NUL.
+ self.assertEqual(cs.value, u"abcdef")
+ self.assertEqual(c_wstring(u"abc\000def").value, u"abc")
- self.assertTrue(c_wstring(u"abc\000def").value == u"abc")
+ self.assertEqual(c_wstring(u"abc\000def").value, u"abc")
- # The raw property is the total buffer contents:
- self.assertTrue(cs.raw == u"abcdef\000")
- self.assertTrue(c_wstring(u"abc\000def").raw == u"abc\000def\000")
+ # The raw property is the total buffer contents:
+ self.assertEqual(cs.raw, u"abcdef\000")
+ self.assertEqual(c_wstring(u"abc\000def").raw, u"abc\000def\000")
- # We can change the value:
- cs.value = u"ab"
- self.assertTrue(cs.value == u"ab")
- self.assertTrue(cs.raw == u"ab\000\000\000\000\000")
+ # We can change the value:
+ cs.value = u"ab"
+ self.assertEqual(cs.value, u"ab")
+ self.assertEqual(cs.raw, u"ab\000\000\000\000\000")
- self.assertRaises(TypeError, c_wstring, "123")
- self.assertRaises(ValueError, c_wstring, 0)
+ self.assertRaises(TypeError, c_wstring, "123")
+ self.assertRaises(ValueError, c_wstring, 0)
- def X_test_toolong(self):
- cs = c_wstring(u"abcdef")
- # Much too long string:
- self.assertRaises(ValueError, setattr, cs, "value", u"123456789012345")
+ @unittest.skip('test disabled')
+ def test_toolong(self):
+ cs = c_wstring(u"abcdef")
+ # Much too long string:
+ self.assertRaises(ValueError, setattr, cs, "value", u"123456789012345")
- # One char too long values:
- self.assertRaises(ValueError, setattr, cs, "value", u"1234567")
+ # One char too long values:
+ self.assertRaises(ValueError, setattr, cs, "value", u"1234567")
def run_test(rep, msg, func, arg):
diff --git a/Lib/ctypes/test/test_structures.py b/Lib/ctypes/test/test_structures.py
index 1bde101..2fa42bd 100644
--- a/Lib/ctypes/test/test_structures.py
+++ b/Lib/ctypes/test/test_structures.py
@@ -1,6 +1,8 @@
import unittest
from ctypes import *
+from ctypes.test import need_symbol
from struct import calcsize
+import _testcapi
class SubclassesTest(unittest.TestCase):
def test_subclass(self):
@@ -82,7 +84,7 @@ class StructureTestCase(unittest.TestCase):
class Y(Structure):
_fields_ = [("x", c_char * 3),
("y", c_int)]
- self.assertEqual(alignment(Y), calcsize("i"))
+ self.assertEqual(alignment(Y), alignment(c_int))
self.assertEqual(sizeof(Y), calcsize("3si"))
class SI(Structure):
@@ -107,7 +109,7 @@ class StructureTestCase(unittest.TestCase):
def test_emtpy(self):
# I had problems with these
#
- # Although these are patological cases: Empty Structures!
+ # Although these are pathological cases: Empty Structures!
class X(Structure):
_fields_ = []
@@ -174,23 +176,23 @@ class StructureTestCase(unittest.TestCase):
self.assertEqual(sizeof(X), 10)
self.assertEqual(X.b.offset, 2)
+ import struct
+ longlong_size = struct.calcsize("q")
+ longlong_align = struct.calcsize("bq") - longlong_size
+
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 4
- self.assertEqual(sizeof(X), 12)
- self.assertEqual(X.b.offset, 4)
-
- import struct
- longlong_size = struct.calcsize("q")
- longlong_align = struct.calcsize("bq") - longlong_size
+ self.assertEqual(sizeof(X), min(4, longlong_align) + longlong_size)
+ self.assertEqual(X.b.offset, min(4, longlong_align))
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 8
- self.assertEqual(sizeof(X), longlong_align + longlong_size)
+ self.assertEqual(sizeof(X), min(8, longlong_align) + longlong_size)
self.assertEqual(X.b.offset, min(8, longlong_align))
@@ -199,6 +201,14 @@ class StructureTestCase(unittest.TestCase):
"_pack_": -1}
self.assertRaises(ValueError, type(Structure), "X", (Structure,), d)
+ # Issue 15989
+ d = {"_fields_": [("a", c_byte)],
+ "_pack_": _testcapi.INT_MAX + 1}
+ self.assertRaises(ValueError, type(Structure), "X", (Structure,), d)
+ d = {"_fields_": [("a", c_byte)],
+ "_pack_": _testcapi.UINT_MAX + 2}
+ self.assertRaises(ValueError, type(Structure), "X", (Structure,), d)
+
def test_initializers(self):
class Person(Structure):
_fields_ = [("name", c_char*6),
@@ -282,12 +292,8 @@ class StructureTestCase(unittest.TestCase):
self.assertEqual(p.phone.number, "5678")
self.assertEqual(p.age, 5)
+ @need_symbol('c_wchar')
def test_structures_with_wchar(self):
- try:
- c_wchar
- except NameError:
- return # no unicode
-
class PersonW(Structure):
_fields_ = [("name", c_wchar * 12),
("age", c_int)]
@@ -351,14 +357,14 @@ class StructureTestCase(unittest.TestCase):
except Exception, detail:
return detail.__class__, str(detail)
-
-## def test_subclass_creation(self):
-## meta = type(Structure)
-## # same as 'class X(Structure): pass'
-## # fails, since we need either a _fields_ or a _abstract_ attribute
-## cls, msg = self.get_except(meta, "X", (Structure,), {})
-## self.assertEqual((cls, msg),
-## (AttributeError, "class must define a '_fields_' attribute"))
+ @unittest.skip('test disabled')
+ def test_subclass_creation(self):
+ meta = type(Structure)
+ # same as 'class X(Structure): pass'
+ # fails, since we need either a _fields_ or an _abstract_ attribute
+ cls, msg = self.get_except(meta, "X", (Structure,), {})
+ self.assertEqual((cls, msg),
+ (AttributeError, "class must define a '_fields_' attribute"))
def test_abstract_class(self):
class X(Structure):
@@ -371,9 +377,9 @@ class StructureTestCase(unittest.TestCase):
## class X(Structure):
## _fields_ = []
- self.assertTrue("in_dll" in dir(type(Structure)))
- self.assertTrue("from_address" in dir(type(Structure)))
- self.assertTrue("in_dll" in dir(type(Structure)))
+ self.assertIn("in_dll", dir(type(Structure)))
+ self.assertIn("from_address", dir(type(Structure)))
+ self.assertIn("in_dll", dir(type(Structure)))
def test_positional_args(self):
# see also http://bugs.python.org/issue5042
@@ -443,8 +449,8 @@ class TestRecursiveStructure(unittest.TestCase):
try:
Recursive._fields_ = [("next", Recursive)]
except AttributeError, details:
- self.assertTrue("Structure or union cannot contain itself" in
- str(details))
+ self.assertIn("Structure or union cannot contain itself",
+ str(details))
else:
self.fail("Structure or union cannot contain itself")
@@ -460,8 +466,7 @@ class TestRecursiveStructure(unittest.TestCase):
try:
Second._fields_ = [("first", First)]
except AttributeError, details:
- self.assertTrue("_fields_ is final" in
- str(details))
+ self.assertIn("_fields_ is final", str(details))
else:
self.fail("AttributeError not raised")
diff --git a/Lib/ctypes/test/test_unicode.py b/Lib/ctypes/test/test_unicode.py
index 6557479..1da5a25 100644
--- a/Lib/ctypes/test/test_unicode.py
+++ b/Lib/ctypes/test/test_unicode.py
@@ -1,129 +1,138 @@
# coding: latin-1
import unittest
import ctypes
-
-try:
- ctypes.c_wchar
-except AttributeError:
- pass
-else:
- import _ctypes_test
- dll = ctypes.CDLL(_ctypes_test.__file__)
- wcslen = dll.my_wcslen
- wcslen.argtypes = [ctypes.c_wchar_p]
-
-
- class UnicodeTestCase(unittest.TestCase):
- def setUp(self):
- self.prev_conv_mode = ctypes.set_conversion_mode("ascii", "strict")
-
- def tearDown(self):
- ctypes.set_conversion_mode(*self.prev_conv_mode)
-
- def test_ascii_strict(self):
- ctypes.set_conversion_mode("ascii", "strict")
- # no conversions take place with unicode arguments
- self.assertEqual(wcslen(u"abc"), 3)
- self.assertEqual(wcslen(u"ab\u2070"), 3)
- # string args are converted
- self.assertEqual(wcslen("abc"), 3)
- self.assertRaises(ctypes.ArgumentError, wcslen, "abä")
-
- def test_ascii_replace(self):
- ctypes.set_conversion_mode("ascii", "replace")
- self.assertEqual(wcslen(u"abc"), 3)
- self.assertEqual(wcslen(u"ab\u2070"), 3)
- self.assertEqual(wcslen("abc"), 3)
- self.assertEqual(wcslen("abä"), 3)
-
- def test_ascii_ignore(self):
- ctypes.set_conversion_mode("ascii", "ignore")
- self.assertEqual(wcslen(u"abc"), 3)
- self.assertEqual(wcslen(u"ab\u2070"), 3)
- # ignore error mode skips non-ascii characters
- self.assertEqual(wcslen("abc"), 3)
- self.assertEqual(wcslen("äöüß"), 0)
-
- def test_latin1_strict(self):
- ctypes.set_conversion_mode("latin-1", "strict")
- self.assertEqual(wcslen(u"abc"), 3)
- self.assertEqual(wcslen(u"ab\u2070"), 3)
- self.assertEqual(wcslen("abc"), 3)
- self.assertEqual(wcslen("äöüß"), 4)
-
- def test_buffers(self):
- ctypes.set_conversion_mode("ascii", "strict")
- buf = ctypes.create_unicode_buffer("abc")
- self.assertEqual(len(buf), 3+1)
-
- ctypes.set_conversion_mode("ascii", "replace")
- buf = ctypes.create_unicode_buffer("abäöü")
- self.assertEqual(buf[:], u"ab\uFFFD\uFFFD\uFFFD\0")
- self.assertEqual(buf[::], u"ab\uFFFD\uFFFD\uFFFD\0")
- self.assertEqual(buf[::-1], u"\0\uFFFD\uFFFD\uFFFDba")
- self.assertEqual(buf[::2], u"a\uFFFD\uFFFD")
- self.assertEqual(buf[6:5:-1], u"")
-
- ctypes.set_conversion_mode("ascii", "ignore")
- buf = ctypes.create_unicode_buffer("abäöü")
- # is that correct? not sure. But with 'ignore', you get what you pay for..
- self.assertEqual(buf[:], u"ab\0\0\0\0")
- self.assertEqual(buf[::], u"ab\0\0\0\0")
- self.assertEqual(buf[::-1], u"\0\0\0\0ba")
- self.assertEqual(buf[::2], u"a\0\0")
- self.assertEqual(buf[6:5:-1], u"")
-
- import _ctypes_test
- func = ctypes.CDLL(_ctypes_test.__file__)._testfunc_p_p
-
- class StringTestCase(UnicodeTestCase):
- def setUp(self):
- self.prev_conv_mode = ctypes.set_conversion_mode("ascii", "strict")
- func.argtypes = [ctypes.c_char_p]
- func.restype = ctypes.c_char_p
-
- def tearDown(self):
- ctypes.set_conversion_mode(*self.prev_conv_mode)
- func.argtypes = None
- func.restype = ctypes.c_int
-
- def test_ascii_replace(self):
- ctypes.set_conversion_mode("ascii", "strict")
- self.assertEqual(func("abc"), "abc")
- self.assertEqual(func(u"abc"), "abc")
- self.assertRaises(ctypes.ArgumentError, func, u"abä")
-
- def test_ascii_ignore(self):
- ctypes.set_conversion_mode("ascii", "ignore")
- self.assertEqual(func("abc"), "abc")
- self.assertEqual(func(u"abc"), "abc")
- self.assertEqual(func(u"äöüß"), "")
-
- def test_ascii_replace(self):
- ctypes.set_conversion_mode("ascii", "replace")
- self.assertEqual(func("abc"), "abc")
- self.assertEqual(func(u"abc"), "abc")
- self.assertEqual(func(u"äöüß"), "????")
-
- def test_buffers(self):
- ctypes.set_conversion_mode("ascii", "strict")
- buf = ctypes.create_string_buffer(u"abc")
- self.assertEqual(len(buf), 3+1)
-
- ctypes.set_conversion_mode("ascii", "replace")
- buf = ctypes.create_string_buffer(u"abäöü")
- self.assertEqual(buf[:], "ab???\0")
- self.assertEqual(buf[::], "ab???\0")
- self.assertEqual(buf[::-1], "\0???ba")
- self.assertEqual(buf[::2], "a??")
- self.assertEqual(buf[6:5:-1], "")
-
- ctypes.set_conversion_mode("ascii", "ignore")
- buf = ctypes.create_string_buffer(u"abäöü")
- # is that correct? not sure. But with 'ignore', you get what you pay for..
- self.assertEqual(buf[:], "ab\0\0\0\0")
- self.assertEqual(buf[::], "ab\0\0\0\0")
- self.assertEqual(buf[::-1], "\0\0\0\0ba")
+from ctypes.test import need_symbol
+import _ctypes_test
+
+@need_symbol('c_wchar')
+class UnicodeTestCase(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ dll = ctypes.CDLL(_ctypes_test.__file__)
+ cls.wcslen = dll.my_wcslen
+ cls.wcslen.argtypes = [ctypes.c_wchar_p]
+ def setUp(self):
+ self.prev_conv_mode = ctypes.set_conversion_mode("ascii", "strict")
+
+ def tearDown(self):
+ ctypes.set_conversion_mode(*self.prev_conv_mode)
+
+ def test_ascii_strict(self):
+ wcslen = self.wcslen
+ ctypes.set_conversion_mode("ascii", "strict")
+ # no conversions take place with unicode arguments
+ self.assertEqual(wcslen(u"abc"), 3)
+ self.assertEqual(wcslen(u"ab\u2070"), 3)
+ # string args are converted
+ self.assertEqual(wcslen("abc"), 3)
+ self.assertRaises(ctypes.ArgumentError, wcslen, "abä")
+
+ def test_ascii_replace(self):
+ wcslen = self.wcslen
+ ctypes.set_conversion_mode("ascii", "replace")
+ self.assertEqual(wcslen(u"abc"), 3)
+ self.assertEqual(wcslen(u"ab\u2070"), 3)
+ self.assertEqual(wcslen("abc"), 3)
+ self.assertEqual(wcslen("abä"), 3)
+
+ def test_ascii_ignore(self):
+ wcslen = self.wcslen
+ ctypes.set_conversion_mode("ascii", "ignore")
+ self.assertEqual(wcslen(u"abc"), 3)
+ self.assertEqual(wcslen(u"ab\u2070"), 3)
+ # ignore error mode skips non-ascii characters
+ self.assertEqual(wcslen("abc"), 3)
+ self.assertEqual(wcslen("äöüß"), 0)
+
+ def test_latin1_strict(self):
+ wcslen = self.wcslen
+ ctypes.set_conversion_mode("latin-1", "strict")
+ self.assertEqual(wcslen(u"abc"), 3)
+ self.assertEqual(wcslen(u"ab\u2070"), 3)
+ self.assertEqual(wcslen("abc"), 3)
+ self.assertEqual(wcslen("äöüß"), 4)
+
+ def test_buffers(self):
+ ctypes.set_conversion_mode("ascii", "strict")
+ buf = ctypes.create_unicode_buffer("abc")
+ self.assertEqual(len(buf), 3+1)
+
+ ctypes.set_conversion_mode("ascii", "replace")
+ buf = ctypes.create_unicode_buffer("abäöü")
+ self.assertEqual(buf[:], u"ab\uFFFD\uFFFD\uFFFD\0")
+ self.assertEqual(buf[::], u"ab\uFFFD\uFFFD\uFFFD\0")
+ self.assertEqual(buf[::-1], u"\0\uFFFD\uFFFD\uFFFDba")
+ self.assertEqual(buf[::2], u"a\uFFFD\uFFFD")
+ self.assertEqual(buf[6:5:-1], u"")
+
+ ctypes.set_conversion_mode("ascii", "ignore")
+ buf = ctypes.create_unicode_buffer("abäöü")
+ # is that correct? not sure. But with 'ignore', you get what you pay for..
+ self.assertEqual(buf[:], u"ab\0\0\0\0")
+ self.assertEqual(buf[::], u"ab\0\0\0\0")
+ self.assertEqual(buf[::-1], u"\0\0\0\0ba")
+ self.assertEqual(buf[::2], u"a\0\0")
+ self.assertEqual(buf[6:5:-1], u"")
+
+@need_symbol('c_wchar')
+class StringTestCase(UnicodeTestCase):
+ @classmethod
+ def setUpClass(cls):
+ super(StringTestCase, cls).setUpClass()
+ cls.func = ctypes.CDLL(_ctypes_test.__file__)._testfunc_p_p
+
+ def setUp(self):
+ func = self.func
+ self.prev_conv_mode = ctypes.set_conversion_mode("ascii", "strict")
+ func.argtypes = [ctypes.c_char_p]
+ func.restype = ctypes.c_char_p
+
+ def tearDown(self):
+ func = self.func
+ ctypes.set_conversion_mode(*self.prev_conv_mode)
+ func.argtypes = None
+ func.restype = ctypes.c_int
+
+ def test_ascii_replace(self):
+ func = self.func
+ ctypes.set_conversion_mode("ascii", "strict")
+ self.assertEqual(func("abc"), "abc")
+ self.assertEqual(func(u"abc"), "abc")
+ self.assertRaises(ctypes.ArgumentError, func, u"abä")
+
+ def test_ascii_ignore(self):
+ func = self.func
+ ctypes.set_conversion_mode("ascii", "ignore")
+ self.assertEqual(func("abc"), "abc")
+ self.assertEqual(func(u"abc"), "abc")
+ self.assertEqual(func(u"äöüß"), "")
+
+ def test_ascii_replace(self):
+ func = self.func
+ ctypes.set_conversion_mode("ascii", "replace")
+ self.assertEqual(func("abc"), "abc")
+ self.assertEqual(func(u"abc"), "abc")
+ self.assertEqual(func(u"äöüß"), "????")
+
+ def test_buffers(self):
+ ctypes.set_conversion_mode("ascii", "strict")
+ buf = ctypes.create_string_buffer(u"abc")
+ self.assertEqual(len(buf), 3+1)
+
+ ctypes.set_conversion_mode("ascii", "replace")
+ buf = ctypes.create_string_buffer(u"abäöü")
+ self.assertEqual(buf[:], "ab???\0")
+ self.assertEqual(buf[::], "ab???\0")
+ self.assertEqual(buf[::-1], "\0???ba")
+ self.assertEqual(buf[::2], "a??")
+ self.assertEqual(buf[6:5:-1], "")
+
+ ctypes.set_conversion_mode("ascii", "ignore")
+ buf = ctypes.create_string_buffer(u"abäöü")
+ # is that correct? not sure. But with 'ignore', you get what you pay for..
+ self.assertEqual(buf[:], "ab\0\0\0\0")
+ self.assertEqual(buf[::], "ab\0\0\0\0")
+ self.assertEqual(buf[::-1], "\0\0\0\0ba")
if __name__ == '__main__':
unittest.main()
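
The setUp/tearDown pairs above save and restore the ctypes conversion mode around each test. The same save/restore pattern in isolation (Python 2 only; set_conversion_mode was removed in Python 3, and this assumes a ctypes built with wide-character support):

import ctypes

prev = ctypes.set_conversion_mode("ascii", "replace")  # returns the old (codec, errors) pair
try:
    buf = ctypes.create_string_buffer(u"ab\xe4")        # the non-ASCII char is replaced
    assert buf.value == "ab?"
finally:
    ctypes.set_conversion_mode(*prev)                   # always restore the previous mode
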
diff --git a/Lib/ctypes/test/test_values.py b/Lib/ctypes/test/test_values.py
index 4cbfd4b..14d69fe 100644
--- a/Lib/ctypes/test/test_values.py
+++ b/Lib/ctypes/test/test_values.py
@@ -3,6 +3,7 @@ A testcase which accesses *values* in a dll.
"""
import unittest
+import sys
from ctypes import *
import _ctypes_test
@@ -21,62 +22,63 @@ class ValuesTestCase(unittest.TestCase):
ctdll = CDLL(_ctypes_test.__file__)
self.assertRaises(ValueError, c_int.in_dll, ctdll, "Undefined_Symbol")
- class Win_ValuesTestCase(unittest.TestCase):
- """This test only works when python itself is a dll/shared library"""
+@unittest.skipUnless(sys.platform == 'win32', 'Windows-specific test')
+class Win_ValuesTestCase(unittest.TestCase):
+ """This test only works when python itself is a dll/shared library"""
- def test_optimizeflag(self):
- # This test accesses the Py_OptimizeFlag intger, which is
- # exported by the Python dll.
+ def test_optimizeflag(self):
+ # This test accesses the Py_OptimizeFlag integer, which is
+ # exported by the Python dll.
- # It's value is set depending on the -O and -OO flags:
- # if not given, it is 0 and __debug__ is 1.
- # If -O is given, the flag is 1, for -OO it is 2.
- # docstrings are also removed in the latter case.
- opt = c_int.in_dll(pydll, "Py_OptimizeFlag").value
- if __debug__:
- self.assertEqual(opt, 0)
- elif ValuesTestCase.__doc__ is not None:
- self.assertEqual(opt, 1)
- else:
- self.assertEqual(opt, 2)
+ # Its value is set depending on the -O and -OO flags:
+ # if not given, it is 0 and __debug__ is 1.
+ # If -O is given, the flag is 1, for -OO it is 2.
+ # docstrings are also removed in the latter case.
+ opt = c_int.in_dll(pythonapi, "Py_OptimizeFlag").value
+ if __debug__:
+ self.assertEqual(opt, 0)
+ elif ValuesTestCase.__doc__ is not None:
+ self.assertEqual(opt, 1)
+ else:
+ self.assertEqual(opt, 2)
- def test_frozentable(self):
- # Python exports a PyImport_FrozenModules symbol. This is a
- # pointer to an array of struct _frozen entries. The end of the
- # array is marked by an entry containing a NULL name and zero
- # size.
+ def test_frozentable(self):
+ # Python exports a PyImport_FrozenModules symbol. This is a
+ # pointer to an array of struct _frozen entries. The end of the
+ # array is marked by an entry containing a NULL name and zero
+ # size.
- # In standard Python, this table contains a __hello__
- # module, and a __phello__ package containing a spam
- # module.
- class struct_frozen(Structure):
- _fields_ = [("name", c_char_p),
- ("code", POINTER(c_ubyte)),
- ("size", c_int)]
- FrozenTable = POINTER(struct_frozen)
+ # In standard Python, this table contains a __hello__
+ # module, and a __phello__ package containing a spam
+ # module.
+ class struct_frozen(Structure):
+ _fields_ = [("name", c_char_p),
+ ("code", POINTER(c_ubyte)),
+ ("size", c_int)]
+ FrozenTable = POINTER(struct_frozen)
- ft = FrozenTable.in_dll(pydll, "PyImport_FrozenModules")
- # ft is a pointer to the struct_frozen entries:
- items = []
- for entry in ft:
- # This is dangerous. We *can* iterate over a pointer, but
- # the loop will not terminate (maybe with an access
- # violation;-) because the pointer instance has no size.
- if entry.name is None:
- break
- items.append((entry.name, entry.size))
- import sys
- if sys.version_info[:2] >= (2, 3):
- expected = [("__hello__", 104), ("__phello__", -104), ("__phello__.spam", 104)]
- else:
- expected = [("__hello__", 100), ("__phello__", -100), ("__phello__.spam", 100)]
- self.assertEqual(items, expected)
+ ft = FrozenTable.in_dll(pythonapi, "PyImport_FrozenModules")
+ # ft is a pointer to the struct_frozen entries:
+ items = []
+ for entry in ft:
+ # This is dangerous. We *can* iterate over a pointer, but
+ # the loop will not terminate (maybe with an access
+ # violation;-) because the pointer instance has no size.
+ if entry.name is None:
+ break
+ items.append((entry.name, entry.size))
- from ctypes import _pointer_type_cache
- del _pointer_type_cache[struct_frozen]
+ expected = [("__hello__", 104),
+ ("__phello__", -104),
+ ("__phello__.spam", 104)]
+ self.assertEqual(items, expected)
- def test_undefined(self):
- self.assertRaises(ValueError, c_int.in_dll, pydll, "Undefined_Symbol")
+ from ctypes import _pointer_type_cache
+ del _pointer_type_cache[struct_frozen]
+
+ def test_undefined(self):
+ self.assertRaises(ValueError, c_int.in_dll, pythonapi,
+ "Undefined_Symbol")
if __name__ == '__main__':
unittest.main()
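
test_frozentable above walks the PyImport_FrozenModules table, a NULL-name-terminated array of struct _frozen entries exported by the interpreter. The core of that walk, lifted out of the test as a standalone sketch (the test itself is Windows-only because it resolves the symbol in the Python DLL):

from ctypes import Structure, POINTER, c_char_p, c_ubyte, c_int, pythonapi

class struct_frozen(Structure):
    _fields_ = [("name", c_char_p),
                ("code", POINTER(c_ubyte)),
                ("size", c_int)]

ft = POINTER(struct_frozen).in_dll(pythonapi, "PyImport_FrozenModules")
items = []
for entry in ft:          # the pointer has no length, so stop at the NULL name
    if entry.name is None:
        break
    items.append((entry.name, entry.size))
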
diff --git a/Lib/ctypes/test/test_win32.py b/Lib/ctypes/test/test_win32.py
index 2534a74..8093efc 100644
--- a/Lib/ctypes/test/test_win32.py
+++ b/Lib/ctypes/test/test_win32.py
@@ -1,74 +1,79 @@
# Windows specific tests
from ctypes import *
-from ctypes.test import is_resource_enabled
+from ctypes.test import requires
import unittest, sys
+from test import test_support as support
import _ctypes_test
-if sys.platform == "win32" and sizeof(c_void_p) == sizeof(c_int):
- # Only windows 32-bit has different calling conventions.
-
- class WindowsTestCase(unittest.TestCase):
- def test_callconv_1(self):
- # Testing stdcall function
-
- IsWindow = windll.user32.IsWindow
- # ValueError: Procedure probably called with not enough arguments (4 bytes missing)
- self.assertRaises(ValueError, IsWindow)
-
- # This one should succeed...
- self.assertEqual(0, IsWindow(0))
-
- # ValueError: Procedure probably called with too many arguments (8 bytes in excess)
- self.assertRaises(ValueError, IsWindow, 0, 0, 0)
-
- def test_callconv_2(self):
- # Calling stdcall function as cdecl
-
- IsWindow = cdll.user32.IsWindow
-
- # ValueError: Procedure called with not enough arguments (4 bytes missing)
- # or wrong calling convention
- self.assertRaises(ValueError, IsWindow, None)
-
-if sys.platform == "win32":
- class FunctionCallTestCase(unittest.TestCase):
-
- if is_resource_enabled("SEH"):
- def test_SEH(self):
- # Call functions with invalid arguments, and make sure
- # that access violations are trapped and raise an
- # exception.
- self.assertRaises(WindowsError, windll.kernel32.GetModuleHandleA, 32)
-
- def test_noargs(self):
- # This is a special case on win32 x64
- windll.user32.GetDesktopWindow()
-
- class TestWintypes(unittest.TestCase):
- def test_HWND(self):
- from ctypes import wintypes
- self.assertEqual(sizeof(wintypes.HWND), sizeof(c_void_p))
-
- def test_PARAM(self):
- from ctypes import wintypes
- self.assertEqual(sizeof(wintypes.WPARAM),
- sizeof(c_void_p))
- self.assertEqual(sizeof(wintypes.LPARAM),
- sizeof(c_void_p))
-
- def test_COMError(self):
- from _ctypes import COMError
- self.assertEqual(COMError.__doc__, "Raised when a COM method call failed.")
-
- ex = COMError(-1, "text", ("details",))
- self.assertEqual(ex.hresult, -1)
- self.assertEqual(ex.text, "text")
- self.assertEqual(ex.details, ("details",))
+# Only windows 32-bit has different calling conventions.
+@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
+@unittest.skipUnless(sizeof(c_void_p) == sizeof(c_int),
+ "sizeof c_void_p and c_int differ")
+class WindowsTestCase(unittest.TestCase):
+ def test_callconv_1(self):
+ # Testing stdcall function
+
+ IsWindow = windll.user32.IsWindow
+ # ValueError: Procedure probably called with not enough arguments
+ # (4 bytes missing)
+ self.assertRaises(ValueError, IsWindow)
+
+ # This one should succeed...
+ self.assertEqual(0, IsWindow(0))
+
+ # ValueError: Procedure probably called with too many arguments
+ # (8 bytes in excess)
+ self.assertRaises(ValueError, IsWindow, 0, 0, 0)
+
+ def test_callconv_2(self):
+ # Calling stdcall function as cdecl
+
+ IsWindow = cdll.user32.IsWindow
+
+ # ValueError: Procedure called with not enough arguments
+ # (4 bytes missing) or wrong calling convention
+ self.assertRaises(ValueError, IsWindow, None)
+
+@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
+class FunctionCallTestCase(unittest.TestCase):
+ @requires("SEH")
+ def test_SEH(self):
+ # Call functions with invalid arguments, and make sure
+ # that access violations are trapped and raise an
+ # exception.
+ self.assertRaises(WindowsError, windll.kernel32.GetModuleHandleA, 32)
+
+ def test_noargs(self):
+ # This is a special case on win32 x64
+ windll.user32.GetDesktopWindow()
+
+@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
+class TestWintypes(unittest.TestCase):
+ def test_HWND(self):
+ from ctypes import wintypes
+ self.assertEqual(sizeof(wintypes.HWND), sizeof(c_void_p))
+
+ def test_PARAM(self):
+ from ctypes import wintypes
+ self.assertEqual(sizeof(wintypes.WPARAM),
+ sizeof(c_void_p))
+ self.assertEqual(sizeof(wintypes.LPARAM),
+ sizeof(c_void_p))
+
+ def test_COMError(self):
+ from _ctypes import COMError
+ if support.HAVE_DOCSTRINGS:
+ self.assertEqual(COMError.__doc__,
+ "Raised when a COM method call failed.")
+
+ ex = COMError(-1, "text", ("details",))
+ self.assertEqual(ex.hresult, -1)
+ self.assertEqual(ex.text, "text")
+ self.assertEqual(ex.details, ("details",))
class Structures(unittest.TestCase):
-
def test_struct_by_value(self):
class POINT(Structure):
_fields_ = [("x", c_long),
diff --git a/Lib/ctypes/test/test_wintypes.py b/Lib/ctypes/test/test_wintypes.py
new file mode 100644
index 0000000..71442df
--- /dev/null
+++ b/Lib/ctypes/test/test_wintypes.py
@@ -0,0 +1,41 @@
+import sys
+import unittest
+
+from ctypes import *
+
+@unittest.skipUnless(sys.platform.startswith('win'), 'Windows-only test')
+class WinTypesTest(unittest.TestCase):
+ def test_variant_bool(self):
+ from ctypes import wintypes
+ # reads 16-bits from memory, anything non-zero is True
+ for true_value in (1, 32767, 32768, 65535, 65537):
+ true = POINTER(c_int16)(c_int16(true_value))
+ value = cast(true, POINTER(wintypes.VARIANT_BOOL))
+ self.assertEqual(repr(value.contents), 'VARIANT_BOOL(True)')
+
+ vb = wintypes.VARIANT_BOOL()
+ self.assertIs(vb.value, False)
+ vb.value = True
+ self.assertIs(vb.value, True)
+ vb.value = true_value
+ self.assertIs(vb.value, True)
+
+ for false_value in (0, 65536, 262144, 2**33):
+ false = POINTER(c_int16)(c_int16(false_value))
+ value = cast(false, POINTER(wintypes.VARIANT_BOOL))
+ self.assertEqual(repr(value.contents), 'VARIANT_BOOL(False)')
+
+ # allow any bool conversion on assignment to value
+ for set_value in (65536, 262144, 2**33):
+ vb = wintypes.VARIANT_BOOL()
+ vb.value = set_value
+ self.assertIs(vb.value, True)
+
+ vb = wintypes.VARIANT_BOOL()
+ vb.value = [2, 3]
+ self.assertIs(vb.value, True)
+ vb.value = []
+ self.assertIs(vb.value, False)
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/Lib/ctypes/util.py b/Lib/ctypes/util.py
index 7aee0ef..fe0ed0a 100644
--- a/Lib/ctypes/util.py
+++ b/Lib/ctypes/util.py
@@ -93,7 +93,7 @@ elif os.name == "posix":
fdout, ccout = tempfile.mkstemp()
os.close(fdout)
cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; elif type cc >/dev/null 2>&1; then CC=cc;else exit 10; fi;' \
- '$CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name
+ 'LANG=C LC_ALL=C $CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name
try:
f = os.popen(cmd)
try:
@@ -180,6 +180,35 @@ elif os.name == "posix":
res.sort(cmp= lambda x,y: cmp(_num_version(x), _num_version(y)))
return res[-1]
+ elif sys.platform == "sunos5":
+
+ def _findLib_crle(name, is64):
+ if not os.path.exists('/usr/bin/crle'):
+ return None
+
+ if is64:
+ cmd = 'env LC_ALL=C /usr/bin/crle -64 2>/dev/null'
+ else:
+ cmd = 'env LC_ALL=C /usr/bin/crle 2>/dev/null'
+
+ for line in os.popen(cmd).readlines():
+ line = line.strip()
+ if line.startswith('Default Library Path (ELF):'):
+ paths = line.split()[4]
+
+ if not paths:
+ return None
+
+ for dir in paths.split(":"):
+ libfile = os.path.join(dir, "lib%s.so" % name)
+ if os.path.exists(libfile):
+ return libfile
+
+ return None
+
+ def find_library(name, is64 = False):
+ return _get_soname(_findLib_crle(name, is64) or _findLib_gcc(name))
+
else:
def _findSoname_ldconfig(name):
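
On Solaris the new branch above asks crle(1) for the default ELF library path and probes each directory for lib<name>.so, falling back to the gcc-based probe. From a caller's point of view nothing changes; a usage sketch (what find_library returns obviously depends on the platform):

from ctypes import CDLL
from ctypes.util import find_library

path = find_library("m")   # e.g. a full path from crle on Solaris, a soname elsewhere
if path is not None:
    libm = CDLL(path)
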
diff --git a/Lib/curses/__init__.py b/Lib/curses/__init__.py
index bd7d5f6..ecf59de 100644
--- a/Lib/curses/__init__.py
+++ b/Lib/curses/__init__.py
@@ -5,7 +5,7 @@ the package, and perhaps a particular module inside it.
import curses
from curses import textpad
- curses.initwin()
+ curses.initscr()
...
"""
diff --git a/Lib/decimal.py b/Lib/decimal.py
index 3cb3b80..04bf5c2 100644
--- a/Lib/decimal.py
+++ b/Lib/decimal.py
@@ -25,7 +25,7 @@ the General Decimal Arithmetic Specification:
and IEEE standard 854-1987:
- www.cs.berkeley.edu/~ejr/projects/754/private/drafts/854-1987/dir.html
+ http://en.wikipedia.org/wiki/IEEE_854-1987
Decimal floating point has finite precision with arbitrarily large bounds.
@@ -1581,7 +1581,13 @@ class Decimal(object):
def __float__(self):
"""Float representation."""
- return float(str(self))
+ if self._isnan():
+ if self.is_snan():
+ raise ValueError("Cannot convert signaling NaN to float")
+ s = "-nan" if self._sign else "nan"
+ else:
+ s = str(self)
+ return float(s)
def __int__(self):
"""Converts self to an int, truncating if necessary."""
diff --git a/Lib/difflib.py b/Lib/difflib.py
index 3bbcb76..3880d84 100644
--- a/Lib/difflib.py
+++ b/Lib/difflib.py
@@ -1,5 +1,3 @@
-#! /usr/bin/env python
-
"""
Module difflib -- helpers for computing deltas between objects.
@@ -525,8 +523,8 @@ class SequenceMatcher:
non_adjacent.append((i1, j1, k1))
non_adjacent.append( (la, lb, 0) )
- self.matching_blocks = non_adjacent
- return map(Match._make, self.matching_blocks)
+ self.matching_blocks = map(Match._make, non_adjacent)
+ return self.matching_blocks
def get_opcodes(self):
"""Return list of 5-tuples describing how to turn a into b.
@@ -586,7 +584,7 @@ class SequenceMatcher:
def get_grouped_opcodes(self, n=3):
""" Isolate change clusters by eliminating ranges with no changes.
- Return a generator of groups with upto n lines of context.
+ Return a generator of groups with up to n lines of context.
Each group is in the same format as returned by get_opcodes().
>>> from pprint import pprint
@@ -1361,7 +1359,7 @@ def _mdiff(fromlines, tolines, context=None, linejunk=None,
linejunk -- passed on to ndiff (see ndiff documentation)
charjunk -- passed on to ndiff (see ndiff documentation)
- This function returns an interator which returns a tuple:
+ This function returns an iterator which returns a tuple:
(from line tuple, to line tuple, boolean flag)
from/to line tuple -- (line num, line text)
@@ -1963,7 +1961,7 @@ class HtmlDiff(object):
self._make_prefix()
# change tabs to spaces before it gets more difficult after we insert
- # markkup
+ # markup
fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
# create diffs iterator which generates side by side from/to data
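
The get_matching_blocks change earlier in this file caches the list of Match named tuples rather than the plain tuples, so a second call (served from the cache) returns elements with .a/.b/.size attributes just like the first. A small illustration (assumes an interpreter with this fix):

from difflib import SequenceMatcher

sm = SequenceMatcher(None, "abxcd", "abcd")
first = sm.get_matching_blocks()
second = sm.get_matching_blocks()      # returned from self.matching_blocks
assert first == second
assert second[0].a == 0 and second[0].size == 2   # named-tuple access works on the cached result
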
diff --git a/Lib/distutils/__init__.py b/Lib/distutils/__init__.py
index 036062c..7b8b924 100644
--- a/Lib/distutils/__init__.py
+++ b/Lib/distutils/__init__.py
@@ -15,5 +15,5 @@ __revision__ = "$Id$"
# Updated automatically by the Python release process.
#
#--start constants--
-__version__ = "2.7.3"
+__version__ = "2.7.8"
#--end constants--
diff --git a/Lib/distutils/ccompiler.py b/Lib/distutils/ccompiler.py
index 7076b93..4907a0a 100644
--- a/Lib/distutils/ccompiler.py
+++ b/Lib/distutils/ccompiler.py
@@ -17,6 +17,8 @@ from distutils.dir_util import mkpath
from distutils.dep_util import newer_group
from distutils.util import split_quoted, execute
from distutils import log
+# following import is for backward compatibility
+from distutils.sysconfig import customize_compiler
class CCompiler:
"""Abstract base class to define the interface that must be implemented
diff --git a/Lib/distutils/command/bdist_rpm.py b/Lib/distutils/command/bdist_rpm.py
index 0bba363..477e0ee 100644
--- a/Lib/distutils/command/bdist_rpm.py
+++ b/Lib/distutils/command/bdist_rpm.py
@@ -12,6 +12,7 @@ import string
from distutils.core import Command
from distutils.debug import DEBUG
from distutils.file_util import write_file
+from distutils.sysconfig import get_python_version
from distutils.errors import (DistutilsOptionError, DistutilsPlatformError,
DistutilsFileError, DistutilsExecError)
from distutils import log
@@ -379,16 +380,28 @@ class bdist_rpm (Command):
self.spawn(rpm_cmd)
if not self.dry_run:
+ if self.distribution.has_ext_modules():
+ pyversion = get_python_version()
+ else:
+ pyversion = 'any'
+
if not self.binary_only:
srpm = os.path.join(rpm_dir['SRPMS'], source_rpm)
assert(os.path.exists(srpm))
self.move_file(srpm, self.dist_dir)
+ filename = os.path.join(self.dist_dir, source_rpm)
+ self.distribution.dist_files.append(
+ ('bdist_rpm', pyversion, filename))
if not self.source_only:
for rpm in binary_rpms:
rpm = os.path.join(rpm_dir['RPMS'], rpm)
if os.path.exists(rpm):
self.move_file(rpm, self.dist_dir)
+ filename = os.path.join(self.dist_dir,
+ os.path.basename(rpm))
+ self.distribution.dist_files.append(
+ ('bdist_rpm', pyversion, filename))
# run()
def _dist_path(self, path):
diff --git a/Lib/distutils/command/build_ext.py b/Lib/distutils/command/build_ext.py
index 923197b..f0a7d4c 100644
--- a/Lib/distutils/command/build_ext.py
+++ b/Lib/distutils/command/build_ext.py
@@ -231,12 +231,10 @@ class build_ext (Command):
# building python standard extensions
self.library_dirs.append('.')
- # for extensions under Linux or Solaris with a shared Python library,
+ # For building extensions with a shared Python library,
# Python's library directory must be appended to library_dirs
- sysconfig.get_config_var('Py_ENABLE_SHARED')
- if ((sys.platform.startswith('linux') or sys.platform.startswith('gnu')
- or sys.platform.startswith('sunos'))
- and sysconfig.get_config_var('Py_ENABLE_SHARED')):
+ # See Issues: #1600860, #4366
+ if (sysconfig.get_config_var('Py_ENABLE_SHARED')):
if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
# building third party extensions
self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
diff --git a/Lib/distutils/command/build_py.py b/Lib/distutils/command/build_py.py
index 04c455f..c123c62 100644
--- a/Lib/distutils/command/build_py.py
+++ b/Lib/distutils/command/build_py.py
@@ -128,7 +128,8 @@ class build_py(Command):
# Each pattern has to be converted to a platform-specific path
filelist = glob(os.path.join(src_dir, convert_path(pattern)))
# Files that match more than one pattern are only added once
- files.extend([fn for fn in filelist if fn not in files])
+ files.extend([fn for fn in filelist if fn not in files
+ and os.path.isfile(fn)])
return files
def build_package_data(self):
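
The build_py fix filters glob() matches down to regular files, so a directory that happens to match a package_data pattern no longer ends up in the file list (bug 19286; see test_dir_in_package_data further down). A standalone sketch of the same filtering with a hypothetical pattern:

    import os
    from glob import glob

    pattern = os.path.join('pkg', 'doc', '*')
    data_files = [fn for fn in glob(pattern) if os.path.isfile(fn)]
    print data_files
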
diff --git a/Lib/distutils/command/check.py b/Lib/distutils/command/check.py
index 4b64e45..152bf0d 100644
--- a/Lib/distutils/command/check.py
+++ b/Lib/distutils/command/check.py
@@ -26,6 +26,9 @@ try:
def system_message(self, level, message, *children, **kwargs):
self.messages.append((level, message, children, kwargs))
+ return nodes.system_message(message, level=level,
+ type=self.levels[level],
+ *children, **kwargs)
HAS_DOCUTILS = True
except ImportError:
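
With this hunk the silent docutils reporter returns a real nodes.system_message instead of None, so strict reST checking no longer blows up inside docutils. A hedged sketch of exercising the checker (requires docutils; _check_rst_data is a private helper, used here only for illustration):

    from distutils.core import Distribution
    from distutils.command.check import check

    dist = Distribution({'name': 'foo', 'version': '0.1'})
    cmd = check(dist)
    broken_rest = 'title\n===\n\ntest'     # underline shorter than the title
    print len(cmd._check_rst_data(broken_rest)), 'reST problem(s) found'
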
diff --git a/Lib/distutils/command/install.py b/Lib/distutils/command/install.py
index f1f3bd5..b9f1c6c 100644
--- a/Lib/distutils/command/install.py
+++ b/Lib/distutils/command/install.py
@@ -265,8 +265,8 @@ class install (Command):
if self.user and (self.prefix or self.exec_prefix or self.home or
self.install_base or self.install_platbase):
- raise DistutilsOptionError("can't combine user with with prefix/"
- "exec_prefix/home or install_(plat)base")
+ raise DistutilsOptionError("can't combine user with prefix, "
+ "exec_prefix/home, or install_(plat)base")
# Next, stuff that's wrong (or dubious) only on certain platforms.
if os.name != "posix":
diff --git a/Lib/distutils/command/sdist.py b/Lib/distutils/command/sdist.py
index d30de10..821420d 100644
--- a/Lib/distutils/command/sdist.py
+++ b/Lib/distutils/command/sdist.py
@@ -183,7 +183,7 @@ class sdist(Command):
depends on the user's options.
"""
# new behavior when using a template:
- # the file list is recalculated everytime because
+ # the file list is recalculated every time because
# even if MANIFEST.in or setup.py are not changed
# the user might have added some files in the tree that
# need to be included.
diff --git a/Lib/distutils/command/upload.py b/Lib/distutils/command/upload.py
index d013335..9aa54ee 100644
--- a/Lib/distutils/command/upload.py
+++ b/Lib/distutils/command/upload.py
@@ -10,7 +10,7 @@ import urlparse
import cStringIO as StringIO
from hashlib import md5
-from distutils.errors import DistutilsOptionError
+from distutils.errors import DistutilsError, DistutilsOptionError
from distutils.core import PyPIRCCommand
from distutils.spawn import spawn
from distutils import log
@@ -177,11 +177,11 @@ class upload(PyPIRCCommand):
status = result.getcode()
reason = result.msg
if self.show_response:
- msg = '\n'.join(('-' * 75, r.read(), '-' * 75))
+ msg = '\n'.join(('-' * 75, result.read(), '-' * 75))
self.announce(msg, log.INFO)
except socket.error, e:
self.announce(str(e), log.ERROR)
- return
+ raise
except HTTPError, e:
status = e.code
reason = e.msg
@@ -190,5 +190,6 @@ class upload(PyPIRCCommand):
self.announce('Server response (%s): %s' % (status, reason),
log.INFO)
else:
- self.announce('Upload failed (%s): %s' % (status, reason),
- log.ERROR)
+ msg = 'Upload failed (%s): %s' % (status, reason)
+ self.announce(msg, log.ERROR)
+ raise DistutilsError(msg)
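
Two behavioural changes here: the raw response is read from result (the old code referenced an undefined r), and a failed or socket-errored upload now raises DistutilsError instead of returning quietly. A self-contained sketch of how a release script can now detect the failure; do_upload is a stand-in for upload.run():

    from distutils.errors import DistutilsError

    def do_upload():
        # stand-in for upload.run(); in 2.7.8 a failed POST ends up here
        raise DistutilsError('Upload failed (401): Unauthorized')

    try:
        do_upload()
    except DistutilsError, exc:
        print 'release aborted:', exc
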
diff --git a/Lib/distutils/config.py b/Lib/distutils/config.py
index afa403f..7dbcc46 100644
--- a/Lib/distutils/config.py
+++ b/Lib/distutils/config.py
@@ -21,7 +21,7 @@ password:%s
class PyPIRCCommand(Command):
"""Base command that knows how to handle the .pypirc file
"""
- DEFAULT_REPOSITORY = 'http://pypi.python.org/pypi'
+ DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi'
DEFAULT_REALM = 'pypi'
repository = None
realm = None
@@ -42,16 +42,11 @@ class PyPIRCCommand(Command):
def _store_pypirc(self, username, password):
"""Creates a default .pypirc file."""
rc = self._get_rc_file()
- f = open(rc, 'w')
+ f = os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0600), 'w')
try:
f.write(DEFAULT_PYPIRC % (username, password))
finally:
f.close()
- try:
- os.chmod(rc, 0600)
- except OSError:
- # should do something better here
- pass
def _read_pypirc(self):
"""Reads the .pypirc file."""
diff --git a/Lib/distutils/core.py b/Lib/distutils/core.py
index b89557d..fcb2060 100644
--- a/Lib/distutils/core.py
+++ b/Lib/distutils/core.py
@@ -14,7 +14,6 @@ import os
from distutils.debug import DEBUG
from distutils.errors import (DistutilsSetupError, DistutilsArgError,
DistutilsError, CCompilerError)
-from distutils.util import grok_environment_error
# Mainly import these so setup scripts can "from distutils.core import" them.
from distutils.dist import Distribution
@@ -153,13 +152,11 @@ def setup(**attrs):
except KeyboardInterrupt:
raise SystemExit, "interrupted"
except (IOError, os.error), exc:
- error = grok_environment_error(exc)
-
if DEBUG:
- sys.stderr.write(error + "\n")
+ sys.stderr.write("error: %s\n" % (exc,))
raise
else:
- raise SystemExit, error
+ raise SystemExit, "error: %s" % (exc,)
except (DistutilsError,
CCompilerError), msg:
diff --git a/Lib/distutils/cygwinccompiler.py b/Lib/distutils/cygwinccompiler.py
index a1ee815..5d11687 100644
--- a/Lib/distutils/cygwinccompiler.py
+++ b/Lib/distutils/cygwinccompiler.py
@@ -319,13 +319,18 @@ class Mingw32CCompiler (CygwinCCompiler):
else:
entry_point = ''
- self.set_executables(compiler='gcc -mno-cygwin -O -Wall',
- compiler_so='gcc -mno-cygwin -mdll -O -Wall',
- compiler_cxx='g++ -mno-cygwin -O -Wall',
- linker_exe='gcc -mno-cygwin',
- linker_so='%s -mno-cygwin %s %s'
- % (self.linker_dll, shared_option,
- entry_point))
+ if self.gcc_version < '4' or is_cygwingcc():
+ no_cygwin = ' -mno-cygwin'
+ else:
+ no_cygwin = ''
+
+ self.set_executables(compiler='gcc%s -O -Wall' % no_cygwin,
+ compiler_so='gcc%s -mdll -O -Wall' % no_cygwin,
+ compiler_cxx='g++%s -O -Wall' % no_cygwin,
+ linker_exe='gcc%s' % no_cygwin,
+ linker_so='%s%s %s %s'
+ % (self.linker_dll, no_cygwin,
+ shared_option, entry_point))
# Maybe we should also append -mthreads, but then the finished
# dlls need another dll (mingwm10.dll see Mingw32 docs)
# (-mthreads: Support thread-safe exception handling on `Mingw32')
@@ -447,3 +452,12 @@ def get_versions():
else:
dllwrap_version = None
return (gcc_version, ld_version, dllwrap_version)
+
+def is_cygwingcc():
+ '''Try to determine if the gcc that would be used is from cygwin.'''
+ out = os.popen('gcc -dumpmachine', 'r')
+ out_string = out.read()
+ out.close()
+ # out_string is the target triplet cpu-vendor-os
+ # Cygwin's gcc sets the os to 'cygwin'
+ return out_string.strip().endswith('cygwin')
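
-mno-cygwin is now passed only when the compiler is old (gcc < 4) or is Cygwin's own gcc, detected through the target triplet printed by gcc -dumpmachine. A rough standalone equivalent of the decision (requires gcc on PATH; gcc_version below is a stand-in for what CygwinCCompiler detects):

    import os
    from distutils.version import LooseVersion

    out = os.popen('gcc -dumpmachine', 'r')
    is_cygwin_gcc = out.read().strip().endswith('cygwin')
    out.close()

    gcc_version = LooseVersion('4.8')              # hypothetical detected version
    no_cygwin = ' -mno-cygwin' if gcc_version < '4' or is_cygwin_gcc else ''
    print 'compiler command: gcc%s -O -Wall' % no_cygwin
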
diff --git a/Lib/distutils/dir_util.py b/Lib/distutils/dir_util.py
index 9c5cf33..e2dc6f4 100644
--- a/Lib/distutils/dir_util.py
+++ b/Lib/distutils/dir_util.py
@@ -144,6 +144,10 @@ def copy_tree(src, dst, preserve_mode=1, preserve_times=1,
src_name = os.path.join(src, n)
dst_name = os.path.join(dst, n)
+ if n.startswith('.nfs'):
+ # skip NFS rename files
+ continue
+
if preserve_symlinks and os.path.islink(src_name):
link_dest = os.readlink(src_name)
if verbose >= 1:
@@ -181,7 +185,6 @@ def remove_tree(directory, verbose=1, dry_run=0):
Any errors are ignored (apart from being reported to stdout if 'verbose'
is true).
"""
- from distutils.util import grok_environment_error
global _path_created
if verbose >= 1:
@@ -198,8 +201,7 @@ def remove_tree(directory, verbose=1, dry_run=0):
if abspath in _path_created:
del _path_created[abspath]
except (IOError, OSError), exc:
- log.warn(grok_environment_error(
- exc, "error removing %s: " % directory))
+ log.warn("error removing %s: %s", directory, exc)
def ensure_relative(path):
"""Take the full path 'path', and make it a relative path.
diff --git a/Lib/distutils/spawn.py b/Lib/distutils/spawn.py
index 7306099..321344a 100644
--- a/Lib/distutils/spawn.py
+++ b/Lib/distutils/spawn.py
@@ -12,6 +12,7 @@ import sys
import os
from distutils.errors import DistutilsPlatformError, DistutilsExecError
+from distutils.debug import DEBUG
from distutils import log
def spawn(cmd, search_path=1, verbose=0, dry_run=0):
@@ -30,6 +31,9 @@ def spawn(cmd, search_path=1, verbose=0, dry_run=0):
Raise DistutilsExecError if running the program fails in any way; just
return on success.
"""
+ # cmd is documented as a list, but just in case some code passes a tuple
+ # in, protect our %-formatting code against horrible death
+ cmd = list(cmd)
if os.name == 'posix':
_spawn_posix(cmd, search_path, dry_run=dry_run)
elif os.name == 'nt':
@@ -69,12 +73,16 @@ def _spawn_nt(cmd, search_path=1, verbose=0, dry_run=0):
rc = os.spawnv(os.P_WAIT, executable, cmd)
except OSError, exc:
# this seems to happen when the command isn't found
+ if not DEBUG:
+ cmd = executable
raise DistutilsExecError, \
- "command '%s' failed: %s" % (cmd[0], exc[-1])
+ "command %r failed: %s" % (cmd, exc[-1])
if rc != 0:
# and this reflects the command running but failing
+ if not DEBUG:
+ cmd = executable
raise DistutilsExecError, \
- "command '%s' failed with exit status %d" % (cmd[0], rc)
+ "command %r failed with exit status %d" % (cmd, rc)
def _spawn_os2(cmd, search_path=1, verbose=0, dry_run=0):
executable = cmd[0]
@@ -88,13 +96,17 @@ def _spawn_os2(cmd, search_path=1, verbose=0, dry_run=0):
rc = os.spawnv(os.P_WAIT, executable, cmd)
except OSError, exc:
# this seems to happen when the command isn't found
+ if not DEBUG:
+ cmd = executable
raise DistutilsExecError, \
- "command '%s' failed: %s" % (cmd[0], exc[-1])
+ "command %r failed: %s" % (cmd, exc[-1])
if rc != 0:
# and this reflects the command running but failing
- log.debug("command '%s' failed with exit status %d" % (cmd[0], rc))
+ if not DEBUG:
+ cmd = executable
+ log.debug("command %r failed with exit status %d" % (cmd, rc))
raise DistutilsExecError, \
- "command '%s' failed with exit status %d" % (cmd[0], rc)
+ "command %r failed with exit status %d" % (cmd, rc)
if sys.platform == 'darwin':
from distutils import sysconfig
@@ -105,8 +117,9 @@ def _spawn_posix(cmd, search_path=1, verbose=0, dry_run=0):
log.info(' '.join(cmd))
if dry_run:
return
+ executable = cmd[0]
exec_fn = search_path and os.execvp or os.execv
- exec_args = [cmd[0], cmd]
+ env = None
if sys.platform == 'darwin':
global _cfg_target, _cfg_target_split
if _cfg_target is None:
@@ -127,18 +140,24 @@ def _spawn_posix(cmd, search_path=1, verbose=0, dry_run=0):
env = dict(os.environ,
MACOSX_DEPLOYMENT_TARGET=cur_target)
exec_fn = search_path and os.execvpe or os.execve
- exec_args.append(env)
pid = os.fork()
if pid == 0: # in the child
try:
- exec_fn(*exec_args)
+ if env is None:
+ exec_fn(executable, cmd)
+ else:
+ exec_fn(executable, cmd, env)
except OSError, e:
- sys.stderr.write("unable to execute %s: %s\n" %
- (cmd[0], e.strerror))
+ if not DEBUG:
+ cmd = executable
+ sys.stderr.write("unable to execute %r: %s\n" %
+ (cmd, e.strerror))
os._exit(1)
- sys.stderr.write("unable to execute %s for unknown reasons" % cmd[0])
+ if not DEBUG:
+ cmd = executable
+ sys.stderr.write("unable to execute %r for unknown reasons" % cmd)
os._exit(1)
else: # in the parent
# Loop until the child either exits or is terminated by a signal
@@ -150,29 +169,37 @@ def _spawn_posix(cmd, search_path=1, verbose=0, dry_run=0):
import errno
if exc.errno == errno.EINTR:
continue
+ if not DEBUG:
+ cmd = executable
raise DistutilsExecError, \
- "command '%s' failed: %s" % (cmd[0], exc[-1])
+ "command %r failed: %s" % (cmd, exc[-1])
if os.WIFSIGNALED(status):
+ if not DEBUG:
+ cmd = executable
raise DistutilsExecError, \
- "command '%s' terminated by signal %d" % \
- (cmd[0], os.WTERMSIG(status))
+ "command %r terminated by signal %d" % \
+ (cmd, os.WTERMSIG(status))
elif os.WIFEXITED(status):
exit_status = os.WEXITSTATUS(status)
if exit_status == 0:
return # hey, it succeeded!
else:
+ if not DEBUG:
+ cmd = executable
raise DistutilsExecError, \
- "command '%s' failed with exit status %d" % \
- (cmd[0], exit_status)
+ "command %r failed with exit status %d" % \
+ (cmd, exit_status)
elif os.WIFSTOPPED(status):
continue
else:
+ if not DEBUG:
+ cmd = executable
raise DistutilsExecError, \
- "unknown error executing '%s': termination status %d" % \
- (cmd[0], status)
+ "unknown error executing %r: termination status %d" % \
+ (cmd, status)
def find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
diff --git a/Lib/distutils/sysconfig.py b/Lib/distutils/sysconfig.py
index 4b193b2..4aa9334 100644
--- a/Lib/distutils/sysconfig.py
+++ b/Lib/distutils/sysconfig.py
@@ -37,6 +37,11 @@ if os.name == "nt" and "\\pcbuild\\amd64" in project_base[-14:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir,
os.path.pardir))
+# set for cross builds
+if "_PYTHON_PROJECT_BASE" in os.environ:
+ # this is the build directory, at least for posix
+ project_base = os.path.normpath(os.environ["_PYTHON_PROJECT_BASE"])
+
# python_build: (Boolean) if true, we're either building Python or
# building an extension with an un-installed Python, so we use
# different (hard-wired) directories.
@@ -141,7 +146,7 @@ def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
"I don't know where Python installs its library "
"on platform '%s'" % os.name)
-_USE_CLANG = None
+
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
@@ -150,41 +155,33 @@ def customize_compiler(compiler):
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
+ if sys.platform == "darwin":
+ # Perform first-time customization of compiler-related
+ # config vars on OS X now that we know we need a compiler.
+ # This is primarily to support Pythons from binary
+ # installers. The kind and paths to build tools on
+ # the user system may vary significantly from the system
+ # that Python itself was built on. Also the user OS
+ # version and build tools may not support the same set
+ # of CPU architectures for universal builds.
+ global _config_vars
+ if not _config_vars.get('CUSTOMIZED_OSX_COMPILER', ''):
+ import _osx_support
+ _osx_support.customize_compiler(_config_vars)
+ _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
+
(cc, cxx, opt, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \
get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SO', 'AR',
'ARFLAGS')
- newcc = None
if 'CC' in os.environ:
newcc = os.environ['CC']
- elif sys.platform == 'darwin' and cc == 'gcc-4.2':
- # Issue #13590:
- # Since Apple removed gcc-4.2 in Xcode 4.2, we can no
- # longer assume it is available for extension module builds.
- # If Python was built with gcc-4.2, check first to see if
- # it is available on this system; if not, try to use clang
- # instead unless the caller explicitly set CC.
- global _USE_CLANG
- if _USE_CLANG is None:
- from distutils import log
- from subprocess import Popen, PIPE
- p = Popen("! type gcc-4.2 && type clang && exit 2",
- shell=True, stdout=PIPE, stderr=PIPE)
- p.wait()
- if p.returncode == 2:
- _USE_CLANG = True
- log.warn("gcc-4.2 not found, using clang instead")
- else:
- _USE_CLANG = False
- if _USE_CLANG:
- newcc = 'clang'
- if newcc:
- # On OS X, if CC is overridden, use that as the default
- # command for LDSHARED as well
if (sys.platform == 'darwin'
and 'LDSHARED' not in os.environ
and ldshared.startswith(cc)):
+ # On OS X, if CC is overridden, use that as the default
+ # command for LDSHARED as well
ldshared = newcc + ldshared[len(cc):]
cc = newcc
if 'CXX' in os.environ:
@@ -244,7 +241,7 @@ def get_config_h_filename():
def get_makefile_filename():
"""Return full pathname of installed Makefile from the Python build."""
if python_build:
- return os.path.join(os.path.dirname(sys.executable), "Makefile")
+ return os.path.join(project_base, "Makefile")
lib_dir = get_python_lib(plat_specific=1, standard_lib=1)
return os.path.join(lib_dir, "config", "Makefile")
@@ -396,66 +393,11 @@ _config_vars = None
def _init_posix():
"""Initialize the module as appropriate for POSIX systems."""
- g = {}
- # load the installed Makefile:
- try:
- filename = get_makefile_filename()
- parse_makefile(filename, g)
- except IOError, msg:
- my_msg = "invalid Python installation: unable to open %s" % filename
- if hasattr(msg, "strerror"):
- my_msg = my_msg + " (%s)" % msg.strerror
-
- raise DistutilsPlatformError(my_msg)
-
- # load the installed pyconfig.h:
- try:
- filename = get_config_h_filename()
- parse_config_h(file(filename), g)
- except IOError, msg:
- my_msg = "invalid Python installation: unable to open %s" % filename
- if hasattr(msg, "strerror"):
- my_msg = my_msg + " (%s)" % msg.strerror
-
- raise DistutilsPlatformError(my_msg)
-
- # On AIX, there are wrong paths to the linker scripts in the Makefile
- # -- these paths are relative to the Python source, but when installed
- # the scripts are in another directory.
- if python_build:
- g['LDSHARED'] = g['BLDSHARED']
-
- elif get_python_version() < '2.1':
- # The following two branches are for 1.5.2 compatibility.
- if sys.platform == 'aix4': # what about AIX 3.x ?
- # Linker script is in the config directory, not in Modules as the
- # Makefile says.
- python_lib = get_python_lib(standard_lib=1)
- ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
- python_exp = os.path.join(python_lib, 'config', 'python.exp')
-
- g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp)
-
- elif sys.platform == 'beos':
- # Linker script is in the config directory. In the Makefile it is
- # relative to the srcdir, which after installation no longer makes
- # sense.
- python_lib = get_python_lib(standard_lib=1)
- linkerscript_path = string.split(g['LDSHARED'])[0]
- linkerscript_name = os.path.basename(linkerscript_path)
- linkerscript = os.path.join(python_lib, 'config',
- linkerscript_name)
-
- # XXX this isn't the right place to do this: adding the Python
- # library to the link, if needed, should be in the "build_ext"
- # command. (It's also needed for non-MS compilers on Windows, and
- # it's taken care of for them by the 'build_ext.get_libraries()'
- # method.)
- g['LDSHARED'] = ("%s -L%s/lib -lpython%s" %
- (linkerscript, PREFIX, get_python_version()))
-
+ # _sysconfigdata is generated at build time, see the sysconfig module
+ from _sysconfigdata import build_time_vars
global _config_vars
- _config_vars = g
+ _config_vars = {}
+ _config_vars.update(build_time_vars)
def _init_nt():
@@ -518,66 +460,11 @@ def get_config_vars(*args):
_config_vars['prefix'] = PREFIX
_config_vars['exec_prefix'] = EXEC_PREFIX
+ # OS X platforms require special customization to handle
+ # multi-architecture, multi-os-version installers
if sys.platform == 'darwin':
- kernel_version = os.uname()[2] # Kernel version (8.4.3)
- major_version = int(kernel_version.split('.')[0])
-
- if major_version < 8:
- # On Mac OS X before 10.4, check if -arch and -isysroot
- # are in CFLAGS or LDFLAGS and remove them if they are.
- # This is needed when building extensions on a 10.3 system
- # using a universal build of python.
- for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED',
- # a number of derived variables. These need to be
- # patched up as well.
- 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
- flags = _config_vars[key]
- flags = re.sub('-arch\s+\w+\s', ' ', flags)
- flags = re.sub('-isysroot [^ \t]*', ' ', flags)
- _config_vars[key] = flags
-
- else:
-
- # Allow the user to override the architecture flags using
- # an environment variable.
- # NOTE: This name was introduced by Apple in OSX 10.5 and
- # is used by several scripting languages distributed with
- # that OS release.
-
- if 'ARCHFLAGS' in os.environ:
- arch = os.environ['ARCHFLAGS']
- for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED',
- # a number of derived variables. These need to be
- # patched up as well.
- 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
-
- flags = _config_vars[key]
- flags = re.sub('-arch\s+\w+\s', ' ', flags)
- flags = flags + ' ' + arch
- _config_vars[key] = flags
-
- # If we're on OSX 10.5 or later and the user tries to
- # compiles an extension using an SDK that is not present
- # on the current machine it is better to not use an SDK
- # than to fail.
- #
- # The major usecase for this is users using a Python.org
- # binary installer on OSX 10.6: that installer uses
- # the 10.4u SDK, but that SDK is not installed by default
- # when you install Xcode.
- #
- m = re.search('-isysroot\s+(\S+)', _config_vars['CFLAGS'])
- if m is not None:
- sdk = m.group(1)
- if not os.path.exists(sdk):
- for key in ('LDFLAGS', 'BASECFLAGS', 'LDSHARED',
- # a number of derived variables. These need to be
- # patched up as well.
- 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
-
- flags = _config_vars[key]
- flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags)
- _config_vars[key] = flags
+ import _osx_support
+ _osx_support.customize_config_vars(_config_vars)
if args:
vals = []
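
_init_posix() no longer re-parses the installed Makefile and pyconfig.h; it simply loads the _sysconfigdata module generated at build time, and all OS X flag munging is delegated to _osx_support. A quick way to confirm both paths see the same underlying data (POSIX builds only; OS X may customize the distutils view further):

    from _sysconfigdata import build_time_vars
    from distutils import sysconfig

    print build_time_vars['LDSHARED']
    print sysconfig.get_config_var('LDSHARED')   # the distutils view of the same data
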
diff --git a/Lib/distutils/tests/support.py b/Lib/distutils/tests/support.py
index 4e6058d..96dcd94 100644
--- a/Lib/distutils/tests/support.py
+++ b/Lib/distutils/tests/support.py
@@ -218,4 +218,4 @@ def fixup_build_ext(cmd):
cmd.library_dirs = []
else:
name, equals, value = runshared.partition('=')
- cmd.library_dirs = value.split(os.pathsep)
+ cmd.library_dirs = [d for d in value.split(os.pathsep) if d]
diff --git a/Lib/distutils/tests/test_archive_util.py b/Lib/distutils/tests/test_archive_util.py
index f01cec3..ed7c2ce 100644
--- a/Lib/distutils/tests/test_archive_util.py
+++ b/Lib/distutils/tests/test_archive_util.py
@@ -199,7 +199,7 @@ class ArchiveUtilTestCase(support.TempdirManager,
dry_run=True)
finally:
os.chdir(old_dir)
- self.assertTrue(not os.path.exists(tarball))
+ self.assertFalse(os.path.exists(tarball))
self.assertEqual(len(w.warnings), 1)
@unittest.skipUnless(zlib, "Requires zlib")
diff --git a/Lib/distutils/tests/test_bdist_dumb.py b/Lib/distutils/tests/test_bdist_dumb.py
index 5a22a10..5db3a85 100644
--- a/Lib/distutils/tests/test_bdist_dumb.py
+++ b/Lib/distutils/tests/test_bdist_dumb.py
@@ -1,8 +1,10 @@
"""Tests for distutils.command.bdist_dumb."""
-import unittest
-import sys
import os
+import sys
+import zipfile
+import unittest
+from test.test_support import run_unittest
# zlib is not used here, but if it's not available
# test_simple_built will fail
@@ -11,8 +13,6 @@ try:
except ImportError:
zlib = None
-from test.test_support import run_unittest
-
from distutils.core import Distribution
from distutils.command.bdist_dumb import bdist_dumb
from distutils.tests import support
@@ -73,15 +73,24 @@ class BuildDumbTestCase(support.TempdirManager,
# see what we have
dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
- base = "%s.%s" % (dist.get_fullname(), cmd.plat_name)
+ base = "%s.%s.zip" % (dist.get_fullname(), cmd.plat_name)
if os.name == 'os2':
base = base.replace(':', '-')
- wanted = ['%s.zip' % base]
- self.assertEqual(dist_created, wanted)
+ self.assertEqual(dist_created, [base])
# now let's check what we have in the zip file
- # XXX to be done
+ fp = zipfile.ZipFile(os.path.join('dist', base))
+ try:
+ contents = fp.namelist()
+ finally:
+ fp.close()
+
+ contents = sorted(os.path.basename(fn) for fn in contents)
+ wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], 'foo.py']
+ if not sys.dont_write_bytecode:
+ wanted.append('foo.pyc')
+ self.assertEqual(contents, sorted(wanted))
def test_finalize_options(self):
pkg_dir, dist = self.create_dist()
diff --git a/Lib/distutils/tests/test_bdist_msi.py b/Lib/distutils/tests/test_bdist_msi.py
index 1c897ab..f98b7a2 100644
--- a/Lib/distutils/tests/test_bdist_msi.py
+++ b/Lib/distutils/tests/test_bdist_msi.py
@@ -1,12 +1,11 @@
"""Tests for distutils.command.bdist_msi."""
-import unittest
import sys
-
+import unittest
from test.test_support import run_unittest
-
from distutils.tests import support
-@unittest.skipUnless(sys.platform=="win32", "These tests are only for win32")
+
+@unittest.skipUnless(sys.platform == 'win32', 'these tests require Windows')
class BDistMSITestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
@@ -14,10 +13,11 @@ class BDistMSITestCase(support.TempdirManager,
def test_minimal(self):
# minimal test XXX need more tests
from distutils.command.bdist_msi import bdist_msi
- pkg_pth, dist = self.create_dist()
+ project_dir, dist = self.create_dist()
cmd = bdist_msi(dist)
cmd.ensure_finalized()
+
def test_suite():
return unittest.makeSuite(BDistMSITestCase)
diff --git a/Lib/distutils/tests/test_bdist_rpm.py b/Lib/distutils/tests/test_bdist_rpm.py
index 25a5763..83d4065 100644
--- a/Lib/distutils/tests/test_bdist_rpm.py
+++ b/Lib/distutils/tests/test_bdist_rpm.py
@@ -39,18 +39,15 @@ class BuildRpmTestCase(support.TempdirManager,
sys.argv[:] = self.old_sys_argv[1]
super(BuildRpmTestCase, self).tearDown()
+ # XXX I am unable yet to make this test work without
+ # spurious sdtout/stderr output under Mac OS X
+ @unittest.skipUnless(sys.platform.startswith('linux'),
+ 'spurious sdtout/stderr output under Mac OS X')
+ @unittest.skipIf(find_executable('rpm') is None,
+ 'the rpm command is not found')
+ @unittest.skipIf(find_executable('rpmbuild') is None,
+ 'the rpmbuild command is not found')
def test_quiet(self):
-
- # XXX I am unable yet to make this test work without
- # spurious sdtout/stderr output under Mac OS X
- if sys.platform != 'linux2':
- return
-
- # this test will run only if the rpm commands are found
- if (find_executable('rpm') is None or
- find_executable('rpmbuild') is None):
- return
-
# let's create a package
tmp_dir = self.mkdtemp()
pkg_dir = os.path.join(tmp_dir, 'foo')
@@ -77,21 +74,22 @@ class BuildRpmTestCase(support.TempdirManager,
cmd.run()
dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
- self.assertTrue('foo-0.1-1.noarch.rpm' in dist_created)
-
+ self.assertIn('foo-0.1-1.noarch.rpm', dist_created)
+
+ # bug #2945: upload ignores bdist_rpm files
+ self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files)
+ self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files)
+
+ # XXX I am unable yet to make this test work without
+ # spurious sdtout/stderr output under Mac OS X
+ @unittest.skipUnless(sys.platform.startswith('linux'),
+ 'spurious sdtout/stderr output under Mac OS X')
+ # http://bugs.python.org/issue1533164
+ @unittest.skipIf(find_executable('rpm') is None,
+ 'the rpm command is not found')
+ @unittest.skipIf(find_executable('rpmbuild') is None,
+ 'the rpmbuild command is not found')
def test_no_optimize_flag(self):
-
- # XXX I am unable yet to make this test work without
- # spurious sdtout/stderr output under Mac OS X
- if sys.platform != 'linux2':
- return
-
- # http://bugs.python.org/issue1533164
- # this test will run only if the rpm command is found
- if (find_executable('rpm') is None or
- find_executable('rpmbuild') is None):
- return
-
# let's create a package that brakes bdist_rpm
tmp_dir = self.mkdtemp()
pkg_dir = os.path.join(tmp_dir, 'foo')
@@ -117,7 +115,12 @@ class BuildRpmTestCase(support.TempdirManager,
cmd.run()
dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
- self.assertTrue('foo-0.1-1.noarch.rpm' in dist_created)
+ self.assertIn('foo-0.1-1.noarch.rpm', dist_created)
+
+ # bug #2945: upload ignores bdist_rpm files
+ self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files)
+ self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files)
+
os.remove(os.path.join(pkg_dir, 'dist', 'foo-0.1-1.noarch.rpm'))
def test_suite():
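
The pattern applied throughout these test files: conditions that used to return silently now sit in skipUnless/skipIf decorators, so skipped tests are counted and reported instead of passing vacuously. A tiny self-contained illustration of the decorator form:

    import sys
    import unittest
    from distutils.spawn import find_executable

    class DemoTestCase(unittest.TestCase):

        @unittest.skipUnless(sys.platform.startswith('linux'), 'Linux-only test')
        @unittest.skipIf(find_executable('rpmbuild') is None,
                         'the rpmbuild command is not found')
        def test_something(self):
            self.assertTrue(True)

    if __name__ == '__main__':
        unittest.main()
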
diff --git a/Lib/distutils/tests/test_bdist_wininst.py b/Lib/distutils/tests/test_bdist_wininst.py
index c2b13b3..247e929 100644
--- a/Lib/distutils/tests/test_bdist_wininst.py
+++ b/Lib/distutils/tests/test_bdist_wininst.py
@@ -23,7 +23,7 @@ class BuildWinInstTestCase(support.TempdirManager,
# and make sure it finds it and returns its content
# no matter what platform we have
exe_file = cmd.get_exe_bytes()
- self.assertTrue(len(exe_file) > 10)
+ self.assertGreater(len(exe_file), 10)
def test_suite():
return unittest.makeSuite(BuildWinInstTestCase)
diff --git a/Lib/distutils/tests/test_build_clib.py b/Lib/distutils/tests/test_build_clib.py
index bef1bd9..d4a1e69 100644
--- a/Lib/distutils/tests/test_build_clib.py
+++ b/Lib/distutils/tests/test_build_clib.py
@@ -77,7 +77,7 @@ class BuildCLibTestCase(support.TempdirManager,
cmd.compiler = FakeCompiler()
- # build_libraries is also doing a bit of typoe checking
+ # build_libraries is also doing a bit of typo checking
lib = [('name', {'sources': 'notvalid'})]
self.assertRaises(DistutilsSetupError, cmd.build_libraries, lib)
@@ -102,11 +102,8 @@ class BuildCLibTestCase(support.TempdirManager,
cmd.distribution.libraries = 'WONTWORK'
self.assertRaises(DistutilsSetupError, cmd.finalize_options)
+ @unittest.skipIf(sys.platform == 'win32', "can't test on Windows")
def test_run(self):
- # can't test on windows
- if sys.platform == 'win32':
- return
-
pkg_dir, dist = self.create_dist()
cmd = build_clib(dist)
@@ -131,13 +128,13 @@ class BuildCLibTestCase(support.TempdirManager,
if ccmd is None:
continue
if find_executable(ccmd[0]) is None:
- return # can't test
+ self.skipTest('The %r command is not found' % ccmd[0])
# this should work
cmd.run()
# let's check the result
- self.assertTrue('libfoo.a' in os.listdir(build_temp))
+ self.assertIn('libfoo.a', os.listdir(build_temp))
def test_suite():
return unittest.makeSuite(BuildCLibTestCase)
diff --git a/Lib/distutils/tests/test_build_ext.py b/Lib/distutils/tests/test_build_ext.py
index b71cc98..d380cb6 100644
--- a/Lib/distutils/tests/test_build_ext.py
+++ b/Lib/distutils/tests/test_build_ext.py
@@ -65,9 +65,9 @@ class BuildExtTestCase(support.TempdirManager,
sys.stdout = old_stdout
if ALREADY_TESTED:
- return
+ self.skipTest('Already tested in %s' % ALREADY_TESTED)
else:
- ALREADY_TESTED = True
+ ALREADY_TESTED = type(self).__name__
import xx
@@ -77,10 +77,11 @@ class BuildExtTestCase(support.TempdirManager,
self.assertEqual(xx.foo(2, 5), 7)
self.assertEqual(xx.foo(13,15), 28)
self.assertEqual(xx.new().demo(), None)
- doc = 'This is a template module just for instruction.'
- self.assertEqual(xx.__doc__, doc)
- self.assertTrue(isinstance(xx.Null(), xx.Null))
- self.assertTrue(isinstance(xx.Str(), xx.Str))
+ if test_support.HAVE_DOCSTRINGS:
+ doc = 'This is a template module just for instruction.'
+ self.assertEqual(xx.__doc__, doc)
+ self.assertIsInstance(xx.Null(), xx.Null)
+ self.assertIsInstance(xx.Str(), xx.Str)
def test_solaris_enable_shared(self):
dist = Distribution({'name': 'xx'})
@@ -101,13 +102,11 @@ class BuildExtTestCase(support.TempdirManager,
_config_vars['Py_ENABLE_SHARED'] = old_var
# make sure we get some library dirs under solaris
- self.assertTrue(len(cmd.library_dirs) > 0)
+ self.assertGreater(len(cmd.library_dirs), 0)
+ @unittest.skipIf(sys.version < '2.6',
+ 'site.USER_SITE was introduced in 2.6')
def test_user_site(self):
- # site.USER_SITE was introduced in 2.6
- if sys.version < '2.6':
- return
-
import site
dist = Distribution({'name': 'xx'})
cmd = build_ext(dist)
@@ -142,10 +141,10 @@ class BuildExtTestCase(support.TempdirManager,
cmd.finalize_options()
py_include = sysconfig.get_python_inc()
- self.assertTrue(py_include in cmd.include_dirs)
+ self.assertIn(py_include, cmd.include_dirs)
plat_py_include = sysconfig.get_python_inc(plat_specific=1)
- self.assertTrue(plat_py_include in cmd.include_dirs)
+ self.assertIn(plat_py_include, cmd.include_dirs)
# make sure cmd.libraries is turned into a list
# if it's a string
@@ -225,13 +224,13 @@ class BuildExtTestCase(support.TempdirManager,
'some': 'bar'})]
cmd.check_extensions_list(exts)
ext = exts[0]
- self.assertTrue(isinstance(ext, Extension))
+ self.assertIsInstance(ext, Extension)
# check_extensions_list adds in ext the values passed
# when they are in ('include_dirs', 'library_dirs', 'libraries'
# 'extra_objects', 'extra_compile_args', 'extra_link_args')
self.assertEqual(ext.libraries, 'foo')
- self.assertTrue(not hasattr(ext, 'some'))
+ self.assertFalse(hasattr(ext, 'some'))
# 'macros' element of build info dict must be 1- or 2-tuple
exts = [('foo.bar', {'sources': [''], 'libraries': 'foo',
@@ -413,9 +412,8 @@ class BuildExtTestCase(support.TempdirManager,
wanted = os.path.join(cmd.build_lib, 'UpdateManager', 'fdsend' + ext)
self.assertEqual(ext_path, wanted)
+ @unittest.skipUnless(sys.platform == 'win32', 'these tests require Windows')
def test_build_ext_path_cross_platform(self):
- if sys.platform != 'win32':
- return
dist = Distribution({'name': 'UpdateManager'})
cmd = build_ext(dist)
cmd.ensure_finalized()
@@ -481,8 +479,16 @@ class BuildExtTestCase(support.TempdirManager,
# get the deployment target that the interpreter was built with
target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
- target = tuple(map(int, target.split('.')))
- target = '%02d%01d0' % target
+ target = tuple(map(int, target.split('.')[0:2]))
+ # format the target value as defined in the Apple
+ # Availability Macros. We can't use the macro names since
+ # at least one value we test with will not exist yet.
+ if target[1] < 10:
+ # for 10.1 through 10.9.x -> "10n0"
+ target = '%02d%01d0' % target
+ else:
+ # for 10.10 and beyond -> "10nn00"
+ target = '%02d%02d00' % target
deptarget_ext = Extension(
'deptarget',
[deptarget_c],
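
The deployment-target test now formats MACOSX_DEPLOYMENT_TARGET the way the Apple Availability Macros encode it, which keeps working once the minor version reaches two digits (10.10). A standalone sketch of the formatting rule with example inputs:

    def format_deptarget(target):
        major, minor = map(int, target.split('.')[0:2])
        if minor < 10:
            return '%02d%01d0' % (major, minor)    # 10.9  -> '1090'
        return '%02d%02d00' % (major, minor)       # 10.10 -> '101000'

    print format_deptarget('10.9'), format_deptarget('10.10')
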
diff --git a/Lib/distutils/tests/test_build_py.py b/Lib/distutils/tests/test_build_py.py
index 6c6ec20..c4498bc 100644
--- a/Lib/distutils/tests/test_build_py.py
+++ b/Lib/distutils/tests/test_build_py.py
@@ -99,6 +99,37 @@ class BuildPyTestCase(support.TempdirManager,
os.chdir(cwd)
sys.stdout = old_stdout
+ def test_dir_in_package_data(self):
+ """
+ A directory in package_data should not be added to the filelist.
+ """
+ # See bug 19286
+ sources = self.mkdtemp()
+ pkg_dir = os.path.join(sources, "pkg")
+
+ os.mkdir(pkg_dir)
+ open(os.path.join(pkg_dir, "__init__.py"), "w").close()
+
+ docdir = os.path.join(pkg_dir, "doc")
+ os.mkdir(docdir)
+ open(os.path.join(docdir, "testfile"), "w").close()
+
+ # create the directory that could be incorrectly detected as a file
+ os.mkdir(os.path.join(docdir, 'otherdir'))
+
+ os.chdir(sources)
+ dist = Distribution({"packages": ["pkg"],
+ "package_data": {"pkg": ["doc/*"]}})
+ # script_name need not exist, it just need to be initialized
+ dist.script_name = os.path.join(sources, "setup.py")
+ dist.script_args = ["build"]
+ dist.parse_command_line()
+
+ try:
+ dist.run_commands()
+ except DistutilsFileError:
+ self.fail("failed package_data when data dir includes a dir")
+
def test_dont_write_bytecode(self):
# makes sure byte_compile is not used
pkg_dir, dist = self.create_dist()
diff --git a/Lib/distutils/tests/test_build_scripts.py b/Lib/distutils/tests/test_build_scripts.py
index 4da93cc..ad3c1a2 100644
--- a/Lib/distutils/tests/test_build_scripts.py
+++ b/Lib/distutils/tests/test_build_scripts.py
@@ -17,8 +17,8 @@ class BuildScriptsTestCase(support.TempdirManager,
def test_default_settings(self):
cmd = self.get_build_scripts_cmd("/foo/bar", [])
- self.assertTrue(not cmd.force)
- self.assertTrue(cmd.build_dir is None)
+ self.assertFalse(cmd.force)
+ self.assertIsNone(cmd.build_dir)
cmd.finalize_options()
@@ -38,7 +38,7 @@ class BuildScriptsTestCase(support.TempdirManager,
built = os.listdir(target)
for name in expected:
- self.assertTrue(name in built)
+ self.assertIn(name, built)
def get_build_scripts_cmd(self, target, scripts):
import sys
@@ -103,7 +103,7 @@ class BuildScriptsTestCase(support.TempdirManager,
built = os.listdir(target)
for name in expected:
- self.assertTrue(name in built)
+ self.assertIn(name, built)
def test_suite():
return unittest.makeSuite(BuildScriptsTestCase)
diff --git a/Lib/distutils/tests/test_ccompiler.py b/Lib/distutils/tests/test_ccompiler.py
index 45e477a..446eac2 100644
--- a/Lib/distutils/tests/test_ccompiler.py
+++ b/Lib/distutils/tests/test_ccompiler.py
@@ -55,12 +55,9 @@ class CCompilerTestCase(support.EnvironGuard, unittest.TestCase):
finally:
debug.DEBUG = False
+ @unittest.skipUnless(get_default_compiler() == 'unix',
+ 'not testing if default compiler is not unix')
def test_customize_compiler(self):
-
- # not testing if default compiler is not unix
- if get_default_compiler() != 'unix':
- return
-
os.environ['AR'] = 'my_ar'
os.environ['ARFLAGS'] = '-arflags'
diff --git a/Lib/distutils/tests/test_check.py b/Lib/distutils/tests/test_check.py
index f73342a..f86f129 100644
--- a/Lib/distutils/tests/test_check.py
+++ b/Lib/distutils/tests/test_check.py
@@ -56,9 +56,8 @@ class CheckTestCase(support.LoggingSilencer,
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
+ @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_document(self):
- if not HAS_DOCUTILS: # won't test without docutils
- return
pkg_info, dist = self.create_dist()
cmd = check(dist)
@@ -72,9 +71,8 @@ class CheckTestCase(support.LoggingSilencer,
msgs = cmd._check_rst_data(rest)
self.assertEqual(len(msgs), 0)
+ @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_restructuredtext(self):
- if not HAS_DOCUTILS: # won't test without docutils
- return
# let's see if it detects broken rest in long_description
broken_rest = 'title\n===\n\ntest'
pkg_info, dist = self.create_dist(long_description=broken_rest)
diff --git a/Lib/distutils/tests/test_clean.py b/Lib/distutils/tests/test_clean.py
index 7b988f7..e84670d 100644
--- a/Lib/distutils/tests/test_clean.py
+++ b/Lib/distutils/tests/test_clean.py
@@ -36,7 +36,7 @@ class cleanTestCase(support.TempdirManager,
# make sure the files where removed
for name, path in dirs:
- self.assertTrue(not os.path.exists(path),
+ self.assertFalse(os.path.exists(path),
'%s was not removed' % path)
# let's run the command again (should spit warnings but succeed)
diff --git a/Lib/distutils/tests/test_cmd.py b/Lib/distutils/tests/test_cmd.py
index e074099..51420c9 100644
--- a/Lib/distutils/tests/test_cmd.py
+++ b/Lib/distutils/tests/test_cmd.py
@@ -34,6 +34,18 @@ class CommandTestCase(unittest.TestCase):
self.assertRaises(DistutilsOptionError,
cmd.ensure_string_list, 'not_string_list2')
+ cmd.option1 = 'ok,dok'
+ cmd.ensure_string_list('option1')
+ self.assertEqual(cmd.option1, ['ok', 'dok'])
+
+ cmd.option2 = ['xxx', 'www']
+ cmd.ensure_string_list('option2')
+
+ cmd.option3 = ['ok', 2]
+ self.assertRaises(DistutilsOptionError, cmd.ensure_string_list,
+ 'option3')
+
+
def test_make_file(self):
cmd = self.cmd
@@ -77,19 +89,6 @@ class CommandTestCase(unittest.TestCase):
cmd.option3 = 1
self.assertRaises(DistutilsOptionError, cmd.ensure_string, 'option3')
- def test_ensure_string_list(self):
- cmd = self.cmd
- cmd.option1 = 'ok,dok'
- cmd.ensure_string_list('option1')
- self.assertEqual(cmd.option1, ['ok', 'dok'])
-
- cmd.option2 = ['xxx', 'www']
- cmd.ensure_string_list('option2')
-
- cmd.option3 = ['ok', 2]
- self.assertRaises(DistutilsOptionError, cmd.ensure_string_list,
- 'option3')
-
def test_ensure_filename(self):
cmd = self.cmd
cmd.option1 = __file__
diff --git a/Lib/distutils/tests/test_config.py b/Lib/distutils/tests/test_config.py
index cfd096e..17db98f 100644
--- a/Lib/distutils/tests/test_config.py
+++ b/Lib/distutils/tests/test_config.py
@@ -89,7 +89,7 @@ class PyPIRCCommandTestCase(support.TempdirManager,
config = config.items()
config.sort()
waited = [('password', 'secret'), ('realm', 'pypi'),
- ('repository', 'http://pypi.python.org/pypi'),
+ ('repository', 'https://pypi.python.org/pypi'),
('server', 'server1'), ('username', 'me')]
self.assertEqual(config, waited)
@@ -99,14 +99,14 @@ class PyPIRCCommandTestCase(support.TempdirManager,
config = config.items()
config.sort()
waited = [('password', 'secret'), ('realm', 'pypi'),
- ('repository', 'http://pypi.python.org/pypi'),
+ ('repository', 'https://pypi.python.org/pypi'),
('server', 'server-login'), ('username', 'tarek')]
self.assertEqual(config, waited)
def test_server_empty_registration(self):
cmd = self._cmd(self.dist)
rc = cmd._get_rc_file()
- self.assertTrue(not os.path.exists(rc))
+ self.assertFalse(os.path.exists(rc))
cmd._store_pypirc('tarek', 'xxx')
self.assertTrue(os.path.exists(rc))
f = open(rc)
diff --git a/Lib/distutils/tests/test_config_cmd.py b/Lib/distutils/tests/test_config_cmd.py
index 2cf3886..b2a418e 100644
--- a/Lib/distutils/tests/test_config_cmd.py
+++ b/Lib/distutils/tests/test_config_cmd.py
@@ -37,9 +37,8 @@ class ConfigTestCase(support.LoggingSilencer,
dump_file(this_file, 'I am the header')
self.assertEqual(len(self._logs), numlines+1)
+ @unittest.skipIf(sys.platform == 'win32', "can't test on Windows")
def test_search_cpp(self):
- if sys.platform == 'win32':
- return
pkg_dir, dist = self.create_dist()
cmd = config(dist)
@@ -81,7 +80,7 @@ class ConfigTestCase(support.LoggingSilencer,
cmd._clean(f1, f2)
for f in (f1, f2):
- self.assertTrue(not os.path.exists(f))
+ self.assertFalse(os.path.exists(f))
def test_suite():
return unittest.makeSuite(ConfigTestCase)
diff --git a/Lib/distutils/tests/test_dir_util.py b/Lib/distutils/tests/test_dir_util.py
index 693f77c..d82d913 100644
--- a/Lib/distutils/tests/test_dir_util.py
+++ b/Lib/distutils/tests/test_dir_util.py
@@ -101,6 +101,24 @@ class DirUtilTestCase(support.TempdirManager, unittest.TestCase):
remove_tree(self.root_target, verbose=0)
remove_tree(self.target2, verbose=0)
+ def test_copy_tree_skips_nfs_temp_files(self):
+ mkpath(self.target, verbose=0)
+
+ a_file = os.path.join(self.target, 'ok.txt')
+ nfs_file = os.path.join(self.target, '.nfs123abc')
+ for f in a_file, nfs_file:
+ fh = open(f, 'w')
+ try:
+ fh.write('some content')
+ finally:
+ fh.close()
+
+ copy_tree(self.target, self.target2)
+ self.assertEqual(os.listdir(self.target2), ['ok.txt'])
+
+ remove_tree(self.root_target, verbose=0)
+ remove_tree(self.target2, verbose=0)
+
def test_ensure_relative(self):
if os.sep == '/':
self.assertEqual(ensure_relative('/home/foo'), 'home/foo')
diff --git a/Lib/distutils/tests/test_install.py b/Lib/distutils/tests/test_install.py
index ebfb04f..c3492b8 100644
--- a/Lib/distutils/tests/test_install.py
+++ b/Lib/distutils/tests/test_install.py
@@ -65,11 +65,9 @@ class InstallTestCase(support.TempdirManager,
check_path(cmd.install_scripts, os.path.join(destination, "bin"))
check_path(cmd.install_data, destination)
+ @unittest.skipIf(sys.version < '2.6',
+ 'site.USER_SITE was introduced in 2.6')
def test_user_site(self):
- # site.USER_SITE was introduced in 2.6
- if sys.version < '2.6':
- return
-
# preparing the environment for the test
self.old_user_base = site.USER_BASE
self.old_user_site = site.USER_SITE
@@ -86,19 +84,17 @@ class InstallTestCase(support.TempdirManager,
self.old_expand = os.path.expanduser
os.path.expanduser = _expanduser
- try:
- # this is the actual test
- self._test_user_site()
- finally:
+ def cleanup():
site.USER_BASE = self.old_user_base
site.USER_SITE = self.old_user_site
install_module.USER_BASE = self.old_user_base
install_module.USER_SITE = self.old_user_site
os.path.expanduser = self.old_expand
- def _test_user_site(self):
+ self.addCleanup(cleanup)
+
for key in ('nt_user', 'unix_user', 'os2_home'):
- self.assertTrue(key in INSTALL_SCHEMES)
+ self.assertIn(key, INSTALL_SCHEMES)
dist = Distribution({'name': 'xx'})
cmd = install(dist)
@@ -106,14 +102,14 @@ class InstallTestCase(support.TempdirManager,
# making sure the user option is there
options = [name for name, short, lable in
cmd.user_options]
- self.assertTrue('user' in options)
+ self.assertIn('user', options)
# setting a value
cmd.user = 1
# user base and site shouldn't be created yet
- self.assertTrue(not os.path.exists(self.user_base))
- self.assertTrue(not os.path.exists(self.user_site))
+ self.assertFalse(os.path.exists(self.user_base))
+ self.assertFalse(os.path.exists(self.user_site))
# let's run finalize
cmd.ensure_finalized()
@@ -122,8 +118,8 @@ class InstallTestCase(support.TempdirManager,
self.assertTrue(os.path.exists(self.user_base))
self.assertTrue(os.path.exists(self.user_site))
- self.assertTrue('userbase' in cmd.config_vars)
- self.assertTrue('usersite' in cmd.config_vars)
+ self.assertIn('userbase', cmd.config_vars)
+ self.assertIn('usersite', cmd.config_vars)
def test_handle_extra_path(self):
dist = Distribution({'name': 'xx', 'extra_path': 'path,dirs'})
@@ -168,7 +164,7 @@ class InstallTestCase(support.TempdirManager,
cmd.home = 'home'
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
- # can't combine user with with prefix/exec_prefix/home or
+ # can't combine user with prefix/exec_prefix/home or
# install_(plat)base
cmd.prefix = None
cmd.user = 'user'
@@ -176,15 +172,16 @@ class InstallTestCase(support.TempdirManager,
def test_record(self):
install_dir = self.mkdtemp()
- project_dir, dist = self.create_dist(scripts=['hello'])
- self.addCleanup(os.chdir, os.getcwd())
+ project_dir, dist = self.create_dist(py_modules=['hello'],
+ scripts=['sayhi'])
os.chdir(project_dir)
- self.write_file('hello', "print('o hai')")
+ self.write_file('hello.py', "def main(): print 'o hai'")
+ self.write_file('sayhi', 'from hello import main; main()')
cmd = install(dist)
dist.command_obj['install'] = cmd
cmd.root = install_dir
- cmd.record = os.path.join(project_dir, 'RECORD')
+ cmd.record = os.path.join(project_dir, 'filelist')
cmd.ensure_finalized()
cmd.run()
@@ -195,7 +192,7 @@ class InstallTestCase(support.TempdirManager,
f.close()
found = [os.path.basename(line) for line in content.splitlines()]
- expected = ['hello',
+ expected = ['hello.py', 'hello.pyc', 'sayhi',
'UNKNOWN-0.0.0-py%s.%s.egg-info' % sys.version_info[:2]]
self.assertEqual(found, expected)
@@ -203,7 +200,6 @@ class InstallTestCase(support.TempdirManager,
install_dir = self.mkdtemp()
project_dir, dist = self.create_dist(ext_modules=[
Extension('xx', ['xxmodule.c'])])
- self.addCleanup(os.chdir, os.getcwd())
os.chdir(project_dir)
support.copy_xxmodule_c(project_dir)
@@ -215,7 +211,7 @@ class InstallTestCase(support.TempdirManager,
dist.command_obj['install'] = cmd
dist.command_obj['build_ext'] = buildextcmd
cmd.root = install_dir
- cmd.record = os.path.join(project_dir, 'RECORD')
+ cmd.record = os.path.join(project_dir, 'filelist')
cmd.ensure_finalized()
cmd.run()
@@ -239,7 +235,8 @@ class InstallTestCase(support.TempdirManager,
self.test_record()
finally:
install_module.DEBUG = False
- self.assertTrue(len(self.logs) > old_logs_len)
+ self.assertGreater(len(self.logs), old_logs_len)
+
def test_suite():
return unittest.makeSuite(InstallTestCase)
diff --git a/Lib/distutils/tests/test_install_lib.py b/Lib/distutils/tests/test_install_lib.py
index 4d86308..0defbd6 100644
--- a/Lib/distutils/tests/test_install_lib.py
+++ b/Lib/distutils/tests/test_install_lib.py
@@ -65,7 +65,7 @@ class InstallLibTestCase(support.TempdirManager,
cmd.distribution.script_name = 'setup.py'
# get_output should return 4 elements
- self.assertTrue(len(cmd.get_outputs()) >= 2)
+ self.assertGreaterEqual(len(cmd.get_outputs()), 2)
def test_get_inputs(self):
pkg_dir, dist = self.create_dist()
@@ -98,7 +98,7 @@ class InstallLibTestCase(support.TempdirManager,
finally:
sys.dont_write_bytecode = old_dont_write_bytecode
- self.assertTrue('byte-compiling is disabled' in self.logs[0][1])
+ self.assertIn('byte-compiling is disabled', self.logs[0][1])
def test_suite():
return unittest.makeSuite(InstallLibTestCase)
diff --git a/Lib/distutils/tests/test_install_scripts.py b/Lib/distutils/tests/test_install_scripts.py
index 4608545..9c0ba6d 100644
--- a/Lib/distutils/tests/test_install_scripts.py
+++ b/Lib/distutils/tests/test_install_scripts.py
@@ -24,10 +24,10 @@ class InstallScriptsTestCase(support.TempdirManager,
skip_build=1,
)
cmd = install_scripts(dist)
- self.assertTrue(not cmd.force)
- self.assertTrue(not cmd.skip_build)
- self.assertTrue(cmd.build_dir is None)
- self.assertTrue(cmd.install_dir is None)
+ self.assertFalse(cmd.force)
+ self.assertFalse(cmd.skip_build)
+ self.assertIsNone(cmd.build_dir)
+ self.assertIsNone(cmd.install_dir)
cmd.finalize_options()
@@ -72,7 +72,7 @@ class InstallScriptsTestCase(support.TempdirManager,
installed = os.listdir(target)
for name in expected:
- self.assertTrue(name in installed)
+ self.assertIn(name, installed)
def test_suite():
diff --git a/Lib/distutils/tests/test_msvc9compiler.py b/Lib/distutils/tests/test_msvc9compiler.py
index 7347072..16a95ad 100644
--- a/Lib/distutils/tests/test_msvc9compiler.py
+++ b/Lib/distutils/tests/test_msvc9compiler.py
@@ -104,7 +104,7 @@ class msvc9compilerTestCase(support.TempdirManager,
unittest.TestCase):
def test_no_compiler(self):
- # makes sure query_vcvarsall throws
+ # makes sure query_vcvarsall raises
# a DistutilsPlatformError if the compiler
# is not found
from distutils.msvc9compiler import query_vcvarsall
@@ -128,7 +128,7 @@ class msvc9compilerTestCase(support.TempdirManager,
# windows registeries versions.
path = r'Control Panel\Desktop'
v = Reg.get_value(path, u'dragfullwindows')
- self.assertTrue(v in (u'0', u'1', u'2'))
+ self.assertIn(v, (u'0', u'1', u'2'))
import _winreg
HKCU = _winreg.HKEY_CURRENT_USER
@@ -136,7 +136,7 @@ class msvc9compilerTestCase(support.TempdirManager,
self.assertEqual(keys, None)
keys = Reg.read_keys(HKCU, r'Control Panel')
- self.assertTrue('Desktop' in keys)
+ self.assertIn('Desktop', keys)
def test_remove_visual_c_ref(self):
from distutils.msvc9compiler import MSVCCompiler
@@ -174,7 +174,7 @@ class msvc9compilerTestCase(support.TempdirManager,
compiler = MSVCCompiler()
got = compiler._remove_visual_c_ref(manifest)
- self.assertIs(got, None)
+ self.assertIsNone(got)
def test_suite():
diff --git a/Lib/distutils/tests/test_register.py b/Lib/distutils/tests/test_register.py
index aa9bc43..ca7d79b 100644
--- a/Lib/distutils/tests/test_register.py
+++ b/Lib/distutils/tests/test_register.py
@@ -1,6 +1,5 @@
# -*- encoding: utf8 -*-
"""Tests for distutils.command.register."""
-import sys
import os
import unittest
import getpass
@@ -11,11 +10,14 @@ from test.test_support import check_warnings, run_unittest
from distutils.command import register as register_module
from distutils.command.register import register
-from distutils.core import Distribution
from distutils.errors import DistutilsSetupError
-from distutils.tests import support
-from distutils.tests.test_config import PYPIRC, PyPIRCCommandTestCase
+from distutils.tests.test_config import PyPIRCCommandTestCase
+
+try:
+ import docutils
+except ImportError:
+ docutils = None
PYPIRC_NOPASSWORD = """\
[distutils]
@@ -97,7 +99,7 @@ class RegisterTestCase(PyPIRCCommandTestCase):
cmd = self._get_cmd()
# we shouldn't have a .pypirc file yet
- self.assertTrue(not os.path.exists(self.rc))
+ self.assertFalse(os.path.exists(self.rc))
# patching raw_input and getpass.getpass
# so register gets happy
@@ -142,7 +144,7 @@ class RegisterTestCase(PyPIRCCommandTestCase):
req1 = dict(self.conn.reqs[0].headers)
req2 = dict(self.conn.reqs[1].headers)
self.assertEqual(req2['Content-length'], req1['Content-length'])
- self.assertTrue('xxx' in self.conn.reqs[1].data)
+ self.assertIn('xxx', self.conn.reqs[1].data)
def test_password_not_in_file(self):
@@ -172,7 +174,7 @@ class RegisterTestCase(PyPIRCCommandTestCase):
req = self.conn.reqs[0]
headers = dict(req.headers)
self.assertEqual(headers['Content-length'], '608')
- self.assertTrue('tarek' in req.data)
+ self.assertIn('tarek', req.data)
def test_password_reset(self):
# this test runs choice 3
@@ -190,8 +192,9 @@ class RegisterTestCase(PyPIRCCommandTestCase):
req = self.conn.reqs[0]
headers = dict(req.headers)
self.assertEqual(headers['Content-length'], '290')
- self.assertTrue('tarek' in req.data)
+ self.assertIn('tarek', req.data)
+ @unittest.skipUnless(docutils is not None, 'needs docutils')
def test_strict(self):
# testing the script option
# when on, the register command stops if
@@ -204,13 +207,6 @@ class RegisterTestCase(PyPIRCCommandTestCase):
cmd.strict = 1
self.assertRaises(DistutilsSetupError, cmd.run)
- # we don't test the reSt feature if docutils
- # is not installed
- try:
- import docutils
- except ImportError:
- return
-
# metadata are OK but long_description is broken
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': u'éxéxé',
@@ -264,6 +260,21 @@ class RegisterTestCase(PyPIRCCommandTestCase):
finally:
del register_module.raw_input
+ @unittest.skipUnless(docutils is not None, 'needs docutils')
+ def test_register_invalid_long_description(self):
+ description = ':funkie:`str`' # mimic Sphinx-specific markup
+ metadata = {'url': 'xxx', 'author': 'xxx',
+ 'author_email': 'xxx',
+ 'name': 'xxx', 'version': 'xxx',
+ 'long_description': description}
+ cmd = self._get_cmd(metadata)
+ cmd.ensure_finalized()
+ cmd.strict = True
+ inputs = RawInputs('2', 'tarek', 'tarek@ziade.org')
+ register_module.raw_input = inputs
+ self.addCleanup(delattr, register_module, 'raw_input')
+ self.assertRaises(DistutilsSetupError, cmd.run)
+
def test_check_metadata_deprecated(self):
# makes sure make_metadata is deprecated
cmd = self._get_cmd()
diff --git a/Lib/distutils/tests/test_sdist.py b/Lib/distutils/tests/test_sdist.py
index 9e422fc..02c1d12 100644
--- a/Lib/distutils/tests/test_sdist.py
+++ b/Lib/distutils/tests/test_sdist.py
@@ -6,6 +6,7 @@ import warnings
import zipfile
from os.path import join
from textwrap import dedent
+from test.test_support import captured_stdout, check_warnings, run_unittest
# zlib is not used here, but if it's not available
# the tests that use zipfile may fail
@@ -21,7 +22,6 @@ try:
except ImportError:
UID_GID_SUPPORT = False
-from test.test_support import captured_stdout, check_warnings, run_unittest
from distutils.command.sdist import sdist, show_formats
from distutils.core import Distribution
@@ -91,9 +91,8 @@ class SDistTestCase(PyPIRCCommandTestCase):
@unittest.skipUnless(zlib, "requires zlib")
def test_prune_file_list(self):
- # this test creates a package with some vcs dirs in it
- # and launch sdist to make sure they get pruned
- # on all systems
+ # this test creates a project with some VCS dirs and an NFS rename
+ # file, then launches sdist to check they get pruned on all systems
# creating VCS directories with some files in them
os.mkdir(join(self.tmp_dir, 'somecode', '.svn'))
@@ -107,6 +106,8 @@ class SDistTestCase(PyPIRCCommandTestCase):
self.write_file((self.tmp_dir, 'somecode', '.git',
'ok'), 'xxx')
+ self.write_file((self.tmp_dir, 'somecode', '.nfs0001'), 'xxx')
+
# now building a sdist
dist, cmd = self.get_cmd()
@@ -133,12 +134,6 @@ class SDistTestCase(PyPIRCCommandTestCase):
@unittest.skipUnless(zlib, "requires zlib")
def test_make_distribution(self):
-
- # check if tar and gzip are installed
- if (find_executable('tar') is None or
- find_executable('gzip') is None):
- return
-
# now building a sdist
dist, cmd = self.get_cmd()
@@ -324,13 +319,11 @@ class SDistTestCase(PyPIRCCommandTestCase):
@unittest.skipUnless(zlib, "requires zlib")
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
+ @unittest.skipIf(find_executable('tar') is None,
+ "The tar command is not found")
+ @unittest.skipIf(find_executable('gzip') is None,
+ "The gzip command is not found")
def test_make_distribution_owner_group(self):
-
- # check if tar and gzip are installed
- if (find_executable('tar') is None or
- find_executable('gzip') is None):
- return
-
# now building a sdist
dist, cmd = self.get_cmd()
@@ -375,7 +368,7 @@ class SDistTestCase(PyPIRCCommandTestCase):
# the following tests make sure there is a nice error message instead
# of a traceback when parsing an invalid manifest template
- def _test_template(self, content):
+ def _check_template(self, content):
dist, cmd = self.get_cmd()
os.chdir(self.tmp_dir)
self.write_file('MANIFEST.in', content)
@@ -386,17 +379,17 @@ class SDistTestCase(PyPIRCCommandTestCase):
self.assertEqual(len(warnings), 1)
def test_invalid_template_unknown_command(self):
- self._test_template('taunt knights *')
+ self._check_template('taunt knights *')
def test_invalid_template_wrong_arguments(self):
# this manifest command takes one argument
- self._test_template('prune')
+ self._check_template('prune')
@unittest.skipIf(os.name != 'nt', 'test relevant for Windows only')
def test_invalid_template_wrong_path(self):
# on Windows, trailing slashes are not allowed
# this used to crash instead of raising a warning: #8286
- self._test_template('include examples/')
+ self._check_template('include examples/')
@unittest.skipUnless(zlib, "requires zlib")
def test_get_file_list(self):
diff --git a/Lib/distutils/tests/test_sysconfig.py b/Lib/distutils/tests/test_sysconfig.py
index 49570c4..ea8d7b5 100644
--- a/Lib/distutils/tests/test_sysconfig.py
+++ b/Lib/distutils/tests/test_sysconfig.py
@@ -72,6 +72,35 @@ class SysconfigTestCase(support.EnvironGuard,
'OTHER': 'foo'})
+ def test_sysconfig_module(self):
+ import sysconfig as global_sysconfig
+ self.assertEqual(global_sysconfig.get_config_var('CFLAGS'), sysconfig.get_config_var('CFLAGS'))
+ self.assertEqual(global_sysconfig.get_config_var('LDFLAGS'), sysconfig.get_config_var('LDFLAGS'))
+
+ @unittest.skipIf(sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'),'compiler flags customized')
+ def test_sysconfig_compiler_vars(self):
+ # On OS X, binary installers support extension module building on
+ # various levels of the operating system with differing Xcode
+ # configurations. This requires customization of some of the
+ # compiler configuration directives to suit the environment on
+ # the installed machine. Some of these customizations may require
+ # running external programs and, so, are deferred until needed by
+ # the first extension module build. With Python 3.3, only
+ # the Distutils version of sysconfig is used for extension module
+ # builds, which happens earlier in the Distutils tests. This may
+ # cause the following tests to fail since no tests have caused
+ # the global version of sysconfig to call the customization yet.
+ # The solution for now is to simply skip this test in this case.
+ # The longer-term solution is to only have one version of sysconfig.
+
+ import sysconfig as global_sysconfig
+ if sysconfig.get_config_var('CUSTOMIZED_OSX_COMPILER'):
+ self.skipTest('compiler flags customized')
+ self.assertEqual(global_sysconfig.get_config_var('LDSHARED'), sysconfig.get_config_var('LDSHARED'))
+ self.assertEqual(global_sysconfig.get_config_var('CC'), sysconfig.get_config_var('CC'))
+
+
+
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SysconfigTestCase))
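
The new test_sysconfig_module check above asserts that the top-level sysconfig module and distutils.sysconfig report the same compiler settings. A quick sketch of the same comparison, assuming an uncustomized build (OS X installers may rewrite CC/LDSHARED lazily, which is what the CUSTOMIZED_OSX_COMPILER skip guards against):

import sysconfig as global_sysconfig
from distutils import sysconfig as distutils_sysconfig

# Both modules are backed by the same Makefile/pyconfig data, so these
# are expected to agree on a stock build.
print global_sysconfig.get_config_var('CFLAGS') == distutils_sysconfig.get_config_var('CFLAGS')
print global_sysconfig.get_config_var('LDSHARED') == distutils_sysconfig.get_config_var('LDSHARED')
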
diff --git a/Lib/distutils/tests/test_unixccompiler.py b/Lib/distutils/tests/test_unixccompiler.py
index 40c908a..fec1ea6 100644
--- a/Lib/distutils/tests/test_unixccompiler.py
+++ b/Lib/distutils/tests/test_unixccompiler.py
@@ -1,7 +1,8 @@
"""Tests for distutils.unixccompiler."""
+import os
import sys
import unittest
-from test.test_support import run_unittest
+from test.test_support import EnvironmentVarGuard, run_unittest
from distutils import sysconfig
from distutils.unixccompiler import UnixCCompiler
@@ -20,12 +21,8 @@ class UnixCCompilerTestCase(unittest.TestCase):
sys.platform = self._backup_platform
sysconfig.get_config_var = self._backup_get_config_var
+ @unittest.skipIf(sys.platform == 'win32', "can't test on Windows")
def test_runtime_libdir_option(self):
-
- # not tested under windows
- if sys.platform == 'win32':
- return
-
# Issue#5900
#
# Ensure RUNPATH is added to extension modules with RPATH if
@@ -122,6 +119,37 @@ class UnixCCompilerTestCase(unittest.TestCase):
sysconfig.get_config_var = gcv
self.assertEqual(self.cc.rpath_foo(), '-R/foo')
+ @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for OS X')
+ def test_osx_cc_overrides_ldshared(self):
+ # Issue #18080:
+ # ensure that setting CC env variable also changes default linker
+ def gcv(v):
+ if v == 'LDSHARED':
+ return 'gcc-4.2 -bundle -undefined dynamic_lookup '
+ return 'gcc-4.2'
+ sysconfig.get_config_var = gcv
+ with EnvironmentVarGuard() as env:
+ env['CC'] = 'my_cc'
+ del env['LDSHARED']
+ sysconfig.customize_compiler(self.cc)
+ self.assertEqual(self.cc.linker_so[0], 'my_cc')
+
+ @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for OS X')
+ def test_osx_explict_ldshared(self):
+ # Issue #18080:
+ # ensure that setting CC env variable does not change
+ # explicit LDSHARED setting for linker
+ def gcv(v):
+ if v == 'LDSHARED':
+ return 'gcc-4.2 -bundle -undefined dynamic_lookup '
+ return 'gcc-4.2'
+ sysconfig.get_config_var = gcv
+ with EnvironmentVarGuard() as env:
+ env['CC'] = 'my_cc'
+ env['LDSHARED'] = 'my_ld -bundle -dynamic'
+ sysconfig.customize_compiler(self.cc)
+ self.assertEqual(self.cc.linker_so[0], 'my_ld')
+
def test_suite():
return unittest.makeSuite(UnixCCompilerTestCase)
diff --git a/Lib/distutils/tests/test_upload.py b/Lib/distutils/tests/test_upload.py
index 9911199..17735d3 100644
--- a/Lib/distutils/tests/test_upload.py
+++ b/Lib/distutils/tests/test_upload.py
@@ -7,6 +7,7 @@ from test.test_support import run_unittest
from distutils.command import upload as upload_mod
from distutils.command.upload import upload
from distutils.core import Distribution
+from distutils.errors import DistutilsError
from distutils.tests.test_config import PYPIRC, PyPIRCCommandTestCase
@@ -41,16 +42,17 @@ username:me
class FakeOpen(object):
- def __init__(self, url):
+ def __init__(self, url, msg=None, code=None):
self.url = url
if not isinstance(url, str):
self.req = url
else:
self.req = None
- self.msg = 'OK'
+ self.msg = msg or 'OK'
+ self.code = code or 200
def getcode(self):
- return 200
+ return self.code
class uploadTestCase(PyPIRCCommandTestCase):
@@ -60,13 +62,15 @@ class uploadTestCase(PyPIRCCommandTestCase):
self.old_open = upload_mod.urlopen
upload_mod.urlopen = self._urlopen
self.last_open = None
+ self.next_msg = None
+ self.next_code = None
def tearDown(self):
upload_mod.urlopen = self.old_open
super(uploadTestCase, self).tearDown()
def _urlopen(self, url):
- self.last_open = FakeOpen(url)
+ self.last_open = FakeOpen(url, msg=self.next_msg, code=self.next_code)
return self.last_open
def test_finalize_options(self):
@@ -78,7 +82,7 @@ class uploadTestCase(PyPIRCCommandTestCase):
cmd.finalize_options()
for attr, waited in (('username', 'me'), ('password', 'secret'),
('realm', 'pypi'),
- ('repository', 'http://pypi.python.org/pypi')):
+ ('repository', 'https://pypi.python.org/pypi')):
self.assertEqual(getattr(cmd, attr), waited)
def test_saved_password(self):
@@ -119,10 +123,15 @@ class uploadTestCase(PyPIRCCommandTestCase):
self.assertTrue(headers['Content-type'].startswith('multipart/form-data'))
self.assertEqual(self.last_open.req.get_method(), 'POST')
self.assertEqual(self.last_open.req.get_full_url(),
- 'http://pypi.python.org/pypi')
- self.assertTrue('xxx' in self.last_open.req.data)
+ 'https://pypi.python.org/pypi')
+ self.assertIn('xxx', self.last_open.req.data)
auth = self.last_open.req.headers['Authorization']
- self.assertFalse('\n' in auth)
+ self.assertNotIn('\n', auth)
+
+ def test_upload_fails(self):
+ self.next_msg = "Not Found"
+ self.next_code = 404
+ self.assertRaises(DistutilsError, self.test_upload)
def test_suite():
return unittest.makeSuite(uploadTestCase)
diff --git a/Lib/distutils/tests/test_util.py b/Lib/distutils/tests/test_util.py
index 67cd4cc..2d7b101 100644
--- a/Lib/distutils/tests/test_util.py
+++ b/Lib/distutils/tests/test_util.py
@@ -3,8 +3,9 @@ import sys
import unittest
from test.test_support import run_unittest
-from distutils.errors import DistutilsPlatformError, DistutilsByteCompileError
-from distutils.util import byte_compile
+from distutils.errors import DistutilsByteCompileError
+from distutils.util import byte_compile, grok_environment_error
+
class UtilTestCase(unittest.TestCase):
@@ -18,6 +19,13 @@ class UtilTestCase(unittest.TestCase):
finally:
sys.dont_write_bytecode = old_dont_write_bytecode
+ def test_grok_environment_error(self):
+ # test obsolete function to ensure backward compat (#4931)
+ exc = IOError("Unable to find batch file")
+ msg = grok_environment_error(exc)
+ self.assertEqual(msg, "error: Unable to find batch file")
+
+
def test_suite():
return unittest.makeSuite(UtilTestCase)
diff --git a/Lib/distutils/unixccompiler.py b/Lib/distutils/unixccompiler.py
index c49ac9b..2aa1cb1 100644
--- a/Lib/distutils/unixccompiler.py
+++ b/Lib/distutils/unixccompiler.py
@@ -26,6 +26,9 @@ from distutils.errors import \
DistutilsExecError, CompileError, LibError, LinkError
from distutils import log
+if sys.platform == 'darwin':
+ import _osx_support
+
# XXX Things not currently handled:
# * optimization/debug/warning flags; we just use whatever's in Python's
# Makefile and live with it. Is this adequate? If not, we might
@@ -41,68 +44,6 @@ from distutils import log
# should just happily stuff them into the preprocessor/compiler/linker
# options and carry on.
-def _darwin_compiler_fixup(compiler_so, cc_args):
- """
- This function will strip '-isysroot PATH' and '-arch ARCH' from the
- compile flags if the user has specified one them in extra_compile_flags.
-
- This is needed because '-arch ARCH' adds another architecture to the
- build, without a way to remove an architecture. Furthermore GCC will
- barf if multiple '-isysroot' arguments are present.
- """
- stripArch = stripSysroot = 0
-
- compiler_so = list(compiler_so)
- kernel_version = os.uname()[2] # 8.4.3
- major_version = int(kernel_version.split('.')[0])
-
- if major_version < 8:
- # OSX before 10.4.0, these don't support -arch and -isysroot at
- # all.
- stripArch = stripSysroot = True
- else:
- stripArch = '-arch' in cc_args
- stripSysroot = '-isysroot' in cc_args
-
- if stripArch or 'ARCHFLAGS' in os.environ:
- while 1:
- try:
- index = compiler_so.index('-arch')
- # Strip this argument and the next one:
- del compiler_so[index:index+2]
- except ValueError:
- break
-
- if 'ARCHFLAGS' in os.environ and not stripArch:
- # User specified different -arch flags in the environ,
- # see also distutils.sysconfig
- compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()
-
- if stripSysroot:
- try:
- index = compiler_so.index('-isysroot')
- # Strip this argument and the next one:
- del compiler_so[index:index+2]
- except ValueError:
- pass
-
- # Check if the SDK that is used during compilation actually exists,
- # the universal build requires the usage of a universal SDK and not all
- # users have that installed by default.
- sysroot = None
- if '-isysroot' in cc_args:
- idx = cc_args.index('-isysroot')
- sysroot = cc_args[idx+1]
- elif '-isysroot' in compiler_so:
- idx = compiler_so.index('-isysroot')
- sysroot = compiler_so[idx+1]
-
- if sysroot and not os.path.isdir(sysroot):
- log.warn("Compiling with an SDK that doesn't seem to exist: %s",
- sysroot)
- log.warn("Please check your Xcode installation")
-
- return compiler_so
class UnixCCompiler(CCompiler):
@@ -172,7 +113,8 @@ class UnixCCompiler(CCompiler):
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
compiler_so = self.compiler_so
if sys.platform == 'darwin':
- compiler_so = _darwin_compiler_fixup(compiler_so, cc_args + extra_postargs)
+ compiler_so = _osx_support.compiler_fixup(compiler_so,
+ cc_args + extra_postargs)
try:
self.spawn(compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
@@ -251,7 +193,7 @@ class UnixCCompiler(CCompiler):
linker[i] = self.compiler_cxx[i]
if sys.platform == 'darwin':
- linker = _darwin_compiler_fixup(linker, ld_args)
+ linker = _osx_support.compiler_fixup(linker, ld_args)
self.spawn(linker + ld_args)
except DistutilsExecError, msg:
diff --git a/Lib/distutils/util.py b/Lib/distutils/util.py
index 0c24e8c..2b4d784 100644
--- a/Lib/distutils/util.py
+++ b/Lib/distutils/util.py
@@ -51,6 +51,10 @@ def get_platform ():
return 'win-ia64'
return sys.platform
+ # Set for cross builds explicitly
+ if "_PYTHON_HOST_PLATFORM" in os.environ:
+ return os.environ["_PYTHON_HOST_PLATFORM"]
+
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
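
The new branch above lets cross-compilation drive the platform string directly: when _PYTHON_HOST_PLATFORM is set, get_platform returns it unchanged instead of probing the build host. A tiny sketch on a POSIX host ('linux-armv7l' is just an example value):

import os
from distutils.util import get_platform

os.environ['_PYTHON_HOST_PLATFORM'] = 'linux-armv7l'
print get_platform()   # 'linux-armv7l', regardless of the machine running the build
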
@@ -93,94 +97,10 @@ def get_platform ():
if m:
release = m.group()
elif osname[:6] == "darwin":
- #
- # For our purposes, we'll assume that the system version from
- # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
- # to. This makes the compatibility story a bit more sane because the
- # machine is going to compile and link as if it were
- # MACOSX_DEPLOYMENT_TARGET.
- from distutils.sysconfig import get_config_vars
- cfgvars = get_config_vars()
-
- macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
-
- if 1:
- # Always calculate the release of the running machine,
- # needed to determine if we can build fat binaries or not.
-
- macrelease = macver
- # Get the system version. Reading this plist is a documented
- # way to get the system version (see the documentation for
- # the Gestalt Manager)
- try:
- f = open('/System/Library/CoreServices/SystemVersion.plist')
- except IOError:
- # We're on a plain darwin box, fall back to the default
- # behaviour.
- pass
- else:
- try:
- m = re.search(
- r'<key>ProductUserVisibleVersion</key>\s*' +
- r'<string>(.*?)</string>', f.read())
- if m is not None:
- macrelease = '.'.join(m.group(1).split('.')[:2])
- # else: fall back to the default behaviour
- finally:
- f.close()
-
- if not macver:
- macver = macrelease
-
- if macver:
- from distutils.sysconfig import get_config_vars
- release = macver
- osname = "macosx"
-
- if (macrelease + '.') >= '10.4.' and \
- '-arch' in get_config_vars().get('CFLAGS', '').strip():
- # The universal build will build fat binaries, but not on
- # systems before 10.4
- #
- # Try to detect 4-way universal builds, those have machine-type
- # 'universal' instead of 'fat'.
-
- machine = 'fat'
- cflags = get_config_vars().get('CFLAGS')
-
- archs = re.findall('-arch\s+(\S+)', cflags)
- archs = tuple(sorted(set(archs)))
-
- if len(archs) == 1:
- machine = archs[0]
- elif archs == ('i386', 'ppc'):
- machine = 'fat'
- elif archs == ('i386', 'x86_64'):
- machine = 'intel'
- elif archs == ('i386', 'ppc', 'x86_64'):
- machine = 'fat3'
- elif archs == ('ppc64', 'x86_64'):
- machine = 'fat64'
- elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
- machine = 'universal'
- else:
- raise ValueError(
- "Don't know machine value for archs=%r"%(archs,))
-
- elif machine == 'i386':
- # On OSX the machine type returned by uname is always the
- # 32-bit variant, even if the executable architecture is
- # the 64-bit variant
- if sys.maxint >= 2**32:
- machine = 'x86_64'
-
- elif machine in ('PowerPC', 'Power_Macintosh'):
- # Pick a sane name for the PPC architecture.
- machine = 'ppc'
-
- # See 'i386' case
- if sys.maxint >= 2**32:
- machine = 'ppc64'
+ import _osx_support, distutils.sysconfig
+ osname, release, machine = _osx_support.get_platform_osx(
+ distutils.sysconfig.get_config_vars(),
+ osname, release, machine)
return "%s-%s-%s" % (osname, release, machine)
@@ -293,25 +213,10 @@ def subst_vars (s, local_vars):
def grok_environment_error (exc, prefix="error: "):
- """Generate a useful error message from an EnvironmentError (IOError or
- OSError) exception object. Handles Python 1.5.1 and 1.5.2 styles, and
- does what it can to deal with exception objects that don't have a
- filename (which happens when the error is due to a two-file operation,
- such as 'rename()' or 'link()'. Returns the error message as a string
- prefixed with 'prefix'.
- """
- # check for Python 1.5.2-style {IO,OS}Error exception objects
- if hasattr(exc, 'filename') and hasattr(exc, 'strerror'):
- if exc.filename:
- error = prefix + "%s: %s" % (exc.filename, exc.strerror)
- else:
- # two-argument functions in posix module don't
- # include the filename in the exception object!
- error = prefix + "%s" % exc.strerror
- else:
- error = prefix + str(exc[-1])
-
- return error
+ # Function kept for backward compatibility.
+ # Used to try clever things with EnvironmentErrors,
+ # but nowadays str(exception) produces good messages.
+ return prefix + str(exc)
# Needed by 'split_quoted()'
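
grok_environment_error is kept only for backward compatibility (#4931) and now simply prefixes str(exc). The behaviour the new test_util test checks, as a quick sketch:

from distutils.util import grok_environment_error

exc = IOError("Unable to find batch file")
print grok_environment_error(exc)                   # 'error: Unable to find batch file'
print grok_environment_error(exc, prefix="oops: ")  # 'oops: Unable to find batch file'
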
diff --git a/Lib/doctest.py b/Lib/doctest.py
index 8297fad..86c9839 100644
--- a/Lib/doctest.py
+++ b/Lib/doctest.py
@@ -326,6 +326,32 @@ def _comment_line(line):
else:
return '#'
+def _strip_exception_details(msg):
+ # Support for IGNORE_EXCEPTION_DETAIL.
+ # Get rid of everything except the exception name; in particular, drop
+ # the possibly dotted module path (if any) and the exception message (if
+ # any). We assume that a colon is never part of a dotted name, or of an
+ # exception name.
+ # E.g., given
+ # "foo.bar.MyError: la di da"
+ # return "MyError"
+ # Or for "abc.def" or "abc.def:\n" return "def".
+
+ start, end = 0, len(msg)
+ # The exception name must appear on the first line.
+ i = msg.find("\n")
+ if i >= 0:
+ end = i
+ # retain up to the first colon (if any)
+ i = msg.find(':', 0, end)
+ if i >= 0:
+ end = i
+ # retain just the exception name
+ i = msg.rfind('.', 0, end)
+ if i >= 0:
+ start = i+1
+ return msg[start: end]
+
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
@@ -424,7 +450,7 @@ class Example:
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
- I.e., the number of space characters that preceed the
+ I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
@@ -564,7 +590,7 @@ class DocTestParser:
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
- .*$\n? # But any other line
+ .+$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
@@ -895,7 +921,7 @@ class DocTestFinder:
if '__name__' not in globs:
globs['__name__'] = '__main__' # provide a default module name
- # Recursively expore `obj`, extracting DocTests.
+ # Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
# Sort the tests by alpha order of names, for consistency in
@@ -1323,10 +1349,9 @@ class DocTestRunner:
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
- m1 = re.match(r'(?:[^:]*\.)?([^:]*:)', example.exc_msg)
- m2 = re.match(r'(?:[^:]*\.)?([^:]*:)', exc_msg)
- if m1 and m2 and check(m1.group(1), m2.group(1),
- self.optionflags):
+ if check(_strip_exception_details(example.exc_msg),
+ _strip_exception_details(exc_msg),
+ self.optionflags):
outcome = SUCCESS
# Report the outcome.
@@ -2314,7 +2339,8 @@ class DocTestCase(unittest.TestCase):
return "Doctest: " + self._dt_test.name
class SkipDocTestCase(DocTestCase):
- def __init__(self):
+ def __init__(self, module):
+ self.module = module
DocTestCase.__init__(self, None)
def setUp(self):
@@ -2324,7 +2350,10 @@ class SkipDocTestCase(DocTestCase):
pass
def shortDescription(self):
- return "Skipping tests from %s" % module.__name__
+ return "Skipping tests from %s" % self.module.__name__
+
+ __str__ = shortDescription
+
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
@@ -2372,12 +2401,17 @@ def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
if not tests and sys.flags.optimize >=2:
# Skip doctests when running with -O2
suite = unittest.TestSuite()
- suite.addTest(SkipDocTestCase())
+ suite.addTest(SkipDocTestCase(module))
return suite
elif not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
- raise ValueError(module, "has no tests")
+ # It is probably a bug that this exception is not also raised if the
+ # number of doctest examples in tests is zero (i.e. if no doctest
+ # examples were found). However, we should probably not be raising
+ # an exception at all here, though it is too late to make this change
+ # for a maintenance release. See also issue #14649.
+ raise ValueError(module, "has no docstrings")
tests.sort()
suite = unittest.TestSuite()
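
The new _strip_exception_details helper reduces an exception line to the bare exception name so IGNORE_EXCEPTION_DETAIL can compare expected and actual output without caring about module paths or messages. A few calls, taken from the cases listed in its comments (it is a private helper, so this is for illustration only):

from doctest import _strip_exception_details

print _strip_exception_details("foo.bar.MyError: la di da")  # 'MyError'
print _strip_exception_details("abc.def")                    # 'def'
print _strip_exception_details("abc.def:\n")                 # 'def'
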
diff --git a/Lib/dumbdbm.py b/Lib/dumbdbm.py
index fb54a93..4a0c3a7 100644
--- a/Lib/dumbdbm.py
+++ b/Lib/dumbdbm.py
@@ -68,9 +68,10 @@ class _Database(UserDict.DictMixin):
try:
f = _open(self._datfile, 'r')
except IOError:
- f = _open(self._datfile, 'w')
- self._chmod(self._datfile)
- f.close()
+ with _open(self._datfile, 'w') as f:
+ self._chmod(self._datfile)
+ else:
+ f.close()
self._update()
# Read directory file into the in-memory index dict.
@@ -81,11 +82,11 @@ class _Database(UserDict.DictMixin):
except IOError:
pass
else:
- for line in f:
- line = line.rstrip()
- key, pos_and_siz_pair = eval(line)
- self._index[key] = pos_and_siz_pair
- f.close()
+ with f:
+ for line in f:
+ line = line.rstrip()
+ key, pos_and_siz_pair = eval(line)
+ self._index[key] = pos_and_siz_pair
# Write the index dict to the directory file. The original directory
# file (if any) is renamed with a .bak extension first. If a .bak
@@ -107,20 +108,18 @@ class _Database(UserDict.DictMixin):
except self._os.error:
pass
- f = self._open(self._dirfile, 'w')
- self._chmod(self._dirfile)
- for key, pos_and_siz_pair in self._index.iteritems():
- f.write("%r, %r\n" % (key, pos_and_siz_pair))
- f.close()
+ with self._open(self._dirfile, 'w') as f:
+ self._chmod(self._dirfile)
+ for key, pos_and_siz_pair in self._index.iteritems():
+ f.write("%r, %r\n" % (key, pos_and_siz_pair))
sync = _commit
def __getitem__(self, key):
pos, siz = self._index[key] # may raise KeyError
- f = _open(self._datfile, 'rb')
- f.seek(pos)
- dat = f.read(siz)
- f.close()
+ with _open(self._datfile, 'rb') as f:
+ f.seek(pos)
+ dat = f.read(siz)
return dat
# Append val to the data file, starting at a _BLOCKSIZE-aligned
@@ -128,14 +127,13 @@ class _Database(UserDict.DictMixin):
# to get to an aligned offset. Return pair
# (starting offset of val, len(val))
def _addval(self, val):
- f = _open(self._datfile, 'rb+')
- f.seek(0, 2)
- pos = int(f.tell())
- npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
- f.write('\0'*(npos-pos))
- pos = npos
- f.write(val)
- f.close()
+ with _open(self._datfile, 'rb+') as f:
+ f.seek(0, 2)
+ pos = int(f.tell())
+ npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
+ f.write('\0'*(npos-pos))
+ pos = npos
+ f.write(val)
return (pos, len(val))
# Write val to the data file, starting at offset pos. The caller
@@ -143,10 +141,9 @@ class _Database(UserDict.DictMixin):
# pos to hold val, without overwriting some other value. Return
# pair (pos, len(val)).
def _setval(self, pos, val):
- f = _open(self._datfile, 'rb+')
- f.seek(pos)
- f.write(val)
- f.close()
+ with _open(self._datfile, 'rb+') as f:
+ f.seek(pos)
+ f.write(val)
return (pos, len(val))
# key is a new key whose associated value starts in the data file
@@ -154,10 +151,9 @@ class _Database(UserDict.DictMixin):
# the in-memory index dict, and append one to the directory file.
def _addkey(self, key, pos_and_siz_pair):
self._index[key] = pos_and_siz_pair
- f = _open(self._dirfile, 'a')
- self._chmod(self._dirfile)
- f.write("%r, %r\n" % (key, pos_and_siz_pair))
- f.close()
+ with _open(self._dirfile, 'a') as f:
+ self._chmod(self._dirfile)
+ f.write("%r, %r\n" % (key, pos_and_siz_pair))
def __setitem__(self, key, val):
if not type(key) == type('') == type(val):
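
The dumbdbm changes above swap manual open/close pairs for with blocks, so the .dat and .dir files are closed even if a write fails part-way. The module's public behaviour is unchanged; a minimal round-trip sketch (creates example.dat, example.dir and example.bak in the current directory):

import dumbdbm

db = dumbdbm.open('example', 'c')  # 'example' is a throwaway filename prefix
db['key'] = 'value'                # appended to example.dat, indexed in example.dir
print db['key']                    # 'value'
db.close()
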
diff --git a/Lib/email/_parseaddr.py b/Lib/email/_parseaddr.py
index 3bd4ba4..690db2c 100644
--- a/Lib/email/_parseaddr.py
+++ b/Lib/email/_parseaddr.py
@@ -13,7 +13,7 @@ __all__ = [
'quote',
]
-import time
+import time, calendar
SPACE = ' '
EMPTYSTRING = ''
@@ -150,13 +150,13 @@ def parsedate(data):
def mktime_tz(data):
- """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
+ """Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp."""
if data[9] is None:
# No zone info, so localtime is better assumption than GMT
return time.mktime(data[:8] + (-1,))
else:
- t = time.mktime(data[:8] + (0,))
- return t - data[9] - time.timezone
+ t = calendar.timegm(data)
+ return t - data[9]
def quote(str):
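
mktime_tz now goes through calendar.timegm, so the result is a true POSIX timestamp independent of the local timezone (the old time.mktime/time.timezone arithmetic could be off by an hour in DST-observing zones). The first two values below come from the new test_mktime_tz test added further down; the parsedate_tz line is an extra sanity check:

from email.utils import mktime_tz, parsedate_tz

print mktime_tz((1970, 1, 1, 0, 0, 0, -1, -1, -1, 0))     # 0
print mktime_tz((1970, 1, 1, 0, 0, 0, -1, -1, -1, 1234))  # -1234
print mktime_tz(parsedate_tz('Thu, 01 Jan 1970 01:00:00 +0100'))  # 0
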
diff --git a/Lib/email/base64mime.py b/Lib/email/base64mime.py
index 8804427..4aa8000 100644
--- a/Lib/email/base64mime.py
+++ b/Lib/email/base64mime.py
@@ -130,7 +130,7 @@ def encode(s, binary=True, maxlinelen=76, eol=NL):
verbatim (this is the default).
Each line of encoded text will end with eol, which defaults to "\\n". Set
- this to "\r\n" if you will be using the result of this function directly
+ this to "\\r\\n" if you will be using the result of this function directly
in an email.
"""
if not s:
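
The docstring fix above only corrects how the backslashes render in the generated documentation; encode itself is unchanged. For reference, asking for CRLF line endings looks like this (a trivial sketch):

from email import base64mime

print repr(base64mime.encode('hello there', eol='\r\n'))  # prints 'aGVsbG8gdGhlcmU=\r\n'
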
diff --git a/Lib/email/charset.py b/Lib/email/charset.py
index dddaa76..30a13ff 100644
--- a/Lib/email/charset.py
+++ b/Lib/email/charset.py
@@ -183,7 +183,7 @@ class Charset:
header encoding. Charset.SHORTEST is not allowed for
body_encoding.
- output_charset: Some character sets must be converted before the can be
+ output_charset: Some character sets must be converted before they can be
used in email headers or bodies. If the input_charset is
one of them, this attribute will contain the name of the
charset output will be converted to. Otherwise, it will
diff --git a/Lib/email/feedparser.py b/Lib/email/feedparser.py
index 5ff266f..15db26d 100644
--- a/Lib/email/feedparser.py
+++ b/Lib/email/feedparser.py
@@ -13,7 +13,7 @@ parser. It returns when there's nothing more it can do with the available
data. When you have no more data to push into the parser, call .close().
This completes the parsing and returns the root message object.
-The other advantage of this parser is that it will never throw a parsing
+The other advantage of this parser is that it will never raise a parsing
exception. Instead, when it finds something unexpected, it adds a 'defect' to
the current message. Defects are just instances that live on the message
object's .defects attribute.
@@ -214,7 +214,7 @@ class FeedParser:
# supposed to see in the body of the message.
self._parse_headers(headers)
# Headers-only parsing is a backwards compatibility hack, which was
- # necessary in the older parser, which could throw errors. All
+ # necessary in the older parser, which could raise errors. All
# remaining lines in the input are thrown into the message body.
if self._headersonly:
lines = []
diff --git a/Lib/email/generator.py b/Lib/email/generator.py
index eb71044..e50f912 100644
--- a/Lib/email/generator.py
+++ b/Lib/email/generator.py
@@ -212,7 +212,11 @@ class Generator:
msg.set_boundary(boundary)
# If there's a preamble, write it out, with a trailing CRLF
if msg.preamble is not None:
- print >> self._fp, msg.preamble
+ if self._mangle_from_:
+ preamble = fcre.sub('>From ', msg.preamble)
+ else:
+ preamble = msg.preamble
+ print >> self._fp, preamble
# dash-boundary transport-padding CRLF
print >> self._fp, '--' + boundary
# body-part
@@ -227,10 +231,13 @@ class Generator:
# body-part
self._fp.write(body_part)
# close-delimiter transport-padding
- self._fp.write('\n--' + boundary + '--')
+ self._fp.write('\n--' + boundary + '--' + NL)
if msg.epilogue is not None:
- print >> self._fp
- self._fp.write(msg.epilogue)
+ if self._mangle_from_:
+ epilogue = fcre.sub('>From ', msg.epilogue)
+ else:
+ epilogue = msg.epilogue
+ self._fp.write(epilogue)
def _handle_multipart_signed(self, msg):
# The contents of signed parts has to stay unmodified in order to keep
diff --git a/Lib/email/test/data/msg_02.txt b/Lib/email/test/data/msg_02.txt
index 43f2480..5d0a7e1 100644
--- a/Lib/email/test/data/msg_02.txt
+++ b/Lib/email/test/data/msg_02.txt
@@ -119,6 +119,7 @@ hello
--__--__----
+
--192.168.1.2.889.32614.987812255.500.21814
Content-type: text/plain; charset=us-ascii
Content-description: Digest Footer
diff --git a/Lib/email/test/test_email.py b/Lib/email/test/test_email.py
index 5c9a725..c4a90d8 100644
--- a/Lib/email/test/test_email.py
+++ b/Lib/email/test/test_email.py
@@ -9,6 +9,7 @@ import base64
import difflib
import unittest
import warnings
+import textwrap
from cStringIO import StringIO
import email
@@ -266,12 +267,12 @@ class TestMessageAPI(TestEmailBase):
msg['From'] = 'Me'
msg['to'] = 'You'
# Check for case insensitivity
- self.assertTrue('from' in msg)
- self.assertTrue('From' in msg)
- self.assertTrue('FROM' in msg)
- self.assertTrue('to' in msg)
- self.assertTrue('To' in msg)
- self.assertTrue('TO' in msg)
+ self.assertIn('from', msg)
+ self.assertIn('From', msg)
+ self.assertIn('FROM', msg)
+ self.assertIn('to', msg)
+ self.assertIn('To', msg)
+ self.assertIn('TO', msg)
def test_as_string(self):
eq = self.assertEqual
@@ -948,6 +949,28 @@ From the desk of A.A.A.:
Blah blah blah
""")
+ def test_mangle_from_in_preamble_and_epilog(self):
+ s = StringIO()
+ g = Generator(s, mangle_from_=True)
+ msg = email.message_from_string(textwrap.dedent("""\
+ From: foo@bar.com
+ Mime-Version: 1.0
+ Content-Type: multipart/mixed; boundary=XXX
+
+ From somewhere unknown
+
+ --XXX
+ Content-Type: text/plain
+
+ foo
+
+ --XXX--
+
+ From somewhere unknowable
+ """))
+ g.flatten(msg)
+ self.assertEqual(len([1 for x in s.getvalue().split('\n')
+ if x.startswith('>From ')]), 2)
# Test the basic MIMEAudio class
@@ -979,7 +1002,6 @@ class TestMIMEAudio(unittest.TestCase):
def test_add_header(self):
eq = self.assertEqual
- unless = self.assertTrue
self._au.add_header('Content-Disposition', 'attachment',
filename='audiotest.au')
eq(self._au['content-disposition'],
@@ -990,12 +1012,12 @@ class TestMIMEAudio(unittest.TestCase):
'audiotest.au')
missing = []
eq(self._au.get_param('attachment', header='content-disposition'), '')
- unless(self._au.get_param('foo', failobj=missing,
- header='content-disposition') is missing)
+ self.assertIs(self._au.get_param('foo', failobj=missing,
+ header='content-disposition'), missing)
# Try some missing stuff
- unless(self._au.get_param('foobar', missing) is missing)
- unless(self._au.get_param('attachment', missing,
- header='foobar') is missing)
+ self.assertIs(self._au.get_param('foobar', missing), missing)
+ self.assertIs(self._au.get_param('attachment', missing,
+ header='foobar'), missing)
@@ -1022,7 +1044,6 @@ class TestMIMEImage(unittest.TestCase):
def test_add_header(self):
eq = self.assertEqual
- unless = self.assertTrue
self._im.add_header('Content-Disposition', 'attachment',
filename='dingusfish.gif')
eq(self._im['content-disposition'],
@@ -1033,12 +1054,12 @@ class TestMIMEImage(unittest.TestCase):
'dingusfish.gif')
missing = []
eq(self._im.get_param('attachment', header='content-disposition'), '')
- unless(self._im.get_param('foo', failobj=missing,
- header='content-disposition') is missing)
+ self.assertIs(self._im.get_param('foo', failobj=missing,
+ header='content-disposition'), missing)
# Try some missing stuff
- unless(self._im.get_param('foobar', missing) is missing)
- unless(self._im.get_param('attachment', missing,
- header='foobar') is missing)
+ self.assertIs(self._im.get_param('foobar', missing), missing)
+ self.assertIs(self._im.get_param('attachment', missing,
+ header='foobar'), missing)
@@ -1049,17 +1070,16 @@ class TestMIMEText(unittest.TestCase):
def test_types(self):
eq = self.assertEqual
- unless = self.assertTrue
eq(self._msg.get_content_type(), 'text/plain')
eq(self._msg.get_param('charset'), 'us-ascii')
missing = []
- unless(self._msg.get_param('foobar', missing) is missing)
- unless(self._msg.get_param('charset', missing, header='foobar')
- is missing)
+ self.assertIs(self._msg.get_param('foobar', missing), missing)
+ self.assertIs(self._msg.get_param('charset', missing, header='foobar'),
+ missing)
def test_payload(self):
self.assertEqual(self._msg.get_payload(), 'hello there')
- self.assertTrue(not self._msg.is_multipart())
+ self.assertFalse(self._msg.is_multipart())
def test_charset(self):
eq = self.assertEqual
@@ -1078,7 +1098,7 @@ class TestMIMEText(unittest.TestCase):
msg = MIMEText(u'hello there')
eq(msg.get_charset(), 'us-ascii')
eq(msg['content-type'], 'text/plain; charset="us-ascii"')
- self.assertTrue('hello there' in msg.as_string())
+ self.assertIn('hello there', msg.as_string())
def test_8bit_unicode_input(self):
teststr = u'\u043a\u0438\u0440\u0438\u043b\u0438\u0446\u0430'
@@ -1139,21 +1159,20 @@ This is the dingus fish.
def test_hierarchy(self):
# convenience
eq = self.assertEqual
- unless = self.assertTrue
raises = self.assertRaises
# tests
m = self._msg
- unless(m.is_multipart())
+ self.assertTrue(m.is_multipart())
eq(m.get_content_type(), 'multipart/mixed')
eq(len(m.get_payload()), 2)
raises(IndexError, m.get_payload, 2)
m0 = m.get_payload(0)
m1 = m.get_payload(1)
- unless(m0 is self._txt)
- unless(m1 is self._im)
+ self.assertIs(m0, self._txt)
+ self.assertIs(m1, self._im)
eq(m.get_payload(), [m0, m1])
- unless(not m0.is_multipart())
- unless(not m1.is_multipart())
+ self.assertFalse(m0.is_multipart())
+ self.assertFalse(m1.is_multipart())
def test_empty_multipart_idempotent(self):
text = """\
@@ -1187,7 +1206,8 @@ From: bperson@dom.ain
--BOUNDARY
---BOUNDARY--''')
+--BOUNDARY--
+''')
def test_no_parts_in_a_multipart_with_empty_epilogue(self):
outer = MIMEBase('multipart', 'mixed')
@@ -1232,7 +1252,8 @@ MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
---BOUNDARY--''')
+--BOUNDARY--
+''')
def test_seq_parts_in_a_multipart_with_empty_preamble(self):
eq = self.ndiffAssertEqual
@@ -1258,7 +1279,8 @@ MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
---BOUNDARY--''')
+--BOUNDARY--
+''')
def test_seq_parts_in_a_multipart_with_none_preamble(self):
@@ -1284,7 +1306,8 @@ MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
---BOUNDARY--''')
+--BOUNDARY--
+''')
def test_seq_parts_in_a_multipart_with_none_epilogue(self):
@@ -1310,7 +1333,8 @@ MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
---BOUNDARY--''')
+--BOUNDARY--
+''')
def test_seq_parts_in_a_multipart_with_empty_epilogue(self):
@@ -1483,23 +1507,22 @@ class TestNonConformant(TestEmailBase):
eq(msg.get_content_subtype(), 'plain')
def test_same_boundary_inner_outer(self):
- unless = self.assertTrue
msg = self._msgobj('msg_15.txt')
# XXX We can probably eventually do better
inner = msg.get_payload(0)
- unless(hasattr(inner, 'defects'))
+ self.assertTrue(hasattr(inner, 'defects'))
self.assertEqual(len(inner.defects), 1)
- unless(isinstance(inner.defects[0],
- Errors.StartBoundaryNotFoundDefect))
+ self.assertIsInstance(inner.defects[0],
+ Errors.StartBoundaryNotFoundDefect)
def test_multipart_no_boundary(self):
- unless = self.assertTrue
msg = self._msgobj('msg_25.txt')
- unless(isinstance(msg.get_payload(), str))
+ self.assertIsInstance(msg.get_payload(), str)
self.assertEqual(len(msg.defects), 2)
- unless(isinstance(msg.defects[0], Errors.NoBoundaryInMultipartDefect))
- unless(isinstance(msg.defects[1],
- Errors.MultipartInvariantViolationDefect))
+ self.assertIsInstance(msg.defects[0],
+ Errors.NoBoundaryInMultipartDefect)
+ self.assertIsInstance(msg.defects[1],
+ Errors.MultipartInvariantViolationDefect)
def test_invalid_content_type(self):
eq = self.assertEqual
@@ -1551,13 +1574,13 @@ counter to RFC 2822, there's no separating newline here
""")
def test_lying_multipart(self):
- unless = self.assertTrue
msg = self._msgobj('msg_41.txt')
- unless(hasattr(msg, 'defects'))
+ self.assertTrue(hasattr(msg, 'defects'))
self.assertEqual(len(msg.defects), 2)
- unless(isinstance(msg.defects[0], Errors.NoBoundaryInMultipartDefect))
- unless(isinstance(msg.defects[1],
- Errors.MultipartInvariantViolationDefect))
+ self.assertIsInstance(msg.defects[0],
+ Errors.NoBoundaryInMultipartDefect)
+ self.assertIsInstance(msg.defects[1],
+ Errors.MultipartInvariantViolationDefect)
def test_missing_start_boundary(self):
outer = self._msgobj('msg_42.txt')
@@ -1571,8 +1594,8 @@ counter to RFC 2822, there's no separating newline here
# [*] This message is missing its start boundary
bad = outer.get_payload(1).get_payload(0)
self.assertEqual(len(bad.defects), 1)
- self.assertTrue(isinstance(bad.defects[0],
- Errors.StartBoundaryNotFoundDefect))
+ self.assertIsInstance(bad.defects[0],
+ Errors.StartBoundaryNotFoundDefect)
def test_first_line_is_continuation_header(self):
eq = self.assertEqual
@@ -1581,8 +1604,8 @@ counter to RFC 2822, there's no separating newline here
eq(msg.keys(), [])
eq(msg.get_payload(), 'Line 2\nLine 3')
eq(len(msg.defects), 1)
- self.assertTrue(isinstance(msg.defects[0],
- Errors.FirstHeaderLineIsContinuationDefect))
+ self.assertIsInstance(msg.defects[0],
+ Errors.FirstHeaderLineIsContinuationDefect)
eq(msg.defects[0].line, ' Line 1\n')
@@ -1664,17 +1687,16 @@ class TestMIMEMessage(TestEmailBase):
def test_valid_argument(self):
eq = self.assertEqual
- unless = self.assertTrue
subject = 'A sub-message'
m = Message()
m['Subject'] = subject
r = MIMEMessage(m)
eq(r.get_content_type(), 'message/rfc822')
payload = r.get_payload()
- unless(isinstance(payload, list))
+ self.assertIsInstance(payload, list)
eq(len(payload), 1)
subpart = payload[0]
- unless(subpart is m)
+ self.assertIs(subpart, m)
eq(subpart['subject'], subject)
def test_bad_multipart(self):
@@ -1708,24 +1730,22 @@ Here is the body of the message.
def test_parse_message_rfc822(self):
eq = self.assertEqual
- unless = self.assertTrue
msg = self._msgobj('msg_11.txt')
eq(msg.get_content_type(), 'message/rfc822')
payload = msg.get_payload()
- unless(isinstance(payload, list))
+ self.assertIsInstance(payload, list)
eq(len(payload), 1)
submsg = payload[0]
- self.assertTrue(isinstance(submsg, Message))
+ self.assertIsInstance(submsg, Message)
eq(submsg['subject'], 'An enclosed message')
eq(submsg.get_payload(), 'Here is the body of the message.\n')
def test_dsn(self):
eq = self.assertEqual
- unless = self.assertTrue
# msg 16 is a Delivery Status Notification, see RFC 1894
msg = self._msgobj('msg_16.txt')
eq(msg.get_content_type(), 'multipart/report')
- unless(msg.is_multipart())
+ self.assertTrue(msg.is_multipart())
eq(len(msg.get_payload()), 3)
# Subpart 1 is a text/plain, human readable section
subpart = msg.get_payload(0)
@@ -1754,13 +1774,13 @@ Your message cannot be delivered to the following recipients:
# message/delivery-status should treat each block as a bunch of
# headers, i.e. a bunch of Message objects.
dsn1 = subpart.get_payload(0)
- unless(isinstance(dsn1, Message))
+ self.assertIsInstance(dsn1, Message)
eq(dsn1['original-envelope-id'], '0GK500B4HD0888@cougar.noc.ucla.edu')
eq(dsn1.get_param('dns', header='reporting-mta'), '')
# Try a missing one <wink>
eq(dsn1.get_param('nsd', header='reporting-mta'), None)
dsn2 = subpart.get_payload(1)
- unless(isinstance(dsn2, Message))
+ self.assertIsInstance(dsn2, Message)
eq(dsn2['action'], 'failed')
eq(dsn2.get_params(header='original-recipient'),
[('rfc822', ''), ('jangel1@cougar.noc.ucla.edu', '')])
@@ -1769,10 +1789,10 @@ Your message cannot be delivered to the following recipients:
subpart = msg.get_payload(2)
eq(subpart.get_content_type(), 'message/rfc822')
payload = subpart.get_payload()
- unless(isinstance(payload, list))
+ self.assertIsInstance(payload, list)
eq(len(payload), 1)
subsubpart = payload[0]
- unless(isinstance(subsubpart, Message))
+ self.assertIsInstance(subsubpart, Message)
eq(subsubpart.get_content_type(), 'text/plain')
eq(subsubpart['message-id'],
'<002001c144a6$8752e060$56104586@oxy.edu>')
@@ -2071,7 +2091,6 @@ class TestIdempotent(TestEmailBase):
def test_content_type(self):
eq = self.assertEqual
- unless = self.assertTrue
# Get a message object and reset the seek pointer for other tests
msg, text = self._msgobj('msg_05.txt')
eq(msg.get_content_type(), 'multipart/report')
@@ -2093,29 +2112,28 @@ class TestIdempotent(TestEmailBase):
eq(msg2.get_payload(), 'Yadda yadda yadda\n')
msg3 = msg.get_payload(2)
eq(msg3.get_content_type(), 'message/rfc822')
- self.assertTrue(isinstance(msg3, Message))
+ self.assertIsInstance(msg3, Message)
payload = msg3.get_payload()
- unless(isinstance(payload, list))
+ self.assertIsInstance(payload, list)
eq(len(payload), 1)
msg4 = payload[0]
- unless(isinstance(msg4, Message))
+ self.assertIsInstance(msg4, Message)
eq(msg4.get_payload(), 'Yadda yadda yadda\n')
def test_parser(self):
eq = self.assertEqual
- unless = self.assertTrue
msg, text = self._msgobj('msg_06.txt')
# Check some of the outer headers
eq(msg.get_content_type(), 'message/rfc822')
# Make sure the payload is a list of exactly one sub-Message, and that
# that submessage has a type of text/plain
payload = msg.get_payload()
- unless(isinstance(payload, list))
+ self.assertIsInstance(payload, list)
eq(len(payload), 1)
msg1 = payload[0]
- self.assertTrue(isinstance(msg1, Message))
+ self.assertIsInstance(msg1, Message)
eq(msg1.get_content_type(), 'text/plain')
- self.assertTrue(isinstance(msg1.get_payload(), str))
+ self.assertIsInstance(msg1.get_payload(), str)
eq(msg1.get_payload(), '\n')
@@ -2152,7 +2170,6 @@ class TestMiscellaneous(TestEmailBase):
fp.close()
def test_message_from_string_with_class(self):
- unless = self.assertTrue
fp = openfile('msg_01.txt')
try:
text = fp.read()
@@ -2163,7 +2180,7 @@ class TestMiscellaneous(TestEmailBase):
pass
msg = email.message_from_string(text, MyMessage)
- unless(isinstance(msg, MyMessage))
+ self.assertIsInstance(msg, MyMessage)
# Try something more complicated
fp = openfile('msg_02.txt')
try:
@@ -2172,10 +2189,9 @@ class TestMiscellaneous(TestEmailBase):
fp.close()
msg = email.message_from_string(text, MyMessage)
for subpart in msg.walk():
- unless(isinstance(subpart, MyMessage))
+ self.assertIsInstance(subpart, MyMessage)
def test_message_from_file_with_class(self):
- unless = self.assertTrue
# Create a subclass
class MyMessage(Message):
pass
@@ -2185,7 +2201,7 @@ class TestMiscellaneous(TestEmailBase):
msg = email.message_from_file(fp, MyMessage)
finally:
fp.close()
- unless(isinstance(msg, MyMessage))
+ self.assertIsInstance(msg, MyMessage)
# Try something more complicated
fp = openfile('msg_02.txt')
try:
@@ -2193,7 +2209,7 @@ class TestMiscellaneous(TestEmailBase):
finally:
fp.close()
for subpart in msg.walk():
- unless(isinstance(subpart, MyMessage))
+ self.assertIsInstance(subpart, MyMessage)
def test__all__(self):
module = __import__('email')
@@ -2262,6 +2278,12 @@ class TestMiscellaneous(TestEmailBase):
eq(time.localtime(t)[:6], timetup[:6])
eq(int(time.strftime('%Y', timetup[:9])), 2003)
+ def test_mktime_tz(self):
+ self.assertEqual(Utils.mktime_tz((1970, 1, 1, 0, 0, 0,
+ -1, -1, -1, 0)), 0)
+ self.assertEqual(Utils.mktime_tz((1970, 1, 1, 0, 0, 0,
+ -1, -1, -1, 1234)), -1234)
+
def test_parsedate_y2k(self):
"""Test for parsing a date with a two-digit year.
@@ -2562,9 +2584,9 @@ Do you like this message?
break
om.append(ol)
n1 += 1
- self.assertTrue(n == n1)
- self.assertTrue(len(om) == nt)
- self.assertTrue(''.join([il for il, n in imt]) == ''.join(om))
+ self.assertEqual(n, n1)
+ self.assertEqual(len(om), nt)
+ self.assertEqual(''.join([il for il, n in imt]), ''.join(om))
@@ -2581,7 +2603,7 @@ class TestParsers(TestEmailBase):
eq(msg['to'], 'ppp@zzz.org')
eq(msg.get_content_type(), 'multipart/mixed')
self.assertFalse(msg.is_multipart())
- self.assertTrue(isinstance(msg.get_payload(), str))
+ self.assertIsInstance(msg.get_payload(), str)
def test_whitespace_continuation(self):
eq = self.assertEqual
@@ -2998,7 +3020,7 @@ class TestHeader(TestEmailBase):
h = Header("I am the very model of a modern Major-General; I've information vegetable, animal, and mineral; I know the kings of England, and I quote the fights historical from Marathon to Waterloo, in order categorical; I'm very well acquainted, too, with matters mathematical; I understand equations, both the simple and quadratical; about binomial theorem I'm teeming with a lot o' news, with many cheerful facts about the square of the hypotenuse.",
maxlinelen=76)
for l in h.encode(splitchars=' ').split('\n '):
- self.assertTrue(len(l) <= 76)
+ self.assertLessEqual(len(l), 76)
def test_multilingual(self):
eq = self.ndiffAssertEqual
@@ -3250,7 +3272,7 @@ Content-Type: text/html; NAME*0=file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOC
'''
msg = email.message_from_string(m)
param = msg.get_param('NAME')
- self.assertFalse(isinstance(param, tuple))
+ self.assertNotIsInstance(param, tuple)
self.assertEqual(
param,
'file____C__DOCUMENTS_20AND_20SETTINGS_FABIEN_LOCAL_20SETTINGS_TEMP_nsmail.htm')
@@ -3403,7 +3425,7 @@ Content-Type: application/x-foo; name*0=\"Frank's\"; name*1=\" Document\"
"""
msg = email.message_from_string(m)
param = msg.get_param('name')
- self.assertFalse(isinstance(param, tuple))
+ self.assertNotIsInstance(param, tuple)
self.assertEqual(param, "Frank's Document")
def test_rfc2231_tick_attack_extended(self):
@@ -3427,7 +3449,7 @@ Content-Type: application/x-foo;
"""
msg = email.message_from_string(m)
param = msg.get_param('name')
- self.assertFalse(isinstance(param, tuple))
+ self.assertNotIsInstance(param, tuple)
self.assertEqual(param, "us-ascii'en-us'Frank's Document")
def test_rfc2231_no_extended_values(self):
diff --git a/Lib/email/test/test_email_renamed.py b/Lib/email/test/test_email_renamed.py
index 497b66b..5a41701 100644
--- a/Lib/email/test/test_email_renamed.py
+++ b/Lib/email/test/test_email_renamed.py
@@ -231,12 +231,12 @@ class TestMessageAPI(TestEmailBase):
msg['From'] = 'Me'
msg['to'] = 'You'
# Check for case insensitivity
- self.assertTrue('from' in msg)
- self.assertTrue('From' in msg)
- self.assertTrue('FROM' in msg)
- self.assertTrue('to' in msg)
- self.assertTrue('To' in msg)
- self.assertTrue('TO' in msg)
+ self.assertIn('from', msg)
+ self.assertIn('From', msg)
+ self.assertIn('FROM', msg)
+ self.assertIn('to', msg)
+ self.assertIn('To', msg)
+ self.assertIn('TO', msg)
def test_as_string(self):
eq = self.assertEqual
@@ -916,7 +916,6 @@ class TestMIMEAudio(unittest.TestCase):
def test_add_header(self):
eq = self.assertEqual
- unless = self.assertTrue
self._au.add_header('Content-Disposition', 'attachment',
filename='audiotest.au')
eq(self._au['content-disposition'],
@@ -927,12 +926,13 @@ class TestMIMEAudio(unittest.TestCase):
'audiotest.au')
missing = []
eq(self._au.get_param('attachment', header='content-disposition'), '')
- unless(self._au.get_param('foo', failobj=missing,
- header='content-disposition') is missing)
+ self.assertIs(self._au.get_param('foo', failobj=missing,
+ header='content-disposition'),
+ missing)
# Try some missing stuff
- unless(self._au.get_param('foobar', missing) is missing)
- unless(self._au.get_param('attachment', missing,
- header='foobar') is missing)
+ self.assertIs(self._au.get_param('foobar', missing), missing)
+ self.assertIs(self._au.get_param('attachment', missing,
+ header='foobar'), missing)
@@ -959,7 +959,6 @@ class TestMIMEImage(unittest.TestCase):
def test_add_header(self):
eq = self.assertEqual
- unless = self.assertTrue
self._im.add_header('Content-Disposition', 'attachment',
filename='dingusfish.gif')
eq(self._im['content-disposition'],
@@ -970,12 +969,13 @@ class TestMIMEImage(unittest.TestCase):
'dingusfish.gif')
missing = []
eq(self._im.get_param('attachment', header='content-disposition'), '')
- unless(self._im.get_param('foo', failobj=missing,
- header='content-disposition') is missing)
+ self.assertIs(self._im.get_param('foo', failobj=missing,
+ header='content-disposition'),
+ missing)
# Try some missing stuff
- unless(self._im.get_param('foobar', missing) is missing)
- unless(self._im.get_param('attachment', missing,
- header='foobar') is missing)
+ self.assertIs(self._im.get_param('foobar', missing), missing)
+ self.assertIs(self._im.get_param('attachment', missing,
+ header='foobar'), missing)
@@ -994,6 +994,38 @@ class TestMIMEApplication(unittest.TestCase):
eq(msg.get_payload(), '+vv8/f7/')
eq(msg.get_payload(decode=True), bytes)
+ def test_binary_body_with_encode_7or8bit(self):
+ # Issue 17171.
+ bytesdata = b'\xfa\xfb\xfc\xfd\xfe\xff'
+ msg = MIMEApplication(bytesdata, _encoder=encoders.encode_7or8bit)
+ # Treated as a string, this will be invalid code points.
+ self.assertEqual(msg.get_payload(), bytesdata)
+ self.assertEqual(msg.get_payload(decode=True), bytesdata)
+ self.assertEqual(msg['Content-Transfer-Encoding'], '8bit')
+ s = StringIO()
+ g = Generator(s)
+ g.flatten(msg)
+ wireform = s.getvalue()
+ msg2 = email.message_from_string(wireform)
+ self.assertEqual(msg.get_payload(), bytesdata)
+ self.assertEqual(msg2.get_payload(decode=True), bytesdata)
+ self.assertEqual(msg2['Content-Transfer-Encoding'], '8bit')
+
+ def test_binary_body_with_encode_noop(self):
+ # Issue 16564: This does not produce an RFC valid message, since to be
+ # valid it should have a CTE of binary. But the below works, and is
+ # documented as working this way.
+ bytesdata = b'\xfa\xfb\xfc\xfd\xfe\xff'
+ msg = MIMEApplication(bytesdata, _encoder=encoders.encode_noop)
+ self.assertEqual(msg.get_payload(), bytesdata)
+ self.assertEqual(msg.get_payload(decode=True), bytesdata)
+ s = StringIO()
+ g = Generator(s)
+ g.flatten(msg)
+ wireform = s.getvalue()
+ msg2 = email.message_from_string(wireform)
+ self.assertEqual(msg.get_payload(), bytesdata)
+ self.assertEqual(msg2.get_payload(decode=True), bytesdata)
# Test the basic MIMEText class
@@ -1003,17 +1035,16 @@ class TestMIMEText(unittest.TestCase):
def test_types(self):
eq = self.assertEqual
- unless = self.assertTrue
eq(self._msg.get_content_type(), 'text/plain')
eq(self._msg.get_param('charset'), 'us-ascii')
missing = []
- unless(self._msg.get_param('foobar', missing) is missing)
- unless(self._msg.get_param('charset', missing, header='foobar')
- is missing)
+ self.assertIs(self._msg.get_param('foobar', missing), missing)
+ self.assertIs(self._msg.get_param('charset', missing, header='foobar'),
+ missing)
def test_payload(self):
self.assertEqual(self._msg.get_payload(), 'hello there')
- self.assertTrue(not self._msg.is_multipart())
+ self.assertFalse(self._msg.is_multipart())
def test_charset(self):
eq = self.assertEqual
@@ -1068,21 +1099,20 @@ This is the dingus fish.
def test_hierarchy(self):
# convenience
eq = self.assertEqual
- unless = self.assertTrue
raises = self.assertRaises
# tests
m = self._msg
- unless(m.is_multipart())
+ self.assertTrue(m.is_multipart())
eq(m.get_content_type(), 'multipart/mixed')
eq(len(m.get_payload()), 2)
raises(IndexError, m.get_payload, 2)
m0 = m.get_payload(0)
m1 = m.get_payload(1)
- unless(m0 is self._txt)
- unless(m1 is self._im)
+ self.assertIs(m0, self._txt)
+ self.assertIs(m1, self._im)
eq(m.get_payload(), [m0, m1])
- unless(not m0.is_multipart())
- unless(not m1.is_multipart())
+ self.assertFalse(m0.is_multipart())
+ self.assertFalse(m1.is_multipart())
def test_empty_multipart_idempotent(self):
text = """\
@@ -1116,7 +1146,8 @@ From: bperson@dom.ain
--BOUNDARY
---BOUNDARY--''')
+--BOUNDARY--
+''')
def test_no_parts_in_a_multipart_with_empty_epilogue(self):
outer = MIMEBase('multipart', 'mixed')
@@ -1161,7 +1192,8 @@ MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
---BOUNDARY--''')
+--BOUNDARY--
+''')
def test_seq_parts_in_a_multipart_with_empty_preamble(self):
eq = self.ndiffAssertEqual
@@ -1187,7 +1219,8 @@ MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
---BOUNDARY--''')
+--BOUNDARY--
+''')
def test_seq_parts_in_a_multipart_with_none_preamble(self):
@@ -1213,7 +1246,8 @@ MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
---BOUNDARY--''')
+--BOUNDARY--
+''')
def test_seq_parts_in_a_multipart_with_none_epilogue(self):
@@ -1239,7 +1273,8 @@ MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
hello world
---BOUNDARY--''')
+--BOUNDARY--
+''')
def test_seq_parts_in_a_multipart_with_empty_epilogue(self):
@@ -1412,23 +1447,22 @@ class TestNonConformant(TestEmailBase):
eq(msg.get_content_subtype(), 'plain')
def test_same_boundary_inner_outer(self):
- unless = self.assertTrue
msg = self._msgobj('msg_15.txt')
# XXX We can probably eventually do better
inner = msg.get_payload(0)
- unless(hasattr(inner, 'defects'))
+ self.assertTrue(hasattr(inner, 'defects'))
self.assertEqual(len(inner.defects), 1)
- unless(isinstance(inner.defects[0],
- errors.StartBoundaryNotFoundDefect))
+ self.assertIsInstance(inner.defects[0],
+ errors.StartBoundaryNotFoundDefect)
def test_multipart_no_boundary(self):
- unless = self.assertTrue
msg = self._msgobj('msg_25.txt')
- unless(isinstance(msg.get_payload(), str))
+ self.assertIsInstance(msg.get_payload(), str)
self.assertEqual(len(msg.defects), 2)
- unless(isinstance(msg.defects[0], errors.NoBoundaryInMultipartDefect))
- unless(isinstance(msg.defects[1],
- errors.MultipartInvariantViolationDefect))
+ self.assertIsInstance(msg.defects[0],
+ errors.NoBoundaryInMultipartDefect)
+ self.assertIsInstance(msg.defects[1],
+ errors.MultipartInvariantViolationDefect)
def test_invalid_content_type(self):
eq = self.assertEqual
@@ -1480,13 +1514,13 @@ counter to RFC 2822, there's no separating newline here
""")
def test_lying_multipart(self):
- unless = self.assertTrue
msg = self._msgobj('msg_41.txt')
- unless(hasattr(msg, 'defects'))
+ self.assertTrue(hasattr(msg, 'defects'))
self.assertEqual(len(msg.defects), 2)
- unless(isinstance(msg.defects[0], errors.NoBoundaryInMultipartDefect))
- unless(isinstance(msg.defects[1],
- errors.MultipartInvariantViolationDefect))
+ self.assertIsInstance(msg.defects[0],
+ errors.NoBoundaryInMultipartDefect)
+ self.assertIsInstance(msg.defects[1],
+ errors.MultipartInvariantViolationDefect)
def test_missing_start_boundary(self):
outer = self._msgobj('msg_42.txt')
@@ -1500,8 +1534,8 @@ counter to RFC 2822, there's no separating newline here
# [*] This message is missing its start boundary
bad = outer.get_payload(1).get_payload(0)
self.assertEqual(len(bad.defects), 1)
- self.assertTrue(isinstance(bad.defects[0],
- errors.StartBoundaryNotFoundDefect))
+ self.assertIsInstance(bad.defects[0],
+ errors.StartBoundaryNotFoundDefect)
def test_first_line_is_continuation_header(self):
eq = self.assertEqual
@@ -1510,8 +1544,8 @@ counter to RFC 2822, there's no separating newline here
eq(msg.keys(), [])
eq(msg.get_payload(), 'Line 2\nLine 3')
eq(len(msg.defects), 1)
- self.assertTrue(isinstance(msg.defects[0],
- errors.FirstHeaderLineIsContinuationDefect))
+ self.assertIsInstance(msg.defects[0],
+ errors.FirstHeaderLineIsContinuationDefect)
eq(msg.defects[0].line, ' Line 1\n')
@@ -1577,17 +1611,16 @@ class TestMIMEMessage(TestEmailBase):
def test_valid_argument(self):
eq = self.assertEqual
- unless = self.assertTrue
subject = 'A sub-message'
m = Message()
m['Subject'] = subject
r = MIMEMessage(m)
eq(r.get_content_type(), 'message/rfc822')
payload = r.get_payload()
- unless(isinstance(payload, list))
+ self.assertIsInstance(payload, list)
eq(len(payload), 1)
subpart = payload[0]
- unless(subpart is m)
+ self.assertIs(subpart, m)
eq(subpart['subject'], subject)
def test_bad_multipart(self):
@@ -1621,24 +1654,22 @@ Here is the body of the message.
def test_parse_message_rfc822(self):
eq = self.assertEqual
- unless = self.assertTrue
msg = self._msgobj('msg_11.txt')
eq(msg.get_content_type(), 'message/rfc822')
payload = msg.get_payload()
- unless(isinstance(payload, list))
+ self.assertIsInstance(payload, list)
eq(len(payload), 1)
submsg = payload[0]
- self.assertTrue(isinstance(submsg, Message))
+ self.assertIsInstance(submsg, Message)
eq(submsg['subject'], 'An enclosed message')
eq(submsg.get_payload(), 'Here is the body of the message.\n')
def test_dsn(self):
eq = self.assertEqual
- unless = self.assertTrue
# msg 16 is a Delivery Status Notification, see RFC 1894
msg = self._msgobj('msg_16.txt')
eq(msg.get_content_type(), 'multipart/report')
- unless(msg.is_multipart())
+ self.assertTrue(msg.is_multipart())
eq(len(msg.get_payload()), 3)
# Subpart 1 is a text/plain, human readable section
subpart = msg.get_payload(0)
@@ -1667,13 +1698,13 @@ Your message cannot be delivered to the following recipients:
# message/delivery-status should treat each block as a bunch of
# headers, i.e. a bunch of Message objects.
dsn1 = subpart.get_payload(0)
- unless(isinstance(dsn1, Message))
+ self.assertIsInstance(dsn1, Message)
eq(dsn1['original-envelope-id'], '0GK500B4HD0888@cougar.noc.ucla.edu')
eq(dsn1.get_param('dns', header='reporting-mta'), '')
# Try a missing one <wink>
eq(dsn1.get_param('nsd', header='reporting-mta'), None)
dsn2 = subpart.get_payload(1)
- unless(isinstance(dsn2, Message))
+ self.assertIsInstance(dsn2, Message)
eq(dsn2['action'], 'failed')
eq(dsn2.get_params(header='original-recipient'),
[('rfc822', ''), ('jangel1@cougar.noc.ucla.edu', '')])
@@ -1682,10 +1713,10 @@ Your message cannot be delivered to the following recipients:
subpart = msg.get_payload(2)
eq(subpart.get_content_type(), 'message/rfc822')
payload = subpart.get_payload()
- unless(isinstance(payload, list))
+ self.assertIsInstance(payload, list)
eq(len(payload), 1)
subsubpart = payload[0]
- unless(isinstance(subsubpart, Message))
+ self.assertIsInstance(subsubpart, Message)
eq(subsubpart.get_content_type(), 'text/plain')
eq(subsubpart['message-id'],
'<002001c144a6$8752e060$56104586@oxy.edu>')
@@ -1981,7 +2012,6 @@ class TestIdempotent(TestEmailBase):
def test_content_type(self):
eq = self.assertEqual
- unless = self.assertTrue
# Get a message object and reset the seek pointer for other tests
msg, text = self._msgobj('msg_05.txt')
eq(msg.get_content_type(), 'multipart/report')
@@ -2003,29 +2033,28 @@ class TestIdempotent(TestEmailBase):
eq(msg2.get_payload(), 'Yadda yadda yadda\n')
msg3 = msg.get_payload(2)
eq(msg3.get_content_type(), 'message/rfc822')
- self.assertTrue(isinstance(msg3, Message))
+ self.assertIsInstance(msg3, Message)
payload = msg3.get_payload()
- unless(isinstance(payload, list))
+ self.assertIsInstance(payload, list)
eq(len(payload), 1)
msg4 = payload[0]
- unless(isinstance(msg4, Message))
+ self.assertIsInstance(msg4, Message)
eq(msg4.get_payload(), 'Yadda yadda yadda\n')
def test_parser(self):
eq = self.assertEqual
- unless = self.assertTrue
msg, text = self._msgobj('msg_06.txt')
# Check some of the outer headers
eq(msg.get_content_type(), 'message/rfc822')
# Make sure the payload is a list of exactly one sub-Message, and that
# that submessage has a type of text/plain
payload = msg.get_payload()
- unless(isinstance(payload, list))
+ self.assertIsInstance(payload, list)
eq(len(payload), 1)
msg1 = payload[0]
- self.assertTrue(isinstance(msg1, Message))
+ self.assertIsInstance(msg1, Message)
eq(msg1.get_content_type(), 'text/plain')
- self.assertTrue(isinstance(msg1.get_payload(), str))
+ self.assertIsInstance(msg1.get_payload(), str)
eq(msg1.get_payload(), '\n')
@@ -2062,7 +2091,6 @@ class TestMiscellaneous(TestEmailBase):
fp.close()
def test_message_from_string_with_class(self):
- unless = self.assertTrue
fp = openfile('msg_01.txt')
try:
text = fp.read()
@@ -2073,7 +2101,7 @@ class TestMiscellaneous(TestEmailBase):
pass
msg = email.message_from_string(text, MyMessage)
- unless(isinstance(msg, MyMessage))
+ self.assertIsInstance(msg, MyMessage)
# Try something more complicated
fp = openfile('msg_02.txt')
try:
@@ -2082,10 +2110,9 @@ class TestMiscellaneous(TestEmailBase):
fp.close()
msg = email.message_from_string(text, MyMessage)
for subpart in msg.walk():
- unless(isinstance(subpart, MyMessage))
+ self.assertIsInstance(subpart, MyMessage)
def test_message_from_file_with_class(self):
- unless = self.assertTrue
# Create a subclass
class MyMessage(Message):
pass
@@ -2095,7 +2122,7 @@ class TestMiscellaneous(TestEmailBase):
msg = email.message_from_file(fp, MyMessage)
finally:
fp.close()
- unless(isinstance(msg, MyMessage))
+ self.assertIsInstance(msg, MyMessage)
# Try something more complicated
fp = openfile('msg_02.txt')
try:
@@ -2103,7 +2130,7 @@ class TestMiscellaneous(TestEmailBase):
finally:
fp.close()
for subpart in msg.walk():
- unless(isinstance(subpart, MyMessage))
+ self.assertIsInstance(subpart, MyMessage)
def test__all__(self):
module = __import__('email')
@@ -2428,7 +2455,7 @@ class TestParsers(TestEmailBase):
eq(msg['to'], 'ppp@zzz.org')
eq(msg.get_content_type(), 'multipart/mixed')
self.assertFalse(msg.is_multipart())
- self.assertTrue(isinstance(msg.get_payload(), str))
+ self.assertIsInstance(msg.get_payload(), str)
def test_whitespace_continuation(self):
eq = self.assertEqual
@@ -2824,7 +2851,7 @@ class TestHeader(TestEmailBase):
h = Header("I am the very model of a modern Major-General; I've information vegetable, animal, and mineral; I know the kings of England, and I quote the fights historical from Marathon to Waterloo, in order categorical; I'm very well acquainted, too, with matters mathematical; I understand equations, both the simple and quadratical; about binomial theorem I'm teeming with a lot o' news, with many cheerful facts about the square of the hypotenuse.",
maxlinelen=76)
for l in h.encode(splitchars=' ').split('\n '):
- self.assertTrue(len(l) <= 76)
+ self.assertLessEqual(len(l), 76)
def test_multilingual(self):
eq = self.ndiffAssertEqual
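The test changes above replace assertTrue(isinstance(...)) and assertTrue(x <= y) with the dedicated assertions added in unittest 2.7. A minimal standalone sketch (a made-up test case, not part of the patch) of why the dedicated forms are preferable: on failure they report the offending value, type, or operands instead of just "False is not true".

    import unittest

    class AssertionStyleDemo(unittest.TestCase):
        def test_dedicated_assertions(self):
            payload = ['only element']
            # A failure here names the actual object and the expected type.
            self.assertIsInstance(payload, list)
            # A failure here shows both operands, not just a boolean.
            self.assertLessEqual(len(payload), 76)

    if __name__ == '__main__':
        unittest.main()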
diff --git a/Lib/email/utils.py b/Lib/email/utils.py
index 6d22ca7..c976021 100644
--- a/Lib/email/utils.py
+++ b/Lib/email/utils.py
@@ -63,7 +63,7 @@ def _bdecode(s):
"""Decodes a base64 string.
This function is equivalent to base64.decodestring and it's retained only
- for backward compatibility. It used to remove the last \n of the decoded
+ for backward compatibility. It used to remove the last \\n of the decoded
string, if it had any (see issue 7143).
"""
if not s:
@@ -73,7 +73,7 @@ def _bdecode(s):
def fix_eols(s):
- """Replace all line-ending characters with \r\n."""
+ """Replace all line-ending characters with \\r\\n."""
# Fix newlines with no preceding carriage return
s = re.sub(r'(?<!\r)\n', CRLF, s)
# Fix carriage returns with no following newline
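The two docstring fixes above only double the backslashes so that \n and \r\n render literally in generated documentation; the normalization logic itself is unchanged. For illustration, a standalone sketch of the same CRLF normalization (fix_eols_sketch is a hypothetical name; the real function is email.utils.fix_eols):

    import re

    CRLF = '\r\n'

    def fix_eols_sketch(s):
        s = re.sub(r'(?<!\r)\n', CRLF, s)   # bare LF -> CRLF
        s = re.sub(r'\r(?!\n)', CRLF, s)    # bare CR -> CRLF
        return s

    assert fix_eols_sketch('a\nb\rc\r\nd') == 'a\r\nb\r\nc\r\nd'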
diff --git a/Lib/encodings/rot_13.py b/Lib/encodings/rot_13.py
index 52b6431..52b6431 100644..100755
--- a/Lib/encodings/rot_13.py
+++ b/Lib/encodings/rot_13.py
diff --git a/Lib/filecmp.py b/Lib/filecmp.py
index 4728317..3a79381 100644
--- a/Lib/filecmp.py
+++ b/Lib/filecmp.py
@@ -268,7 +268,7 @@ def cmpfiles(a, b, common, shallow=1):
def _cmp(a, b, sh, abs=abs, cmp=cmp):
try:
return not abs(cmp(a, b, sh))
- except os.error:
+ except (os.error, IOError):
return 2
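With IOError now caught as well, files that cannot be read during the comparison are reported instead of raising. A hedged usage sketch ('dir_a', 'dir_b', and 'common.txt' are hypothetical names):

    import filecmp

    # Unreadable or missing files end up in the third (errors) list
    # instead of propagating an exception to the caller.
    match, mismatch, errors = filecmp.cmpfiles('dir_a', 'dir_b', ['common.txt'])
    print match, mismatch, errors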
diff --git a/Lib/fileinput.py b/Lib/fileinput.py
index ba48575..21c2d1f 100644
--- a/Lib/fileinput.py
+++ b/Lib/fileinput.py
@@ -90,12 +90,11 @@ DEFAULT_BUFSIZE = 8*1024
def input(files=None, inplace=0, backup="", bufsize=0,
mode="r", openhook=None):
- """input([files[, inplace[, backup[, mode[, openhook]]]]])
+ """Return an instance of the FileInput class, which can be iterated.
- Create an instance of the FileInput class. The instance will be used
- as global state for the functions of this module, and is also returned
- to use during iteration. The parameters to this function will be passed
- along to the constructor of the FileInput class.
+ The parameters are passed to the constructor of the FileInput class.
+ The returned instance, in addition to being an iterator,
+ keeps global state for the functions of this module.
"""
global _state
if _state and _state._file:
@@ -182,7 +181,7 @@ def isstdin():
return _state.isstdin()
class FileInput:
- """class FileInput([files[, inplace[, backup[, mode[, openhook]]]]])
+ """FileInput([files[, inplace[, backup[, bufsize[, mode[, openhook]]]]]])
Class FileInput is the implementation of the module; its methods
filename(), lineno(), fileline(), isfirstline(), isstdin(), fileno(),
@@ -388,9 +387,10 @@ def hook_compressed(filename, mode):
def hook_encoded(encoding):
- import codecs
+ import io
def openhook(filename, mode):
- return codecs.open(filename, mode, encoding)
+ mode = mode.replace('U', '').replace('b', '') or 'r'
+ return io.open(filename, mode, encoding=encoding, newline='')
return openhook
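hook_encoded now opens files through io.open with newline='' (after stripping 'U' and 'b' from the mode), so decoding happens without newline translation. A hedged usage sketch ('example.txt' is a hypothetical file):

    import fileinput

    # Each line comes back as a unicode object decoded as latin-1,
    # with its original line ending preserved.
    for line in fileinput.input(['example.txt'],
                                openhook=fileinput.hook_encoded('latin-1')):
        print repr(line)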
diff --git a/Lib/ftplib.py b/Lib/ftplib.py
index c896433..c98290c 100644
--- a/Lib/ftplib.py
+++ b/Lib/ftplib.py
@@ -55,6 +55,8 @@ MSG_OOB = 0x1 # Process data out of band
# The standard FTP server control port
FTP_PORT = 21
+# The sizehint parameter passed to readline() calls
+MAXLINE = 8192
# Exception raised when an error or invalid response is received
@@ -101,6 +103,7 @@ class FTP:
debugging = 0
host = ''
port = FTP_PORT
+ maxline = MAXLINE
sock = None
file = None
welcome = None
@@ -180,7 +183,9 @@ class FTP:
# Internal: return one line from the server, stripping CRLF.
# Raise EOFError if the connection is closed
def getline(self):
- line = self.file.readline()
+ line = self.file.readline(self.maxline + 1)
+ if len(line) > self.maxline:
+ raise Error("got more than %d bytes" % self.maxline)
if self.debugging > 1:
print '*get*', self.sanitize(line)
if not line: raise EOFError
@@ -273,21 +278,24 @@ class FTP:
def makeport(self):
'''Create a new socket and send a PORT command for it.'''
- msg = "getaddrinfo returns an empty list"
+ err = None
sock = None
for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
sock.bind(sa)
- except socket.error, msg:
+ except socket.error, err:
if sock:
sock.close()
sock = None
continue
break
- if not sock:
- raise socket.error, msg
+ if sock is None:
+ if err is not None:
+ raise err
+ else:
+ raise socket.error("getaddrinfo returns an empty list")
sock.listen(1)
port = sock.getsockname()[1] # Get proper port
host = self.sock.getsockname()[0] # Get proper host
@@ -429,7 +437,9 @@ class FTP:
conn = self.transfercmd(cmd)
fp = conn.makefile('rb')
while 1:
- line = fp.readline()
+ line = fp.readline(self.maxline + 1)
+ if len(line) > self.maxline:
+ raise Error("got more than %d bytes" % self.maxline)
if self.debugging > 2: print '*retr*', repr(line)
if not line:
break
@@ -451,7 +461,7 @@ class FTP:
blocksize: The maximum data size to read from fp and send over
the connection at once. [default: 8192]
callback: An optional single parameter callable that is called on
- on each block of data after it is sent. [default: None]
+ each block of data after it is sent. [default: None]
rest: Passed to transfercmd(). [default: None]
Returns:
@@ -474,7 +484,7 @@ class FTP:
cmd: A STOR command.
fp: A file-like object with a readline() method.
callback: An optional single parameter callable that is called on
- on each line after it is sent. [default: None]
+ each line after it is sent. [default: None]
Returns:
The response code.
@@ -482,7 +492,9 @@ class FTP:
self.voidcmd('TYPE A')
conn = self.transfercmd(cmd)
while 1:
- buf = fp.readline()
+ buf = fp.readline(self.maxline + 1)
+ if len(buf) > self.maxline:
+ raise Error("got more than %d bytes" % self.maxline)
if not buf: break
if buf[-2:] != CRLF:
if buf[-1] in CRLF: buf = buf[:-1]
@@ -707,7 +719,9 @@ else:
fp = conn.makefile('rb')
try:
while 1:
- line = fp.readline()
+ line = fp.readline(self.maxline + 1)
+ if len(line) > self.maxline:
+ raise Error("got more than %d bytes" % self.maxline)
if self.debugging > 2: print '*retr*', repr(line)
if not line:
break
@@ -745,7 +759,9 @@ else:
conn = self.transfercmd(cmd)
try:
while 1:
- buf = fp.readline()
+ buf = fp.readline(self.maxline + 1)
+ if len(buf) > self.maxline:
+ raise Error("got more than %d bytes" % self.maxline)
if not buf: break
if buf[-2:] != CRLF:
if buf[-1] in CRLF: buf = buf[:-1]
@@ -902,7 +918,9 @@ class Netrc:
fp = open(filename, "r")
in_macro = 0
while 1:
- line = fp.readline()
+ line = fp.readline(self.maxline + 1)
+ if len(line) > self.maxline:
+ raise Error("got more than %d bytes" % self.maxline)
if not line: break
if in_macro and line.strip():
macro_lines.append(line)
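Every readline() in ftplib is now capped at maxline + 1 bytes, so a hostile or broken server can no longer make the client buffer an arbitrarily long line. A generic sketch of that defensive pattern (read_bounded_line is an illustrative name, not part of the ftplib API):

    from StringIO import StringIO

    MAXLINE = 8192

    def read_bounded_line(fp, maxline=MAXLINE):
        # Ask for at most maxline + 1 bytes; a longer result means the
        # peer sent an oversized line, so refuse it instead of buffering.
        line = fp.readline(maxline + 1)
        if len(line) > maxline:
            raise IOError("got more than %d bytes" % maxline)
        return line

    assert read_bounded_line(StringIO("220 ready\r\n")) == "220 ready\r\n"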
diff --git a/Lib/genericpath.py b/Lib/genericpath.py
index a0bf601..7ddb94c 100644
--- a/Lib/genericpath.py
+++ b/Lib/genericpath.py
@@ -22,7 +22,7 @@ def exists(path):
# This follows symbolic links, so both islink() and isdir() can be true
-# for the same path ono systems that support symlinks
+# for the same path on systems that support symlinks
def isfile(path):
"""Test whether a path is a regular file"""
try:
diff --git a/Lib/glob.py b/Lib/glob.py
index 04364be..f34534b 100644
--- a/Lib/glob.py
+++ b/Lib/glob.py
@@ -5,12 +5,23 @@ import os
import re
import fnmatch
+try:
+ _unicode = unicode
+except NameError:
+ # If Python is built without Unicode support, the unicode type
+ # will not exist. Fake one.
+ class _unicode(object):
+ pass
+
__all__ = ["glob", "iglob"]
def glob(pathname):
"""Return a list of paths matching a pathname pattern.
- The pattern may contain simple shell-style wildcards a la fnmatch.
+ The pattern may contain simple shell-style wildcards a la
+ fnmatch. However, unlike fnmatch, filenames starting with a
+ dot are special cases that are not matched by '*' and '?'
+ patterns.
"""
return list(iglob(pathname))
@@ -18,7 +29,10 @@ def glob(pathname):
def iglob(pathname):
"""Return an iterator which yields the paths matching a pathname pattern.
- The pattern may contain simple shell-style wildcards a la fnmatch.
+ The pattern may contain simple shell-style wildcards a la
+ fnmatch. However, unlike fnmatch, filenames starting with a
+ dot are special cases that are not matched by '*' and '?'
+ patterns.
"""
if not has_magic(pathname):
@@ -30,7 +44,10 @@ def iglob(pathname):
for name in glob1(os.curdir, basename):
yield name
return
- if has_magic(dirname):
+ # `os.path.split()` returns the argument itself as a dirname if it is a
+ # drive or UNC path. Prevent an infinite recursion if a drive or UNC path
+ # contains magic characters (i.e. r'\\?\C:').
+ if dirname != pathname and has_magic(dirname):
dirs = iglob(dirname)
else:
dirs = [dirname]
@@ -49,7 +66,7 @@ def iglob(pathname):
def glob1(dirname, pattern):
if not dirname:
dirname = os.curdir
- if isinstance(pattern, unicode) and not isinstance(dirname, unicode):
+ if isinstance(pattern, _unicode) and not isinstance(dirname, unicode):
dirname = unicode(dirname, sys.getfilesystemencoding() or
sys.getdefaultencoding())
try:
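The expanded docstrings call out how glob differs from fnmatch: a leading dot is never matched by '*' or '?'. A quick illustration of the contrast:

    import fnmatch

    # fnmatch happily matches a hidden file against '*' ...
    print fnmatch.fnmatch('.hidden', '*')    # True
    # ... whereas glob.glob('*') in a directory containing '.hidden'
    # deliberately leaves it out, as the new docstring states.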
diff --git a/Lib/gzip.py b/Lib/gzip.py
index 8fdac83..a613bae 100644
--- a/Lib/gzip.py
+++ b/Lib/gzip.py
@@ -66,9 +66,10 @@ class GzipFile(io.BufferedIOBase):
Be aware that only the 'rb', 'ab', and 'wb' values should be used
for cross-platform portability.
- The compresslevel argument is an integer from 1 to 9 controlling the
+ The compresslevel argument is an integer from 0 to 9 controlling the
level of compression; 1 is fastest and produces the least compression,
- and 9 is slowest and produces the most compression. The default is 9.
+ and 9 is slowest and produces the most compression. 0 is no compression
+ at all. The default is 9.
The mtime argument is an optional numeric timestamp to be written
to the stream when compressing. All gzip compressed streams
@@ -81,6 +82,10 @@ class GzipFile(io.BufferedIOBase):
"""
+ # Make sure we don't inadvertently enable universal newlines on the
+ # underlying file object - in read mode, this causes data corruption.
+ if mode:
+ mode = mode.replace('U', '')
# guarantee the file is opened in binary mode on platforms
# that care about that sort of thing
if mode and 'b' not in mode:
@@ -417,7 +422,7 @@ class GzipFile(io.BufferedIOBase):
if offset < self.offset:
raise IOError('Negative seek in write mode')
count = offset - self.offset
- for i in range(count // 1024):
+ for i in xrange(count // 1024):
self.write(1024 * '\0')
self.write((count % 1024) * '\0')
elif self.mode == READ:
@@ -425,7 +430,7 @@ class GzipFile(io.BufferedIOBase):
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
- for i in range(count // 1024):
+ for i in xrange(count // 1024):
self.read(1024)
self.read(count % 1024)
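Stripping 'U' from the mode keeps universal-newline translation away from the compressed byte stream, and the docstring now covers compresslevel=0 (store only). A hedged round-trip sketch ('/tmp/demo.gz' is a hypothetical path):

    import gzip

    with gzip.GzipFile('/tmp/demo.gz', 'wb', compresslevel=0) as f:
        f.write('stored, not compressed\n')   # level 0: gzip-framed, not deflated
    with gzip.GzipFile('/tmp/demo.gz', 'rb') as f:
        assert f.read() == 'stored, not compressed\n'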
diff --git a/Lib/hashlib.py b/Lib/hashlib.py
index 2732d18..6d69ad2 100644
--- a/Lib/hashlib.py
+++ b/Lib/hashlib.py
@@ -60,7 +60,7 @@ __always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
algorithms = __always_supported
-__all__ = __always_supported + ('new', 'algorithms')
+__all__ = __always_supported + ('new', 'algorithms', 'pbkdf2_hmac')
def __get_builtin_constructor(name):
@@ -88,7 +88,7 @@ def __get_builtin_constructor(name):
except ImportError:
pass # no extension module, this hash is unsupported.
- raise ValueError('unsupported hash type %s' % name)
+ raise ValueError('unsupported hash type ' + name)
def __get_openssl_constructor(name):
@@ -141,6 +141,73 @@ for __func_name in __always_supported:
import logging
logging.exception('code for hash %s was not found.', __func_name)
+
+try:
+ # OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA
+ from _hashlib import pbkdf2_hmac
+except ImportError:
+ import binascii
+ import struct
+
+ _trans_5C = b"".join(chr(x ^ 0x5C) for x in range(256))
+ _trans_36 = b"".join(chr(x ^ 0x36) for x in range(256))
+
+ def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None):
+ """Password based key derivation function 2 (PKCS #5 v2.0)
+
+ This Python implementation, based on the hmac module, is about as fast
+ as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster
+ for long passwords.
+ """
+ if not isinstance(hash_name, str):
+ raise TypeError(hash_name)
+
+ if not isinstance(password, (bytes, bytearray)):
+ password = bytes(buffer(password))
+ if not isinstance(salt, (bytes, bytearray)):
+ salt = bytes(buffer(salt))
+
+ # Fast inline HMAC implementation
+ inner = new(hash_name)
+ outer = new(hash_name)
+ blocksize = getattr(inner, 'block_size', 64)
+ if len(password) > blocksize:
+ password = new(hash_name, password).digest()
+ password = password + b'\x00' * (blocksize - len(password))
+ inner.update(password.translate(_trans_36))
+ outer.update(password.translate(_trans_5C))
+
+ def prf(msg, inner=inner, outer=outer):
+ # PBKDF2_HMAC uses the password as key. We can re-use the same
+ # digest objects and just update copies to skip initialization.
+ icpy = inner.copy()
+ ocpy = outer.copy()
+ icpy.update(msg)
+ ocpy.update(icpy.digest())
+ return ocpy.digest()
+
+ if iterations < 1:
+ raise ValueError(iterations)
+ if dklen is None:
+ dklen = outer.digest_size
+ if dklen < 1:
+ raise ValueError(dklen)
+
+ hex_format_string = "%%0%ix" % (new(hash_name).digest_size * 2)
+
+ dkey = b''
+ loop = 1
+ while len(dkey) < dklen:
+ prev = prf(salt + struct.pack(b'>I', loop))
+ rkey = int(binascii.hexlify(prev), 16)
+ for i in xrange(iterations - 1):
+ prev = prf(prev)
+ rkey ^= int(binascii.hexlify(prev), 16)
+ loop += 1
+ dkey += binascii.unhexlify(hex_format_string % rkey)
+
+ return dkey[:dklen]
+
# Cleanup locals()
del __always_supported, __func_name, __get_hash
del __py_new, __hash_new, __get_openssl_constructor
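The block above backports hashlib.pbkdf2_hmac, using OpenSSL's PKCS5_PBKDF2_HMAC when available and a pure-Python HMAC loop otherwise. A hedged usage sketch (the salt and iteration count are illustrative only; real code should use a random per-user salt and a deployment-chosen iteration count):

    import binascii
    import hashlib

    dk = hashlib.pbkdf2_hmac('sha256', b'password', b'salt', 100000)
    print binascii.hexlify(dk)   # 32-byte derived key, hex-encoded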
diff --git a/Lib/heapq.py b/Lib/heapq.py
index 6a4e0f4..4b2c0c4 100644
--- a/Lib/heapq.py
+++ b/Lib/heapq.py
@@ -129,9 +129,8 @@ From all times, sorting has always been a Great Art! :-)
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
-from itertools import islice, repeat, count, imap, izip, tee, chain
+from itertools import islice, count, imap, izip, tee, chain
from operator import itemgetter
-import bisect
def cmp_lt(x, y):
# Use __lt__ if available; otherwise, try __le__.
@@ -188,6 +187,19 @@ def heapify(x):
for i in reversed(xrange(n//2)):
_siftup(x, i)
+def _heappushpop_max(heap, item):
+ """Maxheap version of a heappush followed by a heappop."""
+ if heap and cmp_lt(item, heap[0]):
+ item, heap[0] = heap[0], item
+ _siftup_max(heap, 0)
+ return item
+
+def _heapify_max(x):
+ """Transform list into a maxheap, in-place, in O(len(x)) time."""
+ n = len(x)
+ for i in reversed(range(n//2)):
+ _siftup_max(x, i)
+
def nlargest(n, iterable):
"""Find the n largest elements in a dataset.
@@ -213,30 +225,16 @@ def nsmallest(n, iterable):
"""
if n < 0:
return []
- if hasattr(iterable, '__len__') and n * 10 <= len(iterable):
- # For smaller values of n, the bisect method is faster than a minheap.
- # It is also memory efficient, consuming only n elements of space.
- it = iter(iterable)
- result = sorted(islice(it, 0, n))
- if not result:
- return result
- insort = bisect.insort
- pop = result.pop
- los = result[-1] # los --> Largest of the nsmallest
- for elem in it:
- if cmp_lt(elem, los):
- insort(result, elem)
- pop()
- los = result[-1]
+ it = iter(iterable)
+ result = list(islice(it, n))
+ if not result:
return result
- # An alternative approach manifests the whole iterable in memory but
- # saves comparisons by heapifying all at once. Also, saves time
- # over bisect.insort() which has O(n) data movement time for every
- # insertion. Finding the n smallest of an m length iterable requires
- # O(m) + O(n log m) comparisons.
- h = list(iterable)
- heapify(h)
- return map(heappop, repeat(h, min(n, len(h))))
+ _heapify_max(result)
+ _heappushpop = _heappushpop_max
+ for elem in it:
+ _heappushpop(result, elem)
+ result.sort()
+ return result
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
@@ -314,6 +312,42 @@ def _siftup(heap, pos):
heap[pos] = newitem
_siftdown(heap, startpos, pos)
+def _siftdown_max(heap, startpos, pos):
+ 'Maxheap variant of _siftdown'
+ newitem = heap[pos]
+ # Follow the path to the root, moving parents down until finding a place
+ # newitem fits.
+ while pos > startpos:
+ parentpos = (pos - 1) >> 1
+ parent = heap[parentpos]
+ if cmp_lt(parent, newitem):
+ heap[pos] = parent
+ pos = parentpos
+ continue
+ break
+ heap[pos] = newitem
+
+def _siftup_max(heap, pos):
+ 'Maxheap variant of _siftup'
+ endpos = len(heap)
+ startpos = pos
+ newitem = heap[pos]
+ # Bubble up the larger child until hitting a leaf.
+ childpos = 2*pos + 1 # leftmost child position
+ while childpos < endpos:
+ # Set childpos to index of larger child.
+ rightpos = childpos + 1
+ if rightpos < endpos and not cmp_lt(heap[rightpos], heap[childpos]):
+ childpos = rightpos
+ # Move the larger child up.
+ heap[pos] = heap[childpos]
+ pos = childpos
+ childpos = 2*pos + 1
+ # The leaf at pos is empty now. Put newitem there, and bubble it up
+ # to its final resting place (by sifting its parents down).
+ heap[pos] = newitem
+ _siftdown_max(heap, startpos, pos)
+
# If available, use C implementation
try:
from _heapq import *
@@ -332,6 +366,7 @@ def merge(*iterables):
'''
_heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
+ _len = len
h = []
h_append = h.append
@@ -343,17 +378,21 @@ def merge(*iterables):
pass
heapify(h)
- while 1:
+ while _len(h) > 1:
try:
while 1:
- v, itnum, next = s = h[0] # raises IndexError when h is empty
+ v, itnum, next = s = h[0]
yield v
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except _StopIteration:
_heappop(h) # remove empty iterator
- except IndexError:
- return
+ if h:
+ # fast case when only a single iterator remains
+ v, itnum, next = h[0]
+ yield v
+ for v in next.__self__:
+ yield v
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
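The nsmallest rewrite keeps a bounded max-heap of the n best candidates and streams the remaining items through heappushpop, replacing the old bisect and full-heapify strategies. The caller-visible result is unchanged, as this quick check (illustrative, not part of the patch) suggests:

    import heapq
    import random

    data = [random.randrange(1000) for _ in xrange(200)]
    assert heapq.nsmallest(10, data) == sorted(data)[:10]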
diff --git a/Lib/hmac.py b/Lib/hmac.py
index 5388106..9cd1a9f 100644
--- a/Lib/hmac.py
+++ b/Lib/hmac.py
@@ -5,6 +5,9 @@ Implements the HMAC algorithm as described by RFC 2104.
import warnings as _warnings
+from operator import _compare_digest as compare_digest
+
+
trans_5C = "".join ([chr (x ^ 0x5C) for x in xrange(256)])
trans_36 = "".join ([chr (x ^ 0x36) for x in xrange(256)])
diff --git a/Lib/httplib.py b/Lib/httplib.py
index 19bcd1b..5368cd9 100644
--- a/Lib/httplib.py
+++ b/Lib/httplib.py
@@ -1,4 +1,4 @@
-"""HTTP/1.1 client library
+r"""HTTP/1.1 client library
<intro stuff goes here>
<other stuff, too>
@@ -362,7 +362,9 @@ class HTTPResponse:
def _read_status(self):
# Initialize with Simple-Response defaults
- line = self.fp.readline()
+ line = self.fp.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise LineTooLong("header line")
if self.debuglevel > 0:
print "reply:", repr(line)
if not line:
@@ -545,7 +547,11 @@ class HTTPResponse:
if self.length is None:
s = self.fp.read()
else:
- s = self._safe_read(self.length)
+ try:
+ s = self._safe_read(self.length)
+ except IncompleteRead:
+ self.close()
+ raise
self.length = 0
self.close() # we read everything
return s
@@ -559,10 +565,15 @@ class HTTPResponse:
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
s = self.fp.read(amt)
+ if not s and amt:
+ # Ideally, we would raise IncompleteRead if the content-length
+ # wasn't satisfied, but it might break compatibility.
+ self.close()
if self.length is not None:
self.length -= len(s)
if not self.length:
self.close()
+
return s
def _read_chunked(self, amt):
@@ -689,17 +700,33 @@ class HTTPConnection:
self._tunnel_host = None
self._tunnel_port = None
self._tunnel_headers = {}
-
- self._set_hostport(host, port)
if strict is not None:
self.strict = strict
+ (self.host, self.port) = self._get_hostport(host, port)
+
+ # This is stored as an instance variable to allow unittests
+ # to replace with a suitable mock
+ self._create_connection = socket.create_connection
+
def set_tunnel(self, host, port=None, headers=None):
- """ Sets up the host and the port for the HTTP CONNECT Tunnelling.
+ """ Set up host and port for HTTP CONNECT tunnelling.
+
+ In a connection that uses HTTP CONNECT tunneling, the host passed to the
+ constructor is used as a proxy server that relays all communication to
+ the endpoint passed to set_tunnel. This is done by sending an HTTP
+ CONNECT request to the proxy server when the connection is established.
+
+ This method must be called before the HTTP connection has been
+ established.
The headers argument should be a mapping of extra HTTP headers
to send with the CONNECT request.
"""
+ # Verify if this is required.
+ if self.sock:
+ raise RuntimeError("Can't setup tunnel for established connection.")
+
self._tunnel_host = host
self._tunnel_port = port
if headers:
@@ -707,7 +734,7 @@ class HTTPConnection:
else:
self._tunnel_headers.clear()
- def _set_hostport(self, host, port):
+ def _get_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
@@ -724,15 +751,14 @@ class HTTPConnection:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
- self.host = host
- self.port = port
+ return (host, port)
def set_debuglevel(self, level):
self.debuglevel = level
def _tunnel(self):
- self._set_hostport(self._tunnel_host, self._tunnel_port)
- self.send("CONNECT %s:%d HTTP/1.0\r\n" % (self.host, self.port))
+ (host, port) = self._get_hostport(self._tunnel_host, self._tunnel_port)
+ self.send("CONNECT %s:%d HTTP/1.0\r\n" % (host, port))
for header, value in self._tunnel_headers.iteritems():
self.send("%s: %s\r\n" % (header, value))
self.send("\r\n")
@@ -748,13 +774,17 @@ class HTTPConnection:
line = response.fp.readline(_MAXLINE + 1)
if len(line) > _MAXLINE:
raise LineTooLong("header line")
- if line == '\r\n': break
+ if not line:
+ # for sites which EOF without sending trailer
+ break
+ if line == '\r\n':
+ break
def connect(self):
"""Connect to the host and port specified in __init__."""
- self.sock = socket.create_connection((self.host,self.port),
- self.timeout, self.source_address)
+ self.sock = self._create_connection((self.host,self.port),
+ self.timeout, self.source_address)
if self._tunnel_host:
self._tunnel()
@@ -892,17 +922,24 @@ class HTTPConnection:
netloc_enc = netloc.encode("idna")
self.putheader('Host', netloc_enc)
else:
+ if self._tunnel_host:
+ host = self._tunnel_host
+ port = self._tunnel_port
+ else:
+ host = self.host
+ port = self.port
+
try:
- host_enc = self.host.encode("ascii")
+ host_enc = host.encode("ascii")
except UnicodeEncodeError:
- host_enc = self.host.encode("idna")
+ host_enc = host.encode("idna")
# Wrap the IPv6 Host Header with [] (RFC 2732)
if host_enc.find(':') >= 0:
host_enc = "[" + host_enc + "]"
- if self.port == self.default_port:
+ if port == self.default_port:
self.putheader('Host', host_enc)
else:
- self.putheader('Host', "%s:%s" % (host_enc, self.port))
+ self.putheader('Host', "%s:%s" % (host_enc, port))
# note: we are assuming that clients will not attempt to set these
# headers since *this* library must deal with the
@@ -985,7 +1022,7 @@ class HTTPConnection:
self.putrequest(method, url, **skips)
- if body and ('content-length' not in header_names):
+ if body is not None and 'content-length' not in header_names:
self._set_content_length(body)
for hdr, value in headers.iteritems():
self.putheader(hdr, value)
@@ -1058,7 +1095,7 @@ class HTTP:
if port == 0:
port = None
- # Note that we may pass an empty string as the host; this will throw
+ # Note that we may pass an empty string as the host; this will raise
# an error when we attempt to connect. Presumably, the client code
# will call connect before then, with a proper host.
self._setup(self._connection_class(host, port, strict))
@@ -1153,8 +1190,8 @@ else:
def connect(self):
"Connect to a host on a given (SSL) port."
- sock = socket.create_connection((self.host, self.port),
- self.timeout, self.source_address)
+ sock = self._create_connection((self.host, self.port),
+ self.timeout, self.source_address)
if self._tunnel_host:
self.sock = sock
self._tunnel()
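Per the clarified docstring, the host given to the constructor acts as the proxy and set_tunnel() names the real endpoint; the CONNECT request is issued once the connection is established. A hedged sketch (proxy and target hosts are hypothetical, and nothing is sent until a request is made):

    import httplib

    conn = httplib.HTTPConnection('proxy.example.com', 3128)
    conn.set_tunnel('www.example.com', 80)   # must precede the first request
    # conn.request('GET', '/') would first issue
    #   CONNECT www.example.com:80 HTTP/1.0
    # to the proxy, then send the GET through the established tunnel.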
diff --git a/Lib/idlelib/AutoComplete.py b/Lib/idlelib/AutoComplete.py
index 4e17325..1248f00 100644
--- a/Lib/idlelib/AutoComplete.py
+++ b/Lib/idlelib/AutoComplete.py
@@ -156,12 +156,9 @@ class AutoComplete:
if not comp_lists[0]:
return
self.autocompletewindow = self._make_autocomplete_window()
- self.autocompletewindow.show_window(comp_lists,
- "insert-%dc" % len(comp_start),
- complete,
- mode,
- userWantsWin)
- return True
+ return not self.autocompletewindow.show_window(
+ comp_lists, "insert-%dc" % len(comp_start),
+ complete, mode, userWantsWin)
def fetch_completions(self, what, mode):
"""Return a pair of lists of completions for something. The first list
@@ -225,3 +222,8 @@ class AutoComplete:
namespace = sys.modules.copy()
namespace.update(__main__.__dict__)
return eval(name, namespace)
+
+
+if __name__ == '__main__':
+ from unittest import main
+ main('idlelib.idle_test.test_autocomplete', verbosity=2)
diff --git a/Lib/idlelib/AutoCompleteWindow.py b/Lib/idlelib/AutoCompleteWindow.py
index 298177f..27b0e56 100644
--- a/Lib/idlelib/AutoCompleteWindow.py
+++ b/Lib/idlelib/AutoCompleteWindow.py
@@ -157,13 +157,14 @@ class AutoCompleteWindow:
self.start = self.widget.get(self.startindex, "insert")
if complete:
completed = self._complete_string(self.start)
+ start = self.start
self._change_start(completed)
i = self._binary_search(completed)
if self.completions[i] == completed and \
(i == len(self.completions)-1 or
self.completions[i+1][:len(completed)] != completed):
# There is exactly one matching completion
- return
+ return completed == start
self.userwantswindow = userWantsWin
self.lasttypedstart = self.start
diff --git a/Lib/idlelib/AutoExpand.py b/Lib/idlelib/AutoExpand.py
index 9e93d57..7059054 100644
--- a/Lib/idlelib/AutoExpand.py
+++ b/Lib/idlelib/AutoExpand.py
@@ -1,3 +1,17 @@
+'''Complete the current word before the cursor with words in the editor.
+
+Each menu selection or shortcut key selection replaces the word with a
+different word with the same prefix. The search for matches begins
+before the target and moves toward the top of the editor. It then starts
+after the cursor and moves down. It then returns to the original word and
+the cycle starts again.
+
+Changing the current text line or leaving the cursor in a different
+place before requesting the next selection causes AutoExpand to reset
+its state.
+
+This is an extension file and there is only one instance of AutoExpand.
+'''
import string
import re
@@ -20,6 +34,7 @@ class AutoExpand:
self.state = None
def expand_word_event(self, event):
+ "Replace the current word with the next expansion."
curinsert = self.text.index("insert")
curline = self.text.get("insert linestart", "insert lineend")
if not self.state:
@@ -46,6 +61,7 @@ class AutoExpand:
return "break"
def getwords(self):
+ "Return a list of words that match the prefix before the cursor."
word = self.getprevword()
if not word:
return []
@@ -76,8 +92,13 @@ class AutoExpand:
return words
def getprevword(self):
+ "Return the word prefix before the cursor."
line = self.text.get("insert linestart", "insert")
i = len(line)
while i > 0 and line[i-1] in self.wordchars:
i = i-1
return line[i:]
+
+if __name__ == '__main__':
+ import unittest
+ unittest.main('idlelib.idle_test.test_autoexpand', verbosity=2)
diff --git a/Lib/idlelib/Bindings.py b/Lib/idlelib/Bindings.py
index ec2720b..df2b251 100644
--- a/Lib/idlelib/Bindings.py
+++ b/Lib/idlelib/Bindings.py
@@ -8,14 +8,19 @@ the PythonShell window, and a Format menu which is only present in the Editor
windows.
"""
-import sys
from idlelib.configHandler import idleConf
-from idlelib import macosxSupport
+
+# Warning: menudefs is altered in macosxSupport.overrideRootMenu()
+# after it is determined that an OS X Aqua Tk is in use,
+# which cannot be done until after Tk() is first called.
+# Do not alter the 'file', 'options', or 'help' cascades here
+# without altering overrideRootMenu() as well.
+# TODO: Make this more robust
menudefs = [
# underscore prefixes character to underscore
('file', [
- ('_New Window', '<<open-new-window>>'),
+ ('_New File', '<<open-new-window>>'),
('_Open...', '<<open-window-from-file>>'),
('Open _Module...', '<<open-module>>'),
('Class _Browser', '<<open-class-browser>>'),
@@ -81,23 +86,4 @@ menudefs = [
]),
]
-if macosxSupport.runningAsOSXApp():
- # Running as a proper MacOS application bundle. This block restructures
- # the menus a little to make them conform better to the HIG.
-
- quitItem = menudefs[0][1][-1]
- closeItem = menudefs[0][1][-2]
-
- # Remove the last 3 items of the file menu: a separator, close window and
- # quit. Close window will be reinserted just above the save item, where
- # it should be according to the HIG. Quit is in the application menu.
- del menudefs[0][1][-3:]
- menudefs[0][1].insert(6, closeItem)
-
- # Remove the 'About' entry from the help menu, it is in the application
- # menu
- del menudefs[-1][1][0:2]
-
default_keydefs = idleConf.GetCurrentKeySet()
-
-del sys
diff --git a/Lib/idlelib/CallTipWindow.py b/Lib/idlelib/CallTipWindow.py
index 2223885..265b2fa 100644
--- a/Lib/idlelib/CallTipWindow.py
+++ b/Lib/idlelib/CallTipWindow.py
@@ -22,6 +22,7 @@ class CallTip:
self.parenline = self.parencol = None
self.lastline = None
self.hideid = self.checkhideid = None
+ self.checkhide_after_id = None
def position_window(self):
"""Check if needs to reposition the window, and if so - do it."""
@@ -47,13 +48,7 @@ class CallTip:
def showtip(self, text, parenleft, parenright):
"""Show the calltip, bind events which will close it and reposition it.
"""
- # truncate overly long calltip
- if len(text) >= 79:
- textlines = text.splitlines()
- for i, line in enumerate(textlines):
- if len(line) > 79:
- textlines[i] = line[:75] + ' ...'
- text = '\n'.join(textlines)
+ # Only called in CallTips, where lines are truncated
self.text = text
if self.tipwindow or not self.text:
return
@@ -102,7 +97,10 @@ class CallTip:
self.hidetip()
else:
self.position_window()
- self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
+ if self.checkhide_after_id is not None:
+ self.widget.after_cancel(self.checkhide_after_id)
+ self.checkhide_after_id = \
+ self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
def hide_event(self, event):
if not self.tipwindow:
@@ -135,37 +133,36 @@ class CallTip:
return bool(self.tipwindow)
+def _calltip_window(parent):
+ root = Tk()
+ root.title("Test calltips")
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
-###############################
-#
-# Test Code
-#
-class container: # Conceptually an editor_window
- def __init__(self):
- root = Tk()
- text = self.text = Text(root)
- text.pack(side=LEFT, fill=BOTH, expand=1)
- text.insert("insert", "string.split")
- root.update()
- self.calltip = CallTip(text)
+ class MyEditWin: # conceptually an editor_window
+ def __init__(self):
+ text = self.text = Text(root)
+ text.pack(side=LEFT, fill=BOTH, expand=1)
+ text.insert("insert", "string.split")
+ root.update()
+ self.calltip = CallTip(text)
- text.event_add("<<calltip-show>>", "(")
- text.event_add("<<calltip-hide>>", ")")
- text.bind("<<calltip-show>>", self.calltip_show)
- text.bind("<<calltip-hide>>", self.calltip_hide)
+ text.event_add("<<calltip-show>>", "(")
+ text.event_add("<<calltip-hide>>", ")")
+ text.bind("<<calltip-show>>", self.calltip_show)
+ text.bind("<<calltip-hide>>", self.calltip_hide)
- text.focus_set()
- root.mainloop()
+ text.focus_set()
+ root.mainloop()
- def calltip_show(self, event):
- self.calltip.showtip("Hello world")
+ def calltip_show(self, event):
+ self.calltip.showtip("Hello world", "insert", "end")
- def calltip_hide(self, event):
- self.calltip.hidetip()
+ def calltip_hide(self, event):
+ self.calltip.hidetip()
-def main():
- # Test code
- c=container()
+ editwin = MyEditWin()
if __name__=='__main__':
- main()
+ from idlelib.idle_test.htest import run
+ run(_calltip_window)
diff --git a/Lib/idlelib/CallTips.py b/Lib/idlelib/CallTips.py
index f8f31e2..3db2636 100644
--- a/Lib/idlelib/CallTips.py
+++ b/Lib/idlelib/CallTips.py
@@ -5,14 +5,15 @@ parameter and docstring information when you type an opening parenthesis, and
which disappear when you type a closing parenthesis.
"""
+import __main__
import re
import sys
+import textwrap
import types
from idlelib import CallTipWindow
from idlelib.HyperParser import HyperParser
-import __main__
class CallTips:
@@ -71,16 +72,16 @@ class CallTips:
if not sur_paren:
return
hp.set_index(sur_paren[0])
- name = hp.get_expression()
- if not name or (not evalfuncs and name.find('(') != -1):
+ expression = hp.get_expression()
+ if not expression or (not evalfuncs and expression.find('(') != -1):
return
- arg_text = self.fetch_tip(name)
+ arg_text = self.fetch_tip(expression)
if not arg_text:
return
self.calltip = self._make_calltip_window()
self.calltip.showtip(arg_text, sur_paren[0], sur_paren[1])
- def fetch_tip(self, name):
+ def fetch_tip(self, expression):
"""Return the argument list and docstring of a function or class
If there is a Python subprocess, get the calltip there. Otherwise,
@@ -96,23 +97,27 @@ class CallTips:
"""
try:
rpcclt = self.editwin.flist.pyshell.interp.rpcclt
- except:
+ except AttributeError:
rpcclt = None
if rpcclt:
return rpcclt.remotecall("exec", "get_the_calltip",
- (name,), {})
+ (expression,), {})
else:
- entity = self.get_entity(name)
+ entity = self.get_entity(expression)
return get_arg_text(entity)
- def get_entity(self, name):
- "Lookup name in a namespace spanning sys.modules and __main.dict__"
- if name:
+ def get_entity(self, expression):
+ """Return the object corresponding to expression evaluated
+ in a namespace spanning sys.modules and __main.dict__.
+ """
+ if expression:
namespace = sys.modules.copy()
namespace.update(__main__.__dict__)
try:
- return eval(name, namespace)
- except (NameError, AttributeError):
+ return eval(expression, namespace)
+ except BaseException:
+ # An uncaught exception closes idle, and eval can raise any
+ # exception, especially if user classes are involved.
return None
def _find_constructor(class_ob):
@@ -126,96 +131,89 @@ def _find_constructor(class_ob):
if rc is not None: return rc
return None
+# The following are used in get_arg_text
+_MAX_COLS = 85
+_MAX_LINES = 5 # enough for bytes
+_INDENT = ' '*4 # for wrapped signatures
+
def get_arg_text(ob):
- """Get a string describing the arguments for the given object"""
- arg_text = ""
- if ob is not None:
- arg_offset = 0
- if type(ob) in (types.ClassType, types.TypeType):
- # Look for the highest __init__ in the class chain.
- fob = _find_constructor(ob)
- if fob is None:
- fob = lambda: None
- else:
- arg_offset = 1
- elif type(ob)==types.MethodType:
- # bit of a hack for methods - turn it into a function
- # but we drop the "self" param.
- fob = ob.im_func
- arg_offset = 1
+ '''Return a string describing the signature of a callable object, or ''.
+
+ For Python-coded functions and methods, the first line is introspected.
+ Delete 'self' parameter for classes (.__init__) and bound methods.
+ The next lines are the first lines of the doc string up to the first
+ empty line or _MAX_LINES. For builtins, this typically includes
+ the arguments in addition to the return value.
+ '''
+ argspec = ""
+ try:
+ ob_call = ob.__call__
+ except BaseException:
+ if type(ob) is types.ClassType: # old-style
+ ob_call = ob
+ else:
+ return argspec
+
+ arg_offset = 0
+ if type(ob) in (types.ClassType, types.TypeType):
+ # Look for the first __init__ in the class chain with .im_func.
+ # Slot wrappers (builtins, classes defined in funcs) do not.
+ fob = _find_constructor(ob)
+ if fob is None:
+ fob = lambda: None
else:
- fob = ob
- # Try to build one for Python defined functions
- if type(fob) in [types.FunctionType, types.LambdaType]:
- argcount = fob.func_code.co_argcount
- real_args = fob.func_code.co_varnames[arg_offset:argcount]
- defaults = fob.func_defaults or []
- defaults = list(map(lambda name: "=%s" % repr(name), defaults))
- defaults = [""] * (len(real_args) - len(defaults)) + defaults
- items = map(lambda arg, dflt: arg + dflt, real_args, defaults)
- if fob.func_code.co_flags & 0x4:
- items.append("...")
- if fob.func_code.co_flags & 0x8:
- items.append("***")
- arg_text = ", ".join(items)
- arg_text = "(%s)" % re.sub("\.\d+", "<tuple>", arg_text)
- # See if we can use the docstring
+ arg_offset = 1
+ elif type(ob) == types.MethodType:
+ # bit of a hack for methods - turn it into a function
+ # and drop the "self" param for bound methods
+ fob = ob.im_func
+ if ob.im_self is not None:
+ arg_offset = 1
+ elif type(ob_call) == types.MethodType:
+ # a callable class instance
+ fob = ob_call.im_func
+ arg_offset = 1
+ else:
+ fob = ob
+ # Try to build one for Python defined functions
+ if type(fob) in [types.FunctionType, types.LambdaType]:
+ argcount = fob.func_code.co_argcount
+ real_args = fob.func_code.co_varnames[arg_offset:argcount]
+ defaults = fob.func_defaults or []
+ defaults = list(map(lambda name: "=%s" % repr(name), defaults))
+ defaults = [""] * (len(real_args) - len(defaults)) + defaults
+ items = map(lambda arg, dflt: arg + dflt, real_args, defaults)
+ for flag, pre, name in ((0x4, '*', 'args'), (0x8, '**', 'kwargs')):
+ if fob.func_code.co_flags & flag:
+ pre_name = pre + name
+ if name not in real_args:
+ items.append(pre_name)
+ else:
+ i = 1
+ while ((name+'%s') % i) in real_args:
+ i += 1
+ items.append((pre_name+'%s') % i)
+ argspec = ", ".join(items)
+ argspec = "(%s)" % re.sub("(?<!\d)\.\d+", "<tuple>", argspec)
+
+ lines = (textwrap.wrap(argspec, _MAX_COLS, subsequent_indent=_INDENT)
+ if len(argspec) > _MAX_COLS else [argspec] if argspec else [])
+
+ if isinstance(ob_call, types.MethodType):
+ doc = ob_call.__doc__
+ else:
doc = getattr(ob, "__doc__", "")
- if doc:
- doc = doc.lstrip()
- pos = doc.find("\n")
- if pos < 0 or pos > 70:
- pos = 70
- if arg_text:
- arg_text += "\n"
- arg_text += doc[:pos]
- return arg_text
-
-#################################################
-#
-# Test code
-#
-if __name__=='__main__':
-
- def t1(): "()"
- def t2(a, b=None): "(a, b=None)"
- def t3(a, *args): "(a, ...)"
- def t4(*args): "(...)"
- def t5(a, *args): "(a, ...)"
- def t6(a, b=None, *args, **kw): "(a, b=None, ..., ***)"
- def t7((a, b), c, (d, e)): "(<tuple>, c, <tuple>)"
-
- class TC(object):
- "(ai=None, ...)"
- def __init__(self, ai=None, *b): "(ai=None, ...)"
- def t1(self): "()"
- def t2(self, ai, b=None): "(ai, b=None)"
- def t3(self, ai, *args): "(ai, ...)"
- def t4(self, *args): "(...)"
- def t5(self, ai, *args): "(ai, ...)"
- def t6(self, ai, b=None, *args, **kw): "(ai, b=None, ..., ***)"
- def t7(self, (ai, b), c, (d, e)): "(<tuple>, c, <tuple>)"
-
- def test(tests):
- ct = CallTips()
- failed=[]
- for t in tests:
- expected = t.__doc__ + "\n" + t.__doc__
- name = t.__name__
- # exercise fetch_tip(), not just get_arg_text()
- try:
- qualified_name = "%s.%s" % (t.im_class.__name__, name)
- except AttributeError:
- qualified_name = name
- arg_text = ct.fetch_tip(qualified_name)
- if arg_text != expected:
- failed.append(t)
- fmt = "%s - expected %s, but got %s"
- print fmt % (t.__name__, expected, get_arg_text(t))
- print "%d of %d tests failed" % (len(failed), len(tests))
-
- tc = TC()
- tests = (t1, t2, t3, t4, t5, t6, t7,
- TC, tc.t1, tc.t2, tc.t3, tc.t4, tc.t5, tc.t6, tc.t7)
-
- test(tests)
+ if doc:
+ for line in doc.split('\n', _MAX_LINES)[:_MAX_LINES]:
+ line = line.strip()
+ if not line:
+ break
+ if len(line) > _MAX_COLS:
+ line = line[: _MAX_COLS - 3] + '...'
+ lines.append(line)
+ argspec = '\n'.join(lines)
+ return argspec
+
+if __name__ == '__main__':
+ from unittest import main
+ main('idlelib.idle_test.test_calltips', verbosity=2)
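The rewritten get_arg_text assembles the signature line by introspection and then appends up to _MAX_LINES of the docstring. A hedged illustration of the kind of tip it produces for an ordinary function (greet is a made-up example; exact wrapping may differ):

    def greet(name, punctuation='!'):
        """Return a greeting for name."""
        return 'Hello, ' + name + punctuation

    # The calltip for greet would read roughly:
    #   (name, punctuation='!')
    #   Return a greeting for name.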
diff --git a/Lib/idlelib/ClassBrowser.py b/Lib/idlelib/ClassBrowser.py
index 095b30d..6183be9 100644
--- a/Lib/idlelib/ClassBrowser.py
+++ b/Lib/idlelib/ClassBrowser.py
@@ -21,11 +21,15 @@ from idlelib.configHandler import idleConf
class ClassBrowser:
- def __init__(self, flist, name, path):
+ def __init__(self, flist, name, path, _htest=False):
# XXX This API should change, if the file doesn't end in ".py"
# XXX the code here is bogus!
+ """
+ _htest - bool, change box location when running htest.
+ """
self.name = name
self.file = os.path.join(path[0], self.name + ".py")
+ self._htest = _htest
self.init(flist)
def close(self, event=None):
@@ -40,6 +44,9 @@ class ClassBrowser:
self.top = top = ListedToplevel(flist.root)
top.protocol("WM_DELETE_WINDOW", self.close)
top.bind("<Escape>", self.close)
+ if self._htest: # place dialog below parent if running htest
+ top.geometry("+%d+%d" %
+ (flist.root.winfo_rootx(), flist.root.winfo_rooty() + 200))
self.settitle()
top.focus_set()
# create scrolled canvas
@@ -202,7 +209,7 @@ class MethodBrowserTreeItem(TreeItem):
edit = PyShell.flist.open(self.file)
edit.gotoline(self.cl.methods[self.name])
-def main():
+def _class_browser(parent): #Wrapper for htest
try:
file = __file__
except NameError:
@@ -213,9 +220,10 @@ def main():
file = sys.argv[0]
dir, file = os.path.split(file)
name = os.path.splitext(file)[0]
- ClassBrowser(PyShell.flist, name, [dir])
- if sys.stdin is sys.__stdin__:
- mainloop()
+ flist = PyShell.PyShellFileList(parent)
+ ClassBrowser(flist, name, [dir], _htest=True)
+ parent.mainloop()
if __name__ == "__main__":
- main()
+ from idlelib.idle_test.htest import run
+ run(_class_browser)
diff --git a/Lib/idlelib/ColorDelegator.py b/Lib/idlelib/ColorDelegator.py
index 7f4d740..0c625d3 100644
--- a/Lib/idlelib/ColorDelegator.py
+++ b/Lib/idlelib/ColorDelegator.py
@@ -16,14 +16,18 @@ def make_pat():
kw = r"\b" + any("KEYWORD", keyword.kwlist) + r"\b"
builtinlist = [str(name) for name in dir(__builtin__)
if not name.startswith('_')]
+ # We don't know whether "print" is a function or a keyword,
+ # so we always treat it as a keyword (the most common case).
+ builtinlist.remove('print')
# self.file = file("file") :
# 1st 'file' colorized normal, 2nd as builtin, 3rd as string
builtin = r"([^.'\"\\#]\b|^)" + any("BUILTIN", builtinlist) + r"\b"
comment = any("COMMENT", [r"#[^\n]*"])
- sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*'?"
- dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*"?'
- sq3string = r"(\b[rRuU])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?"
- dq3string = r'(\b[rRuU])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(""")?'
+ stringprefix = r"(\br|u|ur|R|U|UR|Ur|uR|b|B|br|Br|bR|BR)?"
+ sqstring = stringprefix + r"'[^'\\\n]*(\\.[^'\\\n]*)*'?"
+ dqstring = stringprefix + r'"[^"\\\n]*(\\.[^"\\\n]*)*"?'
+ sq3string = stringprefix + r"'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?"
+ dq3string = stringprefix + r'"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(""")?'
string = any("STRING", [sq3string, dq3string, sqstring, dqstring])
return kw + "|" + builtin + "|" + comment + "|" + string +\
"|" + any("SYNC", [r"\n"])
@@ -49,6 +53,10 @@ class ColorDelegator(Delegator):
self.config_colors()
self.bind("<<toggle-auto-coloring>>", self.toggle_colorize_event)
self.notify_range("1.0", "end")
+ else:
+ # No delegate - stop any colorizing
+ self.stop_colorizing = True
+ self.allow_colorizing = False
def config_colors(self):
for tag, cnf in self.tagdefs.items():
@@ -247,17 +255,21 @@ class ColorDelegator(Delegator):
for tag in self.tagdefs.keys():
self.tag_remove(tag, "1.0", "end")
-def main():
+def _color_delegator(parent):
from idlelib.Percolator import Percolator
root = Tk()
- root.wm_protocol("WM_DELETE_WINDOW", root.quit)
- text = Text(background="white")
+ root.title("Test ColorDelegator")
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
+ source = "if somename: x = 'abc' # comment\nprint"
+ text = Text(root, background="white")
+ text.insert("insert", source)
text.pack(expand=1, fill="both")
- text.focus_set()
p = Percolator(text)
d = ColorDelegator()
p.insertfilter(d)
root.mainloop()
if __name__ == "__main__":
- main()
+ from idlelib.idle_test.htest import run
+ run(_color_delegator)
diff --git a/Lib/idlelib/Debugger.py b/Lib/idlelib/Debugger.py
index 04eea32..94a8cb2 100644
--- a/Lib/idlelib/Debugger.py
+++ b/Lib/idlelib/Debugger.py
@@ -253,8 +253,8 @@ class Debugger:
if self.vsource.get():
self.sync_source_line()
- def show_frame(self, (frame, lineno)):
- self.frame = frame
+ def show_frame(self, stackitem):
+ self.frame = stackitem[0] # lineno is stackitem[1]
self.show_variables()
localsviewer = None
@@ -323,7 +323,7 @@ class Debugger:
class StackViewer(ScrolledList):
def __init__(self, master, flist, gui):
- if macosxSupport.runningAsOSXApp():
+ if macosxSupport.isAquaTk():
# At least with the stock AquaTk version on OSX 10.4 you'll
# get a shaking GUI that eventually kills IDLE if the width
# argument is specified.
diff --git a/Lib/idlelib/Delegator.py b/Lib/idlelib/Delegator.py
index 6125591..c476516 100644
--- a/Lib/idlelib/Delegator.py
+++ b/Lib/idlelib/Delegator.py
@@ -4,30 +4,22 @@ class Delegator:
def __init__(self, delegate=None):
self.delegate = delegate
- self.__cache = {}
+ self.__cache = set()
def __getattr__(self, name):
attr = getattr(self.delegate, name) # May raise AttributeError
setattr(self, name, attr)
- self.__cache[name] = attr
+ self.__cache.add(name)
return attr
def resetcache(self):
- for key in self.__cache.keys():
+ for key in self.__cache:
try:
delattr(self, key)
except AttributeError:
pass
self.__cache.clear()
- def cachereport(self):
- keys = self.__cache.keys()
- keys.sort()
- print keys
-
def setdelegate(self, delegate):
self.resetcache()
self.delegate = delegate
-
- def getdelegate(self):
- return self.delegate
diff --git a/Lib/idlelib/EditorWindow.py b/Lib/idlelib/EditorWindow.py
index fb05245..957d550 100644
--- a/Lib/idlelib/EditorWindow.py
+++ b/Lib/idlelib/EditorWindow.py
@@ -1,5 +1,6 @@
import sys
import os
+from platform import python_version
import re
import imp
from Tkinter import *
@@ -107,6 +108,8 @@ class HelpDialog(object):
self.parent = None
helpDialog = HelpDialog() # singleton instance
+def _help_dialog(parent): # wrapper for htest
+ helpDialog.show_dialog(parent)
class EditorWindow(object):
@@ -137,8 +140,8 @@ class EditorWindow(object):
'Python%s.chm' % _sphinx_version())
if os.path.isfile(chmfile):
dochome = chmfile
- elif macosxSupport.runningAsOSXApp():
- # documentation is stored inside the python framework
+ elif sys.platform == 'darwin':
+ # documentation may be stored inside a python framework
dochome = os.path.join(sys.prefix,
'Resources/English.lproj/Documentation/index.html')
dochome = os.path.normpath(dochome)
@@ -172,13 +175,13 @@ class EditorWindow(object):
'recent-files.lst')
self.text_frame = text_frame = Frame(top)
self.vbar = vbar = Scrollbar(text_frame, name='vbar')
- self.width = idleConf.GetOption('main','EditorWindow','width')
+ self.width = idleConf.GetOption('main','EditorWindow','width', type='int')
text_options = {
'name': 'text',
'padx': 5,
'wrap': 'none',
'width': self.width,
- 'height': idleConf.GetOption('main', 'EditorWindow', 'height')}
+ 'height': idleConf.GetOption('main', 'EditorWindow', 'height', type='int')}
if TkVersion >= 8.5:
# Starting with tk 8.5 we have to set the new tabstyle option
# to 'wordprocessor' to achieve the same display of tabs as in
@@ -192,7 +195,7 @@ class EditorWindow(object):
self.top.protocol("WM_DELETE_WINDOW", self.close)
self.top.bind("<<close-window>>", self.close_event)
- if macosxSupport.runningAsOSXApp():
+ if macosxSupport.isAquaTk():
# Command-W on editorwindows doesn't work without this.
text.bind('<<close-window>>', self.close_event)
# Some OS X systems have only one mouse button,
@@ -255,7 +258,8 @@ class EditorWindow(object):
if idleConf.GetOption('main', 'EditorWindow', 'font-bold', type='bool'):
fontWeight='bold'
text.config(font=(idleConf.GetOption('main', 'EditorWindow', 'font'),
- idleConf.GetOption('main', 'EditorWindow', 'font-size'),
+ idleConf.GetOption('main', 'EditorWindow',
+ 'font-size', type='int'),
fontWeight))
text_frame.pack(side=LEFT, fill=BOTH, expand=1)
text.pack(side=TOP, fill=BOTH, expand=1)
@@ -345,6 +349,36 @@ class EditorWindow(object):
self.askinteger = tkSimpleDialog.askinteger
self.showerror = tkMessageBox.showerror
+ self._highlight_workaround() # Fix selection tags on Windows
+
+ def _highlight_workaround(self):
+ # On Windows, Tk removes painting of the selection
+ # tags, which differs from the behavior on Linux and Mac.
+ # See issue14146 for more information.
+ if not sys.platform.startswith('win'):
+ return
+
+ text = self.text
+ text.event_add("<<Highlight-FocusOut>>", "<FocusOut>")
+ text.event_add("<<Highlight-FocusIn>>", "<FocusIn>")
+ def highlight_fix(focus):
+ sel_range = text.tag_ranges("sel")
+ if sel_range:
+ if focus == 'out':
+ HILITE_CONFIG = idleConf.GetHighlight(
+ idleConf.CurrentTheme(), 'hilite')
+ text.tag_config("sel_fix", HILITE_CONFIG)
+ text.tag_raise("sel_fix")
+ text.tag_add("sel_fix", *sel_range)
+ elif focus == 'in':
+ text.tag_remove("sel_fix", "1.0", "end")
+
+ text.bind("<<Highlight-FocusOut>>",
+ lambda ev: highlight_fix("out"))
+ text.bind("<<Highlight-FocusIn>>",
+ lambda ev: highlight_fix("in"))
+
+
def _filename_to_unicode(self, filename):
"""convert filename to unicode in order to display it in Tk"""
if isinstance(filename, unicode) or not filename:
@@ -408,7 +442,7 @@ class EditorWindow(object):
def set_status_bar(self):
self.status_bar = self.MultiStatusBar(self.top)
- if macosxSupport.runningAsOSXApp():
+ if sys.platform == "darwin":
# Insert some padding to avoid obscuring some of the statusbar
# by the resize widget.
self.status_bar.set_label('_padding1', ' ', side=RIGHT)
@@ -435,8 +469,7 @@ class EditorWindow(object):
("help", "_Help"),
]
- if macosxSupport.runningAsOSXApp():
- del menu_specs[-3]
+ if sys.platform == "darwin":
menu_specs[-2] = ("windows", "_Window")
@@ -448,7 +481,7 @@ class EditorWindow(object):
menudict[name] = menu = Menu(mbar, name=name)
mbar.add_cascade(label=label, menu=menu, underline=underline)
- if macosxSupport.isCarbonAquaTk(self.root):
+ if macosxSupport.isCarbonTk():
# Insert the application menu
menudict['application'] = menu = Menu(mbar, name='apple')
mbar.add_cascade(label='IDLE', menu=menu)
@@ -470,7 +503,6 @@ class EditorWindow(object):
rmenu = None
def right_menu_event(self, event):
- self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "@%d,%d" % (event.x, event.y))
if not self.rmenu:
self.make_rmenu()
@@ -479,23 +511,58 @@ class EditorWindow(object):
iswin = sys.platform[:3] == 'win'
if iswin:
self.text.config(cursor="arrow")
+
+ for item in self.rmenu_specs:
+ try:
+ label, eventname, verify_state = item
+ except ValueError: # see issue1207589
+ continue
+
+ if verify_state is None:
+ continue
+ state = getattr(self, verify_state)()
+ rmenu.entryconfigure(label, state=state)
+
rmenu.tk_popup(event.x_root, event.y_root)
if iswin:
self.text.config(cursor="ibeam")
rmenu_specs = [
- # ("Label", "<<virtual-event>>"), ...
- ("Close", "<<close-window>>"), # Example
+ # ("Label", "<<virtual-event>>", "statefuncname"), ...
+ ("Close", "<<close-window>>", None), # Example
]
def make_rmenu(self):
rmenu = Menu(self.text, tearoff=0)
- for label, eventname in self.rmenu_specs:
- def command(text=self.text, eventname=eventname):
- text.event_generate(eventname)
- rmenu.add_command(label=label, command=command)
+ for item in self.rmenu_specs:
+ label, eventname = item[0], item[1]
+ if label is not None:
+ def command(text=self.text, eventname=eventname):
+ text.event_generate(eventname)
+ rmenu.add_command(label=label, command=command)
+ else:
+ rmenu.add_separator()
self.rmenu = rmenu
+ def rmenu_check_cut(self):
+ return self.rmenu_check_copy()
+
+ def rmenu_check_copy(self):
+ try:
+ indx = self.text.index('sel.first')
+ except TclError:
+ return 'disabled'
+ else:
+ return 'normal' if indx else 'disabled'
+
+ def rmenu_check_paste(self):
+ try:
+ self.text.tk.call('tk::GetSelection', self.text, 'CLIPBOARD')
+ except TclError:
+ return 'disabled'
+ else:
+ return 'normal'
+
def about_dialog(self, event=None):
aboutDialog.AboutDialog(self.top,'About IDLE')
@@ -625,7 +692,7 @@ class EditorWindow(object):
# XXX Ought to insert current file's directory in front of path
try:
(f, file, (suffix, mode, type)) = _find_module(name)
- except (NameError, ImportError), msg:
+ except (NameError, ImportError) as msg:
tkMessageBox.showerror("Import error", str(msg), parent=self.text)
return
if type != imp.PY_SOURCE:
@@ -735,7 +802,8 @@ class EditorWindow(object):
if idleConf.GetOption('main','EditorWindow','font-bold',type='bool'):
fontWeight='bold'
self.text.config(font=(idleConf.GetOption('main','EditorWindow','font'),
- idleConf.GetOption('main','EditorWindow','font-size'),
+ idleConf.GetOption('main','EditorWindow','font-size',
+ type='int'),
fontWeight))
def RemoveKeybindings(self):
@@ -768,7 +836,11 @@ class EditorWindow(object):
menuEventDict[menu[0]][prepstr(item[0])[1]] = item[1]
for menubarItem in self.menudict.keys():
menu = self.menudict[menubarItem]
- end = menu.index(END) + 1
+ end = menu.index(END)
+ if end is None:
+ # Skip empty menus
+ continue
+ end += 1
for index in range(0, end):
if menu.type(index) == 'command':
accel = menu.entrycget(index, 'accelerator')
@@ -825,11 +897,8 @@ class EditorWindow(object):
"Load and update the recent files list and menus"
rf_list = []
if os.path.exists(self.recent_files_path):
- rf_list_file = open(self.recent_files_path,'r')
- try:
+ with open(self.recent_files_path, 'r') as rf_list_file:
rf_list = rf_list_file.readlines()
- finally:
- rf_list_file.close()
if new_file:
new_file = os.path.abspath(new_file) + '\n'
if new_file in rf_list:
@@ -856,7 +925,7 @@ class EditorWindow(object):
# for each edit window instance, construct the recent files menu
for instance in self.top.instance_dict.keys():
menu = instance.recent_files_menu
- menu.delete(1, END) # clear, and rebuild:
+ menu.delete(0, END) # clear, and rebuild:
for i, file_name in enumerate(rf_list):
file_name = file_name.rstrip() # zap \n
# make unicode string to display non-ASCII chars correctly
@@ -899,11 +968,14 @@ class EditorWindow(object):
self.undo.reset_undo()
def short_title(self):
+ pyversion = "Python " + python_version() + ": "
filename = self.io.filename
if filename:
filename = os.path.basename(filename)
+ else:
+ filename = "Untitled"
# return unicode string to display non-ASCII chars correctly
- return self._filename_to_unicode(filename)
+ return pyversion + self._filename_to_unicode(filename)
def long_title(self):
# return unicode string to display non-ASCII chars correctly
@@ -1395,6 +1467,7 @@ class EditorWindow(object):
def tabify_region_event(self, event):
head, tail, chars, lines = self.get_region()
tabwidth = self._asktabwidth()
+ if tabwidth is None: return
for pos in range(len(lines)):
line = lines[pos]
if line:
@@ -1406,6 +1479,7 @@ class EditorWindow(object):
def untabify_region_event(self, event):
head, tail, chars, lines = self.get_region()
tabwidth = self._asktabwidth()
+ if tabwidth is None: return
for pos in range(len(lines)):
lines[pos] = lines[pos].expandtabs(tabwidth)
self.set_region(head, tail, chars, lines)
@@ -1499,7 +1573,7 @@ class EditorWindow(object):
parent=self.text,
initialvalue=self.indentwidth,
minvalue=2,
- maxvalue=16) or self.tabwidth
+ maxvalue=16)
# Guess indentwidth from text content.
# Return guessed indentwidth. This should not be believed unless
@@ -1581,7 +1655,7 @@ class IndentSearcher(object):
try:
try:
_tokenize.tokenize(self.readline, self.tokeneater)
- except _tokenize.TokenError:
+ except (_tokenize.TokenError, SyntaxError):
# since we cut off the tokenizer early, we can trigger
# spurious errors
pass
@@ -1610,7 +1684,7 @@ def get_accelerator(keydefs, eventname):
keylist = keydefs.get(eventname)
# issue10940: temporary workaround to prevent hang with OS X Cocoa Tk 8.5
# if not keylist:
- if (not keylist) or (macosxSupport.runningAsOSXApp() and eventname in {
+ if (not keylist) or (macosxSupport.isCocoaTk() and eventname in {
"<<open-module>>",
"<<goto-line>>",
"<<change-indentwidth>>"}):
@@ -1637,19 +1711,19 @@ def fixwordbreaks(root):
tk.call('set', 'tcl_nonwordchars', '[^a-zA-Z0-9_]')
-def test():
- root = Tk()
+def _editor_window(parent):
+ root = parent
fixwordbreaks(root)
- root.withdraw()
if sys.argv[1:]:
filename = sys.argv[1]
else:
filename = None
+ macosxSupport.setupApp(root, None)
edit = EditorWindow(root=root, filename=filename)
- edit.set_close_hook(root.quit)
edit.text.bind("<<close-all-windows>>", edit.close_event)
- root.mainloop()
- root.destroy()
+ parent.mainloop()
+
if __name__ == '__main__':
- test()
+ from idlelib.idle_test.htest import run
+ run(_help_dialog, _editor_window)
diff --git a/Lib/idlelib/FormatParagraph.py b/Lib/idlelib/FormatParagraph.py
index 6a5f9b5..9b10c0a 100644
--- a/Lib/idlelib/FormatParagraph.py
+++ b/Lib/idlelib/FormatParagraph.py
@@ -1,18 +1,19 @@
-# Extension to format a paragraph
-
-# Does basic, standard text formatting, and also understands Python
-# comment blocks. Thus, for editing Python source code, this
-# extension is really only suitable for reformatting these comment
-# blocks or triple-quoted strings.
-
-# Known problems with comment reformatting:
-# * If there is a selection marked, and the first line of the
-# selection is not complete, the block will probably not be detected
-# as comments, and will have the normal "text formatting" rules
-# applied.
-# * If a comment block has leading whitespace that mixes tabs and
-# spaces, they will not be considered part of the same block.
-# * Fancy comments, like this bulleted list, arent handled :-)
+"""Extension to format a paragraph or selection to a max width.
+
+Does basic, standard text formatting, and also understands Python
+comment blocks. Thus, for editing Python source code, this
+extension is really only suitable for reformatting these comment
+blocks or triple-quoted strings.
+
+Known problems with comment reformatting:
+* If there is a selection marked, and the first line of the
+ selection is not complete, the block will probably not be detected
+ as comments, and will have the normal "text formatting" rules
+ applied.
+* If a comment block has leading whitespace that mixes tabs and
+ spaces, they will not be considered part of the same block.
+* Fancy comments, like this bulleted list, aren't handled :-)
+"""
import re
from idlelib.configHandler import idleConf
@@ -31,41 +32,35 @@ class FormatParagraph:
def close(self):
self.editwin = None
- def format_paragraph_event(self, event):
- maxformatwidth = int(idleConf.GetOption('main','FormatParagraph','paragraph'))
+ def format_paragraph_event(self, event, limit=None):
+ """Formats paragraph to a max width specified in idleConf.
+
+ If text is selected, format_paragraph_event will start breaking lines
+ at the max width, starting from the beginning selection.
+
+ If no text is selected, format_paragraph_event uses the current
+ cursor location to determine the paragraph (lines of text surrounded
+ by blank lines) and formats it.
+
+ The length limit parameter is for testing with a known value.
+ """
+ if limit == None:
+ limit = idleConf.GetOption(
+ 'main', 'FormatParagraph', 'paragraph', type='int')
text = self.editwin.text
first, last = self.editwin.get_selection_indices()
if first and last:
data = text.get(first, last)
- comment_header = ''
+ comment_header = get_comment_header(data)
else:
first, last, comment_header, data = \
find_paragraph(text, text.index("insert"))
if comment_header:
- # Reformat the comment lines - convert to text sans header.
- lines = data.split("\n")
- lines = map(lambda st, l=len(comment_header): st[l:], lines)
- data = "\n".join(lines)
- # Reformat to maxformatwidth chars or a 20 char width, whichever is greater.
- format_width = max(maxformatwidth - len(comment_header), 20)
- newdata = reformat_paragraph(data, format_width)
- # re-split and re-insert the comment header.
- newdata = newdata.split("\n")
- # If the block ends in a \n, we dont want the comment
- # prefix inserted after it. (Im not sure it makes sense to
- # reformat a comment block that isnt made of complete
- # lines, but whatever!) Can't think of a clean solution,
- # so we hack away
- block_suffix = ""
- if not newdata[-1]:
- block_suffix = "\n"
- newdata = newdata[:-1]
- builder = lambda item, prefix=comment_header: prefix+item
- newdata = '\n'.join(map(builder, newdata)) + block_suffix
+ newdata = reformat_comment(data, limit, comment_header)
else:
- # Just a normal text format
- newdata = reformat_paragraph(data, maxformatwidth)
+ newdata = reformat_paragraph(data, limit)
text.tag_remove("sel", "1.0", "end")
+
if newdata != data:
text.mark_set("insert", first)
text.undo_block_start()
@@ -78,31 +73,44 @@ class FormatParagraph:
return "break"
def find_paragraph(text, mark):
+ """Returns the start/stop indices enclosing the paragraph that mark is in.
+
+ Also returns the comment format string, if any, and paragraph of text
+ between the start/stop indices.
+ """
lineno, col = map(int, mark.split("."))
- line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
+ line = text.get("%d.0" % lineno, "%d.end" % lineno)
+
+ # Look for start of next paragraph if the index passed in is a blank line
while text.compare("%d.0" % lineno, "<", "end") and is_all_white(line):
lineno = lineno + 1
- line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
+ line = text.get("%d.0" % lineno, "%d.end" % lineno)
first_lineno = lineno
comment_header = get_comment_header(line)
comment_header_len = len(comment_header)
+
+ # Once start line found, search for end of paragraph (a blank line)
while get_comment_header(line)==comment_header and \
not is_all_white(line[comment_header_len:]):
lineno = lineno + 1
- line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
+ line = text.get("%d.0" % lineno, "%d.end" % lineno)
last = "%d.0" % lineno
- # Search back to beginning of paragraph
+
+ # Search back to beginning of paragraph (first blank line before)
lineno = first_lineno - 1
- line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
+ line = text.get("%d.0" % lineno, "%d.end" % lineno)
while lineno > 0 and \
get_comment_header(line)==comment_header and \
not is_all_white(line[comment_header_len:]):
lineno = lineno - 1
- line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
+ line = text.get("%d.0" % lineno, "%d.end" % lineno)
first = "%d.0" % (lineno+1)
+
return first, last, comment_header, text.get(first, last)
+# This should perhaps be replaced with textwrap.wrap
def reformat_paragraph(data, limit):
+ """Return data reformatted to specified width (limit)."""
lines = data.split("\n")
i = 0
n = len(lines)
@@ -125,7 +133,7 @@ def reformat_paragraph(data, limit):
if not word:
continue # Can happen when line ends in whitespace
if len((partial + word).expandtabs()) > limit and \
- partial != indent1:
+ partial != indent1:
new.append(partial.rstrip())
partial = indent2
partial = partial + word + " "
@@ -137,13 +145,49 @@ def reformat_paragraph(data, limit):
new.extend(lines[i:])
return "\n".join(new)
+def reformat_comment(data, limit, comment_header):
+ """Return data reformatted to specified width with comment header."""
+
+ # Remove header from the comment lines
+ lc = len(comment_header)
+ data = "\n".join(line[lc:] for line in data.split("\n"))
+ # Reformat to maxformatwidth chars or a 20 char width,
+ # whichever is greater.
+ format_width = max(limit - len(comment_header), 20)
+ newdata = reformat_paragraph(data, format_width)
+ # re-split and re-insert the comment header.
+ newdata = newdata.split("\n")
+ # If the block ends in a \n, we don't want the comment prefix
+ # inserted after it. (I'm not sure it makes sense to reformat a
+ # comment block that is not made of complete lines, but whatever!)
+ # Can't think of a clean solution, so we hack away
+ block_suffix = ""
+ if not newdata[-1]:
+ block_suffix = "\n"
+ newdata = newdata[:-1]
+ return '\n'.join(comment_header+line for line in newdata) + block_suffix
+
def is_all_white(line):
+ """Return True if line is empty or all whitespace."""
+
return re.match(r"^\s*$", line) is not None
def get_indent(line):
- return re.match(r"^(\s*)", line).group()
+ """Return the initial space or tab indent of line."""
+ return re.match(r"^([ \t]*)", line).group()
def get_comment_header(line):
- m = re.match(r"^(\s*#*)", line)
+ """Return string with leading whitespace and '#' from line or ''.
+
+ A null return indicates that the line is not a comment line. A non-
+ null return, such as ' #', will be used to find the other lines of
+ a comment block with the same indent.
+ """
+ m = re.match(r"^([ \t]*#*)", line)
if m is None: return ""
return m.group(1)
+
+if __name__ == "__main__":
+ import unittest
+ unittest.main('idlelib.idle_test.test_formatparagraph',
+ verbosity=2, exit=False)
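
As the comment above reformat_paragraph notes, the same wrap-then-reprefix flow could be built on the standard textwrap module. The following standalone sketch (not the module's own code, and with a made-up function name) strips the '# ' header, wraps the bare text to max(limit - header, 20) columns, and reattaches the header, which is essentially what reformat_comment does:

import re
import textwrap

def reformat_comment_sketch(data, limit=72):
    """Rewrap a block of '#' comment lines to roughly `limit` columns."""
    header = re.match(r"^([ \t]*#*[ \t]?)", data).group(1)
    # Drop the header from every line, wrap the bare text, then re-prefix.
    bare = "\n".join(line[len(header):] for line in data.split("\n"))
    width = max(limit - len(header), 20)
    wrapped = textwrap.fill(" ".join(bare.split()), width)
    return "\n".join(header + line for line in wrapped.split("\n"))

sample = ("# A long comment that should be rewrapped so that no line\n"
          "# exceeds the configured paragraph width for this file.")
print(reformat_comment_sketch(sample, limit=40))
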
diff --git a/Lib/idlelib/GrepDialog.py b/Lib/idlelib/GrepDialog.py
index e40e546..ce40c96 100644
--- a/Lib/idlelib/GrepDialog.py
+++ b/Lib/idlelib/GrepDialog.py
@@ -1,9 +1,14 @@
import os
import fnmatch
+import re # for htest
import sys
-from Tkinter import *
+from Tkinter import StringVar, BooleanVar, Checkbutton # for GrepDialog
+from Tkinter import Tk, Text, Button, SEL, END # for htest
from idlelib import SearchEngine
+import itertools
from idlelib.SearchDialogBase import SearchDialogBase
+# Importing OutputWindow fails due to import loop
+# EditorWindow -> GrepDialog -> OutputWindow -> EditorWindow
def grep(text, io=None, flist=None):
root = text._root()
@@ -63,7 +68,7 @@ class GrepDialog(SearchDialogBase):
if not path:
self.top.bell()
return
- from idlelib.OutputWindow import OutputWindow
+ from idlelib.OutputWindow import OutputWindow # leave here!
save = sys.stdout
try:
sys.stdout = OutputWindow(self.flist)
@@ -77,41 +82,34 @@ class GrepDialog(SearchDialogBase):
list.sort()
self.close()
pat = self.engine.getpat()
- print "Searching %r in %s ..." % (pat, path)
+ print("Searching %r in %s ..." % (pat, path))
hits = 0
- for fn in list:
- try:
- f = open(fn)
- except IOError, msg:
- print msg
- continue
- lineno = 0
- while 1:
- block = f.readlines(100000)
- if not block:
- break
- for line in block:
- lineno = lineno + 1
- if line[-1:] == '\n':
- line = line[:-1]
- if prog.search(line):
- sys.stdout.write("%s: %s: %s\n" % (fn, lineno, line))
- hits = hits + 1
- if hits:
- if hits == 1:
- s = ""
- else:
- s = "s"
- print "Found", hits, "hit%s." % s
- print "(Hint: right-click to open locations.)"
- else:
- print "No hits."
+ try:
+ for fn in list:
+ try:
+ with open(fn) as f:
+ for lineno, line in enumerate(f, 1):
+ if line[-1:] == '\n':
+ line = line[:-1]
+ if prog.search(line):
+ sys.stdout.write("%s: %s: %s\n" %
+ (fn, lineno, line))
+ hits += 1
+ except IOError as msg:
+ print(msg)
+ print(("Hits found: %s\n"
+ "(Hint: right-click to open locations.)"
+ % hits) if hits else "No hits.")
+ except AttributeError:
+ # Tk window has been closed, OutputWindow.text = None,
+ # so in OW.write, OW.text.insert fails.
+ pass
def findfiles(self, dir, base, rec):
try:
names = os.listdir(dir or os.curdir)
- except os.error, msg:
- print msg
+ except os.error as msg:
+ print(msg)
return []
list = []
subdirs = []
@@ -131,3 +129,31 @@ class GrepDialog(SearchDialogBase):
if self.top:
self.top.grab_release()
self.top.withdraw()
+
+
+def _grep_dialog(parent): # for htest
+ from idlelib.PyShell import PyShellFileList
+ root = Tk()
+ root.title("Test GrepDialog")
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
+
+ flist = PyShellFileList(root)
+ text = Text(root, height=5)
+ text.pack()
+
+ def show_grep_dialog():
+ text.tag_add(SEL, "1.0", END)
+ grep(text, flist=flist)
+ text.tag_remove(SEL, "1.0", END)
+
+ button = Button(root, text="Show GrepDialog", command=show_grep_dialog)
+ button.pack()
+ root.mainloop()
+
+if __name__ == "__main__":
+ import unittest
+ unittest.main('idlelib.idle_test.test_grep', verbosity=2, exit=False)
+
+ from idlelib.idle_test.htest import run
+ run(_grep_dialog)
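
The rewritten grep_it loop above replaces the manual readlines(100000) block reader with `with open(fn)` and `enumerate(f, 1)`. A standalone sketch of that shape (the function name and file list are only placeholders):

import re

def grep_files(pattern, filenames):
    """Print filename: lineno: line for every match, like grep_it above."""
    prog = re.compile(pattern)
    hits = 0
    for fn in filenames:
        try:
            with open(fn) as f:
                for lineno, line in enumerate(f, 1):
                    line = line.rstrip("\n")
                    if prog.search(line):
                        print("%s: %s: %s" % (fn, lineno, line))
                        hits += 1
        except IOError as msg:
            print(msg)
    print(("Hits found: %s" % hits) if hits else "No hits.")

if __name__ == "__main__":
    grep_files(r"def ", ["GrepDialog.py"])   # placeholder file name
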
diff --git a/Lib/idlelib/HyperParser.py b/Lib/idlelib/HyperParser.py
index 38a19f2..5816d00 100644
--- a/Lib/idlelib/HyperParser.py
+++ b/Lib/idlelib/HyperParser.py
@@ -1,11 +1,8 @@
-"""
-HyperParser
-===========
-This module defines the HyperParser class, which provides advanced parsing
-abilities for the ParenMatch and other extensions.
-The HyperParser uses PyParser. PyParser is intended mostly to give information
-on the proper indentation of code. HyperParser gives some information on the
-structure of code, used by extensions to help the user.
+"""Provide advanced parsing abilities for ParenMatch and other extensions.
+
+HyperParser uses PyParser. PyParser mostly gives information on the
+proper indentation of code. HyperParser gives additional information on
+the structure of code.
"""
import string
@@ -15,9 +12,7 @@ from idlelib import PyParse
class HyperParser:
def __init__(self, editwin, index):
- """Initialize the HyperParser to analyze the surroundings of the given
- index.
- """
+ "To initialize, analyze the surroundings of the given index."
self.editwin = editwin
self.text = text = editwin.text
@@ -33,9 +28,10 @@ class HyperParser:
startat = max(lno - context, 1)
startatindex = repr(startat) + ".0"
stopatindex = "%d.end" % lno
- # We add the newline because PyParse requires a newline at end.
- # We add a space so that index won't be at end of line, so that
- # its status will be the same as the char before it, if should.
+ # We add the newline because PyParse requires a newline
+ # at end. We add a space so that index won't be at end
+ # of line, so that its status will be the same as the
+ # char before it, if it should be.
parser.set_str(text.get(startatindex, stopatindex)+' \n')
bod = parser.find_good_parse_start(
editwin._build_char_in_string_func(startatindex))
@@ -49,122 +45,130 @@ class HyperParser:
else:
startatindex = "1.0"
stopatindex = "%d.end" % lno
- # We add the newline because PyParse requires a newline at end.
- # We add a space so that index won't be at end of line, so that
- # its status will be the same as the char before it, if should.
+ # We add the newline because PyParse requires it. We add a
+ # space so that index won't be at end of line, so that its
+ # status will be the same as the char before it, if it should be.
parser.set_str(text.get(startatindex, stopatindex)+' \n')
parser.set_lo(0)
- # We want what the parser has, except for the last newline and space.
+ # We want what the parser has, minus the last newline and space.
self.rawtext = parser.str[:-2]
- # As far as I can see, parser.str preserves the statement we are in,
- # so that stopatindex can be used to synchronize the string with the
- # text box indices.
+ # Parser.str apparently preserves the statement we are in, so
+ # that stopatindex can be used to synchronize the string with
+ # the text box indices.
self.stopatindex = stopatindex
self.bracketing = parser.get_last_stmt_bracketing()
- # find which pairs of bracketing are openers. These always correspond
- # to a character of rawtext.
- self.isopener = [i>0 and self.bracketing[i][1] > self.bracketing[i-1][1]
+ # find which pairs of bracketing are openers. These always
+ # correspond to a character of rawtext.
+ self.isopener = [i>0 and self.bracketing[i][1] >
+ self.bracketing[i-1][1]
for i in range(len(self.bracketing))]
self.set_index(index)
def set_index(self, index):
- """Set the index to which the functions relate. Note that it must be
- in the same statement.
+ """Set the index to which the functions relate.
+
+ The index must be in the same statement.
"""
- indexinrawtext = \
- len(self.rawtext) - len(self.text.get(index, self.stopatindex))
+ indexinrawtext = (len(self.rawtext) -
+ len(self.text.get(index, self.stopatindex)))
if indexinrawtext < 0:
- raise ValueError("The index given is before the analyzed statement")
+ raise ValueError("Index %s precedes the analyzed statement"
+ % index)
self.indexinrawtext = indexinrawtext
# find the rightmost bracket to which index belongs
self.indexbracket = 0
- while self.indexbracket < len(self.bracketing)-1 and \
- self.bracketing[self.indexbracket+1][0] < self.indexinrawtext:
+ while (self.indexbracket < len(self.bracketing)-1 and
+ self.bracketing[self.indexbracket+1][0] < self.indexinrawtext):
self.indexbracket += 1
- if self.indexbracket < len(self.bracketing)-1 and \
- self.bracketing[self.indexbracket+1][0] == self.indexinrawtext and \
- not self.isopener[self.indexbracket+1]:
+ if (self.indexbracket < len(self.bracketing)-1 and
+ self.bracketing[self.indexbracket+1][0] == self.indexinrawtext and
+ not self.isopener[self.indexbracket+1]):
self.indexbracket += 1
def is_in_string(self):
- """Is the index given to the HyperParser is in a string?"""
+ """Is the index given to the HyperParser in a string?"""
# The bracket to which we belong should be an opener.
# If it's an opener, it has to have a character.
- return self.isopener[self.indexbracket] and \
- self.rawtext[self.bracketing[self.indexbracket][0]] in ('"', "'")
+ return (self.isopener[self.indexbracket] and
+ self.rawtext[self.bracketing[self.indexbracket][0]]
+ in ('"', "'"))
def is_in_code(self):
- """Is the index given to the HyperParser is in a normal code?"""
- return not self.isopener[self.indexbracket] or \
- self.rawtext[self.bracketing[self.indexbracket][0]] not in \
- ('#', '"', "'")
+ """Is the index given to the HyperParser in normal code?"""
+ return (not self.isopener[self.indexbracket] or
+ self.rawtext[self.bracketing[self.indexbracket][0]]
+ not in ('#', '"', "'"))
def get_surrounding_brackets(self, openers='([{', mustclose=False):
- """If the index given to the HyperParser is surrounded by a bracket
- defined in openers (or at least has one before it), return the
- indices of the opening bracket and the closing bracket (or the
- end of line, whichever comes first).
- If it is not surrounded by brackets, or the end of line comes before
- the closing bracket and mustclose is True, returns None.
+ """Return bracket indexes or None.
+
+ If the index given to the HyperParser is surrounded by a
+ bracket defined in openers (or at least has one before it),
+ return the indices of the opening bracket and the closing
+ bracket (or the end of line, whichever comes first).
+
+ If it is not surrounded by brackets, or the end of line comes
+ before the closing bracket and mustclose is True, returns None.
"""
+
bracketinglevel = self.bracketing[self.indexbracket][1]
before = self.indexbracket
- while not self.isopener[before] or \
- self.rawtext[self.bracketing[before][0]] not in openers or \
- self.bracketing[before][1] > bracketinglevel:
+ while (not self.isopener[before] or
+ self.rawtext[self.bracketing[before][0]] not in openers or
+ self.bracketing[before][1] > bracketinglevel):
before -= 1
if before < 0:
return None
bracketinglevel = min(bracketinglevel, self.bracketing[before][1])
after = self.indexbracket + 1
- while after < len(self.bracketing) and \
- self.bracketing[after][1] >= bracketinglevel:
+ while (after < len(self.bracketing) and
+ self.bracketing[after][1] >= bracketinglevel):
after += 1
beforeindex = self.text.index("%s-%dc" %
(self.stopatindex, len(self.rawtext)-self.bracketing[before][0]))
- if after >= len(self.bracketing) or \
- self.bracketing[after][0] > len(self.rawtext):
+ if (after >= len(self.bracketing) or
+ self.bracketing[after][0] > len(self.rawtext)):
if mustclose:
return None
afterindex = self.stopatindex
else:
- # We are after a real char, so it is a ')' and we give the index
- # before it.
- afterindex = self.text.index("%s-%dc" %
- (self.stopatindex,
+ # We are after a real char, so it is a ')' and we give the
+ # index before it.
+ afterindex = self.text.index(
+ "%s-%dc" % (self.stopatindex,
len(self.rawtext)-(self.bracketing[after][0]-1)))
return beforeindex, afterindex
- # This string includes all chars that may be in a white space
+ # Ascii chars that may be in a white space
_whitespace_chars = " \t\n\\"
- # This string includes all chars that may be in an identifier
+ # Ascii chars that may be in an identifier
_id_chars = string.ascii_letters + string.digits + "_"
- # This string includes all chars that may be the first char of an identifier
+ # Ascii chars that may be the first char of an identifier
_id_first_chars = string.ascii_letters + "_"
- # Given a string and pos, return the number of chars in the identifier
- # which ends at pos, or 0 if there is no such one. Saved words are not
- # identifiers.
+ # Given a string and pos, return the number of chars in the
+ # identifier which ends at pos, or 0 if there is no such one. Saved
+ # words are not identifiers.
def _eat_identifier(self, str, limit, pos):
i = pos
while i > limit and str[i-1] in self._id_chars:
i -= 1
- if i < pos and (str[i] not in self._id_first_chars or \
- keyword.iskeyword(str[i:pos])):
+ if (i < pos and (str[i] not in self._id_first_chars or
+ keyword.iskeyword(str[i:pos]))):
i = pos
return pos - i
def get_expression(self):
- """Return a string with the Python expression which ends at the given
- index, which is empty if there is no real one.
+ """Return a string with the Python expression which ends at the
+ given index, which is empty if there is no real one.
"""
if not self.is_in_code():
- raise ValueError("get_expression should only be called if index "\
- "is inside a code.")
+ raise ValueError("get_expression should only be called"
+ "if index is inside a code.")
rawtext = self.rawtext
bracketing = self.bracketing
@@ -177,20 +181,20 @@ class HyperParser:
postdot_phase = True
while 1:
- # Eat whitespaces, comments, and if postdot_phase is False - one dot
+ # Eat whitespaces, comments, and if postdot_phase is False - a dot
while 1:
if pos>brck_limit and rawtext[pos-1] in self._whitespace_chars:
# Eat a whitespace
pos -= 1
- elif not postdot_phase and \
- pos > brck_limit and rawtext[pos-1] == '.':
+ elif (not postdot_phase and
+ pos > brck_limit and rawtext[pos-1] == '.'):
# Eat a dot
pos -= 1
postdot_phase = True
- # The next line will fail if we are *inside* a comment, but we
- # shouldn't be.
- elif pos == brck_limit and brck_index > 0 and \
- rawtext[bracketing[brck_index-1][0]] == '#':
+ # The next line will fail if we are *inside* a comment,
+ # but we shouldn't be.
+ elif (pos == brck_limit and brck_index > 0 and
+ rawtext[bracketing[brck_index-1][0]] == '#'):
# Eat a comment
brck_index -= 2
brck_limit = bracketing[brck_index][0]
@@ -200,8 +204,8 @@ class HyperParser:
break
if not postdot_phase:
- # We didn't find a dot, so the expression end at the last
- # identifier pos.
+ # We didn't find a dot, so the expression end at the
+ # last identifier pos.
break
ret = self._eat_identifier(rawtext, brck_limit, pos)
@@ -209,13 +213,13 @@ class HyperParser:
# There is an identifier to eat
pos = pos - ret
last_identifier_pos = pos
- # Now, in order to continue the search, we must find a dot.
+ # Now, to continue the search, we must find a dot.
postdot_phase = False
# (the loop continues now)
elif pos == brck_limit:
- # We are at a bracketing limit. If it is a closing bracket,
- # eat the bracket, otherwise, stop the search.
+ # We are at a bracketing limit. If it is a closing
+ # bracket, eat the bracket, otherwise, stop the search.
level = bracketing[brck_index][1]
while brck_index > 0 and bracketing[brck_index-1][1] > level:
brck_index -= 1
@@ -232,6 +236,11 @@ class HyperParser:
pass
else:
# We can't continue after other types of brackets
+ if rawtext[pos] in "'\"":
+ # Scan a string prefix
+ while pos > 0 and rawtext[pos - 1] in "rRbBuU":
+ pos -= 1
+ last_identifier_pos = pos
break
else:
@@ -239,3 +248,8 @@ class HyperParser:
break
return rawtext[last_identifier_pos:self.indexinrawtext]
+
+
+if __name__ == '__main__':
+ import unittest
+ unittest.main('idlelib.idle_test.test_hyperparser', verbosity=2)
diff --git a/Lib/idlelib/IOBinding.py b/Lib/idlelib/IOBinding.py
index c515432..aedd372 100644
--- a/Lib/idlelib/IOBinding.py
+++ b/Lib/idlelib/IOBinding.py
@@ -7,6 +7,7 @@
import os
import types
+import pipes
import sys
import codecs
import tempfile
@@ -70,7 +71,7 @@ else:
encoding = encoding.lower()
-coding_re = re.compile("coding[:=]\s*([-\w_.]+)")
+coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')
class EncodingMessage(SimpleDialog):
"Inform user that an encoding declaration is needed."
@@ -124,11 +125,12 @@ def coding_spec(str):
Raise LookupError if the encoding is declared but unknown.
"""
# Only consider the first two lines
- str = str.split("\n")[:2]
- str = "\n".join(str)
-
- match = coding_re.search(str)
- if not match:
+ lst = str.split("\n", 2)[:2]
+ for line in lst:
+ match = coding_re.match(line)
+ if match is not None:
+ break
+ else:
return None
name = match.group(1)
# Check whether the encoding is known
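
The tightened coding_re above anchors the declaration to a comment at the start of a line, and coding_spec now matches line by line instead of searching the joined first two lines. A small standalone check of that behaviour (a sketch that mirrors, rather than imports, the module, and skips the codecs.lookup validation the real function performs):

import re

coding_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')

def coding_spec_sketch(source):
    """Return the declared encoding from the first two lines, or None."""
    for line in source.split("\n", 2)[:2]:
        match = coding_re.match(line)
        if match is not None:
            return match.group(1)
    return None

print(coding_spec_sketch("#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n"))  # utf-8
print(coding_spec_sketch("x = 'coding: utf-8'\n"))                             # None
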
@@ -196,29 +198,33 @@ class IOBinding:
self.filename_change_hook()
def open(self, event=None, editFile=None):
- if self.editwin.flist:
+ flist = self.editwin.flist
+ # Save in case parent window is closed (ie, during askopenfile()).
+ if flist:
if not editFile:
filename = self.askopenfile()
else:
filename=editFile
if filename:
- # If the current window has no filename and hasn't been
- # modified, we replace its contents (no loss). Otherwise
- # we open a new window. But we won't replace the
- # shell window (which has an interp(reter) attribute), which
- # gets set to "not modified" at every new prompt.
- try:
- interp = self.editwin.interp
- except AttributeError:
- interp = None
- if not self.filename and self.get_saved() and not interp:
- self.editwin.flist.open(filename, self.loadfile)
+ # If editFile is valid and already open, flist.open will
+ # shift focus to its existing window.
+ # If the current window exists and is a fresh unnamed,
+ # unmodified editor window (not an interpreter shell),
+ # pass self.loadfile to flist.open so it will load the file
+ # in the current window (if the file is not already open)
+ # instead of a new window.
+ if (self.editwin and
+ not getattr(self.editwin, 'interp', None) and
+ not self.filename and
+ self.get_saved()):
+ flist.open(filename, self.loadfile)
else:
- self.editwin.flist.open(filename)
+ flist.open(filename)
else:
- self.text.focus_set()
+ if self.text:
+ self.text.focus_set()
return "break"
- #
+
# Code for use outside IDLE:
if self.get_saved():
reply = self.maybesave()
@@ -243,10 +249,9 @@ class IOBinding:
try:
# open the file in binary mode so that we can handle
# end-of-line convention ourselves.
- f = open(filename,'rb')
- chars = f.read()
- f.close()
- except IOError, msg:
+ with open(filename, 'rb') as f:
+ chars = f.read()
+ except IOError as msg:
tkMessageBox.showerror("I/O Error", str(msg), master=self.text)
return False
@@ -289,7 +294,7 @@ class IOBinding:
# Next look for coding specification
try:
enc = coding_spec(chars)
- except LookupError, name:
+ except LookupError as name:
tkMessageBox.showerror(
title="Error loading the file",
message="The encoding '%s' is not known to this Python "\
@@ -378,12 +383,10 @@ class IOBinding:
if self.eol_convention != "\n":
chars = chars.replace("\n", self.eol_convention)
try:
- f = open(filename, "wb")
- f.write(chars)
- f.flush()
- f.close()
+ with open(filename, "wb") as f:
+ f.write(chars)
return True
- except IOError, msg:
+ except IOError as msg:
tkMessageBox.showerror("I/O Error", str(msg),
master=self.text)
return False
@@ -403,7 +406,7 @@ class IOBinding:
try:
enc = coding_spec(chars)
failed = None
- except LookupError, msg:
+ except LookupError as msg:
failed = msg
enc = None
if enc:
@@ -499,7 +502,7 @@ class IOBinding:
else: #no printing for this platform
printPlatform = False
if printPlatform: #we can try to print for this platform
- command = command % filename
+ command = command % pipes.quote(filename)
pipe = os.popen(command, "r")
# things can get ugly on NT if there is no printer available.
output = pipe.read().strip()
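
The print command above is now built with pipes.quote, so file names containing spaces or shell metacharacters survive the trip through os.popen. A tiny illustration with a hypothetical file name:

import pipes

command = "lpr %s"                        # platform-specific template
filename = "My Report (draft).py"         # hypothetical file name
print(command % filename)                 # unquoted: the shell would split this
print(command % pipes.quote(filename))    # lpr 'My Report (draft).py'
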
@@ -562,16 +565,17 @@ class IOBinding:
"Update recent file list on all editor windows"
self.editwin.update_recent_files_list(filename)
-def test():
+def _io_binding(parent):
root = Tk()
+ root.title("Test IOBinding")
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
class MyEditWin:
def __init__(self, text):
self.text = text
self.flist = None
self.text.bind("<Control-o>", self.open)
self.text.bind("<Control-s>", self.save)
- self.text.bind("<Alt-s>", self.save_as)
- self.text.bind("<Alt-z>", self.save_a_copy)
def get_saved(self): return 0
def set_saved(self, flag): pass
def reset_undo(self): pass
@@ -579,16 +583,13 @@ def test():
self.text.event_generate("<<open-window-from-file>>")
def save(self, event):
self.text.event_generate("<<save-window>>")
- def save_as(self, event):
- self.text.event_generate("<<save-window-as-file>>")
- def save_a_copy(self, event):
- self.text.event_generate("<<save-copy-of-window-as-file>>")
+
text = Text(root)
text.pack()
text.focus_set()
editwin = MyEditWin(text)
io = IOBinding(editwin)
- root.mainloop()
if __name__ == "__main__":
- test()
+ from idlelib.idle_test.htest import run
+ run(_io_binding)
diff --git a/Lib/idlelib/Icons/idle.ico b/Lib/idlelib/Icons/idle.ico
new file mode 100644
index 0000000..3357aef
--- /dev/null
+++ b/Lib/idlelib/Icons/idle.ico
Binary files differ
diff --git a/Lib/idlelib/Icons/idle_16.gif b/Lib/idlelib/Icons/idle_16.gif
new file mode 100644
index 0000000..9f001b1
--- /dev/null
+++ b/Lib/idlelib/Icons/idle_16.gif
Binary files differ
diff --git a/Lib/idlelib/Icons/idle_16.png b/Lib/idlelib/Icons/idle_16.png
new file mode 100644
index 0000000..6abde0a
--- /dev/null
+++ b/Lib/idlelib/Icons/idle_16.png
Binary files differ
diff --git a/Lib/idlelib/Icons/idle_32.gif b/Lib/idlelib/Icons/idle_32.gif
new file mode 100644
index 0000000..af5b2d5
--- /dev/null
+++ b/Lib/idlelib/Icons/idle_32.gif
Binary files differ
diff --git a/Lib/idlelib/Icons/idle_32.png b/Lib/idlelib/Icons/idle_32.png
new file mode 100644
index 0000000..41b70db
--- /dev/null
+++ b/Lib/idlelib/Icons/idle_32.png
Binary files differ
diff --git a/Lib/idlelib/Icons/idle_48.gif b/Lib/idlelib/Icons/idle_48.gif
new file mode 100644
index 0000000..fc5304f
--- /dev/null
+++ b/Lib/idlelib/Icons/idle_48.gif
Binary files differ
diff --git a/Lib/idlelib/Icons/idle_48.png b/Lib/idlelib/Icons/idle_48.png
new file mode 100644
index 0000000..e5fa928
--- /dev/null
+++ b/Lib/idlelib/Icons/idle_48.png
Binary files differ
diff --git a/Lib/idlelib/Icons/python.gif b/Lib/idlelib/Icons/python.gif
index 58271ed..b189c2c 100644
--- a/Lib/idlelib/Icons/python.gif
+++ b/Lib/idlelib/Icons/python.gif
Binary files differ
diff --git a/Lib/idlelib/IdleHistory.py b/Lib/idlelib/IdleHistory.py
index 983a140..078af29 100644
--- a/Lib/idlelib/IdleHistory.py
+++ b/Lib/idlelib/IdleHistory.py
@@ -1,81 +1,93 @@
+"Implement Idle Shell history mechanism with History class"
+
from idlelib.configHandler import idleConf
class History:
+ ''' Implement Idle Shell history mechanism.
+
+ store - Store source statement (called from PyShell.resetoutput).
+ fetch - Fetch stored statement matching prefix already entered.
+ history_next - Bound to <<history-next>> event (default Alt-N).
+ history_prev - Bound to <<history-prev>> event (default Alt-P).
+ '''
+ def __init__(self, text):
+ '''Initialize data attributes and bind event methods.
- def __init__(self, text, output_sep = "\n"):
+ .text - Idle wrapper of tk Text widget, with .bell().
+ .history - source statements, possibly with multiple lines.
+ .prefix - source already entered at prompt; filters history list.
+ .pointer - index into history.
+ .cyclic - wrap around history list (or not).
+ '''
self.text = text
self.history = []
- self.history_prefix = None
- self.history_pointer = None
- self.output_sep = output_sep
+ self.prefix = None
+ self.pointer = None
self.cyclic = idleConf.GetOption("main", "History", "cyclic", 1, "bool")
text.bind("<<history-previous>>", self.history_prev)
text.bind("<<history-next>>", self.history_next)
def history_next(self, event):
- self.history_do(0)
+ "Fetch later statement; start with earliest if cyclic."
+ self.fetch(reverse=False)
return "break"
def history_prev(self, event):
- self.history_do(1)
+ "Fetch earlier statement; start with most recent."
+ self.fetch(reverse=True)
return "break"
- def _get_source(self, start, end):
- # Get source code from start index to end index. Lines in the
- # text control may be separated by sys.ps2 .
- lines = self.text.get(start, end).split(self.output_sep)
- return "\n".join(lines)
+ def fetch(self, reverse):
+ '''Fetch statement and replace current line in text widget.
- def _put_source(self, where, source):
- output = self.output_sep.join(source.split("\n"))
- self.text.insert(where, output)
-
- def history_do(self, reverse):
+ Set prefix and pointer as needed for successive fetches.
+ Reset them to None, None when returning to the start line.
+ Sound the bell when returning to the start line, or when unable
+ to leave a line because cyclic is False.
+ '''
nhist = len(self.history)
- pointer = self.history_pointer
- prefix = self.history_prefix
+ pointer = self.pointer
+ prefix = self.prefix
if pointer is not None and prefix is not None:
if self.text.compare("insert", "!=", "end-1c") or \
- self._get_source("iomark", "end-1c") != self.history[pointer]:
+ self.text.get("iomark", "end-1c") != self.history[pointer]:
pointer = prefix = None
+ self.text.mark_set("insert", "end-1c") # != after cursor move
if pointer is None or prefix is None:
- prefix = self._get_source("iomark", "end-1c")
+ prefix = self.text.get("iomark", "end-1c")
if reverse:
- pointer = nhist
+ pointer = nhist # will be decremented
else:
if self.cyclic:
- pointer = -1
- else:
+ pointer = -1 # will be incremented
+ else: # abort history_next
self.text.bell()
return
nprefix = len(prefix)
while 1:
- if reverse:
- pointer = pointer - 1
- else:
- pointer = pointer + 1
+ pointer += -1 if reverse else 1
if pointer < 0 or pointer >= nhist:
self.text.bell()
- if not self.cyclic and pointer < 0:
+ if not self.cyclic and pointer < 0: # abort history_prev
return
else:
- if self._get_source("iomark", "end-1c") != prefix:
+ if self.text.get("iomark", "end-1c") != prefix:
self.text.delete("iomark", "end-1c")
- self._put_source("iomark", prefix)
+ self.text.insert("iomark", prefix)
pointer = prefix = None
break
item = self.history[pointer]
if item[:nprefix] == prefix and len(item) > nprefix:
self.text.delete("iomark", "end-1c")
- self._put_source("iomark", item)
+ self.text.insert("iomark", item)
break
- self.text.mark_set("insert", "end-1c")
self.text.see("insert")
self.text.tag_remove("sel", "1.0", "end")
- self.history_pointer = pointer
- self.history_prefix = prefix
+ self.pointer = pointer
+ self.prefix = prefix
- def history_store(self, source):
+ def store(self, source):
+ "Store Shell input statement into history list."
source = source.strip()
if len(source) > 2:
# avoid duplicates
@@ -84,5 +96,9 @@ class History:
except ValueError:
pass
self.history.append(source)
- self.history_pointer = None
- self.history_prefix = None
+ self.pointer = None
+ self.prefix = None
+
+if __name__ == "__main__":
+ from unittest import main
+ main('idlelib.idle_test.test_idlehistory', verbosity=2, exit=False)
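
The renamed History attributes (prefix, pointer) drive a prefix-filtered walk back through stored statements. The Tk-free sketch below models just that walk; it is an illustration of the behaviour with made-up names, not the class itself, and it omits the cyclic wrap-around and forward direction:

class HistorySketch(object):
    """Tk-free model of the prefix-filtered history walk."""
    def __init__(self):
        self.history = []
        self.pointer = None     # index of last fetched statement
        self.prefix = None      # text already typed at the prompt

    def store(self, source):
        source = source.strip()
        if len(source) > 2:
            if source in self.history:          # avoid duplicates
                self.history.remove(source)
            self.history.append(source)
        self.pointer = self.prefix = None

    def fetch_prev(self, typed):
        """Return the most recent earlier statement starting with `typed`."""
        if self.pointer is None:
            self.prefix, self.pointer = typed, len(self.history)
        pointer = self.pointer
        while pointer > 0:
            pointer -= 1
            item = self.history[pointer]
            if item.startswith(self.prefix) and len(item) > len(self.prefix):
                self.pointer = pointer
                return item
        return None                              # the real class rings the bell

h = HistorySketch()
h.store("print('spam')")
h.store("x = 42")
h.store("print('eggs')")
print(h.fetch_prev("print"))    # print('eggs')
print(h.fetch_prev("print"))    # print('spam')
print(h.fetch_prev("print"))    # None
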
diff --git a/Lib/idlelib/MultiCall.py b/Lib/idlelib/MultiCall.py
index b81c5ed..31a4fb7 100644
--- a/Lib/idlelib/MultiCall.py
+++ b/Lib/idlelib/MultiCall.py
@@ -33,7 +33,6 @@ import sys
import string
import re
import Tkinter
-from idlelib import macosxSupport
# the event type constants, which define the meaning of mc_type
MC_KEYPRESS=0; MC_KEYRELEASE=1; MC_BUTTONPRESS=2; MC_BUTTONRELEASE=3;
@@ -46,7 +45,7 @@ MC_SHIFT = 1<<0; MC_CONTROL = 1<<2; MC_ALT = 1<<3; MC_META = 1<<5
MC_OPTION = 1<<6; MC_COMMAND = 1<<7
# define the list of modifiers, to be used in complex event types.
-if macosxSupport.runningAsOSXApp():
+if sys.platform == "darwin":
_modifiers = (("Shift",), ("Control",), ("Option",), ("Command",))
_modifier_masks = (MC_SHIFT, MC_CONTROL, MC_OPTION, MC_COMMAND)
else:
@@ -171,8 +170,9 @@ class _ComplexBinder:
break
ishandlerrunning[:] = []
# Call all functions in doafterhandler and remove them from list
- while doafterhandler:
- doafterhandler.pop()()
+ for f in doafterhandler:
+ f()
+ doafterhandler[:] = []
if r:
return r
return handler
@@ -397,9 +397,12 @@ def MultiCallCreator(widget):
_multicall_dict[widget] = MultiCall
return MultiCall
-if __name__ == "__main__":
- # Test
+
+def _multi_call(parent):
root = Tkinter.Tk()
+ root.title("Test MultiCall")
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
text = MultiCallCreator(Tkinter.Text)(root)
text.pack()
def bindseq(seq, n=[0]):
@@ -415,8 +418,13 @@ if __name__ == "__main__":
bindseq("<Alt-Control-Key-a>")
bindseq("<Key-b>")
bindseq("<Control-Button-1>")
+ bindseq("<Button-2>")
bindseq("<Alt-Button-1>")
bindseq("<FocusOut>")
bindseq("<Enter>")
bindseq("<Leave>")
root.mainloop()
+
+if __name__ == "__main__":
+ from idlelib.idle_test.htest import run
+ run(_multi_call)
diff --git a/Lib/idlelib/MultiStatusBar.py b/Lib/idlelib/MultiStatusBar.py
index 8ee2d03..df136b8 100644
--- a/Lib/idlelib/MultiStatusBar.py
+++ b/Lib/idlelib/MultiStatusBar.py
@@ -17,16 +17,29 @@ class MultiStatusBar(Frame):
label = self.labels[name]
label.config(text=text)
-def _test():
- b = Frame()
- c = Text(b)
- c.pack(side=TOP)
- a = MultiStatusBar(b)
- a.set_label("one", "hello")
- a.set_label("two", "world")
- a.pack(side=BOTTOM, fill=X)
- b.pack()
- b.mainloop()
+def _multistatus_bar(parent):
+ root = Tk()
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d" %(x, y + 150))
+ root.title("Test multistatus bar")
+ frame = Frame(root)
+ text = Text(frame)
+ text.pack()
+ msb = MultiStatusBar(frame)
+ msb.set_label("one", "hello")
+ msb.set_label("two", "world")
+ msb.pack(side=BOTTOM, fill=X)
+
+ def change():
+ msb.set_label("one", "foo")
+ msb.set_label("two", "bar")
+
+ button = Button(root, text="Update status", command=change)
+ button.pack(side=BOTTOM)
+ frame.pack()
+ frame.mainloop()
+ root.mainloop()
if __name__ == '__main__':
- _test()
+ from idlelib.idle_test.htest import run
+ run(_multistatus_bar)
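
Each converted demo above now exposes a `_something(parent)` callable that builds its window next to the htest parent instead of running a private test harness. A hedged sketch of that convention follows; the function name, window title, and 150-pixel offset are illustrative, and running it requires a Tk display:

import re
from Tkinter import Tk, Text

def _demo_window(parent):  # htest callable: receives the htest control window
    root = Tk()
    root.title("Test demo window")
    # Place the new window just below the htest control window.
    width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
    root.geometry("+%d+%d" % (x, y + 150))
    text = Text(root, height=5)
    text.insert("1.0", "human-test widget goes here")
    text.pack()
    root.mainloop()

if __name__ == '__main__':
    from idlelib.idle_test.htest import run
    run(_demo_window)
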
diff --git a/Lib/idlelib/NEWS.txt b/Lib/idlelib/NEWS.txt
index 46ef3cb..07ab083 100644
--- a/Lib/idlelib/NEWS.txt
+++ b/Lib/idlelib/NEWS.txt
@@ -1,5 +1,45 @@
+What's New in IDLE 2.7.5?
+=========================
+
+- Issue #17390: Display Python version on Idle title bar.
+ Initial patch by Edmond Burnett.
+
+
+What's New in IDLE 2.7.4?
+=========================
+
+- Issue #15318: Prevent writing to sys.stdin.
+
+- Issue #13532, #15319: Check that arguments to sys.stdout.write are strings.
+
+- Issue # 12510: Attempt to get certain tool tips no longer crashes IDLE.
+
+- Issue10365: File open dialog now works instead of crashing even when
+ parent window is closed while dialog is open.
+
+- Issue 14876: use user-selected font for highlight configuration.
+
+- Issue #14018: Update checks for unstable system Tcl/Tk versions on OS X
+ to include versions shipped with OS X 10.7 and 10.8 in addition to 10.6.
+
+- Issue #15853: Prevent IDLE crash on OS X when opening Preferences menu
+ with certain versions of Tk 8.5. Initial patch by Kevin Walzer.
+
+
+What's New in IDLE 2.7.3?
+=========================
+
+- Issue #14409: IDLE now properly executes commands in the Shell window
+ when it cannot read the normal config files on startup and
+ has to use the built-in default key bindings.
+ There was previously a bug in one of the defaults.
+
+- Issue #3573: IDLE hangs when passing invalid command line args
+ (directory(ies) instead of file(s)).
+
+
What's New in IDLE 2.7.2?
-=======================
+=========================
*Release date: 29-May-2011*
diff --git a/Lib/idlelib/ObjectBrowser.py b/Lib/idlelib/ObjectBrowser.py
index 7de6988..e69365c 100644
--- a/Lib/idlelib/ObjectBrowser.py
+++ b/Lib/idlelib/ObjectBrowser.py
@@ -9,6 +9,8 @@
# XXX TO DO:
# - for classes/modules, add "open source" to object browser
+import re
+
from idlelib.TreeWidget import TreeItem, TreeNode, ScrolledCanvas
from repr import Repr
@@ -132,12 +134,14 @@ def make_objecttreeitem(labeltext, object, setfunction=None):
c = ObjectTreeItem
return c(labeltext, object, setfunction)
-# Test script
-def _test():
+def _object_browser(parent):
import sys
from Tkinter import Tk
root = Tk()
+ root.title("Test ObjectBrowser")
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
root.configure(bd=0, bg="yellow")
root.focus_set()
sc = ScrolledCanvas(root, bg="white", highlightthickness=0, takefocus=1)
@@ -148,4 +152,5 @@ def _test():
root.mainloop()
if __name__ == '__main__':
- _test()
+ from idlelib.idle_test.htest import run
+ run(_object_browser)
diff --git a/Lib/idlelib/OutputWindow.py b/Lib/idlelib/OutputWindow.py
index 60d09c0..e18d846 100644
--- a/Lib/idlelib/OutputWindow.py
+++ b/Lib/idlelib/OutputWindow.py
@@ -57,7 +57,11 @@ class OutputWindow(EditorWindow):
# Our own right-button menu
rmenu_specs = [
- ("Go to file/line", "<<goto-file-line>>"),
+ ("Cut", "<<cut>>", "rmenu_check_cut"),
+ ("Copy", "<<copy>>", "rmenu_check_copy"),
+ ("Paste", "<<paste>>", "rmenu_check_paste"),
+ (None, None, None),
+ ("Go to file/line", "<<goto-file-line>>", None),
]
file_line_pats = [
diff --git a/Lib/idlelib/ParenMatch.py b/Lib/idlelib/ParenMatch.py
index 6d91b39..19bad8c 100644
--- a/Lib/idlelib/ParenMatch.py
+++ b/Lib/idlelib/ParenMatch.py
@@ -90,7 +90,8 @@ class ParenMatch:
self.set_timeout = self.set_timeout_none
def flash_paren_event(self, event):
- indices = HyperParser(self.editwin, "insert").get_surrounding_brackets()
+ indices = (HyperParser(self.editwin, "insert")
+ .get_surrounding_brackets())
if indices is None:
self.warn_mismatched()
return
@@ -167,6 +168,11 @@ class ParenMatch:
# associate a counter with an event; only disable the "paren"
# tag if the event is for the most recent timer.
self.counter += 1
- self.editwin.text_frame.after(self.FLASH_DELAY,
- lambda self=self, c=self.counter: \
- self.handle_restore_timer(c))
+ self.editwin.text_frame.after(
+ self.FLASH_DELAY,
+ lambda self=self, c=self.counter: self.handle_restore_timer(c))
+
+
+if __name__ == '__main__':
+ import unittest
+ unittest.main('idlelib.idle_test.test_parenmatch', verbosity=2)
diff --git a/Lib/idlelib/PathBrowser.py b/Lib/idlelib/PathBrowser.py
index d88a48e..ef7f8ff 100644
--- a/Lib/idlelib/PathBrowser.py
+++ b/Lib/idlelib/PathBrowser.py
@@ -4,10 +4,16 @@ import imp
from idlelib.TreeWidget import TreeItem
from idlelib.ClassBrowser import ClassBrowser, ModuleBrowserTreeItem
+from idlelib.PyShell import PyShellFileList
+
class PathBrowser(ClassBrowser):
- def __init__(self, flist):
+ def __init__(self, flist, _htest=False):
+ """
+ _htest - bool, change box location when running htest
+ """
+ self._htest = _htest
self.init(flist)
def settitle(self):
@@ -85,11 +91,14 @@ class DirBrowserTreeItem(TreeItem):
sorted.sort()
return sorted
-def main():
- from idlelib import PyShell
- PathBrowser(PyShell.flist)
- if sys.stdin is sys.__stdin__:
- mainloop()
+def _path_browser(parent):
+ flist = PyShellFileList(parent)
+ PathBrowser(flist, _htest=True)
+ parent.mainloop()
if __name__ == "__main__":
- main()
+ from unittest import main
+ main('idlelib.idle_test.test_pathbrowser', verbosity=2, exit=False)
+
+ from idlelib.idle_test.htest import run
+ run(_path_browser)
diff --git a/Lib/idlelib/Percolator.py b/Lib/idlelib/Percolator.py
index e24689b..e0e8cad 100644
--- a/Lib/idlelib/Percolator.py
+++ b/Lib/idlelib/Percolator.py
@@ -52,7 +52,9 @@ class Percolator:
filter.setdelegate(None)
-def main():
+def _percolator(parent):
+ import Tkinter as tk
+ import re
class Tracer(Delegator):
def __init__(self, name):
self.name = name
@@ -63,23 +65,39 @@ def main():
def delete(self, *args):
print self.name, ": delete", args
self.delegate.delete(*args)
- root = Tk()
- root.wm_protocol("WM_DELETE_WINDOW", root.quit)
- text = Text()
- text.pack()
- text.focus_set()
+ root = tk.Tk()
+ root.title("Test Percolator")
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
+ text = tk.Text(root)
p = Percolator(text)
t1 = Tracer("t1")
t2 = Tracer("t2")
- p.insertfilter(t1)
- p.insertfilter(t2)
- root.mainloop()
- p.removefilter(t2)
- root.mainloop()
- p.insertfilter(t2)
- p.removefilter(t1)
- root.mainloop()
+
+ def toggle1():
+ if var1.get() == 0:
+ var1.set(1)
+ p.insertfilter(t1)
+ elif var1.get() == 1:
+ var1.set(0)
+ p.removefilter(t1)
+
+ def toggle2():
+ if var2.get() == 0:
+ var2.set(1)
+ p.insertfilter(t2)
+ elif var2.get() == 1:
+ var2.set(0)
+ p.removefilter(t2)
+
+ text.pack()
+ var1 = tk.IntVar()
+ cb1 = tk.Checkbutton(root, text="Tracer1", command=toggle1, variable=var1)
+ cb1.pack()
+ var2 = tk.IntVar()
+ cb2 = tk.Checkbutton(root, text="Tracer2", command=toggle2, variable=var2)
+ cb2.pack()
if __name__ == "__main__":
- from Tkinter import *
- main()
+ from idlelib.idle_test.htest import run
+ run(_percolator)
diff --git a/Lib/idlelib/PyShell.py b/Lib/idlelib/PyShell.py
index 895d7da..0d94944 100644..100755
--- a/Lib/idlelib/PyShell.py
+++ b/Lib/idlelib/PyShell.py
@@ -11,9 +11,11 @@ import time
import threading
import traceback
import types
+import io
import linecache
from code import InteractiveInterpreter
+from platform import python_version, system
try:
from Tkinter import *
@@ -48,35 +50,55 @@ except ImportError:
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
-global warning_stream
-warning_stream = sys.__stderr__
-try:
- import warnings
-except ImportError:
- pass
-else:
- def idle_showwarning(message, category, filename, lineno,
- file=None, line=None):
- if file is None:
- file = warning_stream
- try:
- file.write(warnings.formatwarning(message, category, filename,
- lineno, line=line))
- except IOError:
- pass ## file (probably __stderr__) is invalid, warning dropped.
- warnings.showwarning = idle_showwarning
- def idle_formatwarning(message, category, filename, lineno, line=None):
- """Format warnings the IDLE way"""
- s = "\nWarning (from warnings module):\n"
- s += ' File \"%s\", line %s\n' % (filename, lineno)
- if line is None:
- line = linecache.getline(filename, lineno)
- line = line.strip()
- if line:
- s += " %s\n" % line
- s += "%s: %s\n>>> " % (category.__name__, message)
- return s
- warnings.formatwarning = idle_formatwarning
+warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
+import warnings
+
+def idle_formatwarning(message, category, filename, lineno, line=None):
+ """Format warnings the IDLE way."""
+
+ s = "\nWarning (from warnings module):\n"
+ s += ' File \"%s\", line %s\n' % (filename, lineno)
+ if line is None:
+ line = linecache.getline(filename, lineno)
+ line = line.strip()
+ if line:
+ s += " %s\n" % line
+ s += "%s: %s\n" % (category.__name__, message)
+ return s
+
+def idle_showwarning(
+ message, category, filename, lineno, file=None, line=None):
+ """Show Idle-format warning (after replacing warnings.showwarning).
+
+ The differences are the formatter called, the file=None replacement,
+ which can be None, the capture of the consequence AttributeError,
+ and the output of a hard-coded prompt.
+ """
+ if file is None:
+ file = warning_stream
+ try:
+ file.write(idle_formatwarning(
+ message, category, filename, lineno, line=line))
+ file.write(">>> ")
+ except (AttributeError, IOError):
+ pass # if file (probably __stderr__) is invalid, skip warning.
+
+_warnings_showwarning = None
+
+def capture_warnings(capture):
+ "Replace warning.showwarning with idle_showwarning, or reverse."
+
+ global _warnings_showwarning
+ if capture:
+ if _warnings_showwarning is None:
+ _warnings_showwarning = warnings.showwarning
+ warnings.showwarning = idle_showwarning
+ else:
+ if _warnings_showwarning is not None:
+ warnings.showwarning = _warnings_showwarning
+ _warnings_showwarning = None
+
+capture_warnings(True)
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
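
capture_warnings saves the original warnings.showwarning exactly once and can restore it, so toggling is idempotent in either direction. The same save/replace/restore shape, reduced to a standalone sketch with illustrative names:

import warnings

_original_showwarning = None

def my_showwarning(message, category, filename, lineno, file=None, line=None):
    print("WARNING %s:%s: %s: %s" % (filename, lineno, category.__name__, message))

def capture(on):
    """Install my_showwarning, or restore the saved original."""
    global _original_showwarning
    if on:
        if _original_showwarning is None:
            _original_showwarning = warnings.showwarning
            warnings.showwarning = my_showwarning
    else:
        if _original_showwarning is not None:
            warnings.showwarning = _original_showwarning
            _original_showwarning = None

capture(True)
warnings.warn("demo warning")   # routed through my_showwarning
capture(False)
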
@@ -114,15 +136,21 @@ class PyShellEditorWindow(EditorWindow):
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
- if self.io.filename: self.restore_file_breaks()
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
+ if self.io.filename:
+ self.restore_file_breaks()
- rmenu_specs = [("Set Breakpoint", "<<set-breakpoint-here>>"),
- ("Clear Breakpoint", "<<clear-breakpoint-here>>")]
+ rmenu_specs = [
+ ("Cut", "<<cut>>", "rmenu_check_cut"),
+ ("Copy", "<<copy>>", "rmenu_check_copy"),
+ ("Paste", "<<paste>>", "rmenu_check_paste"),
+ ("Set Breakpoint", "<<set-breakpoint-here>>", None),
+ ("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
+ ]
def set_breakpoint(self, lineno):
text = self.text
@@ -230,6 +258,9 @@ class PyShellEditorWindow(EditorWindow):
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
+ if self.io is None:
+ # can happen if IDLE closes due to the .update() call
+ return
filename = self.io.filename
if filename is None:
return
@@ -251,8 +282,8 @@ class PyShellEditorWindow(EditorWindow):
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
- lineno = int(float(ranges[index]))
- end = int(float(ranges[index+1]))
+ lineno = int(float(ranges[index].string))
+ end = int(float(ranges[index+1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
@@ -313,6 +344,11 @@ class ModifiedColorDelegator(ColorDelegator):
"console": idleConf.GetHighlight(theme, "console"),
})
+ def removecolors(self):
+ # Don't remove shell color tags before "iomark"
+ for tag in self.tagdefs:
+ self.tag_remove(tag, "iomark", "end")
+
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
@@ -354,6 +390,7 @@ class ModifiedInterpreter(InteractiveInterpreter):
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
+ _afterid = None
rpcclt = None
rpcpid = None
@@ -393,7 +430,7 @@ class ModifiedInterpreter(InteractiveInterpreter):
try:
self.rpcclt = MyRPCClient(addr)
break
- except socket.error, err:
+ except socket.error as err:
pass
else:
self.display_port_binding_error()
@@ -414,10 +451,11 @@ class ModifiedInterpreter(InteractiveInterpreter):
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
- except socket.timeout, err:
+ except socket.timeout as err:
self.display_no_subprocess_error()
return None
- self.rpcclt.register("stdin", self.tkconsole)
+ self.rpcclt.register("console", self.tkconsole)
+ self.rpcclt.register("stdin", self.tkconsole.stdin)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
@@ -448,10 +486,11 @@ class ModifiedInterpreter(InteractiveInterpreter):
self.spawn_subprocess()
try:
self.rpcclt.accept()
- except socket.timeout, err:
+ except socket.timeout as err:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
+ console.stop_readline()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
@@ -479,6 +518,8 @@ class ModifiedInterpreter(InteractiveInterpreter):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
+ if self._afterid is not None:
+ self.tkconsole.text.after_cancel(self._afterid)
try:
self.rpcclt.close()
except AttributeError: # no socket
@@ -551,8 +592,8 @@ class ModifiedInterpreter(InteractiveInterpreter):
pass
# Reschedule myself
if not self.tkconsole.closing:
- self.tkconsole.text.after(self.tkconsole.pollinterval,
- self.poll_subprocess)
+ self._afterid = self.tkconsole.text.after(
+ self.tkconsole.pollinterval, self.poll_subprocess)
debugger = None
@@ -809,7 +850,7 @@ class ModifiedInterpreter(InteractiveInterpreter):
class PyShell(OutputWindow):
- shell_title = "Python Shell"
+ shell_title = "Python " + python_version() + " Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
@@ -825,8 +866,7 @@ class PyShell(OutputWindow):
("help", "_Help"),
]
- if macosxSupport.runningAsOSXApp():
- del menu_specs[-3]
+ if sys.platform == "darwin":
menu_specs[-2] = ("windows", "_Window")
@@ -870,13 +910,14 @@ class PyShell(OutputWindow):
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import IOBinding
- self.stdout = PseudoFile(self, "stdout", IOBinding.encoding)
- self.stderr = PseudoFile(self, "stderr", IOBinding.encoding)
- self.console = PseudoFile(self, "console", IOBinding.encoding)
+ self.stdin = PseudoInputFile(self, "stdin", IOBinding.encoding)
+ self.stdout = PseudoOutputFile(self, "stdout", IOBinding.encoding)
+ self.stderr = PseudoOutputFile(self, "stderr", IOBinding.encoding)
+ self.console = PseudoOutputFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
- sys.stdin = self
+ sys.stdin = self.stdin
#
self.history = self.History(self.text)
#
@@ -890,6 +931,7 @@ class PyShell(OutputWindow):
canceled = False
endoffile = False
closing = False
+ _stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
@@ -965,14 +1007,9 @@ class PyShell(OutputWindow):
parent=self.text)
if response is False:
return "cancel"
- if self.reading:
- self.top.quit()
+ self.stop_readline()
self.canceled = True
self.closing = True
- # Wait for poll_subprocess() rescheduling to stop
- self.text.after(2 * self.pollinterval, self.close2)
-
- def close2(self):
return EditorWindow.close(self)
def _close(self):
@@ -1018,6 +1055,12 @@ class PyShell(OutputWindow):
Tkinter._default_root = None # 03Jan04 KBK What's this?
return True
+ def stop_readline(self):
+ if not self.reading: # no nested mainloop to exit.
+ return
+ self._stop_readline_flag = True
+ self.top.quit()
+
def readline(self):
save = self.reading
try:
@@ -1025,6 +1068,9 @@ class PyShell(OutputWindow):
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
+ if self._stop_readline_flag:
+ self._stop_readline_flag = False
+ return ""
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
@@ -1232,7 +1278,7 @@ class PyShell(OutputWindow):
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
- self.history.history_store(source)
+ self.history.store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
@@ -1251,27 +1297,111 @@ class PyShell(OutputWindow):
if not use_subprocess:
raise KeyboardInterrupt
-class PseudoFile(object):
+ def rmenu_check_cut(self):
+ try:
+ if self.text.compare('sel.first', '<', 'iomark'):
+ return 'disabled'
+ except TclError: # no selection, so the index 'sel.first' doesn't exist
+ return 'disabled'
+ return super(PyShell, self).rmenu_check_cut()
+
+ def rmenu_check_paste(self):
+ if self.text.compare('insert', '<', 'iomark'):
+ return 'disabled'
+ return super(PyShell, self).rmenu_check_paste()
+
+class PseudoFile(io.TextIOBase):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.softspace = 0
- self.encoding = encoding
+ self._encoding = encoding
+
+ @property
+ def encoding(self):
+ return self._encoding
+
+ @property
+ def name(self):
+ return '<%s>' % self.tags
+
+ def isatty(self):
+ return True
+
+
+class PseudoOutputFile(PseudoFile):
+
+ def writable(self):
+ return True
def write(self, s):
- self.shell.write(s, self.tags)
+ if self.closed:
+ raise ValueError("write to closed file")
+ if type(s) not in (unicode, str, bytearray):
+ # See issue #19481
+ if isinstance(s, unicode):
+ s = unicode.__getslice__(s, None, None)
+ elif isinstance(s, str):
+ s = str.__str__(s)
+ elif isinstance(s, bytearray):
+ s = bytearray.__str__(s)
+ else:
+ raise TypeError('must be string, not ' + type(s).__name__)
+ return self.shell.write(s, self.tags)
- def writelines(self, lines):
- for line in lines:
- self.write(line)
- def flush(self):
- pass
+class PseudoInputFile(PseudoFile):
- def isatty(self):
+ def __init__(self, shell, tags, encoding=None):
+ PseudoFile.__init__(self, shell, tags, encoding)
+ self._line_buffer = ''
+
+ def readable(self):
return True
+ def read(self, size=-1):
+ if self.closed:
+ raise ValueError("read from closed file")
+ if size is None:
+ size = -1
+ elif not isinstance(size, int):
+ raise TypeError('must be int, not ' + type(size).__name__)
+ result = self._line_buffer
+ self._line_buffer = ''
+ if size < 0:
+ while True:
+ line = self.shell.readline()
+ if not line: break
+ result += line
+ else:
+ while len(result) < size:
+ line = self.shell.readline()
+ if not line: break
+ result += line
+ self._line_buffer = result[size:]
+ result = result[:size]
+ return result
+
+ def readline(self, size=-1):
+ if self.closed:
+ raise ValueError("read from closed file")
+ if size is None:
+ size = -1
+ elif not isinstance(size, int):
+ raise TypeError('must be int, not ' + type(size).__name__)
+ line = self._line_buffer or self.shell.readline()
+ if size < 0:
+ size = len(line)
+ eol = line.find('\n', 0, size)
+ if eol >= 0:
+ size = eol + 1
+ self._line_buffer = line[size:]
+ return line[:size]
+
+ def close(self):
+ self.shell.close()
+
usage_msg = """\
@@ -1329,8 +1459,9 @@ echo "import sys; print sys.argv" | idle - "foobar"
def main():
global flist, root, use_subprocess
+ capture_warnings(True)
use_subprocess = True
- enable_shell = True
+ enable_shell = False
enable_edit = False
debug = False
cmd = None
@@ -1338,7 +1469,7 @@ def main():
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
- except getopt.error, msg:
+ except getopt.error as msg:
sys.stderr.write("Error: %s\n" % str(msg))
sys.stderr.write(usage_msg)
sys.exit(2)
@@ -1351,7 +1482,6 @@ def main():
enable_shell = True
if o == '-e':
enable_edit = True
- enable_shell = False
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
@@ -1402,9 +1532,22 @@ def main():
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
+ enable_shell = enable_shell or not enable_edit
# start editor and/or shell windows:
root = Tk(className="Idle")
+ # set application icon
+ icondir = os.path.join(os.path.dirname(__file__), 'Icons')
+ if system() == 'Windows':
+ iconfile = os.path.join(icondir, 'idle.ico')
+ root.wm_iconbitmap(default=iconfile)
+ elif TkVersion >= 8.5:
+ ext = '.png' if TkVersion >= 8.6 else '.gif'
+ iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
+ for size in (16, 32, 48)]
+ icons = [PhotoImage(file=iconfile) for iconfile in iconfiles]
+ root.tk.call('wm', 'iconphoto', str(root), "-default", *icons)
+
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
@@ -1412,24 +1555,28 @@ def main():
if enable_edit:
if not (cmd or script):
- for filename in args:
- flist.open(filename)
+ for filename in args[:]:
+ if flist.open(filename) is None:
+ # filename is actually a directory; disregard it
+ args.remove(filename)
if not args:
flist.new()
+
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
-
- if macosxSupport.runningAsOSXApp() and flist.dict:
+ if macosxSupport.isAquaTk() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
+ else:
+ shell = flist.pyshell
- shell = flist.pyshell
- # handle remaining options:
+ # Handle remaining options. If any of these are set, enable_shell
+ # was set also, so shell must be true to reach here.
if debug:
shell.open_debugger()
if startup:
@@ -1437,7 +1584,7 @@ def main():
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
- if shell and cmd or script:
+ if cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
@@ -1448,17 +1595,22 @@ def main():
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
-
- # Check for problematic OS X Tk versions and print a warning message
- # in the IDLE shell window; this is less intrusive than always opening
- # a separate window.
- tkversionwarning = macosxSupport.tkVersionWarning(root)
- if tkversionwarning:
- shell.interp.runcommand(''.join(("print('", tkversionwarning, "')")))
-
- root.mainloop()
+ elif shell:
+ # If there is a shell window and no cmd or script in progress,
+ # check for problematic OS X Tk versions and print a warning
+ # message in the IDLE shell window; this is less intrusive
+ # than always opening a separate window.
+ tkversionwarning = macosxSupport.tkVersionWarning(root)
+ if tkversionwarning:
+ shell.interp.runcommand("print('%s')" % tkversionwarning)
+
+ while flist.inversedict: # keep IDLE running while files are open.
+ root.mainloop()
root.destroy()
+ capture_warnings(False)
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
+
+capture_warnings(False) # Make sure turned off; see issue 18081
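
The patch above replaces IDLE's old PseudoFile with separate PseudoOutputFile and PseudoInputFile classes; the input class buffers whole lines and hands back only the requested number of characters. A minimal standalone sketch of that buffering idea, not part of the patch (the _FakeShell and _BufferedInput names are made up):
---
class _FakeShell(object):
    "Stand-in for PyShell: readline() returns one full line, '' at EOF."
    def __init__(self, lines):
        self._lines = list(lines)
    def readline(self):
        return self._lines.pop(0) if self._lines else ''

class _BufferedInput(object):
    "Sketch of PseudoInputFile.read(): keep any excess in a line buffer."
    def __init__(self, shell):
        self.shell = shell
        self._line_buffer = ''
    def read(self, size=-1):
        result, self._line_buffer = self._line_buffer, ''
        while size < 0 or len(result) < size:
            line = self.shell.readline()
            if not line:
                break
            result += line
        if size >= 0:
            self._line_buffer = result[size:]   # saved for the next call
            result = result[:size]
        return result

if __name__ == '__main__':
    f = _BufferedInput(_FakeShell(['spam\n', 'eggs\n']))
    print(f.read(3))    # 'spa'
    print(f.read())     # 'm\neggs\n' -- starts with the buffered remainder
---
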
diff --git a/Lib/idlelib/ReplaceDialog.py b/Lib/idlelib/ReplaceDialog.py
index 2d6c802..21cc927 100644
--- a/Lib/idlelib/ReplaceDialog.py
+++ b/Lib/idlelib/ReplaceDialog.py
@@ -2,6 +2,8 @@ from Tkinter import *
from idlelib import SearchEngine
from idlelib.SearchDialogBase import SearchDialogBase
+import re
+
def replace(text):
root = text._root()
@@ -11,6 +13,7 @@ def replace(text):
dialog = engine._replacedialog
dialog.open(text)
+
class ReplaceDialog(SearchDialogBase):
title = "Replace Dialog"
@@ -55,8 +58,22 @@ class ReplaceDialog(SearchDialogBase):
def default_command(self, event=None):
if self.do_find(self.ok):
- self.do_replace()
- self.do_find(0)
+ if self.do_replace(): # Only find next match if replace succeeded.
+ # A bad re can cause it to fail.
+ self.do_find(0)
+
+ def _replace_expand(self, m, repl):
+ """ Helper function for expanding a regular expression
+ in the replace field, if needed. """
+ if self.engine.isre():
+ try:
+ new = m.expand(repl)
+ except re.error:
+ self.engine.report_error(repl, 'Invalid Replace Expression')
+ new = None
+ else:
+ new = repl
+ return new
def replace_all(self, event=None):
prog = self.engine.getprog()
@@ -86,7 +103,9 @@ class ReplaceDialog(SearchDialogBase):
line, m = res
chars = text.get("%d.0" % line, "%d.0" % (line+1))
orig = m.group()
- new = m.expand(repl)
+ new = self._replace_expand(m, repl)
+ if new is None:
+ break
i, j = m.span()
first = "%d.%d" % (line, i)
last = "%d.%d" % (line, j)
@@ -138,7 +157,9 @@ class ReplaceDialog(SearchDialogBase):
m = prog.match(chars, col)
if not prog:
return False
- new = m.expand(self.replvar.get())
+ new = self._replace_expand(m, self.replvar.get())
+ if new is None:
+ return False
text.mark_set("insert", first)
text.undo_block_start()
if m.group():
@@ -166,3 +187,34 @@ class ReplaceDialog(SearchDialogBase):
def close(self, event=None):
SearchDialogBase.close(self, event)
self.text.tag_remove("hit", "1.0", "end")
+
+def _replace_dialog(parent):
+ root = Tk()
+ root.title("Test ReplaceDialog")
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
+
+ # mock undo delegator methods
+ def undo_block_start():
+ pass
+
+ def undo_block_stop():
+ pass
+
+ text = Text(root)
+ text.undo_block_start = undo_block_start
+ text.undo_block_stop = undo_block_stop
+ text.pack()
+ text.insert("insert","This is a sample string.\n"*10)
+
+ def show_replace():
+ text.tag_add(SEL, "1.0", END)
+ replace(text)
+ text.tag_remove(SEL, "1.0", END)
+
+ button = Button(root, text="Replace", command=show_replace)
+ button.pack()
+
+if __name__ == '__main__':
+ from idlelib.idle_test.htest import run
+ run(_replace_dialog)
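
The _replace_expand() helper added above wraps m.expand() because a replacement template can itself be invalid. A small standalone illustration, not part of the patch, of the error it guards against:
---
import re

m = re.search(r'(\w+) (\w+)', 'hello world')
print(m.expand(r'\2 \1'))      # 'world hello' -- valid template
try:
    m.expand(r'\3')            # the pattern has no group 3
except re.error as err:
    print('Invalid Replace Expression: %s' % err)
---
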
diff --git a/Lib/idlelib/RstripExtension.py b/Lib/idlelib/RstripExtension.py
index 19e35d4..2ce3c7e 100644
--- a/Lib/idlelib/RstripExtension.py
+++ b/Lib/idlelib/RstripExtension.py
@@ -1,13 +1,9 @@
'Provides "Strip trailing whitespace" under the "Format" menu.'
-__author__ = "Roger D. Serwy <roger.serwy at gmail.com>"
-
class RstripExtension:
menudefs = [
- ('format', [None,
- ('Strip trailing whitespace', '<<do-rstrip>>'),
- ]),]
+ ('format', [None, ('Strip trailing whitespace', '<<do-rstrip>>'), ] ), ]
def __init__(self, editwin):
self.editwin = editwin
@@ -20,10 +16,18 @@ class RstripExtension:
undo.undo_block_start()
- end_line = int(float(text.index('end'))) + 1
+ end_line = int(float(text.index('end')))
for cur in range(1, end_line):
- txt = text.get('%i.0' % cur, '%i.0 lineend' % cur)
+ txt = text.get('%i.0' % cur, '%i.end' % cur)
+ raw = len(txt)
cut = len(txt.rstrip())
- text.delete('%i.%i' % (cur, cut), '%i.0 lineend' % cur)
+ # Since text.delete() marks file as changed, even if not,
+ # only call it when needed to actually delete something.
+ if cut < raw:
+ text.delete('%i.%i' % (cur, cut), '%i.end' % cur)
undo.undo_block_stop()
+
+if __name__ == "__main__":
+ import unittest
+ unittest.main('idlelib.idle_test.test_rstrip', verbosity=2, exit=False)
diff --git a/Lib/idlelib/ScriptBinding.py b/Lib/idlelib/ScriptBinding.py
index 01ac474..ab2a3f2 100644
--- a/Lib/idlelib/ScriptBinding.py
+++ b/Lib/idlelib/ScriptBinding.py
@@ -54,7 +54,7 @@ class ScriptBinding:
self.flist = self.editwin.flist
self.root = self.editwin.root
- if macosxSupport.runningAsOSXApp():
+ if macosxSupport.isCocoaTk():
self.editwin.text_frame.bind('<<run-module-event-2>>', self._run_module_event)
def check_module_event(self, event):
@@ -70,13 +70,13 @@ class ScriptBinding:
f = open(filename, 'r')
try:
tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
- except tokenize.TokenError, msg:
+ except tokenize.TokenError as msg:
msgtxt, (lineno, start) = msg
self.editwin.gotoline(lineno)
self.errorbox("Tabnanny Tokenizing Error",
"Token Error: %s" % msgtxt)
return False
- except tabnanny.NannyNag, nag:
+ except tabnanny.NannyNag as nag:
# The error messages from tabnanny are too confusing...
self.editwin.gotoline(nag.get_lineno())
self.errorbox("Tab/space error", indent_message)
@@ -87,9 +87,8 @@ class ScriptBinding:
self.shell = shell = self.flist.open_shell()
saved_stream = shell.get_warning_stream()
shell.set_warning_stream(shell.stderr)
- f = open(filename, 'r')
- source = f.read()
- f.close()
+ with open(filename, 'r') as f:
+ source = f.read()
if '\r' in source:
source = re.sub(r"\r\n", "\n", source)
source = re.sub(r"\r", "\n", source)
@@ -101,7 +100,7 @@ class ScriptBinding:
try:
# If successful, return the compiled code
return compile(source, filename, "exec")
- except (SyntaxError, OverflowError, ValueError), err:
+ except (SyntaxError, OverflowError, ValueError) as err:
try:
msg, (errorfilename, lineno, offset, line) = err
if not errorfilename:
@@ -152,16 +151,16 @@ class ScriptBinding:
dirname = os.path.dirname(filename)
# XXX Too often this discards arguments the user just set...
interp.runcommand("""if 1:
- _filename = %r
+ __file__ = {filename!r}
import sys as _sys
from os.path import basename as _basename
if (not _sys.argv or
- _basename(_sys.argv[0]) != _basename(_filename)):
- _sys.argv = [_filename]
+ _basename(_sys.argv[0]) != _basename(__file__)):
+ _sys.argv = [__file__]
import os as _os
- _os.chdir(%r)
- del _filename, _sys, _basename, _os
- \n""" % (filename, dirname))
+ _os.chdir({dirname!r})
+ del _sys, _basename, _os
+ \n""".format(filename=filename, dirname=dirname))
interp.prepend_syspath(filename)
# XXX KBK 03Jul04 When run w/o subprocess, runtime warnings still
# go to __stderr__. With subprocess, they go to the shell.
@@ -169,7 +168,7 @@ class ScriptBinding:
interp.runcode(code)
return 'break'
- if macosxSupport.runningAsOSXApp():
+ if macosxSupport.isCocoaTk():
# Tk-Cocoa in MacOSX is broken until at least
# Tk 8.5.9, and without this rather
# crude workaround IDLE would hang when a user
diff --git a/Lib/idlelib/ScrolledList.py b/Lib/idlelib/ScrolledList.py
index 9211936..e235661 100644
--- a/Lib/idlelib/ScrolledList.py
+++ b/Lib/idlelib/ScrolledList.py
@@ -119,21 +119,22 @@ class ScrolledList:
pass
-def test():
+def _scrolled_list(parent):
root = Tk()
- root.protocol("WM_DELETE_WINDOW", root.destroy)
+ root.title("Test ScrolledList")
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
class MyScrolledList(ScrolledList):
- def fill_menu(self): self.menu.add_command(label="pass")
+ def fill_menu(self): self.menu.add_command(label="right click")
def on_select(self, index): print "select", self.get(index)
def on_double(self, index): print "double", self.get(index)
- s = MyScrolledList(root)
+
+ scrolled_list = MyScrolledList(root)
for i in range(30):
- s.append("item %02d" % i)
- return root
+ scrolled_list.append("Item %02d" % i)
-def main():
- root = test()
root.mainloop()
if __name__ == '__main__':
- main()
+ from idlelib.idle_test.htest import run
+ run(_scrolled_list)
diff --git a/Lib/idlelib/SearchDialog.py b/Lib/idlelib/SearchDialog.py
index 7c70b84..2aadb84 100644
--- a/Lib/idlelib/SearchDialog.py
+++ b/Lib/idlelib/SearchDialog.py
@@ -24,13 +24,12 @@ class SearchDialog(SearchDialogBase):
def create_widgets(self):
f = SearchDialogBase.create_widgets(self)
- self.make_button("Find", self.default_command, 1)
+ self.make_button("Find Next", self.default_command, 1)
def default_command(self, event=None):
if not self.engine.getprog():
return
- if self.find_again(self.text):
- self.close()
+ self.find_again(self.text)
def find_again(self, text):
if not self.engine.getpat():
@@ -66,3 +65,25 @@ class SearchDialog(SearchDialogBase):
if pat:
self.engine.setcookedpat(pat)
return self.find_again(text)
+
+def _search_dialog(parent):
+ root = Tk()
+ root.title("Test SearchDialog")
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
+ text = Text(root)
+ text.pack()
+ text.insert("insert","This is a sample string.\n"*10)
+
+ def show_find():
+ text.tag_add(SEL, "1.0", END)
+ s = _setup(text)
+ s.open(text)
+ text.tag_remove(SEL, "1.0", END)
+
+ button = Button(root, text="Search", command=show_find)
+ button.pack()
+
+if __name__ == '__main__':
+ from idlelib.idle_test.htest import run
+ run(_search_dialog)
diff --git a/Lib/idlelib/SearchDialogBase.py b/Lib/idlelib/SearchDialogBase.py
index f63e7ae..b90daba 100644
--- a/Lib/idlelib/SearchDialogBase.py
+++ b/Lib/idlelib/SearchDialogBase.py
@@ -1,17 +1,48 @@
-from Tkinter import *
+'''Define SearchDialogBase used by Search, Replace, and Grep dialogs.'''
+
+from Tkinter import (Toplevel, Frame, Entry, Label, Button,
+ Checkbutton, Radiobutton)
class SearchDialogBase:
+ '''Create most of a 3 or 4 row, 3 column search dialog.
+
+ The left and wide middle column contain:
+ 1 or 2 labeled text entry lines (make_entry, create_entries);
+ a row of standard Checkbuttons (make_frame, create_option_buttons),
+ each of which corresponds to a search engine Variable;
+ a row of dialog-specific Check/Radiobuttons (create_other_buttons).
+
+ The narrow right column contains command buttons
+ (make_button, create_command_buttons).
+ These are bound to functions that execute the command.
- title = "Search Dialog"
+ Except for command buttons, this base class is not limited to
+ items common to all three subclasses. Rather, it is the Find dialog
+ minus the "Find Next" command and its execution function.
+ The other dialogs override methods to replace and add widgets.
+ '''
+
+ title = "Search Dialog" # replace in subclasses
icon = "Search"
- needwrapbutton = 1
+ needwrapbutton = 1 # not in Find in Files
def __init__(self, root, engine):
+ '''Initialize root, engine, and top attributes.
+
+ top (level widget): set in create_widgets() called from open().
+ text (Text being searched): set in open(), only used in subclasses.
+ ent (ry): created in make_entry() called from create_entries().
+ row (of grid): 0 in create_widgets(), +1 in make_entry/frame().
+
+ title (of dialog): class attribute, override in subclasses.
+ icon (of dialog): ditto; its use is unclear if the dialog cannot be minimized.
+ '''
self.root = root
self.engine = engine
self.top = None
def open(self, text, searchphrase=None):
+ "Make dialog visible on top of others and ready to use."
self.text = text
if not self.top:
self.create_widgets()
@@ -27,11 +58,17 @@ class SearchDialogBase:
self.top.grab_set()
def close(self, event=None):
+ "Put dialog away for later use."
if self.top:
self.top.grab_release()
self.top.withdraw()
def create_widgets(self):
+ '''Create basic 3 row x 3 col search (find) dialog.
+
+ Other dialogs override subsidiary create_x methods as needed.
+ Replace and Find-in-Files add another entry row.
+ '''
top = Toplevel(self.root)
top.bind("<Return>", self.default_command)
top.bind("<Escape>", self.close)
@@ -44,12 +81,13 @@ class SearchDialogBase:
self.top.grid_columnconfigure(0, pad=2, weight=0)
self.top.grid_columnconfigure(1, pad=2, minsize=100, weight=100)
- self.create_entries()
- self.create_option_buttons()
- self.create_other_buttons()
- return self.create_command_buttons()
+ self.create_entries() # row 0 (and maybe 1), cols 0, 1
+ self.create_option_buttons() # next row, cols 0, 1
+ self.create_other_buttons() # next row, cols 0, 1
+ self.create_command_buttons() # col 2, all rows
def make_entry(self, label, var):
+ "Return gridded labeled Entry."
l = Label(self.top, text=label)
l.grid(row=self.row, column=0, sticky="nw")
e = Entry(self.top, textvariable=var, exportselection=0)
@@ -57,7 +95,12 @@ class SearchDialogBase:
self.row = self.row + 1
return e
+ def create_entries(self):
+ "Create one or more entry lines with make_entry."
+ self.ent = self.make_entry("Find:", self.engine.patvar)
+
def make_frame(self,labeltext=None):
+ "Return gridded labeled Frame for option or other buttons."
if labeltext:
l = Label(self.top, text=labeltext)
l.grid(row=self.row, column=0, sticky="nw")
@@ -66,19 +109,8 @@ class SearchDialogBase:
self.row = self.row + 1
return f
- def make_button(self, label, command, isdef=0):
- b = Button(self.buttonframe,
- text=label, command=command,
- default=isdef and "active" or "normal")
- cols,rows=self.buttonframe.grid_size()
- b.grid(pady=1,row=rows,column=0,sticky="ew")
- self.buttonframe.grid(rowspan=rows+1)
- return b
-
- def create_entries(self):
- self.ent = self.make_entry("Find:", self.engine.patvar)
-
def create_option_buttons(self):
+ "Fill frame with Checkbuttons bound to SearchEngine booleanvars."
f = self.make_frame("Options")
btn = Checkbutton(f, anchor="w",
@@ -111,11 +143,9 @@ class SearchDialogBase:
btn.select()
def create_other_buttons(self):
+ "Fill frame with buttons tied to other options."
f = self.make_frame("Direction")
- #lbl = Label(f, text="Direction: ")
- #lbl.pack(side="left")
-
btn = Radiobutton(f, anchor="w",
variable=self.engine.backvar, value=1,
text="Up")
@@ -130,11 +160,25 @@ class SearchDialogBase:
if not self.engine.isback():
btn.select()
+ def make_button(self, label, command, isdef=0):
+ "Return command button gridded in command frame."
+ b = Button(self.buttonframe,
+ text=label, command=command,
+ default=isdef and "active" or "normal")
+ cols,rows=self.buttonframe.grid_size()
+ b.grid(pady=1,row=rows,column=0,sticky="ew")
+ self.buttonframe.grid(rowspan=rows+1)
+ return b
+
def create_command_buttons(self):
- #
- # place button frame on the right
+ "Place buttons in vertical command frame gridded on right."
f = self.buttonframe = Frame(self.top)
f.grid(row=0,column=2,padx=2,pady=2,ipadx=2,ipady=2)
b = self.make_button("close", self.close)
b.lower()
+
+if __name__ == '__main__':
+ import unittest
+ unittest.main(
+ 'idlelib.idle_test.test_searchdialogbase', verbosity=2)
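
The expanded SearchDialogBase docstring above describes a dialog that subclasses extend by overriding the create_* hooks and adding command buttons. A hedged sketch of such a subclass, not part of the patch; DemoDialog, demovar, and the "Demo" button are invented for illustration, while the base-class methods and the engine attribute come from the code above:
---
from Tkinter import StringVar
from idlelib.SearchDialogBase import SearchDialogBase

class DemoDialog(SearchDialogBase):
    "Illustrative subclass; only the overridden hooks are real API."
    title = "Demo Dialog"

    def create_entries(self):
        SearchDialogBase.create_entries(self)          # the "Find:" row
        self.demovar = StringVar(self.root)
        self.demoent = self.make_entry("Demo field:", self.demovar)

    def create_command_buttons(self):
        SearchDialogBase.create_command_buttons(self)  # right column + "close"
        self.make_button("Demo", self.default_command, 1)

    def default_command(self, event=None):             # bound to <Return>
        print("pattern: %r" % self.engine.getpat())

# Opening it needs a Tk root, a SearchEngine, and a Text widget, e.g.:
#     DemoDialog(root, SearchEngine.get(root)).open(text_widget)
---
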
diff --git a/Lib/idlelib/SearchEngine.py b/Lib/idlelib/SearchEngine.py
index cc40a00..b3b7b4d 100644
--- a/Lib/idlelib/SearchEngine.py
+++ b/Lib/idlelib/SearchEngine.py
@@ -1,26 +1,34 @@
+'''Define SearchEngine for search dialogs.'''
import re
-from Tkinter import *
+from Tkinter import StringVar, BooleanVar, TclError
import tkMessageBox
def get(root):
+ '''Return the singleton SearchEngine instance for the process.
+
+ The single SearchEngine saves settings between dialog instances.
+ If there is not a SearchEngine already, make one.
+ '''
if not hasattr(root, "_searchengine"):
root._searchengine = SearchEngine(root)
- # XXX This will never garbage-collect -- who cares
+ # This creates a cycle that persists until root is deleted.
return root._searchengine
class SearchEngine:
+ """Handles searching a text widget for Find, Replace, and Grep."""
def __init__(self, root):
- self.root = root
- # State shared by search, replace, and grep;
- # the search dialogs bind these to UI elements.
- self.patvar = StringVar(root) # search pattern
- self.revar = BooleanVar(root) # regular expression?
- self.casevar = BooleanVar(root) # match case?
- self.wordvar = BooleanVar(root) # match whole word?
- self.wrapvar = BooleanVar(root) # wrap around buffer?
- self.wrapvar.set(1) # (on by default)
- self.backvar = BooleanVar(root) # search backwards?
+ '''Initialize Variables that save search state.
+
+ The dialogs bind these to the UI elements present in the dialogs.
+ '''
+ self.root = root # need for report_error()
+ self.patvar = StringVar(root, '') # search pattern
+ self.revar = BooleanVar(root, False) # regular expression?
+ self.casevar = BooleanVar(root, False) # match case?
+ self.wordvar = BooleanVar(root, False) # match whole word?
+ self.wrapvar = BooleanVar(root, True) # wrap around buffer?
+ self.backvar = BooleanVar(root, False) # search backwards?
# Access methods
@@ -47,15 +55,23 @@ class SearchEngine:
# Higher level access methods
+ def setcookedpat(self, pat):
+ "Set pattern after escaping if re."
+ # called only in SearchDialog.py: 66
+ if self.isre():
+ pat = re.escape(pat)
+ self.setpat(pat)
+
def getcookedpat(self):
pat = self.getpat()
- if not self.isre():
+ if not self.isre(): # if True, see setcookedpat
pat = re.escape(pat)
if self.isword():
pat = r"\b%s\b" % pat
return pat
def getprog(self):
+ "Return compiled cooked search pattern."
pat = self.getpat()
if not pat:
self.report_error(pat, "Empty regular expression")
@@ -66,51 +82,42 @@ class SearchEngine:
flags = flags | re.IGNORECASE
try:
prog = re.compile(pat, flags)
- except re.error, what:
- try:
- msg, col = what
- except:
- msg = str(what)
- col = -1
+ except re.error as what:
+ args = what.args
+ msg = args[0]
+ col = args[1] if len(args) >= 2 else -1
self.report_error(pat, msg, col)
return None
return prog
def report_error(self, pat, msg, col=-1):
- # Derived class could overrid this with something fancier
+ # Derived class could override this with something fancier
msg = "Error: " + str(msg)
if pat:
- msg = msg + "\np\Pattern: " + str(pat)
+ msg = msg + "\nPattern: " + str(pat)
if col >= 0:
msg = msg + "\nOffset: " + str(col)
tkMessageBox.showerror("Regular expression error",
msg, master=self.root)
- def setcookedpat(self, pat):
- if self.isre():
- pat = re.escape(pat)
- self.setpat(pat)
-
def search_text(self, text, prog=None, ok=0):
- """Search a text widget for the pattern.
+ '''Return (lineno, matchobj) or None for forward/backward search.
- If prog is given, it should be the precompiled pattern.
- Return a tuple (lineno, matchobj); None if not found.
+ This function calls the right function with the right arguments.
+ It directly returns the result of that call.
- This obeys the wrap and direction (back) settings.
+ Text is a text widget. Prog is a precompiled pattern.
+ The ok parameter is a bit complicated as it has two effects.
- The search starts at the selection (if there is one) or
- at the insert mark (otherwise). If the search is forward,
- it starts at the right of the selection; for a backward
- search, it starts at the left end. An empty match exactly
- at either end of the selection (or at the insert mark if
- there is no selection) is ignored unless the ok flag is true
- -- this is done to guarantee progress.
+ If there is a selection, the search begins at either end,
+ depending on the direction setting and ok, with ok meaning that
+ the search starts with the selection. Otherwise, search begins
+ at the insert mark.
- If the search is allowed to wrap around, it will return the
- original selection if (and only if) it is the only match.
+ To aid progress, the search functions do not return an empty
+ match at the starting position unless ok is True.
+ '''
- """
if not prog:
prog = self.getprog()
if not prog:
@@ -179,15 +186,19 @@ class SearchEngine:
col = len(chars) - 1
return None
-# Helper to search backwards in a string.
-# (Optimized for the case where the pattern isn't found.)
-
def search_reverse(prog, chars, col):
+ '''Search backwards and return an re match object or None.
+
+ This is done by searching forwards until there is no match.
+ Prog: compiled re object with a search method returning a match.
+ Chars: line of text, without \n.
+ Col: stop index for the search; the limit for match.end().
+ '''
m = prog.search(chars)
if not m:
return None
found = None
- i, j = m.span()
+ i, j = m.span() # m.start(), m.end() == match slice indexes
while i < col and j <= col:
found = m
if i == j:
@@ -198,10 +209,9 @@ def search_reverse(prog, chars, col):
i, j = m.span()
return found
-# Helper to get selection end points, defaulting to insert mark.
-# Return a tuple of indices ("line.col" strings).
-
def get_selection(text):
+ '''Return tuple of 'line.col' indexes from selection or insert mark.
+ '''
try:
first = text.index("sel.first")
last = text.index("sel.last")
@@ -213,8 +223,11 @@ def get_selection(text):
last = first
return first, last
-# Helper to parse a text index into a (line, col) tuple.
-
def get_line_col(index):
+ '''Return (line, col) tuple of ints from 'line.col' string.'''
line, col = map(int, index.split(".")) # Fails on invalid index
return line, col
+
+if __name__ == "__main__":
+ import unittest
+ unittest.main('idlelib.idle_test.test_searchengine', verbosity=2, exit=False)
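
The new search_reverse() docstring above explains that a backward search is done by scanning forward and keeping the last acceptable match. A standalone sketch of that idea, not part of the patch; last_match_before() is an invented name:
---
import re

def last_match_before(prog, chars, col):
    "Return the last match whose span ends at or before col, else None."
    found = None
    m = prog.search(chars)
    while m:
        i, j = m.span()
        if not (i < col and j <= col):
            break
        found = m
        m = prog.search(chars, j if i < j else j + 1)  # skip empty matches
    return found

prog = re.compile(r'\d+')
m = last_match_before(prog, 'a1 b22 c333 d4444', 11)
print(m.group() if m else None)    # '333'
---
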
diff --git a/Lib/idlelib/StackViewer.py b/Lib/idlelib/StackViewer.py
index 732773f..74c9136 100644
--- a/Lib/idlelib/StackViewer.py
+++ b/Lib/idlelib/StackViewer.py
@@ -1,9 +1,12 @@
import os
import sys
import linecache
+import re
+import Tkinter as tk
from idlelib.TreeWidget import TreeNode, TreeItem, ScrolledCanvas
from idlelib.ObjectBrowser import ObjectTreeItem, make_objecttreeitem
+from idlelib.PyShell import PyShellFileList
def StackBrowser(root, flist=None, tb=None, top=None):
if top is None:
@@ -121,17 +124,29 @@ class VariablesTreeItem(ObjectTreeItem):
sublist.append(item)
return sublist
-
-def _test():
- try:
- import testcode
- reload(testcode)
+def _stack_viewer(parent):
+ root = tk.Tk()
+ root.title("Test StackViewer")
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
+ flist = PyShellFileList(root)
+ try: # to obtain a traceback object
+ a
except:
- sys.last_type, sys.last_value, sys.last_traceback = sys.exc_info()
- from Tkinter import Tk
- root = Tk()
- StackBrowser(None, top=root)
- root.mainloop()
-
-if __name__ == "__main__":
- _test()
+ exc_type, exc_value, exc_tb = sys.exc_info()
+
+ # inject stack trace to sys
+ sys.last_type = exc_type
+ sys.last_value = exc_value
+ sys.last_traceback = exc_tb
+
+ StackBrowser(root, flist=flist, top=root, tb=exc_tb)
+
+ # restore sys to original state
+ del sys.last_type
+ del sys.last_value
+ del sys.last_traceback
+
+if __name__ == '__main__':
+ from idlelib.idle_test.htest import run
+ run(_stack_viewer)
diff --git a/Lib/idlelib/ToolTip.py b/Lib/idlelib/ToolTip.py
index ce7a3d3..11136c4 100644
--- a/Lib/idlelib/ToolTip.py
+++ b/Lib/idlelib/ToolTip.py
@@ -76,14 +76,22 @@ class ListboxToolTip(ToolTipBase):
for item in self.items:
listbox.insert(END, item)
-def main():
- # Test code
+def _tooltip(parent):
root = Tk()
- b = Button(root, text="Hello", command=root.destroy)
- b.pack()
- root.update()
- tip = ListboxToolTip(b, ["Hello", "world"])
+ root.title("Test tooltip")
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
+ label = Label(root, text="Place your mouse over buttons")
+ label.pack()
+ button1 = Button(root, text="Button 1")
+ button2 = Button(root, text="Button 2")
+ button1.pack()
+ button2.pack()
+ ToolTip(button1, "This is tooltip text for button1.")
+ ListboxToolTip(button2, ["This is","multiple line",
+ "tooltip text","for button2"])
root.mainloop()
if __name__ == '__main__':
- main()
+ from idlelib.idle_test.htest import run
+ run(_tooltip)
diff --git a/Lib/idlelib/TreeWidget.py b/Lib/idlelib/TreeWidget.py
index 0feca01..ebb1f02 100644
--- a/Lib/idlelib/TreeWidget.py
+++ b/Lib/idlelib/TreeWidget.py
@@ -449,29 +449,18 @@ class ScrolledCanvas:
return "break"
-# Testing functions
-
-def test():
- from idlelib import PyShell
- root = Toplevel(PyShell.root)
- root.configure(bd=0, bg="yellow")
- root.focus_set()
+def _tree_widget(parent):
+ root = Tk()
+ root.title("Test TreeWidget")
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
sc = ScrolledCanvas(root, bg="white", highlightthickness=0, takefocus=1)
- sc.frame.pack(expand=1, fill="both")
- item = FileTreeItem("C:/windows/desktop")
+ sc.frame.pack(expand=1, fill="both", side=LEFT)
+ item = FileTreeItem(os.getcwd())
node = TreeNode(sc.canvas, None, item)
node.expand()
-
-def test2():
- # test w/o scrolling canvas
- root = Tk()
- root.configure(bd=0)
- canvas = Canvas(root, bg="white", highlightthickness=0)
- canvas.pack(expand=1, fill="both")
- item = FileTreeItem(os.curdir)
- node = TreeNode(canvas, None, item)
- node.update()
- canvas.focus_set()
+ root.mainloop()
if __name__ == '__main__':
- test()
+ from idlelib.idle_test.htest import run
+ run(_tree_widget)
diff --git a/Lib/idlelib/UndoDelegator.py b/Lib/idlelib/UndoDelegator.py
index 16d3ae1..cdeacea 100644
--- a/Lib/idlelib/UndoDelegator.py
+++ b/Lib/idlelib/UndoDelegator.py
@@ -336,17 +336,30 @@ class CommandSequence(Command):
self.depth = self.depth + incr
return self.depth
-def main():
+def _undo_delegator(parent):
from idlelib.Percolator import Percolator
root = Tk()
- root.wm_protocol("WM_DELETE_WINDOW", root.quit)
- text = Text()
+ root.title("Test UndoDelegator")
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
+
+ text = Text(root)
+ text.config(height=10)
text.pack()
text.focus_set()
p = Percolator(text)
d = UndoDelegator()
p.insertfilter(d)
+
+ undo = Button(root, text="Undo", command=lambda:d.undo_event(None))
+ undo.pack(side='left')
+ redo = Button(root, text="Redo", command=lambda:d.redo_event(None))
+ redo.pack(side='left')
+ dump = Button(root, text="Dump", command=lambda:d.dump_event(None))
+ dump.pack(side='left')
+
root.mainloop()
if __name__ == "__main__":
- main()
+ from idlelib.idle_test.htest import run
+ run(_undo_delegator)
diff --git a/Lib/idlelib/WidgetRedirector.py b/Lib/idlelib/WidgetRedirector.py
index 7c341f2..b1534a7 100644
--- a/Lib/idlelib/WidgetRedirector.py
+++ b/Lib/idlelib/WidgetRedirector.py
@@ -104,10 +104,12 @@ class OriginalCommand:
return self.tk_call(self.orig_and_operation + args)
-def main():
+def _widget_redirector(parent):
root = Tk()
- root.wm_protocol("WM_DELETE_WINDOW", root.quit)
- text = Text()
+ root.title("Test WidgetRedirector")
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
+ text = Text(root)
text.pack()
text.focus_set()
redir = WidgetRedirector(text)
@@ -117,10 +119,7 @@ def main():
previous_tcl_fcn(*args)
previous_tcl_fcn = redir.register("insert", my_insert)
root.mainloop()
- redir.unregister("insert") # runs after first 'close window'
- redir.close()
- root.mainloop()
- root.destroy()
if __name__ == "__main__":
- main()
+ from idlelib.idle_test.htest import run
+ run(_widget_redirector)
diff --git a/Lib/idlelib/ZoomHeight.py b/Lib/idlelib/ZoomHeight.py
index e8d1710..a5d679e 100644
--- a/Lib/idlelib/ZoomHeight.py
+++ b/Lib/idlelib/ZoomHeight.py
@@ -32,7 +32,7 @@ def zoom_height(top):
newy = 0
newheight = newheight - 72
- elif macosxSupport.runningAsOSXApp():
+ elif macosxSupport.isAquaTk():
# The '88' below is a magic number that avoids placing the bottom
# of the window below the panel on my machine. I don't know how
# to calculate the correct value for this with tkinter.
diff --git a/Lib/idlelib/aboutDialog.py b/Lib/idlelib/aboutDialog.py
index 43a1313..fc5c531 100644
--- a/Lib/idlelib/aboutDialog.py
+++ b/Lib/idlelib/aboutDialog.py
@@ -12,11 +12,16 @@ class AboutDialog(Toplevel):
"""Modal about dialog for idle
"""
- def __init__(self,parent,title):
+ def __init__(self, parent, title, _htest=False):
+ """
+ _htest - bool, change box location when running htest
+ """
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
- self.geometry("+%d+%d" % (parent.winfo_rootx()+30,
- parent.winfo_rooty()+30))
+ # place dialog below parent if running htest
+ self.geometry("+%d+%d" % (
+ parent.winfo_rootx()+30,
+ parent.winfo_rooty()+(30 if not _htest else 100)))
self.bg = "#707070"
self.fg = "#ffffff"
self.CreateWidgets()
@@ -66,12 +71,7 @@ class AboutDialog(Toplevel):
labelPythonVer = Label(frameBg, text='Python version: ' + \
sys.version.split()[0], fg=self.fg, bg=self.bg)
labelPythonVer.grid(row=9, column=0, sticky=W, padx=10, pady=0)
- # handle weird tk version num in windoze python >= 1.6 (?!?)
- tkVer = repr(TkVersion).split('.')
- tkVer[len(tkVer)-1] = str('%.3g' % (float('.'+tkVer[len(tkVer)-1])))[2:]
- if tkVer[len(tkVer)-1] == '':
- tkVer[len(tkVer)-1] = '0'
- tkVer = '.'.join(tkVer)
+ tkVer = self.tk.call('info', 'patchlevel')
labelTkVer = Label(frameBg, text='Tk version: '+
tkVer, fg=self.fg, bg=self.bg)
labelTkVer.grid(row=9, column=1, sticky=W, padx=2, pady=0)
@@ -141,10 +141,5 @@ class AboutDialog(Toplevel):
self.destroy()
if __name__ == '__main__':
- # test the dialog
- root = Tk()
- def run():
- from idlelib import aboutDialog
- aboutDialog.AboutDialog(root, 'About')
- Button(root, text='Dialog', command=run).pack()
- root.mainloop()
+ from idlelib.idle_test.htest import run
+ run(AboutDialog)
diff --git a/Lib/idlelib/config-extensions.def b/Lib/idlelib/config-extensions.def
index 78b68f6..39e69ce 100644
--- a/Lib/idlelib/config-extensions.def
+++ b/Lib/idlelib/config-extensions.def
@@ -46,6 +46,8 @@ zoom-height=<Alt-Key-2>
[ScriptBinding]
enable=1
+enable_shell=0
+enable_editor=1
[ScriptBinding_cfgBindings]
run-module=<Key-F5>
check-module=<Alt-Key-x>
diff --git a/Lib/idlelib/config-keys.def b/Lib/idlelib/config-keys.def
index fdc35ba..3bfcb69 100644
--- a/Lib/idlelib/config-keys.def
+++ b/Lib/idlelib/config-keys.def
@@ -13,37 +13,37 @@ cut=<Control-Key-x> <Control-Key-X>
paste=<Control-Key-v> <Control-Key-V>
beginning-of-line= <Key-Home>
center-insert=<Control-Key-l> <Control-Key-L>
-close-all-windows=<Control-Key-q>
+close-all-windows=<Control-Key-q> <Control-Key-Q>
close-window=<Alt-Key-F4> <Meta-Key-F4>
do-nothing=<Control-Key-F12>
end-of-file=<Control-Key-d> <Control-Key-D>
python-docs=<Key-F1>
python-context-help=<Shift-Key-F1>
-history-next=<Alt-Key-n> <Meta-Key-n>
-history-previous=<Alt-Key-p> <Meta-Key-p>
+history-next=<Alt-Key-n> <Meta-Key-n> <Alt-Key-N> <Meta-Key-N>
+history-previous=<Alt-Key-p> <Meta-Key-p> <Alt-Key-P> <Meta-Key-P>
interrupt-execution=<Control-Key-c> <Control-Key-C>
view-restart=<Key-F6>
restart-shell=<Control-Key-F6>
-open-class-browser=<Alt-Key-c> <Meta-Key-c> <Alt-Key-C>
-open-module=<Alt-Key-m> <Meta-Key-m> <Alt-Key-M>
+open-class-browser=<Alt-Key-c> <Meta-Key-c> <Alt-Key-C> <Meta-Key-C>
+open-module=<Alt-Key-m> <Meta-Key-m> <Alt-Key-M> <Meta-Key-M>
open-new-window=<Control-Key-n> <Control-Key-N>
open-window-from-file=<Control-Key-o> <Control-Key-O>
plain-newline-and-indent=<Control-Key-j> <Control-Key-J>
print-window=<Control-Key-p> <Control-Key-P>
-redo=<Control-Shift-Key-Z>
+redo=<Control-Shift-Key-Z> <Control-Shift-Key-z>
remove-selection=<Key-Escape>
-save-copy-of-window-as-file=<Alt-Shift-Key-S>
-save-window-as-file=<Control-Shift-Key-S>
-save-window=<Control-Key-s>
-select-all=<Control-Key-a>
+save-copy-of-window-as-file=<Alt-Shift-Key-S> <Alt-Shift-Key-s>
+save-window-as-file=<Control-Shift-Key-S> <Control-Shift-Key-s>
+save-window=<Control-Key-s> <Control-Key-S>
+select-all=<Control-Key-a> <Control-Key-A>
toggle-auto-coloring=<Control-Key-slash>
undo=<Control-Key-z> <Control-Key-Z>
find=<Control-Key-f> <Control-Key-F>
-find-again=<Control-Key-g> <Key-F3>
+find-again=<Control-Key-g> <Key-F3> <Control-Key-G>
find-in-files=<Alt-Key-F3> <Meta-Key-F3>
find-selection=<Control-Key-F3>
replace=<Control-Key-h> <Control-Key-H>
-goto-line=<Alt-Key-g> <Meta-Key-g>
+goto-line=<Alt-Key-g> <Meta-Key-g> <Alt-Key-G> <Meta-Key-G>
smart-backspace=<Key-BackSpace>
newline-and-indent=<Key-Return> <Key-KP_Enter>
smart-indent=<Key-Tab>
@@ -53,8 +53,8 @@ comment-region=<Alt-Key-3> <Meta-Key-3>
uncomment-region=<Alt-Key-4> <Meta-Key-4>
tabify-region=<Alt-Key-5> <Meta-Key-5>
untabify-region=<Alt-Key-6> <Meta-Key-6>
-toggle-tabs=<Alt-Key-t> <Meta-Key-t> <Alt-Key-T>
-change-indentwidth=<Alt-Key-u> <Meta-Key-u> <Alt-Key-U>
+toggle-tabs=<Alt-Key-t> <Meta-Key-t> <Alt-Key-T> <Meta-Key-T>
+change-indentwidth=<Alt-Key-u> <Meta-Key-u> <Alt-Key-U> <Meta-Key-U>
del-word-left=<Control-Key-BackSpace>
del-word-right=<Control-Key-Delete>
diff --git a/Lib/idlelib/config-main.def b/Lib/idlelib/config-main.def
index 5ddd098..132797c 100644
--- a/Lib/idlelib/config-main.def
+++ b/Lib/idlelib/config-main.def
@@ -59,7 +59,7 @@ font-bold= 0
encoding= none
[FormatParagraph]
-paragraph=70
+paragraph=72
[Indent]
use-spaces= 1
diff --git a/Lib/idlelib/configDialog.py b/Lib/idlelib/configDialog.py
index dbaedc7..56c1bb8 100644
--- a/Lib/idlelib/configDialog.py
+++ b/Lib/idlelib/configDialog.py
@@ -23,14 +23,19 @@ from idlelib import macosxSupport
class ConfigDialog(Toplevel):
- def __init__(self,parent,title):
+ def __init__(self,parent,title,_htest=False):
+ """
+ _htest - bool, change box location when running htest
+ """
Toplevel.__init__(self, parent)
self.wm_withdraw()
self.configure(borderwidth=5)
self.title('IDLE Preferences')
+ if _htest:
+ parent.instance_dict = {}
self.geometry("+%d+%d" % (parent.winfo_rootx()+20,
- parent.winfo_rooty()+30))
+ parent.winfo_rooty()+(30 if not _htest else 150)))
#Theme Elements. Each theme element key is its display name.
#The first value of the tuple is the sample area tag name.
#The second value is the display name list sort index.
@@ -71,16 +76,17 @@ class ConfigDialog(Toplevel):
page_names=['Fonts/Tabs','Highlighting','Keys','General'])
frameActionButtons = Frame(self,pady=2)
#action buttons
- if macosxSupport.runningAsOSXApp():
+ if macosxSupport.isAquaTk():
# Changing the default padding on OSX results in unreadable
# text in the buttons
paddingArgs={}
else:
paddingArgs={'padx':6, 'pady':3}
- self.buttonHelp = Button(frameActionButtons,text='Help',
- command=self.Help,takefocus=FALSE,
- **paddingArgs)
+# Comment out button creation and packing until self.Help is implemented
+## self.buttonHelp = Button(frameActionButtons,text='Help',
+## command=self.Help,takefocus=FALSE,
+## **paddingArgs)
self.buttonOk = Button(frameActionButtons,text='Ok',
command=self.Ok,takefocus=FALSE,
**paddingArgs)
@@ -94,7 +100,7 @@ class ConfigDialog(Toplevel):
self.CreatePageHighlight()
self.CreatePageKeys()
self.CreatePageGeneral()
- self.buttonHelp.pack(side=RIGHT,padx=5)
+## self.buttonHelp.pack(side=RIGHT,padx=5)
self.buttonOk.pack(side=LEFT,padx=5)
self.buttonApply.pack(side=LEFT,padx=5)
self.buttonCancel.pack(side=LEFT,padx=5)
@@ -183,7 +189,7 @@ class ConfigDialog(Toplevel):
text=' Highlighting Theme ')
#frameCustom
self.textHighlightSample=Text(frameCustom,relief=SOLID,borderwidth=1,
- font=('courier',12,''),cursor='hand2',width=21,height=10,
+ font=('courier',12,''),cursor='hand2',width=21,height=11,
takefocus=FALSE,highlightthickness=0,wrap=NONE)
text=self.textHighlightSample
text.bind('<Double-Button-1>',lambda e: 'break')
@@ -832,8 +838,9 @@ class ConfigDialog(Toplevel):
fontWeight=tkFont.BOLD
else:
fontWeight=tkFont.NORMAL
- self.editFont.config(size=self.fontSize.get(),
- weight=fontWeight,family=fontName)
+ newFont = (fontName, self.fontSize.get(), fontWeight)
+ self.labelFontSample.config(font=newFont)
+ self.textHighlightSample.configure(font=newFont)
def SetHighlightTarget(self):
if self.highlightTarget.get()=='Cursor': #bg not possible
@@ -946,7 +953,7 @@ class ConfigDialog(Toplevel):
self.listFontName.select_anchor(currentFontIndex)
##font size dropdown
fontSize=idleConf.GetOption('main','EditorWindow','font-size',
- default='10')
+ type='int', default='10')
self.optMenuFontSize.SetMenu(('7','8','9','10','11','12','13','14',
'16','18','20','22'),fontSize )
##fontWeight
@@ -1032,10 +1039,13 @@ class ConfigDialog(Toplevel):
self.autoSave.set(idleConf.GetOption('main', 'General', 'autosave',
default=0, type='bool'))
#initial window size
- self.winWidth.set(idleConf.GetOption('main','EditorWindow','width'))
- self.winHeight.set(idleConf.GetOption('main','EditorWindow','height'))
+ self.winWidth.set(idleConf.GetOption('main','EditorWindow','width',
+ type='int'))
+ self.winHeight.set(idleConf.GetOption('main','EditorWindow','height',
+ type='int'))
#initial paragraph reformat size
- self.paraWidth.set(idleConf.GetOption('main','FormatParagraph','paragraph'))
+ self.paraWidth.set(idleConf.GetOption('main','FormatParagraph','paragraph',
+ type='int'))
# default source encoding
self.encoding.set(idleConf.GetOption('main', 'EditorWindow',
'encoding', default='none'))
@@ -1146,9 +1156,5 @@ class ConfigDialog(Toplevel):
pass
if __name__ == '__main__':
- #test the dialog
- root=Tk()
- Button(root,text='Dialog',
- command=lambda:ConfigDialog(root,'Settings')).pack()
- root.instance_dict={}
- root.mainloop()
+ from idlelib.idle_test.htest import run
+ run(ConfigDialog)
diff --git a/Lib/idlelib/configHandler.py b/Lib/idlelib/configHandler.py
index 73487d5..67ab1f6 100644
--- a/Lib/idlelib/configHandler.py
+++ b/Lib/idlelib/configHandler.py
@@ -20,7 +20,6 @@ configuration problem notification and resolution.
import os
import sys
import string
-from idlelib import macosxSupport
from ConfigParser import ConfigParser, NoOptionError, NoSectionError
class InvalidConfigType(Exception): pass
@@ -237,24 +236,39 @@ class IdleConf:
printed to stderr.
"""
- if self.userCfg[configType].has_option(section,option):
- return self.userCfg[configType].Get(section, option,
- type=type, raw=raw)
- elif self.defaultCfg[configType].has_option(section,option):
- return self.defaultCfg[configType].Get(section, option,
- type=type, raw=raw)
- else: #returning default, print warning
- if warn_on_default:
- warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n'
- ' problem retrieving configuration option %r\n'
- ' from section %r.\n'
- ' returning default value: %r\n' %
- (option, section, default))
- try:
- sys.stderr.write(warning)
- except IOError:
- pass
- return default
+ try:
+ if self.userCfg[configType].has_option(section,option):
+ return self.userCfg[configType].Get(section, option,
+ type=type, raw=raw)
+ except ValueError:
+ warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n'
+ ' invalid %r value for configuration option %r\n'
+ ' from section %r: %r\n' %
+ (type, option, section,
+ self.userCfg[configType].Get(section, option,
+ raw=raw)))
+ try:
+ sys.stderr.write(warning)
+ except IOError:
+ pass
+ try:
+ if self.defaultCfg[configType].has_option(section,option):
+ return self.defaultCfg[configType].Get(section, option,
+ type=type, raw=raw)
+ except ValueError:
+ pass
+ #returning default, print warning
+ if warn_on_default:
+ warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n'
+ ' problem retrieving configuration option %r\n'
+ ' from section %r.\n'
+ ' returning default value: %r\n' %
+ (option, section, default))
+ try:
+ sys.stderr.write(warning)
+ except IOError:
+ pass
+ return default
def SetOption(self, configType, section, option, value):
"""In user's config file, set section's option to value.
@@ -511,10 +525,13 @@ class IdleConf:
def GetCurrentKeySet(self):
result = self.GetKeySet(self.CurrentKeys())
- if macosxSupport.runningAsOSXApp():
- # We're using AquaTk, replace all keybingings that use the
- # Alt key by ones that use the Option key because the former
- # don't work reliably.
+ if sys.platform == "darwin":
+ # OS X Tk variants do not support the "Alt" keyboard modifier.
+ # So replace all keybindings that use "Alt" with ones that
+ # use the "Option" keyboard modifier.
+ # TO DO: the "Option" modifier does not work properly for
+ # Cocoa Tk and XQuartz Tk so we should not use it
+ # in default OS X KeySets.
for k, v in result.items():
v2 = [ x.replace('<Alt-', '<Option-') for x in v ]
if v != v2:
@@ -595,7 +612,7 @@ class IdleConf:
'<<replace>>': ['<Control-h>'],
'<<goto-line>>': ['<Alt-g>'],
'<<smart-backspace>>': ['<Key-BackSpace>'],
- '<<newline-and-indent>>': ['<Key-Return> <Key-KP_Enter>'],
+ '<<newline-and-indent>>': ['<Key-Return>', '<Key-KP_Enter>'],
'<<smart-indent>>': ['<Key-Tab>'],
'<<indent-region>>': ['<Control-Key-bracketright>'],
'<<dedent-region>>': ['<Control-Key-bracketleft>'],
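
The reworked GetOption() above tries the user config first, falls back to the default config, and finally to the hard-coded default, printing a warning instead of raising when a stored value has the wrong type. A simplified standalone sketch of that fallback order, not part of the patch (get_option() and the plain dicts are stand-ins for the real IdleConf machinery):
---
def get_option(user_cfg, default_cfg, key, default, type_=str):
    "Look in the user layer, then the default layer, then fall back."
    for layer in (user_cfg, default_cfg):
        if key in layer:
            try:
                return type_(layer[key])
            except ValueError:
                print('Warning: invalid %s value %r for option %r'
                      % (type_.__name__, layer[key], key))
    return default

user = {'font-size': 'big'}          # a bad user value: not an int
defaults = {'font-size': '10'}
print(get_option(user, defaults, 'font-size', 12, int))   # warns, then 10
---
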
diff --git a/Lib/idlelib/configHelpSourceEdit.py b/Lib/idlelib/configHelpSourceEdit.py
index 6611621..5816449 100644
--- a/Lib/idlelib/configHelpSourceEdit.py
+++ b/Lib/idlelib/configHelpSourceEdit.py
@@ -8,13 +8,14 @@ import tkMessageBox
import tkFileDialog
class GetHelpSourceDialog(Toplevel):
- def __init__(self, parent, title, menuItem='', filePath=''):
+ def __init__(self, parent, title, menuItem='', filePath='', _htest=False):
"""Get menu entry and url/ local file location for Additional Help
User selects a name for the Help resource and provides a web url
or a local file as its source. The user can enter a url or browse
for the file.
+ _htest - bool, change box location when running htest
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
@@ -31,12 +32,14 @@ class GetHelpSourceDialog(Toplevel):
self.withdraw() #hide while setting geometry
#needs to be done here so that the winfo_reqwidth is valid
self.update_idletasks()
- #centre dialog over parent:
- self.geometry("+%d+%d" %
- ((parent.winfo_rootx() + ((parent.winfo_width()/2)
- -(self.winfo_reqwidth()/2)),
- parent.winfo_rooty() + ((parent.winfo_height()/2)
- -(self.winfo_reqheight()/2)))))
+ #centre dialog over parent. below parent if running htest.
+ self.geometry(
+ "+%d+%d" % (
+ parent.winfo_rootx() +
+ (parent.winfo_width()/2 - self.winfo_reqwidth()/2),
+ parent.winfo_rooty() +
+ ((parent.winfo_height()/2 - self.winfo_reqheight()/2)
+ if not _htest else 150)))
self.deiconify() #geometry set, unhide
self.bind('<Return>', self.Ok)
self.wait_window()
@@ -159,11 +162,5 @@ class GetHelpSourceDialog(Toplevel):
self.destroy()
if __name__ == '__main__':
- #test the dialog
- root = Tk()
- def run():
- keySeq = ''
- dlg = GetHelpSourceDialog(root, 'Get Help Source')
- print dlg.result
- Button(root,text='Dialog', command=run).pack()
- root.mainloop()
+ from idlelib.idle_test.htest import run
+ run(GetHelpSourceDialog)
diff --git a/Lib/idlelib/configSectionNameDialog.py b/Lib/idlelib/configSectionNameDialog.py
index 4f1b002..c09dca8 100644
--- a/Lib/idlelib/configSectionNameDialog.py
+++ b/Lib/idlelib/configSectionNameDialog.py
@@ -1,97 +1,92 @@
"""
Dialog that allows user to specify a new config file section name.
Used to get new highlight theme and keybinding set names.
+The 'return value' for the dialog, used in two places in configDialog.py,
+is the .result attribute set in the Ok and Cancel methods.
"""
from Tkinter import *
import tkMessageBox
-
class GetCfgSectionNameDialog(Toplevel):
- def __init__(self,parent,title,message,usedNames):
+ def __init__(self, parent, title, message, used_names, _htest=False):
"""
message - string, informational message to display
- usedNames - list, list of names already in use for validity check
+ used_names - string collection, names already in use for validity check
+ _htest - bool, change box location when running htest
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
- self.resizable(height=FALSE,width=FALSE)
+ self.resizable(height=FALSE, width=FALSE)
self.title(title)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.Cancel)
self.parent = parent
- self.message=message
- self.usedNames=usedNames
- self.result=''
- self.CreateWidgets()
- self.withdraw() #hide while setting geometry
+ self.message = message
+ self.used_names = used_names
+ self.create_widgets()
+ self.withdraw() #hide while setting geometry
self.update_idletasks()
#needs to be done here so that the winfo_reqwidth is valid
self.messageInfo.config(width=self.frameMain.winfo_reqwidth())
- self.geometry("+%d+%d" %
- ((parent.winfo_rootx()+((parent.winfo_width()/2)
- -(self.winfo_reqwidth()/2)),
- parent.winfo_rooty()+((parent.winfo_height()/2)
- -(self.winfo_reqheight()/2)) )) ) #centre dialog over parent
- self.deiconify() #geometry set, unhide
+ self.geometry(
+ "+%d+%d" % (
+ parent.winfo_rootx() +
+ (parent.winfo_width()/2 - self.winfo_reqwidth()/2),
+ parent.winfo_rooty() +
+ ((parent.winfo_height()/2 - self.winfo_reqheight()/2)
+ if not _htest else 100)
+ ) ) #centre dialog over parent (or below htest box)
+ self.deiconify() #geometry set, unhide
self.wait_window()
-
- def CreateWidgets(self):
- self.name=StringVar(self)
- self.fontSize=StringVar(self)
- self.frameMain = Frame(self,borderwidth=2,relief=SUNKEN)
- self.frameMain.pack(side=TOP,expand=TRUE,fill=BOTH)
- self.messageInfo=Message(self.frameMain,anchor=W,justify=LEFT,padx=5,pady=5,
- text=self.message)#,aspect=200)
- entryName=Entry(self.frameMain,textvariable=self.name,width=30)
+ def create_widgets(self):
+ self.name = StringVar(self.parent)
+ self.fontSize = StringVar(self.parent)
+ self.frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
+ self.frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
+ self.messageInfo = Message(self.frameMain, anchor=W, justify=LEFT,
+ padx=5, pady=5, text=self.message) #,aspect=200)
+ entryName = Entry(self.frameMain, textvariable=self.name, width=30)
entryName.focus_set()
- self.messageInfo.pack(padx=5,pady=5)#,expand=TRUE,fill=BOTH)
- entryName.pack(padx=5,pady=5)
- frameButtons=Frame(self)
- frameButtons.pack(side=BOTTOM,fill=X)
- self.buttonOk = Button(frameButtons,text='Ok',
- width=8,command=self.Ok)
- self.buttonOk.grid(row=0,column=0,padx=5,pady=5)
- self.buttonCancel = Button(frameButtons,text='Cancel',
- width=8,command=self.Cancel)
- self.buttonCancel.grid(row=0,column=1,padx=5,pady=5)
+ self.messageInfo.pack(padx=5, pady=5) #, expand=TRUE, fill=BOTH)
+ entryName.pack(padx=5, pady=5)
+ frameButtons = Frame(self, pady=2)
+ frameButtons.pack(side=BOTTOM)
+ self.buttonOk = Button(frameButtons, text='Ok',
+ width=8, command=self.Ok)
+ self.buttonOk.pack(side=LEFT, padx=5)
+ self.buttonCancel = Button(frameButtons, text='Cancel',
+ width=8, command=self.Cancel)
+ self.buttonCancel.pack(side=RIGHT, padx=5)
- def NameOk(self):
- #simple validity check for a sensible
- #ConfigParser file section name
- nameOk=1
- name=self.name.get()
- name.strip()
+ def name_ok(self):
+ ''' After stripping entered name, check that it is a sensible
+ ConfigParser file section name. Return it if it is, '' if not.
+ '''
+ name = self.name.get().strip()
if not name: #no name specified
tkMessageBox.showerror(title='Name Error',
message='No name specified.', parent=self)
- nameOk=0
elif len(name)>30: #name too long
tkMessageBox.showerror(title='Name Error',
message='Name too long. It should be no more than '+
'30 characters.', parent=self)
- nameOk=0
- elif name in self.usedNames:
+ name = ''
+ elif name in self.used_names:
tkMessageBox.showerror(title='Name Error',
message='This name is already in use.', parent=self)
- nameOk=0
- return nameOk
-
+ name = ''
+ return name
def Ok(self, event=None):
- if self.NameOk():
- self.result=self.name.get().strip()
+ name = self.name_ok()
+ if name:
+ self.result = name
self.destroy()
-
def Cancel(self, event=None):
- self.result=''
+ self.result = ''
self.destroy()
-
if __name__ == '__main__':
- #test the dialog
- root=Tk()
- def run():
- keySeq=''
- dlg=GetCfgSectionNameDialog(root,'Get Name',
- 'The information here should need to be word wrapped. Test.')
- print dlg.result
- Button(root,text='Dialog',command=run).pack()
- root.mainloop()
+ import unittest
+ unittest.main('idlelib.idle_test.test_config_name', verbosity=2, exit=False)
+
+ from idlelib.idle_test.htest import run
+ run(GetCfgSectionNameDialog)
diff --git a/Lib/idlelib/dynOptionMenuWidget.py b/Lib/idlelib/dynOptionMenuWidget.py
index e81f7ba..133c6fd 100644
--- a/Lib/idlelib/dynOptionMenuWidget.py
+++ b/Lib/idlelib/dynOptionMenuWidget.py
@@ -2,9 +2,10 @@
OptionMenu widget modified to allow dynamic menu reconfiguration
and setting of highlightthickness
"""
-from Tkinter import OptionMenu
-from Tkinter import _setit
+from Tkinter import OptionMenu, _setit, Tk, StringVar, Button
+
import copy
+import re
class DynOptionMenu(OptionMenu):
"""
@@ -33,3 +34,24 @@ class DynOptionMenu(OptionMenu):
command=_setit(self.variable,item,self.command))
if value:
self.variable.set(value)
+
+def _dyn_option_menu(parent):
+ root = Tk()
+ root.title("Tets dynamic option menu")
+ var = StringVar(root)
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 150))
+ var.set("Old option set") #Set the default value
+ dyn = DynOptionMenu(root,var, "old1","old2","old3","old4")
+ dyn.pack()
+
+ def update():
+ dyn.SetMenu(["new1","new2","new3","new4"],value="new option set")
+
+ button = Button(root, text="Change option set", command=update)
+ button.pack()
+ root.mainloop()
+
+if __name__ == '__main__':
+ from idlelib.idle_test.htest import run
+ run(_dyn_option_menu)
diff --git a/Lib/idlelib/extend.txt b/Lib/idlelib/extend.txt
index 165e044..c9cb2e8 100644
--- a/Lib/idlelib/extend.txt
+++ b/Lib/idlelib/extend.txt
@@ -54,7 +54,7 @@ Extensions are not required to define menu entries for all the events they
implement. (They are also not required to create keybindings, but in that
case there must be empty bindings in cofig-extensions.def)
-Here is a complete example example:
+Here is a complete example:
class ZoomHeight:
@@ -72,7 +72,7 @@ class ZoomHeight:
"...Do what you want here..."
The final piece of the puzzle is the file "config-extensions.def", which is
-used to to configure the loading of extensions and to establish key (or, more
+used to configure the loading of extensions and to establish key (or, more
generally, event) bindings to the virtual events defined in the extensions.
See the comments at the top of config-extensions.def for information. It's
diff --git a/Lib/idlelib/help.txt b/Lib/idlelib/help.txt
index 7bfd2ca..bd6822c 100644
--- a/Lib/idlelib/help.txt
+++ b/Lib/idlelib/help.txt
@@ -5,7 +5,7 @@ separate window containing the menu is created.
File Menu:
- New Window -- Create a new editing window
+ New File -- Create a new editing window
Open... -- Open an existing file
Recent Files... -- Open a list of recent files
Open Module... -- Open an existing module (searches sys.path)
@@ -80,7 +80,7 @@ Shell Menu (only in Shell window):
Debug Menu (only in Shell window):
Go to File/Line -- look around the insert point for a filename
- and linenumber, open the file, and show the line
+ and line number, open the file, and show the line
Debugger (toggle) -- Run commands in the shell under the debugger
Stack Viewer -- Show the stack traceback of the last exception
Auto-open Stack Viewer (toggle) -- Open stack viewer on traceback
@@ -92,7 +92,7 @@ Options Menu:
Startup Preferences may be set, and Additional Help
Sources can be specified.
- On MacOS X this menu is not present, use
+ On OS X this menu is not present, use
menu 'IDLE -> Preferences...' instead.
---
Code Context -- Open a pane at the top of the edit window which
@@ -120,6 +120,24 @@ Help Menu:
---
(Additional Help Sources may be added here)
+Edit context menu (Right-click / Control-click on OS X in Edit window):
+
+ Cut -- Copy a selection into system-wide clipboard,
+ then delete the selection
+ Copy -- Copy selection into system-wide clipboard
+ Paste -- Insert system-wide clipboard into window
+ Set Breakpoint -- Sets a breakpoint (when debugger open)
+ Clear Breakpoint -- Clears the breakpoint on that line
+
+Shell context menu (Right-click / Control-click on OS X in Shell window):
+
+ Cut -- Copy a selection into system-wide clipboard,
+ then delete the selection
+ Copy -- Copy selection into system-wide clipboard
+ Paste -- Insert system-wide clipboard into window
+ ---
+ Go to file/line -- Same as in Debug menu
+
** TIPS **
==========
@@ -215,14 +233,13 @@ Completions:
Python Shell window:
Control-c interrupts executing command.
- Control-d sends end-of-file; closes window if typed at >>> prompt
- (this is Control-z on Windows).
+ Control-d sends end-of-file; closes window if typed at >>> prompt.
Command history:
Alt-p retrieves previous command matching what you have typed.
Alt-n retrieves next.
- (These are Control-p, Control-n on the Mac)
+ (These are Control-p, Control-n on OS X)
Return while cursor is on a previous command retrieves that command.
Expand word is also useful to reduce typing.
diff --git a/Lib/idlelib/idle_test/README.txt b/Lib/idlelib/idle_test/README.txt
new file mode 100644
index 0000000..f6b6a21
--- /dev/null
+++ b/Lib/idlelib/idle_test/README.txt
@@ -0,0 +1,115 @@
+README FOR IDLE TESTS IN IDLELIB.IDLE_TEST
+
+
+1. Test Files
+
+The idle directory, idlelib, has over 60 xyz.py files. The idle_test
+subdirectory should contain a test_xyz.py for each. (For test modules, make
+'xyz' lower case, and possibly shorten it.) Each file should start with
+something like the following template, with the blanks after '.' and 'as',
+and before and after '_' filled in.
+---
+import unittest
+from test.support import requires
+import idlelib. as
+
+class _Test(unittest.TestCase):
+
+ def test_(self):
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2, exit=False)
+---
+Idle tests are run with unittest; do not use regrtest's test_main.
+
+Once test_xyz is written, the following should go at the end of xyz.py,
+with xyz (lowercased) added after 'test_'.
+---
+if __name__ == "__main__":
+ import unittest
+ unittest.main('idlelib.idle_test.test_', verbosity=2, exit=False)
+---
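+
+As a concrete sketch of this pattern (the Delegator tests in this directory
+follow it), a filled-in test file might look like:
+---
+import unittest
+from idlelib.Delegator import Delegator
+
+class DelegatorTest(unittest.TestCase):
+
+    def test_mydel(self):
+        mydel = Delegator(int)
+        self.assertIs(mydel.delegate, int)
+
+if __name__ == '__main__':
+    unittest.main(verbosity=2, exit=False)
+---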
+
+
+2. Gui Tests
+
+Gui tests need 'requires' from test.support (test.test_support in 2.7). A
+test is a gui test if it creates a Tk root or master object either directly
+or indirectly by instantiating a tkinter or idle class. For the benefit of
+test processes that either have no graphical environment available or are not
+allowed to use it, gui tests must be 'guarded' by "requires('gui')" in a
+setUp function or method. This will typically be setUpClass.
+
+To avoid interfering with other gui tests, all gui objects must be destroyed
+and deleted by the end of the test. If a widget, such as a Tk root, is created
+in a setUpX function, destroy it in the corresponding tearDownX. For module
+and class attributes, also delete the widget.
+---
+ @classmethod
+ def setUpClass(cls):
+ requires('gui')
+ cls.root = tk.Tk()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.root.destroy()
+ del cls.root
+---
+
+Support.requires('gui') causes the test(s) it guards to be skipped if any of
+a few conditions are met:
+ - The tests are being run by regrtest.py, and it was started without
+ enabling the "gui" resource with the "-u" command line option.
+ - The tests are being run on Windows by a service that is not allowed to
+ interact with the graphical environment.
+ - The tests are being run on Mac OS X in a process that cannot make a window
+ manager connection.
+ - tkinter.Tk cannot be successfully instantiated for some reason.
+ - test.support.use_resources has been set by something other than
+ regrtest.py and does not contain "gui".
+
+Since non-gui tests always run, but gui tests only sometimes, tests of non-gui
+operations should avoid needing a gui. Methods that make incidental use of
+tkinter (tk) variables and messageboxes can do this by using the mock classes in
+idle_test/mock_tk.py. There is also a mock text that will handle some uses of the
+tk Text widget.
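+
+As a minimal sketch, a test can use the mock Var in place of a tkinter
+StringVar, with no Tk root at all:
+---
+from idlelib.idle_test.mock_tk import Var
+
+name = Var()              # stands in for StringVar(master)
+name.set('  good  ')
+assert name.get() == '  good  '
+---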
+
+
+3. Running Tests
+
+Assume that xyz.py and test_xyz.py end with the "if __name__" statements given
+above. In Idle, pressing F5 in an editor window with either loaded will run all
+tests in the test_xyz file with the version of Python running Idle. The test
+report and any tracebacks will appear in the Shell window. The options in these
+"if __name__" statements are appropriate for developers running (as opposed to
+importing) either of the files during development: verbosity=2 lists all test
+methods in the file; exit=False avoids a spurious sys.exit traceback that would
+otherwise occur when running in Idle. The following command lines also run
+all test methods, including gui tests, in test_xyz.py. (The exceptions are that
+idlelib and idlelib.idle start Idle, and idlelib.PyShell should as well
+(issue 18330).)
+
+python -m idlelib.xyz # With the capitalization of the xyz module
+python -m idlelib.idle_test.test_xyz
+
+To run all idle_test/test_*.py tests, either interactively
+('>>>', with unittest imported) or from a command line, use one of the
+following. (Notes: in 2.7, 'test ' (with the space) is 'test.regrtest ';
+where present, -v and -ugui can be omitted.)
+
+>>> unittest.main('idlelib.idle_test', verbosity=2, exit=False)
+python -m unittest -v idlelib.idle_test
+python -m test -v -ugui test_idle
+python -m test.test_idle
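+
+In 2.7, for example, the third form above is spelled
+
+python -m test.regrtest -v -ugui test_idle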
+
+The idle tests are 'discovered' by idlelib.idle_test.__init__.load_tests,
+which is also imported into test.test_idle. Normally, neither file should be
+changed when working on individual test modules. The third command runs
+unittest indirectly through regrtest. The same happens when the entire test
+suite is run with 'python -m test'. So that command must work for buildbots
+to stay green. Idle tests must not disturb the environment in a way that
+makes other tests fail (issue 18081).
+
+To run an individual TestCase or test method, extend the dotted name given to
+unittest on the command line.
+
+python -m unittest -v idlelib.idle_test.test_xyz.Test_case.test_meth
diff --git a/Lib/idlelib/idle_test/__init__.py b/Lib/idlelib/idle_test/__init__.py
new file mode 100644
index 0000000..1bc9536
--- /dev/null
+++ b/Lib/idlelib/idle_test/__init__.py
@@ -0,0 +1,9 @@
+from os.path import dirname
+
+def load_tests(loader, standard_tests, pattern):
+ this_dir = dirname(__file__)
+ top_dir = dirname(dirname(this_dir))
+ package_tests = loader.discover(start_dir=this_dir, pattern='test*.py',
+ top_level_dir=top_dir)
+ standard_tests.addTests(package_tests)
+ return standard_tests
diff --git a/Lib/idlelib/idle_test/htest.py b/Lib/idlelib/idle_test/htest.py
new file mode 100644
index 0000000..f6e02b8
--- /dev/null
+++ b/Lib/idlelib/idle_test/htest.py
@@ -0,0 +1,368 @@
+'''Run human tests of Idle's window, dialog, and popup widgets.
+
+run(*tests)
+Run each callable in tests after finding the matching test spec in this file.
+If there are none, run an htest for each spec dict in this file after finding
+the matching callable in the module named in the spec.
+
+In a tested module, let X be a global name bound to a widget callable.
+End the module with
+
+if __name__ == '__main__':
+ <unittest, if there is one>
+ from idlelib.idle_test.htest import run
+ run(X)
+
+The X object must have a .__name__ attribute and a 'parent' parameter.
+X will often be a widget class, but a callable instance with .__name__
+or a wrapper function will also work. The names of wrapper functions, like
+'_editor_window', should start with '_'.
+
+This file must contain a matching instance of the following template,
+with X.__name__ prepended, as in '_editor_window_spec ...'.
+
+_spec = {
+ 'file': '',
+ 'kwds': {'title': ''},
+ 'msg': ""
+ }
+
+file (no .py): used in run() to import the file and get X.
+kwds: passed to X (**kwds), after 'parent' is added, to initialize X.
+title: an example; used by some widgets, delete if not needed.
+msg: displayed in a master window. Hints as to how the user might
+ test the widget. Close the window to skip or end the test.
+
+Modules not being tested at the moment:
+PyShell.PyShellEditorWindow
+Debugger.Debugger
+AutoCompleteWindow.AutoCompleteWindow
+OutputWindow.OutputWindow (indirectly being tested with grep test)
+'''
+from importlib import import_module
+from idlelib.macosxSupport import _initializeTkVariantTests
+import Tkinter as tk
+
+AboutDialog_spec = {
+ 'file': 'aboutDialog',
+ 'kwds': {'title': 'aboutDialog test',
+ '_htest': True,
+ },
+ 'msg': "Test every button. Ensure Python, TK and IDLE versions "
+ "are correctly displayed.\n [Close] to exit.",
+ }
+
+_calltip_window_spec = {
+ 'file': 'CallTipWindow',
+ 'kwds': {},
+ 'msg': "Typing '(' should display a calltip.\n"
+ "Typing ') should hide the calltip.\n"
+ }
+
+_class_browser_spec = {
+ 'file': 'ClassBrowser',
+ 'kwds': {},
+ 'msg': "Inspect names of module, class(with superclass if "
+ "applicable), methods and functions.\nToggle nested items.\n"
+ "Double clicking on items prints a traceback for an exception "
+ "that is ignored."
+ }
+
+_color_delegator_spec = {
+ 'file': 'ColorDelegator',
+ 'kwds': {},
+ 'msg': "The text is sample Python code.\n"
+ "Ensure components like comments, keywords, builtins,\n"
+ "string, definitions, and break are correctly colored.\n"
+ "The default color scheme is in idlelib/config-highlight.def"
+ }
+
+ConfigDialog_spec = {
+ 'file': 'configDialog',
+ 'kwds': {'title': 'Settings',
+ '_htest': True,},
+ 'msg': "IDLE preferences dialog.\n"
+ "In the 'Fonts/Tabs' tab, changing font face, should update the "
+ "font face of the text in the area below it.\nIn the "
+ "'Highlighting' tab, try different color schemes. Clicking "
+ "items in the sample program should update the choices above it."
+ "\nIn the 'Keys' and 'General' tab, test settings of interest."
+ "\n[Ok] to close the dialog.[Apply] to apply the settings and "
+ "and [Cancel] to revert all changes.\nRe-run the test to ensure "
+ "changes made have persisted."
+ }
+
+_dyn_option_menu_spec = {
+ 'file': 'dynOptionMenuWidget',
+ 'kwds': {},
+ 'msg': "Select one of the many options in the 'old option set'.\n"
+ "Click the button to change the option set.\n"
+ "Select one of the many options in the 'new option set'."
+ }
+
+_editor_window_spec = {
+ 'file': 'EditorWindow',
+ 'kwds': {},
+ 'msg': "Test editor functions of interest."
+ }
+
+GetCfgSectionNameDialog_spec = {
+ 'file': 'configSectionNameDialog',
+ 'kwds': {'title':'Get Name',
+ 'message':'Enter something',
+ 'used_names': {'abc'},
+ '_htest': True},
+ 'msg': "After the text entered with [Ok] is stripped, <nothing>, "
+ "'abc', or more that 30 chars are errors.\n"
+ "Close 'Get Name' with a valid entry (printed to Shell), "
+ "[Cancel], or [X]",
+ }
+
+GetHelpSourceDialog_spec = {
+ 'file': 'configHelpSourceEdit',
+ 'kwds': {'title': 'Get helpsource',
+ '_htest': True},
+ 'msg': "Enter menu item name and help file path\n "
+ "<nothing> and more than 30 chars are invalid menu item names.\n"
+ "<nothing>, file does not exist are invalid path items.\n"
+ "Test for incomplete web address for help file path.\n"
+ "A valid entry will be printed to shell with [0k].\n"
+ "[Cancel] will print None to shell",
+ }
+
+# Update once issue21519 is resolved.
+GetKeysDialog_spec = {
+ 'file': 'keybindingDialog',
+ 'kwds': {'title': 'Test keybindings',
+ 'action': 'find-again',
+ 'currentKeySequences': [''] ,
+ '_htest': True,
+ },
+ 'msg': "Test for different key modifier sequences.\n"
+ "<nothing> is invalid.\n"
+ "No modifier key is invalid.\n"
+ "Shift key with [a-z],[0-9], function key, move key, tab, space"
+ "is invalid.\nNo validitity checking if advanced key binding "
+ "entry is used."
+ }
+
+_grep_dialog_spec = {
+ 'file': 'GrepDialog',
+ 'kwds': {},
+ 'msg': "Click the 'Show GrepDialog' button.\n"
+ "Test the various 'Find-in-files' functions.\n"
+ "The results should be displayed in a new '*Output*' window.\n"
+ "'Right-click'->'Goto file/line' anywhere in the search results "
+ "should open that file \nin a new EditorWindow."
+ }
+
+_help_dialog_spec = {
+ 'file': 'EditorWindow',
+ 'kwds': {},
+ 'msg': "If the help text displays, this works.\n"
+ "Text is selectable. Window is scrollable."
+ }
+
+_io_binding_spec = {
+ 'file': 'IOBinding',
+ 'kwds': {},
+ 'msg': "Test the following bindings\n"
+ "<Control-o> to display open window from file dialog.\n"
+ "<Control-s> to save the file\n"
+ }
+
+_multi_call_spec = {
+ 'file': 'MultiCall',
+ 'kwds': {},
+ 'msg': "The following actions should trigger a print to console or IDLE"
+ " Shell.\nEntering and leaving the text area, key entry, "
+ "<Control-Key>,\n<Alt-Key-a>, <Control-Key-a>, "
+ "<Alt-Control-Key-a>, \n<Control-Button-1>, <Alt-Button-1> and "
+ "focusing out of the window\nare sequences to be tested."
+ }
+
+_multistatus_bar_spec = {
+ 'file': 'MultiStatusBar',
+ 'kwds': {},
+ 'msg': "Ensure presence of multi-status bar below text area.\n"
+ "Click 'Update Status' to change the multi-status text"
+ }
+
+_object_browser_spec = {
+ 'file': 'ObjectBrowser',
+ 'kwds': {},
+ 'msg': "Double click on items upto the lowest level.\n"
+ "Attributes of the objects and related information "
+ "will be displayed side-by-side at each level."
+ }
+
+_path_browser_spec = {
+ 'file': 'PathBrowser',
+ 'kwds': {},
+ 'msg': "Test for correct display of all paths in sys.path.\n"
+ "Toggle nested items upto the lowest level.\n"
+ "Double clicking on an item prints a traceback\n"
+ "for an exception that is ignored."
+ }
+
+_percolator_spec = {
+ 'file': 'Percolator',
+ 'kwds': {},
+ 'msg': "There are two tracers which can be toggled using a checkbox.\n"
+ "Toggling a tracer 'on' by checking it should print tracer"
+ "output to the console or to the IDLE shell.\n"
+ "If both the tracers are 'on', the output from the tracer which "
+ "was switched 'on' later, should be printed first\n"
+ "Test for actions like text entry, and removal."
+ }
+
+_replace_dialog_spec = {
+ 'file': 'ReplaceDialog',
+ 'kwds': {},
+ 'msg': "Click the 'Replace' button.\n"
+ "Test various replace options in the 'Replace dialog'.\n"
+ "Click [Close] or [X] to close the 'Replace Dialog'."
+ }
+
+_search_dialog_spec = {
+ 'file': 'SearchDialog',
+ 'kwds': {},
+ 'msg': "Click the 'Search' button.\n"
+ "Test various search options in the 'Search dialog'.\n"
+ "Click [Close] or [X] to close the 'Search Dialog'."
+ }
+
+_scrolled_list_spec = {
+ 'file': 'ScrolledList',
+ 'kwds': {},
+ 'msg': "You should see a scrollable list of items\n"
+ "Selecting (clicking) or double clicking an item "
+ "prints the name to the console or Idle shell.\n"
+ "Right clicking an item will display a popup."
+ }
+
+_stack_viewer_spec = {
+ 'file': 'StackViewer',
+ 'kwds': {},
+ 'msg': "A stacktrace for a NameError exception.\n"
+ "Expand 'idlelib ...' and '<locals>'.\n"
+ "Check that exc_value, exc_tb, and exc_type are correct.\n"
+ }
+
+_tabbed_pages_spec = {
+ 'file': 'tabbedpages',
+ 'kwds': {},
+ 'msg': "Toggle between the two tabs 'foo' and 'bar'\n"
+ "Add a tab by entering a suitable name for it.\n"
+ "Remove an existing tab by entering its name.\n"
+ "Remove all existing tabs.\n"
+ "<nothing> is an invalid add page and remove page name.\n"
+ }
+
+TextViewer_spec = {
+ 'file': 'textView',
+ 'kwds': {'title': 'Test textView',
+ 'text':'The quick brown fox jumps over the lazy dog.\n'*35,
+ '_htest': True},
+ 'msg': "Test for read-only property of text.\n"
+ "Text is selectable. Window is scrollable.",
+ }
+
+_tooltip_spec = {
+ 'file': 'ToolTip',
+ 'kwds': {},
+ 'msg': "Place mouse cursor over both the buttons\n"
+ "A tooltip should appear with some text."
+ }
+
+_tree_widget_spec = {
+ 'file': 'TreeWidget',
+ 'kwds': {},
+ 'msg': "The canvas is scrollable.\n"
+ "Click on folders upto to the lowest level."
+ }
+
+_undo_delegator_spec = {
+ 'file': 'UndoDelegator',
+ 'kwds': {},
+ 'msg': "Click [Undo] to undo any action.\n"
+ "Click [Redo] to redo any action.\n"
+ "Click [Dump] to dump the current state "
+ "by printing to the console or the IDLE shell.\n"
+ }
+
+_widget_redirector_spec = {
+ 'file': 'WidgetRedirector',
+ 'kwds': {},
+ 'msg': "Every text insert should be printed to the console."
+ "or the IDLE shell."
+ }
+
+def run(*tests):
+ root = tk.Tk()
+ root.title('IDLE htest')
+ root.resizable(0, 0)
+ _initializeTkVariantTests(root)
+
+ # A scrollable, Label-like, constant-width text widget.
+ frameLabel = tk.Frame(root, padx=10)
+ frameLabel.pack()
+ text = tk.Text(frameLabel, wrap='word')
+ text.configure(bg=root.cget('bg'), relief='flat', height=4, width=70)
+ scrollbar = tk.Scrollbar(frameLabel, command=text.yview)
+ text.config(yscrollcommand=scrollbar.set)
+ scrollbar.pack(side='right', fill='y', expand=False)
+ text.pack(side='left', fill='both', expand=True)
+
+ test_list = [] # List of tuples of the form (spec, callable widget)
+ if tests:
+ for test in tests:
+ test_spec = globals()[test.__name__ + '_spec']
+ test_spec['name'] = test.__name__
+ test_list.append((test_spec, test))
+ else:
+ for k, d in globals().items():
+ if k.endswith('_spec'):
+ test_name = k[:-5]
+ test_spec = d
+ test_spec['name'] = test_name
+ mod = import_module('idlelib.' + test_spec['file'])
+ test = getattr(mod, test_name)
+ test_list.append((test_spec, test))
+
+ test_name = [tk.StringVar('')]
+ callable_object = [None]
+ test_kwds = [None]
+
+
+ def next():
+ if len(test_list) == 1:
+ next_button.pack_forget()
+ test_spec, callable_object[0] = test_list.pop()
+ test_kwds[0] = test_spec['kwds']
+ test_kwds[0]['parent'] = root
+ test_name[0].set('Test ' + test_spec['name'])
+
+ text.configure(state='normal') # enable text editing
+ text.delete('1.0','end')
+ text.insert("1.0",test_spec['msg'])
+ text.configure(state='disabled') # preserve read-only property
+
+ def run_test():
+ widget = callable_object[0](**test_kwds[0])
+ try:
+ print(widget.result)
+ except AttributeError:
+ pass
+
+ button = tk.Button(root, textvariable=test_name[0], command=run_test)
+ button.pack()
+ next_button = tk.Button(root, text="Next", command=next)
+ next_button.pack()
+
+ next()
+
+ root.mainloop()
+
+if __name__ == '__main__':
+ run()
diff --git a/Lib/idlelib/idle_test/mock_idle.py b/Lib/idlelib/idle_test/mock_idle.py
new file mode 100644
index 0000000..d76b783
--- /dev/null
+++ b/Lib/idlelib/idle_test/mock_idle.py
@@ -0,0 +1,52 @@
+'''Mock classes that imitate idlelib modules or classes.
+
+Attributes and methods will be added as needed for tests.
+'''
+
+from idlelib.idle_test.mock_tk import Text
+
+class Func(object):
+ '''Mock function captures args and returns result set by test.
+
+ Attributes:
+ self.called - records call even if no args, kwds passed.
+ self.result - set by init, returned by call.
+ self.args - captures positional arguments.
+ self.kwds - captures keyword arguments.
+
+ Most common use will probably be to mock methods.
+ Mock_tk.Var and Mbox_func are special variants of this.
+ '''
+ def __init__(self, result=None):
+ self.called = False
+ self.result = result
+ self.args = None
+ self.kwds = None
+ def __call__(self, *args, **kwds):
+ self.called = True
+ self.args = args
+ self.kwds = kwds
+ return self.result
+
+
+class Editor(object):
+ '''Minimally imitate EditorWindow.EditorWindow class.
+ '''
+ def __init__(self, flist=None, filename=None, key=None, root=None):
+ self.text = Text()
+ self.undo = UndoDelegator()
+
+ def get_selection_indices(self):
+ first = self.text.index('1.0')
+ last = self.text.index('end')
+ return first, last
+
+
+class UndoDelegator(object):
+ '''Minimally imitate UndoDelegator,UndoDelegator class.
+ '''
+ # A real undo block is only needed for user interaction.
+ def undo_block_start(*args):
+ pass
+ def undo_block_stop(*args):
+ pass
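+
+# Example use of Func (a sketch): replace a method on the object under test,
+# trigger the code that should call it, then inspect the captured call, as
+# test_autocomplete.py does:
+#
+#     instance.open_completions = Func(result=True)
+#     instance.autocomplete_event(event)
+#     assert instance.open_completions.called
+#     assert instance.open_completions.args == (False, True, True)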
diff --git a/Lib/idlelib/idle_test/mock_tk.py b/Lib/idlelib/idle_test/mock_tk.py
new file mode 100644
index 0000000..612ea1a
--- /dev/null
+++ b/Lib/idlelib/idle_test/mock_tk.py
@@ -0,0 +1,298 @@
+"""Classes that replace tkinter gui objects used by an object being tested.
+
+A gui object is anything with a master or parent parameter, which is
+typically required in spite of what the doc strings say.
+"""
+
+class Event(object):
+ '''Minimal mock with attributes for testing event handlers.
+
+ This is not a gui object, but is used as an argument for callbacks
+ that access attributes of the event passed. If a callback ignores
+ the event, other than the fact that it happened, pass 'event'.
+
+ Keyboard, mouse, window, and other sources generate Event instances.
+ Event instances have the following attributes: serial (number of
+ event), time (of event), type (of event as number), widget (in which
+ event occurred), and x,y (position of mouse). There are other
+ attributes for specific events, such as keycode for key events.
+ tkinter.Event.__doc__ has more but is still not complete.
+ '''
+ def __init__(self, **kwds):
+ "Create event with attributes needed for test"
+ self.__dict__.update(kwds)
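+
+# Example (a sketch): Event(x=10, y=20) gives an object whose .x and .y a
+# handler can read; test_autocomplete.py constructs Event(mc_state=True).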
+
+class Var(object):
+ "Use for String/Int/BooleanVar: incomplete"
+ def __init__(self, master=None, value=None, name=None):
+ self.master = master
+ self.value = value
+ self.name = name
+ def set(self, value):
+ self.value = value
+ def get(self):
+ return self.value
+
+class Mbox_func(object):
+ """Generic mock for messagebox functions, which all have the same signature.
+
+ Instead of displaying a message box, the mock's call method saves the
+ arguments as instance attributes, which test functions can then examine.
+ The test can set the result returned by an ask function.
+ """
+ def __init__(self, result=None):
+ self.result = result # Return None for all show funcs
+ def __call__(self, title, message, *args, **kwds):
+ # Save all args for possible examination by tester
+ self.title = title
+ self.message = message
+ self.args = args
+ self.kwds = kwds
+ return self.result # Set by tester for ask functions
+
+class Mbox(object):
+ """Mock for tkinter.messagebox with an Mbox_func for each function.
+
+ This module was 'tkMessageBox' in 2.x; hence the 'import as' in 3.x.
+ Example usage in test_module.py for testing functions in module.py:
+ ---
+from idlelib.idle_test.mock_tk import Mbox
+import module
+
+orig_mbox = module.tkMessageBox
+showerror = Mbox.showerror # example, for attribute access in test methods
+
+class Test(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ module.tkMessageBox = Mbox
+
+ @classmethod
+ def tearDownClass(cls):
+ module.tkMessageBox = orig_mbox
+ ---
+ For 'ask' functions, set func.result return value before calling the method
+ that uses the message function. When tkMessageBox functions are the
+ only gui calls in a method, this replacement makes the method gui-free.
+ """
+ askokcancel = Mbox_func() # True or False
+ askquestion = Mbox_func() # 'yes' or 'no'
+ askretrycancel = Mbox_func() # True or False
+ askyesno = Mbox_func() # True or False
+ askyesnocancel = Mbox_func() # True, False, or None
+ showerror = Mbox_func() # None
+ showinfo = Mbox_func() # None
+ showwarning = Mbox_func() # None
+
+from _tkinter import TclError
+
+class Text(object):
+ """A semi-functional non-gui replacement for tkinter.Text text editors.
+
+ The mock's data model is that a text is a list of \n-terminated lines.
+ The mock adds an empty string at the beginning of the list so that the
+ indexes of actual lines start at 1, as with Tk. The methods never see this.
+ Tk initializes files with a terminal \n that cannot be deleted. It is
+ invisible in the sense that one cannot move the cursor beyond it.
+
+ This class is only tested (and valid) with strings of ascii chars.
+ For testing, we are not concerned with Tk Text's treatment of,
+ for instance, 0-width characters or character + accent.
+ """
+ def __init__(self, master=None, cnf={}, **kw):
+ '''Initialize mock, non-gui, text-only Text widget.
+
+ At present, all args are ignored. Almost all affect visual behavior.
+ There are just a few Text-only options that affect text behavior.
+ '''
+ self.data = ['', '\n']
+
+ def index(self, index):
+ "Return string version of index decoded according to current text."
+ return "%s.%s" % self._decode(index, endflag=1)
+
+ def _decode(self, index, endflag=0):
+ """Return a (line, char) tuple of int indexes into self.data.
+
+ This implements .index without converting the result back to a string.
+ The result is constrained by the number of lines and linelengths of
+ self.data. For many indexes, the result is initially (1, 0).
+
+ The input index may have any of several possible forms:
+ * line.char float: converted to 'line.char' string;
+ * 'line.char' string, where line and char are decimal integers;
+ * 'line.char lineend', where lineend='lineend' (and char is ignored);
+ * 'line.end', where end='end' (same as above);
+ * 'insert', the position before the terminal \n;
+ * 'end', whose meaning depends on the endflag passed to ._endex.
+ * 'sel.first' or 'sel.last', where sel is a tag -- not implemented.
+ """
+ if isinstance(index, (float, bytes)):
+ index = str(index)
+ try:
+ index=index.lower()
+ except AttributeError:
+ raise TclError('bad text index "%s"' % index)
+
+ lastline = len(self.data) - 1 # same as number of text lines
+ if index == 'insert':
+ return lastline, len(self.data[lastline]) - 1
+ elif index == 'end':
+ return self._endex(endflag)
+
+ line, char = index.split('.')
+ line = int(line)
+
+ # Out of bounds line becomes first or last ('end') index
+ if line < 1:
+ return 1, 0
+ elif line > lastline:
+ return self._endex(endflag)
+
+ linelength = len(self.data[line]) -1 # position before/at \n
+ if char.endswith(' lineend') or char == 'end':
+ return line, linelength
+ # Tk requires that ignored chars before ' lineend' be valid int
+
+ # Out of bounds char becomes first or last index of line
+ char = int(char)
+ if char < 0:
+ char = 0
+ elif char > linelength:
+ char = linelength
+ return line, char
+
+ def _endex(self, endflag):
+ '''Return position for 'end' or line overflow corresponding to endflag.
+
+ -1: position before terminal \n; for .insert(), .delete
+ 0: position after terminal \n; for .get, .delete index 1
+ 1: same viewed as beginning of non-existent next line (for .index)
+ '''
+ n = len(self.data)
+ if endflag == 1:
+ return n, 0
+ else:
+ n -= 1
+ return n, len(self.data[n]) + endflag
+
+
+ def insert(self, index, chars):
+ "Insert chars before the character at index."
+
+ if not chars: # ''.splitlines() is [], not ['']
+ return
+ chars = chars.splitlines(True)
+ if chars[-1][-1] == '\n':
+ chars.append('')
+ line, char = self._decode(index, -1)
+ before = self.data[line][:char]
+ after = self.data[line][char:]
+ self.data[line] = before + chars[0]
+ self.data[line+1:line+1] = chars[1:]
+ self.data[line+len(chars)-1] += after
+
+
+ def get(self, index1, index2=None):
+ "Return slice from index1 to index2 (default is 'index1+1')."
+
+ startline, startchar = self._decode(index1)
+ if index2 is None:
+ endline, endchar = startline, startchar+1
+ else:
+ endline, endchar = self._decode(index2)
+
+ if startline == endline:
+ return self.data[startline][startchar:endchar]
+ else:
+ lines = [self.data[startline][startchar:]]
+ for i in range(startline+1, endline):
+ lines.append(self.data[i])
+ lines.append(self.data[endline][:endchar])
+ return ''.join(lines)
+
+
+ def delete(self, index1, index2=None):
+ '''Delete slice from index1 to index2 (default is 'index1+1').
+
+ Adjust default index2 ('index1+1') for line ends.
+ Do not delete the terminal \n at the very end of self.data ([-1][-1]).
+ '''
+ startline, startchar = self._decode(index1, -1)
+ if index2 is None:
+ if startchar < len(self.data[startline])-1:
+ # not deleting \n
+ endline, endchar = startline, startchar+1
+ elif startline < len(self.data) - 1:
+ # deleting non-terminal \n, convert 'index1+1' to start of next line
+ endline, endchar = startline+1, 0
+ else:
+ # do not delete terminal \n if index1 == 'insert'
+ return
+ else:
+ endline, endchar = self._decode(index2, -1)
+ # restricting end position to insert position excludes terminal \n
+
+ if startline == endline and startchar < endchar:
+ self.data[startline] = self.data[startline][:startchar] + \
+ self.data[startline][endchar:]
+ elif startline < endline:
+ self.data[startline] = self.data[startline][:startchar] + \
+ self.data[endline][endchar:]
+ startline += 1
+ for i in range(startline, endline+1):
+ del self.data[startline]
+
+ def compare(self, index1, op, index2):
+ line1, char1 = self._decode(index1)
+ line2, char2 = self._decode(index2)
+ if op == '<':
+ return line1 < line2 or line1 == line2 and char1 < char2
+ elif op == '<=':
+ return line1 < line2 or line1 == line2 and char1 <= char2
+ elif op == '>':
+ return line1 > line2 or line1 == line2 and char1 > char2
+ elif op == '>=':
+ return line1 > line2 or line1 == line2 and char1 >= char2
+ elif op == '==':
+ return line1 == line2 and char1 == char2
+ elif op == '!=':
+ return line1 != line2 or char1 != char2
+ else:
+ raise TclError('''bad comparison operator "%s":'''
+ '''must be <, <=, ==, >=, >, or !=''' % op)
+
+ # The following Text methods normally do something and return None.
+ # Whether doing nothing is sufficient for a test will depend on the test.
+
+ def mark_set(self, name, index):
+ "Set mark *name* before the character at index."
+ pass
+
+ def mark_unset(self, *markNames):
+ "Delete all marks in markNames."
+
+ def tag_remove(self, tagName, index1, index2=None):
+ "Remove tag tagName from all characters between index1 and index2."
+ pass
+
+ # The following Text methods affect the graphics screen and return None.
+ # Doing nothing should always be sufficient for tests.
+
+ def scan_dragto(self, x, y):
+ "Adjust the view of the text according to scan_mark"
+
+ def scan_mark(self, x, y):
+ "Remember the current X, Y coordinates."
+
+ def see(self, index):
+ "Scroll screen to make the character at INDEX is visible."
+ pass
+
+ # The following is a Misc method inherited by Text.
+ # It should properly go in a Misc mock, but is included here for now.
+
+ def bind(self, sequence=None, func=None, add=None):
+ "Bind to this widget at event sequence a call to function func."
+ pass
diff --git a/Lib/idlelib/idle_test/test_autocomplete.py b/Lib/idlelib/idle_test/test_autocomplete.py
new file mode 100644
index 0000000..ee9d0ed
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_autocomplete.py
@@ -0,0 +1,143 @@
+import unittest
+from test.test_support import requires
+from Tkinter import Tk, Text, TclError
+
+import idlelib.AutoComplete as ac
+import idlelib.AutoCompleteWindow as acw
+import idlelib.macosxSupport as mac
+from idlelib.idle_test.mock_idle import Func
+from idlelib.idle_test.mock_tk import Event
+
+class AutoCompleteWindow:
+ def complete():
+ return
+
+class DummyEditwin:
+ def __init__(self, root, text):
+ self.root = root
+ self.text = text
+ self.indentwidth = 8
+ self.tabwidth = 8
+ self.context_use_ps1 = True
+
+
+class AutoCompleteTest(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ requires('gui')
+ cls.root = Tk()
+ mac.setupApp(cls.root, None)
+ cls.text = Text(cls.root)
+ cls.editor = DummyEditwin(cls.root, cls.text)
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.root.destroy()
+ del cls.text
+ del cls.editor
+ del cls.root
+
+ def setUp(self):
+ self.editor.text.delete('1.0', 'end')
+ self.autocomplete = ac.AutoComplete(self.editor)
+
+ def test_init(self):
+ self.assertEqual(self.autocomplete.editwin, self.editor)
+
+ def test_make_autocomplete_window(self):
+ testwin = self.autocomplete._make_autocomplete_window()
+ self.assertIsInstance(testwin, acw.AutoCompleteWindow)
+
+ def test_remove_autocomplete_window(self):
+ self.autocomplete.autocompletewindow = (
+ self.autocomplete._make_autocomplete_window())
+ self.autocomplete._remove_autocomplete_window()
+ self.assertIsNone(self.autocomplete.autocompletewindow)
+
+ def test_force_open_completions_event(self):
+ # Test that force_open_completions_event calls _open_completions
+ o_cs = Func()
+ self.autocomplete.open_completions = o_cs
+ self.autocomplete.force_open_completions_event('event')
+ self.assertEqual(o_cs.args, (True, False, True))
+
+ def test_try_open_completions_event(self):
+ Equal = self.assertEqual
+ autocomplete = self.autocomplete
+ trycompletions = self.autocomplete.try_open_completions_event
+ o_c_l = Func()
+ autocomplete._open_completions_later = o_c_l
+
+ # _open_completions_later should not be called with no text in editor
+ trycompletions('event')
+ Equal(o_c_l.args, None)
+
+ # _open_completions_later should be called with COMPLETE_ATTRIBUTES (1)
+ self.text.insert('1.0', 're.')
+ trycompletions('event')
+ Equal(o_c_l.args, (False, False, False, 1))
+
+ # _open_completions_later should be called with COMPLETE_FILES (2)
+ self.text.delete('1.0', 'end')
+ self.text.insert('1.0', '"./Lib/')
+ trycompletions('event')
+ Equal(o_c_l.args, (False, False, False, 2))
+
+ def test_autocomplete_event(self):
+ Equal = self.assertEqual
+ autocomplete = self.autocomplete
+
+ # Test that the autocomplete event is ignored if user is pressing a
+ # modifier key in addition to the tab key
+ ev = Event(mc_state=True)
+ self.assertIsNone(autocomplete.autocomplete_event(ev))
+ del ev.mc_state
+
+ # If autocomplete window is open, complete() method is called
+ testwin = self.autocomplete._make_autocomplete_window()
+ self.text.insert('1.0', 're.')
+ Equal(self.autocomplete.autocomplete_event(ev), 'break')
+
+ # If autocomplete window is not active or does not exist,
+ # open_completions is called. Return depends on its return.
+ autocomplete._remove_autocomplete_window()
+ o_cs = Func() # .result = None
+ autocomplete.open_completions = o_cs
+ Equal(self.autocomplete.autocomplete_event(ev), None)
+ Equal(o_cs.args, (False, True, True))
+ o_cs.result = True
+ Equal(self.autocomplete.autocomplete_event(ev), 'break')
+ Equal(o_cs.args, (False, True, True))
+
+ def test_open_completions_later(self):
+ # Test that autocomplete._delayed_completion_id is set
+ pass
+
+ def test_delayed_open_completions(self):
+ # Test that autocomplete._delayed_completion_id set to None and that
+ # open_completions only called if insertion index is the same as
+ # _delayed_completion_index
+ pass
+
+ def test_open_completions(self):
+ # Test completions of files and attributes as well as non-completion
+ # of errors
+ pass
+
+ def test_fetch_completions(self):
+ # Test that fetch_completions returns 2 lists:
+ # For attribute completion, a large list containing all variables, and
+ # a small list containing non-private variables.
+ # For file completion, a large list containing all files in the path,
+ # and a small list containing files that do not start with '.'
+ pass
+
+ def test_get_entity(self):
+ # Test that a name is in the namespace of sys.modules and
+ # __main__.__dict__
+ pass
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/Lib/idlelib/idle_test/test_autoexpand.py b/Lib/idlelib/idle_test/test_autoexpand.py
new file mode 100644
index 0000000..bdb7e8b
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_autoexpand.py
@@ -0,0 +1,141 @@
+"""Unit tests for idlelib.AutoExpand"""
+import unittest
+from test.test_support import requires
+from Tkinter import Text, Tk
+#from idlelib.idle_test.mock_tk import Text
+from idlelib.AutoExpand import AutoExpand
+
+
+class Dummy_Editwin:
+ # AutoExpand.__init__ only needs .text
+ def __init__(self, text):
+ self.text = text
+
+class AutoExpandTest(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ if 'Tkinter' in str(Text):
+ requires('gui')
+ cls.tk = Tk()
+ cls.text = Text(cls.tk)
+ else:
+ cls.text = Text()
+ cls.auto_expand = AutoExpand(Dummy_Editwin(cls.text))
+
+ @classmethod
+ def tearDownClass(cls):
+ if hasattr(cls, 'tk'):
+ cls.tk.destroy()
+ del cls.tk
+ del cls.text, cls.auto_expand
+
+ def tearDown(self):
+ self.text.delete('1.0', 'end')
+
+ def test_get_prevword(self):
+ text = self.text
+ previous = self.auto_expand.getprevword
+ equal = self.assertEqual
+
+ equal(previous(), '')
+
+ text.insert('insert', 't')
+ equal(previous(), 't')
+
+ text.insert('insert', 'his')
+ equal(previous(), 'this')
+
+ text.insert('insert', ' ')
+ equal(previous(), '')
+
+ text.insert('insert', 'is')
+ equal(previous(), 'is')
+
+ text.insert('insert', '\nsample\nstring')
+ equal(previous(), 'string')
+
+ text.delete('3.0', 'insert')
+ equal(previous(), '')
+
+ text.delete('1.0', 'end')
+ equal(previous(), '')
+
+ def test_before_only(self):
+ previous = self.auto_expand.getprevword
+ expand = self.auto_expand.expand_word_event
+ equal = self.assertEqual
+
+ self.text.insert('insert', 'ab ac bx ad ab a')
+ equal(self.auto_expand.getwords(), ['ab', 'ad', 'ac', 'a'])
+ expand('event')
+ equal(previous(), 'ab')
+ expand('event')
+ equal(previous(), 'ad')
+ expand('event')
+ equal(previous(), 'ac')
+ expand('event')
+ equal(previous(), 'a')
+
+ def test_after_only(self):
+ # Also add punctuation 'noise' that should be ignored.
+ text = self.text
+ previous = self.auto_expand.getprevword
+ expand = self.auto_expand.expand_word_event
+ equal = self.assertEqual
+
+ text.insert('insert', 'a, [ab] ac: () bx"" cd ac= ad ya')
+ text.mark_set('insert', '1.1')
+ equal(self.auto_expand.getwords(), ['ab', 'ac', 'ad', 'a'])
+ expand('event')
+ equal(previous(), 'ab')
+ expand('event')
+ equal(previous(), 'ac')
+ expand('event')
+ equal(previous(), 'ad')
+ expand('event')
+ equal(previous(), 'a')
+
+ def test_both_before_after(self):
+ text = self.text
+ previous = self.auto_expand.getprevword
+ expand = self.auto_expand.expand_word_event
+ equal = self.assertEqual
+
+ text.insert('insert', 'ab xy yz\n')
+ text.insert('insert', 'a ac by ac')
+
+ text.mark_set('insert', '2.1')
+ equal(self.auto_expand.getwords(), ['ab', 'ac', 'a'])
+ expand('event')
+ equal(previous(), 'ab')
+ expand('event')
+ equal(previous(), 'ac')
+ expand('event')
+ equal(previous(), 'a')
+
+ def test_other_expand_cases(self):
+ text = self.text
+ expand = self.auto_expand.expand_word_event
+ equal = self.assertEqual
+
+ # no expansion candidate found
+ equal(self.auto_expand.getwords(), [])
+ equal(expand('event'), 'break')
+
+ text.insert('insert', 'bx cy dz a')
+ equal(self.auto_expand.getwords(), [])
+
+ # reset state by successfully expanding once
+ # move cursor to another position and expand again
+ text.insert('insert', 'ac xy a ac ad a')
+ text.mark_set('insert', '1.7')
+ expand('event')
+ initial_state = self.auto_expand.state
+ text.mark_set('insert', '1.end')
+ expand('event')
+ new_state = self.auto_expand.state
+ self.assertNotEqual(initial_state, new_state)
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/Lib/idlelib/idle_test/test_calltips.py b/Lib/idlelib/idle_test/test_calltips.py
new file mode 100644
index 0000000..8371809
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_calltips.py
@@ -0,0 +1,180 @@
+import unittest
+import idlelib.CallTips as ct
+CTi = ct.CallTips() # needed for get_entity test in 2.7
+import textwrap
+import types
+import warnings
+
+default_tip = ''
+
+# Test Class TC is used in multiple get_argspec test methods
+class TC(object):
+ 'doc'
+ tip = "(ai=None, *args)"
+ def __init__(self, ai=None, *b): 'doc'
+ __init__.tip = "(self, ai=None, *args)"
+ def t1(self): 'doc'
+ t1.tip = "(self)"
+ def t2(self, ai, b=None): 'doc'
+ t2.tip = "(self, ai, b=None)"
+ def t3(self, ai, *args): 'doc'
+ t3.tip = "(self, ai, *args)"
+ def t4(self, *args): 'doc'
+ t4.tip = "(self, *args)"
+ def t5(self, ai, b=None, *args, **kw): 'doc'
+ t5.tip = "(self, ai, b=None, *args, **kwargs)"
+ def t6(no, self): 'doc'
+ t6.tip = "(no, self)"
+ def __call__(self, ci): 'doc'
+ __call__.tip = "(self, ci)"
+ # attaching .tip to wrapped methods does not work
+ @classmethod
+ def cm(cls, a): 'doc'
+ @staticmethod
+ def sm(b): 'doc'
+
+tc = TC()
+
+signature = ct.get_arg_text # 2.7 and 3.x use different functions
+class Get_signatureTest(unittest.TestCase):
+ # The signature function must return a string, even if blank.
+ # Test a variety of objects to be sure that none cause it to raise
+ # (quite aside from getting as correct an answer as possible).
+ # The tests of builtins may break if the docstrings change,
+ # but a red buildbot is better than a user crash (as has happened).
+ # For a simple mismatch, change the expected output to the actual.
+
+ def test_builtins(self):
+ # 2.7 puts '()\n' where 3.x does not, other minor differences
+
+ # Python class that inherits builtin methods
+ class List(list): "List() doc"
+ # Simulate builtin with no docstring for default argspec test
+ class SB: __call__ = None
+
+ def gtest(obj, out):
+ self.assertEqual(signature(obj), out)
+
+ gtest(List, '()\n' + List.__doc__)
+ gtest(list.__new__,
+ 'T.__new__(S, ...) -> a new object with type S, a subtype of T')
+ gtest(list.__init__,
+ 'x.__init__(...) initializes x; see help(type(x)) for signature')
+ append_doc = "L.append(object) -- append object to end"
+ gtest(list.append, append_doc)
+ gtest([].append, append_doc)
+ gtest(List.append, append_doc)
+
+ gtest(types.MethodType, '()\ninstancemethod(function, instance, class)')
+ gtest(SB(), default_tip)
+
+ def test_signature_wrap(self):
+ # This is also a test of an old-style class
+ self.assertEqual(signature(textwrap.TextWrapper), '''\
+(width=70, initial_indent='', subsequent_indent='', expand_tabs=True,
+ replace_whitespace=True, fix_sentence_endings=False, break_long_words=True,
+ drop_whitespace=True, break_on_hyphens=True)''')
+
+ def test_docline_truncation(self):
+ def f(): pass
+ f.__doc__ = 'a'*300
+ self.assertEqual(signature(f), '()\n' + 'a' * (ct._MAX_COLS-3) + '...')
+
+ def test_multiline_docstring(self):
+ # Test fewer lines than max.
+ self.assertEqual(signature(list),
+ "()\nlist() -> new empty list\n"
+ "list(iterable) -> new list initialized from iterable's items")
+
+ # Test max lines and line (currently) too long.
+ def f():
+ pass
+ s = 'a\nb\nc\nd\n'
+ f.__doc__ = s + 300 * 'e' + 'f'
+ self.assertEqual(signature(f),
+ '()\n' + s + (ct._MAX_COLS - 3) * 'e' + '...')
+
+ def test_functions(self):
+ def t1(): 'doc'
+ t1.tip = "()"
+ def t2(a, b=None): 'doc'
+ t2.tip = "(a, b=None)"
+ def t3(a, *args): 'doc'
+ t3.tip = "(a, *args)"
+ def t4(*args): 'doc'
+ t4.tip = "(*args)"
+ def t5(a, b=None, *args, **kwds): 'doc'
+ t5.tip = "(a, b=None, *args, **kwargs)"
+
+ for func in (t1, t2, t3, t4, t5, TC):
+ self.assertEqual(signature(func), func.tip + '\ndoc')
+
+ def test_methods(self):
+ for meth in (TC.t1, TC.t2, TC.t3, TC.t4, TC.t5, TC.t6, TC.__call__):
+ self.assertEqual(signature(meth), meth.tip + "\ndoc")
+ self.assertEqual(signature(TC.cm), "(a)\ndoc")
+ self.assertEqual(signature(TC.sm), "(b)\ndoc")
+
+ def test_bound_methods(self):
+ # test that first parameter is correctly removed from argspec
+ for meth, mtip in ((tc.t1, "()"), (tc.t4, "(*args)"), (tc.t6, "(self)"),
+ (tc.__call__, '(ci)'), (tc, '(ci)'), (TC.cm, "(a)"),):
+ self.assertEqual(signature(meth), mtip + "\ndoc")
+
+ def test_starred_parameter(self):
+ # test that starred first parameter is *not* removed from argspec
+ class C:
+ def m1(*args): pass
+ def m2(**kwds): pass
+ def f1(args, kwargs, *a, **k): pass
+ def f2(args, kwargs, args1, kwargs1, *a, **k): pass
+ c = C()
+ self.assertEqual(signature(C.m1), '(*args)')
+ self.assertEqual(signature(c.m1), '(*args)')
+ self.assertEqual(signature(C.m2), '(**kwargs)')
+ self.assertEqual(signature(c.m2), '(**kwargs)')
+ self.assertEqual(signature(f1), '(args, kwargs, *args1, **kwargs1)')
+ self.assertEqual(signature(f2),
+ '(args, kwargs, args1, kwargs1, *args2, **kwargs2)')
+
+ def test_no_docstring(self):
+ def nd(s): pass
+ TC.nd = nd
+ self.assertEqual(signature(nd), "(s)")
+ self.assertEqual(signature(TC.nd), "(s)")
+ self.assertEqual(signature(tc.nd), "()")
+
+ def test_attribute_exception(self):
+ class NoCall(object):
+ def __getattr__(self, name):
+ raise BaseException
+ class Call(NoCall):
+ def __call__(self, ci):
+ pass
+ for meth, mtip in ((NoCall, '()'), (Call, '()'),
+ (NoCall(), ''), (Call(), '(ci)')):
+ self.assertEqual(signature(meth), mtip)
+
+ def test_non_callables(self):
+ for obj in (0, 0.0, '0', b'0', [], {}):
+ self.assertEqual(signature(obj), '')
+
+class Get_entityTest(unittest.TestCase):
+ # In 3.x, get_entity changed from 'instance method' to module function
+ # since 'self' is not used. Use a dummy instance until 2.7 is changed too.
+ def test_bad_entity(self):
+ self.assertIsNone(CTi.get_entity('1/0'))
+ def test_good_entity(self):
+ self.assertIs(CTi.get_entity('int'), int)
+
+class Py2Test(unittest.TestCase):
+ def test_paramtuple_float(self):
+ # 18539: (a,b) becomes '.0' in code object; change that but not 0.0
+ with warnings.catch_warnings():
+ # Suppress the py3 deprecation warning about parameter unpacking
+ warnings.simplefilter("ignore")
+ exec "def f((a,b), c=0.0): pass"
+ self.assertEqual(signature(f), '(<tuple>, c=0.0)')
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2, exit=False)
diff --git a/Lib/idlelib/idle_test/test_config_name.py b/Lib/idlelib/idle_test/test_config_name.py
new file mode 100644
index 0000000..4403f87
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_config_name.py
@@ -0,0 +1,75 @@
+"""Unit tests for idlelib.configSectionNameDialog"""
+import unittest
+from idlelib.idle_test.mock_tk import Var, Mbox
+from idlelib import configSectionNameDialog as name_dialog_module
+
+name_dialog = name_dialog_module.GetCfgSectionNameDialog
+
+class Dummy_name_dialog(object):
+ # Mock for testing the following methods of name_dialog
+ name_ok = name_dialog.name_ok.im_func
+ Ok = name_dialog.Ok.im_func
+ Cancel = name_dialog.Cancel.im_func
+ # Attributes, constant or variable, needed for tests
+ used_names = ['used']
+ name = Var()
+ result = None
+ destroyed = False
+ def destroy(self):
+ self.destroyed = True
+
+# name_ok calls Mbox.showerror if name is not ok
+orig_mbox = name_dialog_module.tkMessageBox
+showerror = Mbox.showerror
+
+class ConfigNameTest(unittest.TestCase):
+ dialog = Dummy_name_dialog()
+
+ @classmethod
+ def setUpClass(cls):
+ name_dialog_module.tkMessageBox = Mbox
+
+ @classmethod
+ def tearDownClass(cls):
+ name_dialog_module.tkMessageBox = orig_mbox
+
+ def test_blank_name(self):
+ self.dialog.name.set(' ')
+ self.assertEqual(self.dialog.name_ok(), '')
+ self.assertEqual(showerror.title, 'Name Error')
+ self.assertIn('No', showerror.message)
+
+ def test_used_name(self):
+ self.dialog.name.set('used')
+ self.assertEqual(self.dialog.name_ok(), '')
+ self.assertEqual(showerror.title, 'Name Error')
+ self.assertIn('use', showerror.message)
+
+ def test_long_name(self):
+ self.dialog.name.set('good'*8)
+ self.assertEqual(self.dialog.name_ok(), '')
+ self.assertEqual(showerror.title, 'Name Error')
+ self.assertIn('too long', showerror.message)
+
+ def test_good_name(self):
+ self.dialog.name.set(' good ')
+ showerror.title = 'No Error' # should not be called
+ self.assertEqual(self.dialog.name_ok(), 'good')
+ self.assertEqual(showerror.title, 'No Error')
+
+ def test_ok(self):
+ self.dialog.destroyed = False
+ self.dialog.name.set('good')
+ self.dialog.Ok()
+ self.assertEqual(self.dialog.result, 'good')
+ self.assertTrue(self.dialog.destroyed)
+
+ def test_cancel(self):
+ self.dialog.destroyed = False
+ self.dialog.Cancel()
+ self.assertEqual(self.dialog.result, '')
+ self.assertTrue(self.dialog.destroyed)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2, exit=False)
diff --git a/Lib/idlelib/idle_test/test_delegator.py b/Lib/idlelib/idle_test/test_delegator.py
new file mode 100644
index 0000000..b8ae5ee
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_delegator.py
@@ -0,0 +1,37 @@
+import unittest
+from idlelib.Delegator import Delegator
+
+class DelegatorTest(unittest.TestCase):
+
+ def test_mydel(self):
+ # test a simple use scenario
+
+ # initialize
+ mydel = Delegator(int)
+ self.assertIs(mydel.delegate, int)
+ self.assertEqual(mydel._Delegator__cache, set())
+
+ # add an attribute:
+ self.assertRaises(AttributeError, mydel.__getattr__, 'xyz')
+ bl = mydel.bit_length
+ self.assertIs(bl, int.bit_length)
+ self.assertIs(mydel.__dict__['bit_length'], int.bit_length)
+ self.assertEqual(mydel._Delegator__cache, {'bit_length'})
+
+ # add a second attribute
+ mydel.numerator
+ self.assertEqual(mydel._Delegator__cache, {'bit_length', 'numerator'})
+
+ # delete the second (which, however, leaves it in the name cache)
+ del mydel.numerator
+ self.assertNotIn('numerator', mydel.__dict__)
+ self.assertIn('numerator', mydel._Delegator__cache)
+
+ # reset by calling .setdelegate, which calls .resetcache
+ mydel.setdelegate(float)
+ self.assertIs(mydel.delegate, float)
+ self.assertNotIn('bit_length', mydel.__dict__)
+ self.assertEqual(mydel._Delegator__cache, set())
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2, exit=False)
diff --git a/Lib/idlelib/idle_test/test_formatparagraph.py b/Lib/idlelib/idle_test/test_formatparagraph.py
new file mode 100644
index 0000000..07bbf16
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_formatparagraph.py
@@ -0,0 +1,377 @@
+# Test the functions and main class method of FormatParagraph.py
+import unittest
+from idlelib import FormatParagraph as fp
+from idlelib.EditorWindow import EditorWindow
+from Tkinter import Tk, Text, TclError
+from test.test_support import requires
+
+
+class Is_Get_Test(unittest.TestCase):
+ """Test the is_ and get_ functions"""
+ test_comment = '# This is a comment'
+ test_nocomment = 'This is not a comment'
+ trailingws_comment = '# This is a comment '
+ leadingws_comment = ' # This is a comment'
+ leadingws_nocomment = ' This is not a comment'
+
+ def test_is_all_white(self):
+ self.assertTrue(fp.is_all_white(''))
+ self.assertTrue(fp.is_all_white('\t\n\r\f\v'))
+ self.assertFalse(fp.is_all_white(self.test_comment))
+
+ def test_get_indent(self):
+ Equal = self.assertEqual
+ Equal(fp.get_indent(self.test_comment), '')
+ Equal(fp.get_indent(self.trailingws_comment), '')
+ Equal(fp.get_indent(self.leadingws_comment), ' ')
+ Equal(fp.get_indent(self.leadingws_nocomment), ' ')
+
+ def test_get_comment_header(self):
+ Equal = self.assertEqual
+ # Test comment strings
+ Equal(fp.get_comment_header(self.test_comment), '#')
+ Equal(fp.get_comment_header(self.trailingws_comment), '#')
+ Equal(fp.get_comment_header(self.leadingws_comment), ' #')
+ # Test non-comment strings
+ Equal(fp.get_comment_header(self.leadingws_nocomment), ' ')
+ Equal(fp.get_comment_header(self.test_nocomment), '')
+
+
+class FindTest(unittest.TestCase):
+ """Test the find_paragraph function in FormatParagraph.
+
+ Using the runcase() function, find_paragraph() is called with 'mark' set at
+ multiple indexes before and inside the test paragraph.
+
+ It appears that code with the same indentation as a quoted string is grouped
+ as part of the same paragraph, which is probably incorrect behavior.
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ from idlelib.idle_test.mock_tk import Text
+ cls.text = Text()
+
+ def runcase(self, inserttext, stopline, expected):
+ # Check that find_paragraph returns the expected paragraph when
+ # the mark index is set to beginning, middle, end of each line
+ # up to but not including the stop line
+ text = self.text
+ text.insert('1.0', inserttext)
+ for line in range(1, stopline):
+ linelength = int(text.index("%d.end" % line).split('.')[1])
+ for col in (0, linelength//2, linelength):
+ tempindex = "%d.%d" % (line, col)
+ self.assertEqual(fp.find_paragraph(text, tempindex), expected)
+ text.delete('1.0', 'end')
+
+ def test_find_comment(self):
+ comment = (
+ "# Comment block with no blank lines before\n"
+ "# Comment line\n"
+ "\n")
+ self.runcase(comment, 3, ('1.0', '3.0', '#', comment[0:58]))
+
+ comment = (
+ "\n"
+ "# Comment block with whitespace line before and after\n"
+ "# Comment line\n"
+ "\n")
+ self.runcase(comment, 4, ('2.0', '4.0', '#', comment[1:70]))
+
+ comment = (
+ "\n"
+ " # Indented comment block with whitespace before and after\n"
+ " # Comment line\n"
+ "\n")
+ self.runcase(comment, 4, ('2.0', '4.0', ' #', comment[1:82]))
+
+ comment = (
+ "\n"
+ "# Single line comment\n"
+ "\n")
+ self.runcase(comment, 3, ('2.0', '3.0', '#', comment[1:23]))
+
+ comment = (
+ "\n"
+ " # Single line comment with leading whitespace\n"
+ "\n")
+ self.runcase(comment, 3, ('2.0', '3.0', ' #', comment[1:51]))
+
+ comment = (
+ "\n"
+ "# Comment immediately followed by code\n"
+ "x = 42\n"
+ "\n")
+ self.runcase(comment, 3, ('2.0', '3.0', '#', comment[1:40]))
+
+ comment = (
+ "\n"
+ " # Indented comment immediately followed by code\n"
+ "x = 42\n"
+ "\n")
+ self.runcase(comment, 3, ('2.0', '3.0', ' #', comment[1:53]))
+
+ comment = (
+ "\n"
+ "# Comment immediately followed by indented code\n"
+ " x = 42\n"
+ "\n")
+ self.runcase(comment, 3, ('2.0', '3.0', '#', comment[1:49]))
+
+ def test_find_paragraph(self):
+ teststring = (
+ '"""String with no blank lines before\n'
+ 'String line\n'
+ '"""\n'
+ '\n')
+ self.runcase(teststring, 4, ('1.0', '4.0', '', teststring[0:53]))
+
+ teststring = (
+ "\n"
+ '"""String with whitespace line before and after\n'
+ 'String line.\n'
+ '"""\n'
+ '\n')
+ self.runcase(teststring, 5, ('2.0', '5.0', '', teststring[1:66]))
+
+ teststring = (
+ '\n'
+ ' """Indented string with whitespace before and after\n'
+ ' Comment string.\n'
+ ' """\n'
+ '\n')
+ self.runcase(teststring, 5, ('2.0', '5.0', ' ', teststring[1:85]))
+
+ teststring = (
+ '\n'
+ '"""Single line string."""\n'
+ '\n')
+ self.runcase(teststring, 3, ('2.0', '3.0', '', teststring[1:27]))
+
+ teststring = (
+ '\n'
+ ' """Single line string with leading whitespace."""\n'
+ '\n')
+ self.runcase(teststring, 3, ('2.0', '3.0', ' ', teststring[1:55]))
+
+
+class ReformatFunctionTest(unittest.TestCase):
+ """Test the reformat_paragraph function without the editor window."""
+
+ def test_reformat_paragraph(self):
+ Equal = self.assertEqual
+ reform = fp.reformat_paragraph
+ hw = "O hello world"
+ Equal(reform(' ', 1), ' ')
+ Equal(reform("Hello world", 20), "Hello world")
+
+ # Test without leading newline
+ Equal(reform(hw, 1), "O\nhello\nworld")
+ Equal(reform(hw, 6), "O\nhello\nworld")
+ Equal(reform(hw, 7), "O hello\nworld")
+ Equal(reform(hw, 12), "O hello\nworld")
+ Equal(reform(hw, 13), "O hello world")
+
+ # Test with leading newline
+ hw = "\nO hello world"
+ Equal(reform(hw, 1), "\nO\nhello\nworld")
+ Equal(reform(hw, 6), "\nO\nhello\nworld")
+ Equal(reform(hw, 7), "\nO hello\nworld")
+ Equal(reform(hw, 12), "\nO hello\nworld")
+ Equal(reform(hw, 13), "\nO hello world")
+
+
+class ReformatCommentTest(unittest.TestCase):
+ """Test the reformat_comment function without the editor window."""
+
+ def test_reformat_comment(self):
+ Equal = self.assertEqual
+
+ # reformat_comment formats to a minimum of 20 characters
+ test_string = (
+ " \"\"\"this is a test of a reformat for a triple quoted string"
+ " will it reformat to less than 70 characters for me?\"\"\"")
+ result = fp.reformat_comment(test_string, 70, " ")
+ expected = (
+ " \"\"\"this is a test of a reformat for a triple quoted string will it\n"
+ " reformat to less than 70 characters for me?\"\"\"")
+ Equal(result, expected)
+
+ test_comment = (
+ "# this is a test of a reformat for a triple quoted string will "
+ "it reformat to less than 70 characters for me?")
+ result = fp.reformat_comment(test_comment, 70, "#")
+ expected = (
+ "# this is a test of a reformat for a triple quoted string will it\n"
+ "# reformat to less than 70 characters for me?")
+ Equal(result, expected)
+
+
+class FormatClassTest(unittest.TestCase):
+ def test_init_close(self):
+ instance = fp.FormatParagraph('editor')
+ self.assertEqual(instance.editwin, 'editor')
+ instance.close()
+ self.assertEqual(instance.editwin, None)
+
+
+# For testing format_paragraph_event, initialize FormatParagraph with
+# a mock Editor with .text and .get_selection_indices. The text must
+# be a Text wrapper that adds two methods.
+
+# A real EditorWindow creates unneeded, time-consuming baggage and
+# sometimes emits shutdown warnings like this:
+# "warning: callback failed in WindowList <class '_tkinter.TclError'>
+# : invalid command name ".55131368.windows".
+# Calling EditorWindow._close in tearDownClass prevents this but causes
+# other problems (windows left open).
+
+class TextWrapper:
+ def __init__(self, master):
+ self.text = Text(master=master)
+ def __getattr__(self, name):
+ return getattr(self.text, name)
+ def undo_block_start(self): pass
+ def undo_block_stop(self): pass
+
+class Editor:
+ def __init__(self, root):
+ self.text = TextWrapper(root)
+    get_selection_indices = EditorWindow.get_selection_indices.im_func
+
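+# Note: ClassName.method.im_func is the Python 2 idiom for borrowing an
+# unbound method as a plain function so it can be reused on an unrelated
+# (mock) class, as Editor does above. A rough sketch of the idiom, with
+# illustrative names only:
+#
+#     class Original(object):
+#         def method(self):
+#             return self.value
+#
+#     class Borrower(object):
+#         value = 42
+#         method = Original.method.im_func  # plain function; rebinds to Borrower
+#
+#     assert Borrower().method() == 42
+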
+class FormatEventTest(unittest.TestCase):
+ """Test the formatting of text inside a Text widget.
+
+    This is done with FormatParagraph.format_paragraph_event,
+ which calls functions in the module as appropriate.
+ """
+ test_string = (
+ " '''this is a test of a reformat for a triple "
+ "quoted string will it reformat to less than 70 "
+ "characters for me?'''\n")
+ multiline_test_string = (
+ " '''The first line is under the max width.\n"
+ " The second line's length is way over the max width. It goes "
+ "on and on until it is over 100 characters long.\n"
+ " Same thing with the third line. It is also way over the max "
+ "width, but FormatParagraph will fix it.\n"
+ " '''\n")
+ multiline_test_comment = (
+ "# The first line is under the max width.\n"
+ "# The second line's length is way over the max width. It goes on "
+ "and on until it is over 100 characters long.\n"
+ "# Same thing with the third line. It is also way over the max "
+ "width, but FormatParagraph will fix it.\n"
+ "# The fourth line is short like the first line.")
+
+ @classmethod
+ def setUpClass(cls):
+ requires('gui')
+ cls.root = Tk()
+ editor = Editor(root=cls.root)
+ cls.text = editor.text.text # Test code does not need the wrapper.
+ cls.formatter = fp.FormatParagraph(editor).format_paragraph_event
+ # Sets the insert mark just after the re-wrapped and inserted text.
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.root.destroy()
+ del cls.root
+ del cls.text
+ del cls.formatter
+
+ def test_short_line(self):
+ self.text.insert('1.0', "Short line\n")
+ self.formatter("Dummy")
+ self.assertEqual(self.text.get('1.0', 'insert'), "Short line\n" )
+ self.text.delete('1.0', 'end')
+
+ def test_long_line(self):
+ text = self.text
+
+ # Set cursor ('insert' mark) to '1.0', within text.
+ text.insert('1.0', self.test_string)
+ text.mark_set('insert', '1.0')
+ self.formatter('ParameterDoesNothing', limit=70)
+ result = text.get('1.0', 'insert')
+ # find function includes \n
+ expected = (
+" '''this is a test of a reformat for a triple quoted string will it\n"
+" reformat to less than 70 characters for me?'''\n") # yes
+ self.assertEqual(result, expected)
+ text.delete('1.0', 'end')
+
+ # Select from 1.11 to line end.
+ text.insert('1.0', self.test_string)
+ text.tag_add('sel', '1.11', '1.end')
+ self.formatter('ParameterDoesNothing', limit=70)
+ result = text.get('1.0', 'insert')
+ # selection excludes \n
+ expected = (
+" '''this is a test of a reformat for a triple quoted string will it reformat\n"
+" to less than 70 characters for me?'''") # no
+ self.assertEqual(result, expected)
+ text.delete('1.0', 'end')
+
+ def test_multiple_lines(self):
+ text = self.text
+ # Select 2 long lines.
+ text.insert('1.0', self.multiline_test_string)
+ text.tag_add('sel', '2.0', '4.0')
+ self.formatter('ParameterDoesNothing', limit=70)
+ result = text.get('2.0', 'insert')
+ expected = (
+" The second line's length is way over the max width. It goes on and\n"
+" on until it is over 100 characters long. Same thing with the third\n"
+" line. It is also way over the max width, but FormatParagraph will\n"
+" fix it.\n")
+ self.assertEqual(result, expected)
+ text.delete('1.0', 'end')
+
+ def test_comment_block(self):
+ text = self.text
+
+ # Set cursor ('insert') to '1.0', within block.
+ text.insert('1.0', self.multiline_test_comment)
+ self.formatter('ParameterDoesNothing', limit=70)
+ result = text.get('1.0', 'insert')
+ expected = (
+"# The first line is under the max width. The second line's length is\n"
+"# way over the max width. It goes on and on until it is over 100\n"
+"# characters long. Same thing with the third line. It is also way over\n"
+"# the max width, but FormatParagraph will fix it. The fourth line is\n"
+"# short like the first line.\n")
+ self.assertEqual(result, expected)
+ text.delete('1.0', 'end')
+
+ # Select line 2, verify line 1 unaffected.
+ text.insert('1.0', self.multiline_test_comment)
+ text.tag_add('sel', '2.0', '3.0')
+ self.formatter('ParameterDoesNothing', limit=70)
+ result = text.get('1.0', 'insert')
+ expected = (
+"# The first line is under the max width.\n"
+"# The second line's length is way over the max width. It goes on and\n"
+"# on until it is over 100 characters long.\n")
+ self.assertEqual(result, expected)
+ text.delete('1.0', 'end')
+
+# The following block worked with EditorWindow but fails with the mock.
+# Lines 2 and 3 get pasted together even though the previous block left
+# the previous line alone. More investigation is needed.
+## # Select lines 3 and 4
+## text.insert('1.0', self.multiline_test_comment)
+## text.tag_add('sel', '3.0', '5.0')
+## self.formatter('ParameterDoesNothing')
+## result = text.get('3.0', 'insert')
+## expected = (
+##"# Same thing with the third line. It is also way over the max width,\n"
+##"# but FormatParagraph will fix it. The fourth line is short like the\n"
+##"# first line.\n")
+## self.assertEqual(result, expected)
+## text.delete('1.0', 'end')
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2, exit=2)
diff --git a/Lib/idlelib/idle_test/test_grep.py b/Lib/idlelib/idle_test/test_grep.py
new file mode 100644
index 0000000..e9f4f22
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_grep.py
@@ -0,0 +1,82 @@
+""" !Changing this line will break Test_findfile.test_found!
+Non-gui unit tests for idlelib.GrepDialog methods.
+default_command calls grep_it, which calls findfiles.
+An exception raised in one method will cause its callers to fail.
+Otherwise, tests are mostly independent.
+*** Currently only grep_it is tested.
+"""
+import unittest
+from test.test_support import captured_stdout, findfile
+from idlelib.idle_test.mock_tk import Var
+from idlelib.GrepDialog import GrepDialog
+import re
+
+__file__ = findfile('idlelib/idle_test') + '/test_grep.py'
+
+class Dummy_searchengine:
+    '''GrepDialog.__init__ calls parent SearchDialogBase which attaches the
+ passed in SearchEngine instance as attribute 'engine'. Only a few of the
+ many possible self.engine.x attributes are needed here.
+ '''
+ def getpat(self):
+ return self._pat
+
+searchengine = Dummy_searchengine()
+
+class Dummy_grep:
+ # Methods tested
+ #default_command = GrepDialog.default_command
+ grep_it = GrepDialog.grep_it.im_func
+ findfiles = GrepDialog.findfiles.im_func
+ # Other stuff needed
+ recvar = Var(False)
+ engine = searchengine
+ def close(self): # gui method
+ pass
+
+grep = Dummy_grep()
+
+class FindfilesTest(unittest.TestCase):
+    # findfiles is really a function, not a method, and could be an iterator.
+    # Test that passing a filename returns that filename.
+    # Test that idlelib has many .py files.
+    # Test that the recursive flag adds idle_test .py files.
+ pass
+
+class Grep_itTest(unittest.TestCase):
+ # Test captured reports with 0 and some hits.
+ # Should test file names, but Windows reports have mixed / and \ separators
+ # from incomplete replacement, so 'later'.
+
+ def report(self, pat):
+ grep.engine._pat = pat
+ with captured_stdout() as s:
+ grep.grep_it(re.compile(pat), __file__)
+ lines = s.getvalue().split('\n')
+ lines.pop() # remove bogus '' after last \n
+ return lines
+
+ def test_unfound(self):
+ pat = 'xyz*'*7
+ lines = self.report(pat)
+ self.assertEqual(len(lines), 2)
+ self.assertIn(pat, lines[0])
+ self.assertEqual(lines[1], 'No hits.')
+
+ def test_found(self):
+
+ pat = '""" !Changing this line will break Test_findfile.test_found!'
+ lines = self.report(pat)
+ self.assertEqual(len(lines), 5)
+ self.assertIn(pat, lines[0])
+ self.assertIn('py: 1:', lines[1]) # line number 1
+ self.assertIn('2', lines[3]) # hits found 2
+ self.assertTrue(lines[4].startswith('(Hint:'))
+
+class Default_commandTest(unittest.TestCase):
+    # To write this, move the OutputWindow import to the top of GrepDialog
+ # so it can be replaced by captured_stdout in class setup/teardown.
+ pass
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2, exit=False)
diff --git a/Lib/idlelib/idle_test/test_hyperparser.py b/Lib/idlelib/idle_test/test_hyperparser.py
new file mode 100644
index 0000000..b480684
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_hyperparser.py
@@ -0,0 +1,191 @@
+"""Unittest for idlelib.HyperParser"""
+import unittest
+from test.test_support import requires
+from Tkinter import Tk, Text
+from idlelib.EditorWindow import EditorWindow
+from idlelib.HyperParser import HyperParser
+
+class DummyEditwin:
+ def __init__(self, text):
+ self.text = text
+ self.indentwidth = 8
+ self.tabwidth = 8
+ self.context_use_ps1 = True
+ self.num_context_lines = 50, 500, 1000
+
+ _build_char_in_string_func = EditorWindow._build_char_in_string_func.im_func
+ is_char_in_string = EditorWindow.is_char_in_string.im_func
+
+
+class HyperParserTest(unittest.TestCase):
+ code = (
+ '"""This is a module docstring"""\n'
+ '# this line is a comment\n'
+ 'x = "this is a string"\n'
+ "y = 'this is also a string'\n"
+ 'l = [i for i in range(10)]\n'
+ 'm = [py*py for # comment\n'
+ ' py in l]\n'
+ 'x.__len__\n'
+ "z = ((r'asdf')+('a')))\n"
+ '[x for x in\n'
+ 'for = False\n'
+ )
+
+ @classmethod
+ def setUpClass(cls):
+ requires('gui')
+ cls.root = Tk()
+ cls.text = Text(cls.root)
+ cls.editwin = DummyEditwin(cls.text)
+
+ @classmethod
+ def tearDownClass(cls):
+ del cls.text, cls.editwin
+ cls.root.destroy()
+ del cls.root
+
+ def setUp(self):
+ self.text.insert('insert', self.code)
+
+ def tearDown(self):
+ self.text.delete('1.0', 'end')
+ self.editwin.context_use_ps1 = True
+
+ def get_parser(self, index):
+ """
+ Return a parser object with index at 'index'
+ """
+ return HyperParser(self.editwin, index)
+
+ def test_init(self):
+ """
+ test corner cases in the init method
+ """
+ with self.assertRaises(ValueError) as ve:
+ self.text.tag_add('console', '1.0', '1.end')
+ p = self.get_parser('1.5')
+ self.assertIn('precedes', str(ve.exception))
+
+ # test without ps1
+ self.editwin.context_use_ps1 = False
+
+        # number of lines fewer than 50
+ p = self.get_parser('end')
+ self.assertEqual(p.rawtext, self.text.get('1.0', 'end'))
+
+ # number of lines greater than 50
+ self.text.insert('end', self.text.get('1.0', 'end')*4)
+ p = self.get_parser('54.5')
+
+ def test_is_in_string(self):
+ get = self.get_parser
+
+ p = get('1.0')
+ self.assertFalse(p.is_in_string())
+ p = get('1.4')
+ self.assertTrue(p.is_in_string())
+ p = get('2.3')
+ self.assertFalse(p.is_in_string())
+ p = get('3.3')
+ self.assertFalse(p.is_in_string())
+ p = get('3.7')
+ self.assertTrue(p.is_in_string())
+ p = get('4.6')
+ self.assertTrue(p.is_in_string())
+
+ def test_is_in_code(self):
+ get = self.get_parser
+
+ p = get('1.0')
+ self.assertTrue(p.is_in_code())
+ p = get('1.1')
+ self.assertFalse(p.is_in_code())
+ p = get('2.5')
+ self.assertFalse(p.is_in_code())
+ p = get('3.4')
+ self.assertTrue(p.is_in_code())
+ p = get('3.6')
+ self.assertFalse(p.is_in_code())
+ p = get('4.14')
+ self.assertFalse(p.is_in_code())
+
+ def test_get_surrounding_bracket(self):
+ get = self.get_parser
+
+ def without_mustclose(parser):
+ # a utility function to get surrounding bracket
+ # with mustclose=False
+ return parser.get_surrounding_brackets(mustclose=False)
+
+ def with_mustclose(parser):
+ # a utility function to get surrounding bracket
+ # with mustclose=True
+ return parser.get_surrounding_brackets(mustclose=True)
+
+ p = get('3.2')
+ self.assertIsNone(with_mustclose(p))
+ self.assertIsNone(without_mustclose(p))
+
+ p = get('5.6')
+ self.assertTupleEqual(without_mustclose(p), ('5.4', '5.25'))
+ self.assertTupleEqual(without_mustclose(p), with_mustclose(p))
+
+ p = get('5.23')
+ self.assertTupleEqual(without_mustclose(p), ('5.21', '5.24'))
+ self.assertTupleEqual(without_mustclose(p), with_mustclose(p))
+
+ p = get('6.15')
+ self.assertTupleEqual(without_mustclose(p), ('6.4', '6.end'))
+ self.assertIsNone(with_mustclose(p))
+
+ p = get('9.end')
+ self.assertIsNone(with_mustclose(p))
+ self.assertIsNone(without_mustclose(p))
+
+ def test_get_expression(self):
+ get = self.get_parser
+
+ p = get('4.2')
+ self.assertEqual(p.get_expression(), 'y ')
+
+ p = get('4.7')
+ with self.assertRaises(ValueError) as ve:
+ p.get_expression()
+ self.assertIn('is inside a code', str(ve.exception))
+
+ p = get('5.25')
+ self.assertEqual(p.get_expression(), 'range(10)')
+
+ p = get('6.7')
+ self.assertEqual(p.get_expression(), 'py')
+
+ p = get('6.8')
+ self.assertEqual(p.get_expression(), '')
+
+ p = get('7.9')
+ self.assertEqual(p.get_expression(), 'py')
+
+ p = get('8.end')
+ self.assertEqual(p.get_expression(), 'x.__len__')
+
+ p = get('9.13')
+ self.assertEqual(p.get_expression(), "r'asdf'")
+
+ p = get('9.17')
+ with self.assertRaises(ValueError) as ve:
+ p.get_expression()
+ self.assertIn('is inside a code', str(ve.exception))
+
+ p = get('10.0')
+ self.assertEqual(p.get_expression(), '')
+
+ p = get('11.3')
+ self.assertEqual(p.get_expression(), '')
+
+ p = get('11.11')
+ self.assertEqual(p.get_expression(), 'False')
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/Lib/idlelib/idle_test/test_idlehistory.py b/Lib/idlelib/idle_test/test_idlehistory.py
new file mode 100644
index 0000000..dc6bb32
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_idlehistory.py
@@ -0,0 +1,167 @@
+import unittest
+from test.test_support import requires
+
+import Tkinter as tk
+from Tkinter import Text as tkText
+from idlelib.idle_test.mock_tk import Text as mkText
+from idlelib.IdleHistory import History
+from idlelib.configHandler import idleConf
+
+line1 = 'a = 7'
+line2 = 'b = a'
+
+class StoreTest(unittest.TestCase):
+ '''Tests History.__init__ and History.store with mock Text'''
+
+ @classmethod
+ def setUpClass(cls):
+ cls.text = mkText()
+ cls.history = History(cls.text)
+
+ def tearDown(self):
+ self.text.delete('1.0', 'end')
+ self.history.history = []
+
+ def test_init(self):
+ self.assertIs(self.history.text, self.text)
+ self.assertEqual(self.history.history, [])
+ self.assertIsNone(self.history.prefix)
+ self.assertIsNone(self.history.pointer)
+ self.assertEqual(self.history.cyclic,
+ idleConf.GetOption("main", "History", "cyclic", 1, "bool"))
+
+ def test_store_short(self):
+ self.history.store('a')
+ self.assertEqual(self.history.history, [])
+ self.history.store(' a ')
+ self.assertEqual(self.history.history, [])
+
+ def test_store_dup(self):
+ self.history.store(line1)
+ self.assertEqual(self.history.history, [line1])
+ self.history.store(line2)
+ self.assertEqual(self.history.history, [line1, line2])
+ self.history.store(line1)
+ self.assertEqual(self.history.history, [line2, line1])
+
+ def test_store_reset(self):
+ self.history.prefix = line1
+ self.history.pointer = 0
+ self.history.store(line2)
+ self.assertIsNone(self.history.prefix)
+ self.assertIsNone(self.history.pointer)
+
+
+class TextWrapper:
+ def __init__(self, master):
+ self.text = tkText(master=master)
+ self._bell = False
+ def __getattr__(self, name):
+ return getattr(self.text, name)
+ def bell(self):
+ self._bell = True
+
+class FetchTest(unittest.TestCase):
+ '''Test History.fetch with wrapped tk.Text.
+ '''
+ @classmethod
+ def setUpClass(cls):
+ requires('gui')
+ cls.root = tk.Tk()
+
+ def setUp(self):
+ self.text = text = TextWrapper(self.root)
+ text.insert('1.0', ">>> ")
+ text.mark_set('iomark', '1.4')
+ text.mark_gravity('iomark', 'left')
+ self.history = History(text)
+ self.history.history = [line1, line2]
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.root.destroy()
+ del cls.root
+
+ def fetch_test(self, reverse, line, prefix, index, bell=False):
+ # Perform one fetch as invoked by Alt-N or Alt-P
+ # Test the result. The line test is the most important.
+ # The last two are diagnostic of fetch internals.
+ History = self.history
+ History.fetch(reverse)
+
+ Equal = self.assertEqual
+ Equal(self.text.get('iomark', 'end-1c'), line)
+ Equal(self.text._bell, bell)
+ if bell:
+ self.text._bell = False
+ Equal(History.prefix, prefix)
+ Equal(History.pointer, index)
+ Equal(self.text.compare("insert", '==', "end-1c"), 1)
+
+ def test_fetch_prev_cyclic(self):
+ prefix = ''
+ test = self.fetch_test
+ test(True, line2, prefix, 1)
+ test(True, line1, prefix, 0)
+ test(True, prefix, None, None, bell=True)
+
+ def test_fetch_next_cyclic(self):
+ prefix = ''
+ test = self.fetch_test
+ test(False, line1, prefix, 0)
+ test(False, line2, prefix, 1)
+ test(False, prefix, None, None, bell=True)
+
+ # Prefix 'a' tests skip line2, which starts with 'b'
+ def test_fetch_prev_prefix(self):
+ prefix = 'a'
+ self.text.insert('iomark', prefix)
+ self.fetch_test(True, line1, prefix, 0)
+ self.fetch_test(True, prefix, None, None, bell=True)
+
+ def test_fetch_next_prefix(self):
+ prefix = 'a'
+ self.text.insert('iomark', prefix)
+ self.fetch_test(False, line1, prefix, 0)
+ self.fetch_test(False, prefix, None, None, bell=True)
+
+ def test_fetch_prev_noncyclic(self):
+ prefix = ''
+ self.history.cyclic = False
+ test = self.fetch_test
+ test(True, line2, prefix, 1)
+ test(True, line1, prefix, 0)
+ test(True, line1, prefix, 0, bell=True)
+
+ def test_fetch_next_noncyclic(self):
+ prefix = ''
+ self.history.cyclic = False
+ test = self.fetch_test
+ test(False, prefix, None, None, bell=True)
+ test(True, line2, prefix, 1)
+ test(False, prefix, None, None, bell=True)
+ test(False, prefix, None, None, bell=True)
+
+ def test_fetch_cursor_move(self):
+ # Move cursor after fetch
+ self.history.fetch(reverse=True) # initialization
+ self.text.mark_set('insert', 'iomark')
+ self.fetch_test(True, line2, None, None, bell=True)
+
+ def test_fetch_edit(self):
+ # Edit after fetch
+ self.history.fetch(reverse=True) # initialization
+ self.text.delete('iomark', 'insert', )
+ self.text.insert('iomark', 'a =')
+ self.fetch_test(True, line1, 'a =', 0) # prefix is reset
+
+ def test_history_prev_next(self):
+ # Minimally test functions bound to events
+ self.history.history_prev('dummy event')
+ self.assertEqual(self.history.pointer, 1)
+ self.history.history_next('dummy event')
+ self.assertEqual(self.history.pointer, None)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2, exit=2)
diff --git a/Lib/idlelib/idle_test/test_parenmatch.py b/Lib/idlelib/idle_test/test_parenmatch.py
new file mode 100644
index 0000000..1621981
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_parenmatch.py
@@ -0,0 +1,121 @@
+"""Test idlelib.ParenMatch."""
+# This must currently be a gui test because ParenMatch methods use
+# several text methods not defined on idlelib.idle_test.mock_tk.Text.
+
+import unittest
+from test.test_support import requires
+from Tkinter import Tk, Text
+from idlelib.ParenMatch import ParenMatch
+
+class Mock: # 2.7 does not have unittest.mock
+ def __init__(self, *args, **kwargs):
+ self.called = False
+
+ def __call__(self, *args, **kwargs):
+ self.called = True
+
+ def reset_mock(self, *args, **kwargs):
+ self.called = False
+
+ def after(self, *args, **kwargs):
+ pass
+
+class DummyEditwin:
+ def __init__(self, text):
+ self.text = text
+ self.indentwidth = 8
+ self.tabwidth = 8
+ self.context_use_ps1 = True
+
+
+class ParenMatchTest(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ requires('gui')
+ cls.root = Tk()
+ cls.text = Text(cls.root)
+ cls.editwin = DummyEditwin(cls.text)
+ cls.editwin.text_frame = Mock()
+
+ @classmethod
+ def tearDownClass(cls):
+ del cls.text, cls.editwin
+ cls.root.destroy()
+ del cls.root
+
+ def tearDown(self):
+ self.text.delete('1.0', 'end')
+
+ def test_paren_expression(self):
+ """
+ Test ParenMatch with 'expression' style.
+ """
+ text = self.text
+ pm = ParenMatch(self.editwin)
+ pm.set_style('expression')
+
+ text.insert('insert', 'def foobar(a, b')
+ pm.flash_paren_event('event')
+ self.assertIn('<<parenmatch-check-restore>>', text.event_info())
+ self.assertTupleEqual(text.tag_prevrange('paren', 'end'),
+ ('1.10', '1.15'))
+ text.insert('insert', ')')
+ pm.restore_event()
+ self.assertNotIn('<<parenmatch-check-restore>>', text.event_info())
+ self.assertEqual(text.tag_prevrange('paren', 'end'), ())
+
+ # paren_closed_event can only be tested as below
+ pm.paren_closed_event('event')
+ self.assertTupleEqual(text.tag_prevrange('paren', 'end'),
+ ('1.10', '1.16'))
+
+ def test_paren_default(self):
+ """
+ Test ParenMatch with 'default' style.
+ """
+ text = self.text
+ pm = ParenMatch(self.editwin)
+ pm.set_style('default')
+
+ text.insert('insert', 'def foobar(a, b')
+ pm.flash_paren_event('event')
+ self.assertIn('<<parenmatch-check-restore>>', text.event_info())
+ self.assertTupleEqual(text.tag_prevrange('paren', 'end'),
+ ('1.10', '1.11'))
+ text.insert('insert', ')')
+ pm.restore_event()
+ self.assertNotIn('<<parenmatch-check-restore>>', text.event_info())
+ self.assertEqual(text.tag_prevrange('paren', 'end'), ())
+
+ def test_paren_corner(self):
+ """
+ Test corner cases in flash_paren_event and paren_closed_event.
+
+ These cases force conditional expression and alternate paths.
+ """
+ text = self.text
+ pm = ParenMatch(self.editwin)
+
+ text.insert('insert', '# this is a commen)')
+ self.assertIsNone(pm.paren_closed_event('event'))
+
+ text.insert('insert', '\ndef')
+ self.assertIsNone(pm.flash_paren_event('event'))
+ self.assertIsNone(pm.paren_closed_event('event'))
+
+ text.insert('insert', ' a, *arg)')
+ self.assertIsNone(pm.paren_closed_event('event'))
+
+ def test_handle_restore_timer(self):
+ pm = ParenMatch(self.editwin)
+ pm.restore_event = Mock()
+ pm.handle_restore_timer(0)
+ self.assertTrue(pm.restore_event.called)
+ pm.restore_event.reset_mock()
+ pm.handle_restore_timer(1)
+ self.assertFalse(pm.restore_event.called)
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2)
diff --git a/Lib/idlelib/idle_test/test_pathbrowser.py b/Lib/idlelib/idle_test/test_pathbrowser.py
new file mode 100644
index 0000000..7ad7c97
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_pathbrowser.py
@@ -0,0 +1,12 @@
+import unittest
+import idlelib.PathBrowser as PathBrowser
+
+class PathBrowserTest(unittest.TestCase):
+
+ def test_DirBrowserTreeItem(self):
+ # Issue16226 - make sure that getting a sublist works
+ d = PathBrowser.DirBrowserTreeItem('')
+ d.GetSubList()
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2, exit=False)
diff --git a/Lib/idlelib/idle_test/test_rstrip.py b/Lib/idlelib/idle_test/test_rstrip.py
new file mode 100644
index 0000000..1c90b93
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_rstrip.py
@@ -0,0 +1,49 @@
+import unittest
+import idlelib.RstripExtension as rs
+from idlelib.idle_test.mock_idle import Editor
+
+class rstripTest(unittest.TestCase):
+
+ def test_rstrip_line(self):
+ editor = Editor()
+ text = editor.text
+ do_rstrip = rs.RstripExtension(editor).do_rstrip
+
+ do_rstrip()
+ self.assertEqual(text.get('1.0', 'insert'), '')
+ text.insert('1.0', ' ')
+ do_rstrip()
+ self.assertEqual(text.get('1.0', 'insert'), '')
+ text.insert('1.0', ' \n')
+ do_rstrip()
+ self.assertEqual(text.get('1.0', 'insert'), '\n')
+
+ def test_rstrip_multiple(self):
+ editor = Editor()
+ # Uncomment following to verify that test passes with real widgets.
+## from idlelib.EditorWindow import EditorWindow as Editor
+##    from Tkinter import Tk
+## editor = Editor(root=Tk())
+ text = editor.text
+ do_rstrip = rs.RstripExtension(editor).do_rstrip
+
+ original = (
+ "Line with an ending tab \n"
+ "Line ending in 5 spaces \n"
+ "Linewithnospaces\n"
+ " indented line\n"
+ " indented line with trailing space \n"
+ " ")
+ stripped = (
+ "Line with an ending tab\n"
+ "Line ending in 5 spaces\n"
+ "Linewithnospaces\n"
+ " indented line\n"
+ " indented line with trailing space\n")
+
+ text.insert('1.0', original)
+ do_rstrip()
+ self.assertEqual(text.get('1.0', 'insert'), stripped)
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2, exit=False)
diff --git a/Lib/idlelib/idle_test/test_searchengine.py b/Lib/idlelib/idle_test/test_searchengine.py
new file mode 100644
index 0000000..2525a13
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_searchengine.py
@@ -0,0 +1,329 @@
+'''Test functions and SearchEngine class in SearchEngine.py.'''
+
+# With mock replacements, the module does not use any gui widgets.
+# The use of tk.Text is avoided (for now, until mock Text is improved)
+# by patching instances with an index function returning what is needed.
+# This works because mock Text.get does not use .index.
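+#
+# A minimal sketch of that patching pattern (values here are illustrative,
+# not taken from the tests below):
+#
+#     text = mockText()
+#     text.insert('1.0', 'Hello World!')
+#     text.index = lambda arg: '1.5'   # answer any index query with '1.5'
+#     # helpers that call text.index() now see the dictated position,
+#     # while text.get() still returns the real inserted characters.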
+
+import re
+import unittest
+from test.test_support import requires
+from Tkinter import BooleanVar, StringVar, TclError # ,Tk, Text
+import tkMessageBox
+from idlelib import SearchEngine as se
+from idlelib.idle_test.mock_tk import Var, Mbox
+from idlelib.idle_test.mock_tk import Text as mockText
+
+def setUpModule():
+ # Replace s-e module tkinter imports other than non-gui TclError.
+ se.BooleanVar = Var
+ se.StringVar = Var
+ se.tkMessageBox = Mbox
+
+def tearDownModule():
+ # Restore 'just in case', though other tests should also replace.
+ se.BooleanVar = BooleanVar
+ se.StringVar = StringVar
+ se.tkMessageBox = tkMessageBox
+
+
+class Mock:
+ def __init__(self, *args, **kwargs): pass
+
+class GetTest(unittest.TestCase):
+ # SearchEngine.get returns singleton created & saved on first call.
+ def test_get(self):
+ saved_Engine = se.SearchEngine
+ se.SearchEngine = Mock # monkey-patch class
+ try:
+ root = Mock()
+ engine = se.get(root)
+ self.assertIsInstance(engine, se.SearchEngine)
+ self.assertIs(root._searchengine, engine)
+ self.assertIs(se.get(root), engine)
+ finally:
+ se.SearchEngine = saved_Engine # restore class to module
+
+class GetLineColTest(unittest.TestCase):
+ # Test simple text-independent helper function
+ def test_get_line_col(self):
+ self.assertEqual(se.get_line_col('1.0'), (1, 0))
+ self.assertEqual(se.get_line_col('1.11'), (1, 11))
+
+ self.assertRaises(ValueError, se.get_line_col, ('1.0 lineend'))
+ self.assertRaises(ValueError, se.get_line_col, ('end'))
+
+class GetSelectionTest(unittest.TestCase):
+ # Test text-dependent helper function.
+## # Need gui for text.index('sel.first/sel.last/insert').
+## @classmethod
+## def setUpClass(cls):
+## requires('gui')
+## cls.root = Tk()
+##
+## @classmethod
+## def tearDownClass(cls):
+## cls.root.destroy()
+## del cls.root
+
+ def test_get_selection(self):
+ # text = Text(master=self.root)
+ text = mockText()
+ text.insert('1.0', 'Hello World!')
+
+ # fix text.index result when called in get_selection
+ def sel(s):
+ # select entire text, cursor irrelevant
+ if s == 'sel.first': return '1.0'
+ if s == 'sel.last': return '1.12'
+ raise TclError
+        text.index = sel # replaces .tag_add('sel', '1.0', '1.12')
+ self.assertEqual(se.get_selection(text), ('1.0', '1.12'))
+
+ def mark(s):
+ # no selection, cursor after 'Hello'
+ if s == 'insert': return '1.5'
+ raise TclError
+ text.index = mark # replaces .mark_set('insert', '1.5')
+ self.assertEqual(se.get_selection(text), ('1.5', '1.5'))
+
+
+class ReverseSearchTest(unittest.TestCase):
+ # Test helper function that searches backwards within a line.
+ def test_search_reverse(self):
+ Equal = self.assertEqual
+ line = "Here is an 'is' test text."
+ prog = re.compile('is')
+ Equal(se.search_reverse(prog, line, len(line)).span(), (12, 14))
+ Equal(se.search_reverse(prog, line, 14).span(), (12, 14))
+ Equal(se.search_reverse(prog, line, 13).span(), (5, 7))
+ Equal(se.search_reverse(prog, line, 7).span(), (5, 7))
+ Equal(se.search_reverse(prog, line, 6), None)
+
+
+class SearchEngineTest(unittest.TestCase):
+ # Test class methods that do not use Text widget.
+
+ def setUp(self):
+ self.engine = se.SearchEngine(root=None)
+ # Engine.root is only used to create error message boxes.
+ # The mock replacement ignores the root argument.
+
+ def test_is_get(self):
+ engine = self.engine
+ Equal = self.assertEqual
+
+ Equal(engine.getpat(), '')
+ engine.setpat('hello')
+ Equal(engine.getpat(), 'hello')
+
+ Equal(engine.isre(), False)
+ engine.revar.set(1)
+ Equal(engine.isre(), True)
+
+ Equal(engine.iscase(), False)
+ engine.casevar.set(1)
+ Equal(engine.iscase(), True)
+
+ Equal(engine.isword(), False)
+ engine.wordvar.set(1)
+ Equal(engine.isword(), True)
+
+ Equal(engine.iswrap(), True)
+ engine.wrapvar.set(0)
+ Equal(engine.iswrap(), False)
+
+ Equal(engine.isback(), False)
+ engine.backvar.set(1)
+ Equal(engine.isback(), True)
+
+ def test_setcookedpat(self):
+ engine = self.engine
+ engine.setcookedpat('\s')
+ self.assertEqual(engine.getpat(), '\s')
+ engine.revar.set(1)
+ engine.setcookedpat('\s')
+ self.assertEqual(engine.getpat(), r'\\s')
+
+ def test_getcookedpat(self):
+ engine = self.engine
+ Equal = self.assertEqual
+
+ Equal(engine.getcookedpat(), '')
+ engine.setpat('hello')
+ Equal(engine.getcookedpat(), 'hello')
+ engine.wordvar.set(True)
+ Equal(engine.getcookedpat(), r'\bhello\b')
+ engine.wordvar.set(False)
+
+ engine.setpat('\s')
+ Equal(engine.getcookedpat(), r'\\s')
+ engine.revar.set(True)
+ Equal(engine.getcookedpat(), '\s')
+
+ def test_getprog(self):
+ engine = self.engine
+ Equal = self.assertEqual
+
+ engine.setpat('Hello')
+ temppat = engine.getprog()
+ Equal(temppat.pattern, re.compile('Hello', re.IGNORECASE).pattern)
+ engine.casevar.set(1)
+ temppat = engine.getprog()
+ Equal(temppat.pattern, re.compile('Hello').pattern, 0)
+
+ engine.setpat('')
+ Equal(engine.getprog(), None)
+ engine.setpat('+')
+ engine.revar.set(1)
+ Equal(engine.getprog(), None)
+ self.assertEqual(Mbox.showerror.message,
+ 'Error: nothing to repeat\nPattern: +')
+
+ def test_report_error(self):
+ showerror = Mbox.showerror
+ Equal = self.assertEqual
+ pat = '[a-z'
+ msg = 'unexpected end of regular expression'
+
+ Equal(self.engine.report_error(pat, msg), None)
+ Equal(showerror.title, 'Regular expression error')
+ expected_message = ("Error: " + msg + "\nPattern: [a-z")
+ Equal(showerror.message, expected_message)
+
+ Equal(self.engine.report_error(pat, msg, 5), None)
+ Equal(showerror.title, 'Regular expression error')
+ expected_message += "\nOffset: 5"
+ Equal(showerror.message, expected_message)
+
+
+class SearchTest(unittest.TestCase):
+    # Test that search_text makes the right call to the right method.
+
+ @classmethod
+ def setUpClass(cls):
+## requires('gui')
+## cls.root = Tk()
+## cls.text = Text(master=cls.root)
+ cls.text = mockText()
+ test_text = (
+ 'First line\n'
+ 'Line with target\n'
+ 'Last line\n')
+ cls.text.insert('1.0', test_text)
+ cls.pat = re.compile('target')
+
+ cls.engine = se.SearchEngine(None)
+ cls.engine.search_forward = lambda *args: ('f', args)
+ cls.engine.search_backward = lambda *args: ('b', args)
+
+## @classmethod
+## def tearDownClass(cls):
+## cls.root.destroy()
+## del cls.root
+
+ def test_search(self):
+ Equal = self.assertEqual
+ engine = self.engine
+ search = engine.search_text
+ text = self.text
+ pat = self.pat
+
+ engine.patvar.set(None)
+ #engine.revar.set(pat)
+ Equal(search(text), None)
+
+ def mark(s):
+            # no selection, cursor after 'First'
+ if s == 'insert': return '1.5'
+ raise TclError
+ text.index = mark
+ Equal(search(text, pat), ('f', (text, pat, 1, 5, True, False)))
+ engine.wrapvar.set(False)
+ Equal(search(text, pat), ('f', (text, pat, 1, 5, False, False)))
+ engine.wrapvar.set(True)
+ engine.backvar.set(True)
+ Equal(search(text, pat), ('b', (text, pat, 1, 5, True, False)))
+ engine.backvar.set(False)
+
+ def sel(s):
+ if s == 'sel.first': return '2.10'
+ if s == 'sel.last': return '2.16'
+ raise TclError
+ text.index = sel
+ Equal(search(text, pat), ('f', (text, pat, 2, 16, True, False)))
+ Equal(search(text, pat, True), ('f', (text, pat, 2, 10, True, True)))
+ engine.backvar.set(True)
+ Equal(search(text, pat), ('b', (text, pat, 2, 10, True, False)))
+ Equal(search(text, pat, True), ('b', (text, pat, 2, 16, True, True)))
+
+
+class ForwardBackwardTest(unittest.TestCase):
+ # Test that search_forward method finds the target.
+## @classmethod
+## def tearDownClass(cls):
+## cls.root.destroy()
+## del cls.root
+
+ @classmethod
+ def setUpClass(cls):
+ cls.engine = se.SearchEngine(None)
+## requires('gui')
+## cls.root = Tk()
+## cls.text = Text(master=cls.root)
+ cls.text = mockText()
+ # search_backward calls index('end-1c')
+ cls.text.index = lambda index: '4.0'
+ test_text = (
+ 'First line\n'
+ 'Line with target\n'
+ 'Last line\n')
+ cls.text.insert('1.0', test_text)
+ cls.pat = re.compile('target')
+ cls.res = (2, (10, 16)) # line, slice indexes of 'target'
+ cls.failpat = re.compile('xyz') # not in text
+ cls.emptypat = re.compile('\w*') # empty match possible
+
+ def make_search(self, func):
+ def search(pat, line, col, wrap, ok=0):
+ res = func(self.text, pat, line, col, wrap, ok)
+ # res is (line, matchobject) or None
+ return (res[0], res[1].span()) if res else res
+ return search
+
+ def test_search_forward(self):
+ # search for non-empty match
+ Equal = self.assertEqual
+ forward = self.make_search(self.engine.search_forward)
+ pat = self.pat
+ Equal(forward(pat, 1, 0, True), self.res)
+ Equal(forward(pat, 3, 0, True), self.res) # wrap
+ Equal(forward(pat, 3, 0, False), None) # no wrap
+ Equal(forward(pat, 2, 10, False), self.res)
+
+ Equal(forward(self.failpat, 1, 0, True), None)
+ Equal(forward(self.emptypat, 2, 9, True, ok=True), (2, (9, 9)))
+ #Equal(forward(self.emptypat, 2, 9, True), self.res)
+ # While the initial empty match is correctly ignored, skipping
+ # the rest of the line and returning (3, (0,4)) seems buggy - tjr.
+ Equal(forward(self.emptypat, 2, 10, True), self.res)
+
+ def test_search_backward(self):
+ # search for non-empty match
+ Equal = self.assertEqual
+ backward = self.make_search(self.engine.search_backward)
+ pat = self.pat
+ Equal(backward(pat, 3, 5, True), self.res)
+ Equal(backward(pat, 2, 0, True), self.res) # wrap
+ Equal(backward(pat, 2, 0, False), None) # no wrap
+ Equal(backward(pat, 2, 16, False), self.res)
+
+ Equal(backward(self.failpat, 3, 9, True), None)
+ Equal(backward(self.emptypat, 2, 10, True, ok=True), (2, (9,9)))
+ # Accepted because 9 < 10, not because ok=True.
+ # It is not clear that ok=True is useful going back - tjr
+ Equal(backward(self.emptypat, 2, 9, True), (2, (5, 9)))
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2, exit=2)
diff --git a/Lib/idlelib/idle_test/test_text.py b/Lib/idlelib/idle_test/test_text.py
new file mode 100644
index 0000000..f0b9b76
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_text.py
@@ -0,0 +1,228 @@
+# Test mock_tk.Text class against tkinter.Text class by running same tests with both.
+import unittest
+from test.test_support import requires
+
+from _tkinter import TclError
+import Tkinter as tk
+
+class TextTest(object):
+
+ hw = 'hello\nworld' # usual initial insert after initialization
+ hwn = hw+'\n' # \n present at initialization, before insert
+
+ Text = None
+ def setUp(self):
+ self.text = self.Text()
+
+ def test_init(self):
+ self.assertEqual(self.text.get('1.0'), '\n')
+ self.assertEqual(self.text.get('end'), '')
+
+ def test_index_empty(self):
+ index = self.text.index
+
+ for dex in (-1.0, 0.3, '1.-1', '1.0', '1.0 lineend', '1.end', '1.33',
+ 'insert'):
+ self.assertEqual(index(dex), '1.0')
+
+ for dex in 'end', 2.0, '2.1', '33.44':
+ self.assertEqual(index(dex), '2.0')
+
+ def test_index_data(self):
+ index = self.text.index
+ self.text.insert('1.0', self.hw)
+
+ for dex in -1.0, 0.3, '1.-1', '1.0':
+ self.assertEqual(index(dex), '1.0')
+
+ for dex in '1.0 lineend', '1.end', '1.33':
+ self.assertEqual(index(dex), '1.5')
+
+ for dex in 'end', '33.44':
+ self.assertEqual(index(dex), '3.0')
+
+ def test_get(self):
+ get = self.text.get
+ Equal = self.assertEqual
+ self.text.insert('1.0', self.hw)
+
+ Equal(get('end'), '')
+ Equal(get('end', 'end'), '')
+ Equal(get('1.0'), 'h')
+ Equal(get('1.0', '1.1'), 'h')
+ Equal(get('1.0', '1.3'), 'hel')
+ Equal(get('1.1', '1.3'), 'el')
+ Equal(get('1.0', '1.0 lineend'), 'hello')
+ Equal(get('1.0', '1.10'), 'hello')
+ Equal(get('1.0 lineend'), '\n')
+ Equal(get('1.1', '2.3'), 'ello\nwor')
+ Equal(get('1.0', '2.5'), self.hw)
+ Equal(get('1.0', 'end'), self.hwn)
+ Equal(get('0.0', '5.0'), self.hwn)
+
+ def test_insert(self):
+ insert = self.text.insert
+ get = self.text.get
+ Equal = self.assertEqual
+
+ insert('1.0', self.hw)
+ Equal(get('1.0', 'end'), self.hwn)
+
+ insert('1.0', '') # nothing
+ Equal(get('1.0', 'end'), self.hwn)
+
+ insert('1.0', '*')
+ Equal(get('1.0', 'end'), '*hello\nworld\n')
+
+ insert('1.0 lineend', '*')
+ Equal(get('1.0', 'end'), '*hello*\nworld\n')
+
+ insert('2.3', '*')
+ Equal(get('1.0', 'end'), '*hello*\nwor*ld\n')
+
+ insert('end', 'x')
+ Equal(get('1.0', 'end'), '*hello*\nwor*ldx\n')
+
+ insert('1.4', 'x\n')
+ Equal(get('1.0', 'end'), '*helx\nlo*\nwor*ldx\n')
+
+ def test_no_delete(self):
+ # if index1 == 'insert' or 'end' or >= end, there is no deletion
+ delete = self.text.delete
+ get = self.text.get
+ Equal = self.assertEqual
+ self.text.insert('1.0', self.hw)
+
+ delete('insert')
+ Equal(get('1.0', 'end'), self.hwn)
+
+ delete('end')
+ Equal(get('1.0', 'end'), self.hwn)
+
+ delete('insert', 'end')
+ Equal(get('1.0', 'end'), self.hwn)
+
+ delete('insert', '5.5')
+ Equal(get('1.0', 'end'), self.hwn)
+
+ delete('1.4', '1.0')
+ Equal(get('1.0', 'end'), self.hwn)
+
+ delete('1.4', '1.4')
+ Equal(get('1.0', 'end'), self.hwn)
+
+ def test_delete_char(self):
+ delete = self.text.delete
+ get = self.text.get
+ Equal = self.assertEqual
+ self.text.insert('1.0', self.hw)
+
+ delete('1.0')
+ Equal(get('1.0', '1.end'), 'ello')
+
+ delete('1.0', '1.1')
+ Equal(get('1.0', '1.end'), 'llo')
+
+ # delete \n and combine 2 lines into 1
+ delete('1.end')
+ Equal(get('1.0', '1.end'), 'lloworld')
+
+ self.text.insert('1.3', '\n')
+ delete('1.10')
+ Equal(get('1.0', '1.end'), 'lloworld')
+
+ self.text.insert('1.3', '\n')
+ delete('1.3', '2.0')
+ Equal(get('1.0', '1.end'), 'lloworld')
+
+ def test_delete_slice(self):
+ delete = self.text.delete
+ get = self.text.get
+ Equal = self.assertEqual
+ self.text.insert('1.0', self.hw)
+
+ delete('1.0', '1.0 lineend')
+ Equal(get('1.0', 'end'), '\nworld\n')
+
+ delete('1.0', 'end')
+ Equal(get('1.0', 'end'), '\n')
+
+ self.text.insert('1.0', self.hw)
+ delete('1.0', '2.0')
+ Equal(get('1.0', 'end'), 'world\n')
+
+ delete('1.0', 'end')
+ Equal(get('1.0', 'end'), '\n')
+
+ self.text.insert('1.0', self.hw)
+ delete('1.2', '2.3')
+ Equal(get('1.0', 'end'), 'held\n')
+
+ def test_multiple_lines(self): # insert and delete
+ self.text.insert('1.0', 'hello')
+
+ self.text.insert('1.3', '1\n2\n3\n4\n5')
+ self.assertEqual(self.text.get('1.0', 'end'), 'hel1\n2\n3\n4\n5lo\n')
+
+ self.text.delete('1.3', '5.1')
+ self.assertEqual(self.text.get('1.0', 'end'), 'hello\n')
+
+ def test_compare(self):
+ compare = self.text.compare
+ Equal = self.assertEqual
+        # need data so indexes are not clamped to 1.0
+ self.text.insert('1.0', 'First\nSecond\nThird\n')
+
+ self.assertRaises(TclError, compare, '2.2', 'op', '2.2')
+
+ for op, less1, less0, equal, greater0, greater1 in (
+ ('<', True, True, False, False, False),
+ ('<=', True, True, True, False, False),
+ ('>', False, False, False, True, True),
+ ('>=', False, False, True, True, True),
+ ('==', False, False, True, False, False),
+ ('!=', True, True, False, True, True),
+ ):
+ Equal(compare('1.1', op, '2.2'), less1, op)
+ Equal(compare('2.1', op, '2.2'), less0, op)
+ Equal(compare('2.2', op, '2.2'), equal, op)
+ Equal(compare('2.3', op, '2.2'), greater0, op)
+ Equal(compare('3.3', op, '2.2'), greater1, op)
+
+
+class MockTextTest(TextTest, unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ from idlelib.idle_test.mock_tk import Text
+ cls.Text = Text
+
+ def test_decode(self):
+ # test endflags (-1, 0) not tested by test_index (which uses +1)
+ decode = self.text._decode
+ Equal = self.assertEqual
+ self.text.insert('1.0', self.hw)
+
+ Equal(decode('end', -1), (2, 5))
+ Equal(decode('3.1', -1), (2, 5))
+ Equal(decode('end', 0), (2, 6))
+ Equal(decode('3.1', 0), (2, 6))
+
+
+class TkTextTest(TextTest, unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ requires('gui')
+ from Tkinter import Tk, Text
+ cls.Text = Text
+ cls.root = Tk()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.root.destroy()
+ del cls.root
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2, exit=False)
diff --git a/Lib/idlelib/idle_test/test_textview.py b/Lib/idlelib/idle_test/test_textview.py
new file mode 100644
index 0000000..9b25ecd
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_textview.py
@@ -0,0 +1,98 @@
+'''Test the functions and main class method of textView.py.'''
+
+import unittest
+import os
+from test.test_support import requires
+from Tkinter import Tk, Text, TclError
+from idlelib import textView as tv
+from idlelib.idle_test.mock_idle import Func
+from idlelib.idle_test.mock_tk import Mbox
+
+orig_mbox = tv.tkMessageBox
+
+class textviewClassTest(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ requires('gui')
+ cls.root = Tk()
+ cls.TV = TV = tv.TextViewer
+ TV.transient = Func()
+ TV.grab_set = Func()
+ TV.wait_window = Func()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.root.destroy()
+ TV = cls.TV
+ del cls.root, cls.TV
+ del TV.transient, TV.grab_set, TV.wait_window
+
+ def setUp(self):
+ TV = self.TV
+ TV.transient.__init__()
+ TV.grab_set.__init__()
+ TV.wait_window.__init__()
+
+
+ def test_init_modal(self):
+ TV = self.TV
+ view = TV(self.root, 'Title', 'test text')
+ self.assertTrue(TV.transient.called)
+ self.assertTrue(TV.grab_set.called)
+ self.assertTrue(TV.wait_window.called)
+ view.Ok()
+
+ def test_init_nonmodal(self):
+ TV = self.TV
+ view = TV(self.root, 'Title', 'test text', modal=False)
+ self.assertFalse(TV.transient.called)
+ self.assertFalse(TV.grab_set.called)
+ self.assertFalse(TV.wait_window.called)
+ view.Ok()
+
+ def test_ok(self):
+ view = self.TV(self.root, 'Title', 'test text', modal=False)
+ view.destroy = Func()
+ view.Ok()
+ self.assertTrue(view.destroy.called)
+ del view.destroy # unmask real function
+ view.destroy
+
+
+class textviewTest(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ requires('gui')
+ cls.root = Tk()
+ tv.tkMessageBox = Mbox
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.root.destroy()
+ del cls.root
+ tv.tkMessageBox = orig_mbox
+
+ def test_view_text(self):
+        # If modal is True, tkinter errors with 'can't invoke "event" command'
+ view = tv.view_text(self.root, 'Title', 'test text', modal=False)
+ self.assertIsInstance(view, tv.TextViewer)
+
+ def test_view_file(self):
+ test_dir = os.path.dirname(__file__)
+ testfile = os.path.join(test_dir, 'test_textview.py')
+ view = tv.view_file(self.root, 'Title', testfile, modal=False)
+ self.assertIsInstance(view, tv.TextViewer)
+ self.assertIn('Test', view.textView.get('1.0', '1.end'))
+ view.Ok()
+
+ # Mock messagebox will be used and view_file will not return anything
+ testfile = os.path.join(test_dir, '../notthere.py')
+ view = tv.view_file(self.root, 'Title', testfile, modal=False)
+ self.assertIsNone(view)
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2, exit=False)
+ from idlelib.idle_test.htest import run
+    run(tv.TextViewer)
diff --git a/Lib/idlelib/idle_test/test_warning.py b/Lib/idlelib/idle_test/test_warning.py
new file mode 100644
index 0000000..da1d8a1
--- /dev/null
+++ b/Lib/idlelib/idle_test/test_warning.py
@@ -0,0 +1,73 @@
+'''Test warnings replacement in PyShell.py and run.py.
+
+This file could be expanded to include traceback overrides
+(in same two modules). If so, change name.
+Revise if output destination changes (http://bugs.python.org/issue18318).
+Make sure warnings module is left unaltered (http://bugs.python.org/issue18081).
+'''
+
+import unittest
+from test.test_support import captured_stderr
+
+import warnings
+# Try to capture default showwarning before Idle modules are imported.
+showwarning = warnings.showwarning
+# But if we run this file within idle, we are in the middle of the run.main loop
+# and the default showwarning has already been replaced.
+running_in_idle = 'idle' in showwarning.__name__
+
+from idlelib import run
+from idlelib import PyShell as shell
+
+# The following was generated from PyShell.idle_formatwarning
+# and checked as matching expectation.
+idlemsg = '''
+Warning (from warnings module):
+ File "test_warning.py", line 99
+ Line of code
+UserWarning: Test
+'''
+shellmsg = idlemsg + ">>> "
+
+class RunWarnTest(unittest.TestCase):
+
+ @unittest.skipIf(running_in_idle, "Does not work when run within Idle.")
+ def test_showwarnings(self):
+ self.assertIs(warnings.showwarning, showwarning)
+ run.capture_warnings(True)
+ self.assertIs(warnings.showwarning, run.idle_showwarning_subproc)
+ run.capture_warnings(False)
+ self.assertIs(warnings.showwarning, showwarning)
+
+ def test_run_show(self):
+ with captured_stderr() as f:
+ run.idle_showwarning_subproc(
+ 'Test', UserWarning, 'test_warning.py', 99, f, 'Line of code')
+ # The following uses .splitlines to erase line-ending differences
+ self.assertEqual(idlemsg.splitlines(), f.getvalue().splitlines())
+
+class ShellWarnTest(unittest.TestCase):
+
+ @unittest.skipIf(running_in_idle, "Does not work when run within Idle.")
+ def test_showwarnings(self):
+ self.assertIs(warnings.showwarning, showwarning)
+ shell.capture_warnings(True)
+ self.assertIs(warnings.showwarning, shell.idle_showwarning)
+ shell.capture_warnings(False)
+ self.assertIs(warnings.showwarning, showwarning)
+
+ def test_idle_formatter(self):
+ # Will fail if format changed without regenerating idlemsg
+ s = shell.idle_formatwarning(
+ 'Test', UserWarning, 'test_warning.py', 99, 'Line of code')
+ self.assertEqual(idlemsg, s)
+
+ def test_shell_show(self):
+ with captured_stderr() as f:
+ shell.idle_showwarning(
+ 'Test', UserWarning, 'test_warning.py', 99, f, 'Line of code')
+ self.assertEqual(shellmsg.splitlines(), f.getvalue().splitlines())
+
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2, exit=False)
diff --git a/Lib/idlelib/idlever.py b/Lib/idlelib/idlever.py
index 72666cb..c826882 100644
--- a/Lib/idlelib/idlever.py
+++ b/Lib/idlelib/idlever.py
@@ -1 +1 @@
-IDLE_VERSION = "2.7.3"
+IDLE_VERSION = "2.7.8"
diff --git a/Lib/idlelib/keybindingDialog.py b/Lib/idlelib/keybindingDialog.py
index 5339f88..4d32ca9 100644
--- a/Lib/idlelib/keybindingDialog.py
+++ b/Lib/idlelib/keybindingDialog.py
@@ -4,14 +4,16 @@ Dialog for building Tkinter accelerator key bindings
from Tkinter import *
import tkMessageBox
import string
+import sys
class GetKeysDialog(Toplevel):
- def __init__(self,parent,title,action,currentKeySequences):
+ def __init__(self,parent,title,action,currentKeySequences,_htest=False):
"""
action - string, the name of the virtual event these keys will be
mapped to
currentKeys - list, a list of all key sequence lists currently mapped
to virtual events, for overlap checking
+ _htest - bool, change box location when running htest
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
@@ -37,11 +39,14 @@ class GetKeysDialog(Toplevel):
self.LoadFinalKeyList()
self.withdraw() #hide while setting geometry
self.update_idletasks()
- self.geometry("+%d+%d" %
- ((parent.winfo_rootx()+((parent.winfo_width()/2)
- -(self.winfo_reqwidth()/2)),
- parent.winfo_rooty()+((parent.winfo_height()/2)
- -(self.winfo_reqheight()/2)) )) ) #centre dialog over parent
+ self.geometry(
+ "+%d+%d" % (
+ parent.winfo_rootx() +
+ (parent.winfo_width()/2 - self.winfo_reqwidth()/2),
+ parent.winfo_rooty() +
+ ((parent.winfo_height()/2 - self.winfo_reqheight()/2)
+ if not _htest else 150)
+ ) ) #centre dialog over parent (or below htest box)
self.deiconify() #geometry set, unhide
self.wait_window()
@@ -132,8 +137,7 @@ class GetKeysDialog(Toplevel):
order is also important: key binding equality depends on it, so
config-keys.def must use the same ordering.
"""
- from idlelib import macosxSupport
- if macosxSupport.runningAsOSXApp():
+ if sys.platform == "darwin":
self.modifiers = ['Shift', 'Control', 'Option', 'Command']
else:
self.modifiers = ['Control', 'Alt', 'Shift']
@@ -258,11 +262,5 @@ class GetKeysDialog(Toplevel):
return keysOK
if __name__ == '__main__':
- #test the dialog
- root=Tk()
- def run():
- keySeq=''
- dlg=GetKeysDialog(root,'Get Keys','find-again',[])
- print dlg.result
- Button(root,text='Dialog',command=run).pack()
- root.mainloop()
+ from idlelib.idle_test.htest import run
+ run(GetKeysDialog)
diff --git a/Lib/idlelib/macosxSupport.py b/Lib/idlelib/macosxSupport.py
index 4be60a3..4f5259c 100644
--- a/Lib/idlelib/macosxSupport.py
+++ b/Lib/idlelib/macosxSupport.py
@@ -1,53 +1,90 @@
"""
-A number of function that enhance IDLE on MacOSX when it used as a normal
-GUI application (as opposed to an X11 application).
+A number of functions that enhance IDLE on Mac OSX.
"""
import sys
import Tkinter
from os import path
-_appbundle = None
+import warnings
def runningAsOSXApp():
+ warnings.warn("runningAsOSXApp() is deprecated, use isAquaTk()",
+ DeprecationWarning, stacklevel=2)
+ return isAquaTk()
+
+def isCarbonAquaTk(root):
+ warnings.warn("isCarbonAquaTk(root) is deprecated, use isCarbonTk()",
+ DeprecationWarning, stacklevel=2)
+ return isCarbonTk()
+
+_tk_type = None
+
+def _initializeTkVariantTests(root):
"""
- Returns True if Python is running from within an app on OSX.
- If so, assume that Python was built with Aqua Tcl/Tk rather than
- X11 Tcl/Tk.
+ Initializes OS X Tk variant values for
+ isAquaTk(), isCarbonTk(), isCocoaTk(), and isXQuartz().
"""
- global _appbundle
- if _appbundle is None:
- _appbundle = (sys.platform == 'darwin' and '.app' in sys.executable)
- return _appbundle
+ global _tk_type
+ if sys.platform == 'darwin':
+ ws = root.tk.call('tk', 'windowingsystem')
+ if 'x11' in ws:
+ _tk_type = "xquartz"
+ elif 'aqua' not in ws:
+ _tk_type = "other"
+ elif 'AppKit' in root.tk.call('winfo', 'server', '.'):
+ _tk_type = "cocoa"
+ else:
+ _tk_type = "carbon"
+ else:
+ _tk_type = "other"
-_carbonaquatk = None
+def isAquaTk():
+ """
+ Returns True if IDLE is using a native OS X Tk (Cocoa or Carbon).
+ """
+ assert _tk_type is not None
+ return _tk_type == "cocoa" or _tk_type == "carbon"
-def isCarbonAquaTk(root):
+def isCarbonTk():
"""
Returns True if IDLE is using a Carbon Aqua Tk (instead of the
newer Cocoa Aqua Tk).
"""
- global _carbonaquatk
- if _carbonaquatk is None:
- _carbonaquatk = (runningAsOSXApp() and
- 'aqua' in root.tk.call('tk', 'windowingsystem') and
- 'AppKit' not in root.tk.call('winfo', 'server', '.'))
- return _carbonaquatk
+ assert _tk_type is not None
+ return _tk_type == "carbon"
+
+def isCocoaTk():
+ """
+ Returns True if IDLE is using a Cocoa Aqua Tk.
+ """
+ assert _tk_type is not None
+ return _tk_type == "cocoa"
+
+def isXQuartz():
+ """
+ Returns True if IDLE is using an OS X X11 Tk.
+ """
+ assert _tk_type is not None
+ return _tk_type == "xquartz"
def tkVersionWarning(root):
"""
Returns a string warning message if the Tk version in use appears to
- be one known to cause problems with IDLE. The Apple Cocoa-based Tk 8.5
- that was shipped with Mac OS X 10.6.
+ be one known to cause problems with IDLE.
+ 1. Apple Cocoa-based Tk 8.5.7 shipped with Mac OS X 10.6 is unusable.
+ 2. Apple Cocoa-based Tk 8.5.9 in OS X 10.7 and 10.8 is better but
+ can still crash unexpectedly.
"""
- if (runningAsOSXApp() and
- ('AppKit' in root.tk.call('winfo', 'server', '.')) and
- (root.tk.call('info', 'patchlevel') == '8.5.7') ):
- return (r"WARNING: The version of Tcl/Tk (8.5.7) in use may"
+ if isCocoaTk():
+ patchlevel = root.tk.call('info', 'patchlevel')
+ if patchlevel not in ('8.5.7', '8.5.9'):
+ return False
+ return (r"WARNING: The version of Tcl/Tk ({0}) in use may"
r" be unstable.\n"
r"Visit http://www.python.org/download/mac/tcltk/"
- r" for current information.")
+ r" for current information.".format(patchlevel))
else:
return False
@@ -74,8 +111,8 @@ def hideTkConsole(root):
def overrideRootMenu(root, flist):
"""
- Replace the Tk root menu by something that's more appropriate for
- IDLE.
+ Replace the Tk root menu by something that is more appropriate for
+ IDLE with an Aqua Tk.
"""
# The menu that is attached to the Tk root (".") is also used by AquaTk for
# all windows that don't specify a menu of their own. The default menubar
@@ -94,6 +131,22 @@ def overrideRootMenu(root, flist):
from idlelib import WindowList
from idlelib.MultiCall import MultiCallCreator
+ closeItem = Bindings.menudefs[0][1][-2]
+
+ # Remove the last 3 items of the file menu: a separator, close window and
+ # quit. Close window will be reinserted just above the save item, where
+ # it should be according to the HIG. Quit is in the application menu.
+ del Bindings.menudefs[0][1][-3:]
+ Bindings.menudefs[0][1].insert(6, closeItem)
+
+ # Remove the 'About' entry from the help menu, it is in the application
+ # menu
+ del Bindings.menudefs[-1][1][0:2]
+
+ # Remove the 'Configure' entry from the options menu, it is in the
+ # application menu as 'Preferences'
+ del Bindings.menudefs[-2][1][0:2]
+
menubar = Menu(root)
root.configure(menu=menubar)
menudict = {}
@@ -136,7 +189,7 @@ def overrideRootMenu(root, flist):
# right thing for now.
root.createcommand('exit', flist.close_all_callback)
- if isCarbonAquaTk(root):
+ if isCarbonTk():
# for Carbon AquaTk, replace the default Tk apple menu
menudict['application'] = menu = Menu(menubar, name='apple')
menubar.add_cascade(label='IDLE', menu=menu)
@@ -151,8 +204,7 @@ def overrideRootMenu(root, flist):
Bindings.menudefs[0][1].append(
('_Preferences....', '<<open-config-dialog>>'),
)
- else:
- # assume Cocoa AquaTk
+ if isCocoaTk():
# replace default About dialog with About IDLE one
root.createcommand('tkAboutDialog', about_dialog)
# replace default "Help" item in Help menu
@@ -162,10 +214,22 @@ def overrideRootMenu(root, flist):
def setupApp(root, flist):
"""
- Perform setup for the OSX application bundle.
+ Perform initial OS X customizations if needed.
+ Called from PyShell.main() after initial calls to Tk()
+
+ There are currently three major versions of Tk in use on OS X:
+ 1. Aqua Cocoa Tk (native default since OS X 10.6)
+ 2. Aqua Carbon Tk (original native, 32-bit only, deprecated)
+ 3. X11 (supported by some third-party distributors, deprecated)
+ There are various differences among the three that affect IDLE
+ behavior, primarily with menus, mouse key events, and accelerators.
+ Some one-time customizations are performed here.
+ Others are dynamically tested throughout idlelib by calls to the
+ isAquaTk(), isCarbonTk(), isCocoaTk(), isXQuartz() functions which
+ are initialized here as well.
"""
- if not runningAsOSXApp(): return
-
- hideTkConsole(root)
- overrideRootMenu(root, flist)
- addOpenEventSupport(root, flist)
+ _initializeTkVariantTests(root)
+ if isAquaTk():
+ hideTkConsole(root)
+ overrideRootMenu(root, flist)
+ addOpenEventSupport(root, flist)
diff --git a/Lib/idlelib/rpc.py b/Lib/idlelib/rpc.py
index 1395058..8f611a3 100644
--- a/Lib/idlelib/rpc.py
+++ b/Lib/idlelib/rpc.py
@@ -2,7 +2,7 @@
For security reasons, GvR requested that Idle's Python execution server process
connect to the Idle process, which listens for the connection. Since Idle has
-has only one client per server, this was not a limitation.
+only one client per server, this was not a limitation.
+---------------------------------+ +-------------+
| SocketServer.BaseRequestHandler | | SocketIO |
@@ -144,7 +144,7 @@ class SocketIO(object):
def exithook(self):
"override for specific exit action"
- os._exit()
+ os._exit(0)
def debug(self, *args):
if not self.debugging:
diff --git a/Lib/idlelib/run.py b/Lib/idlelib/run.py
index 642b979..604c5cd 100644
--- a/Lib/idlelib/run.py
+++ b/Lib/idlelib/run.py
@@ -1,4 +1,5 @@
import sys
+import io
import linecache
import time
import socket
@@ -14,29 +15,45 @@ from idlelib import RemoteDebugger
from idlelib import RemoteObjectBrowser
from idlelib import StackViewer
from idlelib import rpc
+from idlelib import PyShell
+from idlelib import IOBinding
import __main__
LOCALHOST = '127.0.0.1'
-try:
- import warnings
-except ImportError:
- pass
-else:
- def idle_formatwarning_subproc(message, category, filename, lineno,
- line=None):
- """Format warnings the IDLE way"""
- s = "\nWarning (from warnings module):\n"
- s += ' File \"%s\", line %s\n' % (filename, lineno)
- if line is None:
- line = linecache.getline(filename, lineno)
- line = line.strip()
- if line:
- s += " %s\n" % line
- s += "%s: %s\n" % (category.__name__, message)
- return s
- warnings.formatwarning = idle_formatwarning_subproc
+import warnings
+
+def idle_showwarning_subproc(
+ message, category, filename, lineno, file=None, line=None):
+ """Show Idle-format warning after replacing warnings.showwarning.
+
+ The only difference is the formatter called.
+ """
+ if file is None:
+ file = sys.stderr
+ try:
+ file.write(PyShell.idle_formatwarning(
+ message, category, filename, lineno, line))
+ except IOError:
+ pass # the file (probably stderr) is invalid - this warning gets lost.
+
+_warnings_showwarning = None
+
+def capture_warnings(capture):
+ "Replace warning.showwarning with idle_showwarning_subproc, or reverse."
+
+ global _warnings_showwarning
+ if capture:
+ if _warnings_showwarning is None:
+ _warnings_showwarning = warnings.showwarning
+ warnings.showwarning = idle_showwarning_subproc
+ else:
+ if _warnings_showwarning is not None:
+ warnings.showwarning = _warnings_showwarning
+ _warnings_showwarning = None
+
+capture_warnings(True)
# Thread shared globals: Establish a queue between a subthread (which handles
# the socket) and the main thread (which runs user code), plus global
@@ -75,6 +92,8 @@ def main(del_exitfunc=False):
except:
print>>sys.stderr, "IDLE Subprocess: no IP port passed in sys.argv."
return
+
+ capture_warnings(True)
sys.argv[:] = [""]
sockthread = threading.Thread(target=manage_socket,
name='SockThread',
@@ -101,6 +120,7 @@ def main(del_exitfunc=False):
exit_now = True
continue
except SystemExit:
+ capture_warnings(False)
raise
except:
type, value, tb = sys.exc_info()
@@ -120,7 +140,7 @@ def manage_socket(address):
try:
server = MyRPCServer(address, MyHandler)
break
- except socket.error, err:
+ except socket.error as err:
print>>sys.__stderr__,"IDLE Subprocess: socket error: "\
+ err.args[1] + ", retrying...."
else:
@@ -216,6 +236,7 @@ def exit():
del sys.exitfunc
except AttributeError:
pass
+ capture_warnings(False)
sys.exit(0)
class MyRPCServer(rpc.RPCServer):
@@ -248,19 +269,24 @@ class MyRPCServer(rpc.RPCServer):
quitting = True
thread.interrupt_main()
-
class MyHandler(rpc.RPCHandler):
def handle(self):
"""Override base method"""
executive = Executive(self)
self.register("exec", executive)
- sys.stdin = self.console = self.get_remote_proxy("stdin")
- sys.stdout = self.get_remote_proxy("stdout")
- sys.stderr = self.get_remote_proxy("stderr")
- from idlelib import IOBinding
- sys.stdin.encoding = sys.stdout.encoding = \
- sys.stderr.encoding = IOBinding.encoding
+ self.console = self.get_remote_proxy("console")
+ sys.stdin = PyShell.PseudoInputFile(self.console, "stdin",
+ IOBinding.encoding)
+ sys.stdout = PyShell.PseudoOutputFile(self.console, "stdout",
+ IOBinding.encoding)
+ sys.stderr = PyShell.PseudoOutputFile(self.console, "stderr",
+ IOBinding.encoding)
+
+ # Keep a reference to stdin so that it won't try to exit IDLE if
+ # sys.stdin gets changed from within IDLE's shell. See issue17838.
+ self._keep_stdin = sys.stdin
+
self.interp = self.get_remote_proxy("interp")
rpc.RPCHandler.getresponse(self, myseq=None, wait=0.05)
@@ -298,11 +324,14 @@ class Executive(object):
exec code in self.locals
finally:
interruptable = False
+ except SystemExit:
+ # Scripts that raise SystemExit should just
+ # return to the interactive prompt
+ pass
except:
self.usr_exc_info = sys.exc_info()
if quitting:
exit()
- # even print a user code SystemExit exception, continue
print_exception()
jit = self.rpchandler.console.getvar("<<toggle-jit-stack-viewer>>")
if jit:
@@ -341,3 +370,5 @@ class Executive(object):
sys.last_value = val
item = StackViewer.StackTreeItem(flist, tb)
return RemoteObjectBrowser.remote_object_tree_item(item)
+
+capture_warnings(False) # Make sure turned off; see issue 18081
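capture_warnings() above is a save-and-restore monkey patch of warnings.showwarning. A minimal standalone sketch of the same pattern, with illustrative names rather than the idlelib ones:

    import warnings

    _saved_showwarning = None

    def my_showwarning(message, category, filename, lineno, file=None, line=None):
        # Same signature the warnings module expects for showwarning.
        print "custom warning: %s:%s: %s: %s" % (
            filename, lineno, category.__name__, message)

    def capture(on):
        """Install my_showwarning, or restore the saved original handler."""
        global _saved_showwarning
        if on:
            if _saved_showwarning is None:
                _saved_showwarning = warnings.showwarning
                warnings.showwarning = my_showwarning
        else:
            if _saved_showwarning is not None:
                warnings.showwarning = _saved_showwarning
                _saved_showwarning = None

    capture(True)
    warnings.warn("goes through the custom handler")
    capture(False)
    warnings.warn("back to the default handler")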
diff --git a/Lib/idlelib/tabbedpages.py b/Lib/idlelib/tabbedpages.py
index 8d7113d..0723d94 100644
--- a/Lib/idlelib/tabbedpages.py
+++ b/Lib/idlelib/tabbedpages.py
@@ -467,9 +467,12 @@ class TabbedPageSet(Frame):
self._tab_set.set_selected_tab(page_name)
-if __name__ == '__main__':
+def _tabbed_pages(parent):
# test dialog
root=Tk()
+ width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
+ root.geometry("+%d+%d"%(x, y + 175))
+ root.title("Test tabbed pages")
tabPage=TabbedPageSet(root, page_names=['Foobar','Baz'], n_rows=0,
expand_tabs=False,
)
@@ -488,3 +491,8 @@ if __name__ == '__main__':
labelPgName.pack(padx=5)
entryPgName.pack(padx=5)
root.mainloop()
+
+
+if __name__ == '__main__':
+ from idlelib.idle_test.htest import run
+ run(_tabbed_pages)
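The new _tabbed_pages() parses the parent's geometry string with re.split over 'x' and '+' to place the test window below the parent. A small illustration with a made-up geometry string:

    import re

    # A Tk geometry string looks like "WIDTHxHEIGHT+X+Y".
    geom = "300x200+120+80"
    width, height, x, y = map(int, re.split('[x+]', geom))
    print x, y + 175     # -> 120 255, the offset used for the test window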
diff --git a/Lib/idlelib/textView.py b/Lib/idlelib/textView.py
index 8937c17..eb60274 100644
--- a/Lib/idlelib/textView.py
+++ b/Lib/idlelib/textView.py
@@ -9,15 +9,21 @@ class TextViewer(Toplevel):
"""A simple text viewer dialog for IDLE
"""
- def __init__(self, parent, title, text, modal=True):
+ def __init__(self, parent, title, text, modal=True, _htest=False):
"""Show the given text in a scrollable window with a 'close' button
+ If modal option set to False, user can interact with other windows,
+ otherwise they will be unable to interact with other windows until
+ the textview window is closed.
+
+ _htest - bool; change box location when running htest.
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
+ # place dialog below parent if running htest
self.geometry("=%dx%d+%d+%d" % (625, 500,
- parent.winfo_rootx() + 10,
- parent.winfo_rooty() + 10))
+ parent.winfo_rootx() + 10,
+ parent.winfo_rooty() + (10 if not _htest else 100)))
#elguavas - config placeholders til config stuff completed
self.bg = '#ffffff'
self.fg = '#000000'
@@ -70,7 +76,6 @@ def view_file(parent, title, filename, encoding=None, modal=True):
else:
textFile = open(filename, 'r')
except IOError:
- import tkMessageBox
tkMessageBox.showerror(title='File Load Error',
message='Unable to load file %r .' % filename,
parent=parent)
@@ -79,21 +84,7 @@ def view_file(parent, title, filename, encoding=None, modal=True):
if __name__ == '__main__':
- #test the dialog
- root=Tk()
- root.title('textView test')
- filename = './textView.py'
- text = file(filename, 'r').read()
- btn1 = Button(root, text='view_text',
- command=lambda:view_text(root, 'view_text', text))
- btn1.pack(side=LEFT)
- btn2 = Button(root, text='view_file',
- command=lambda:view_file(root, 'view_file', filename))
- btn2.pack(side=LEFT)
- btn3 = Button(root, text='nonmodal view_text',
- command=lambda:view_text(root, 'nonmodal view_text', text,
- modal=False))
- btn3.pack(side=LEFT)
- close = Button(root, text='Close', command=root.destroy)
- close.pack(side=RIGHT)
- root.mainloop()
+ import unittest
+ unittest.main('idlelib.idle_test.test_textview', verbosity=2, exit=False)
+ from idlelib.idle_test.htest import run
+ run(TextViewer)
diff --git a/Lib/imaplib.py b/Lib/imaplib.py
index c576927..10ff340 100644
--- a/Lib/imaplib.py
+++ b/Lib/imaplib.py
@@ -35,6 +35,15 @@ IMAP4_PORT = 143
IMAP4_SSL_PORT = 993
AllowedVersions = ('IMAP4REV1', 'IMAP4') # Most recent first
+# Maximal line length when calling readline(). This is to prevent
+# reading arbitrary length lines. RFC 3501 and 2060 (IMAP 4rev1)
+# don't specify a line length. RFC 2683 however suggests limiting client
+# command lines to 1000 octets and server command lines to 8000 octets.
+# We have selected 10000 for some extra margin and since that is supposedly
+# also what UW and Panda IMAP does.
+_MAXLINE = 10000
+
+
# Commands
Commands = {
@@ -237,7 +246,10 @@ class IMAP4:
def readline(self):
"""Read line from remote."""
- return self.file.readline()
+ line = self.file.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise self.error("got more than %d bytes" % _MAXLINE)
+ return line
def send(self, data):
@@ -990,6 +1002,11 @@ class IMAP4:
del self.tagged_commands[tag]
return result
+ # If we've seen a BYE at this point, the socket will be
+ # closed, so report the BYE now.
+
+ self._check_bye()
+
# Some have reported "unexpected response" exceptions.
# Note that ignoring them here causes loops.
# Instead, send me details of the unexpected response and
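The readline() change above reads at most _MAXLINE + 1 bytes and rejects any longer line rather than buffering unbounded input. A minimal sketch of the same bounded-readline pattern outside imaplib (the 20-byte limit is made up for the demo):

    import io

    _MAXLINE = 20      # tiny limit, just for the demonstration

    def bounded_readline(f):
        # Ask for one byte more than the limit: if we got it, the line is
        # too long and we refuse it instead of reading it all into memory.
        line = f.readline(_MAXLINE + 1)
        if len(line) > _MAXLINE:
            raise ValueError("got more than %d bytes" % _MAXLINE)
        return line

    f = io.BytesIO(b"short line\n" + b"x" * 100 + b"\n")
    print repr(bounded_readline(f))    # 'short line\n'
    try:
        bounded_readline(f)
    except ValueError as err:
        print err                      # got more than 20 bytes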
diff --git a/Lib/imghdr.py b/Lib/imghdr.py
index 1683024..fc864c3 100644
--- a/Lib/imghdr.py
+++ b/Lib/imghdr.py
@@ -7,18 +7,16 @@ __all__ = ["what"]
#-------------------------#
def what(file, h=None):
- if h is None:
- if isinstance(file, basestring):
- f = open(file, 'rb')
- h = f.read(32)
- else:
- location = file.tell()
- h = file.read(32)
- file.seek(location)
- f = None
- else:
- f = None
+ f = None
try:
+ if h is None:
+ if isinstance(file, basestring):
+ f = open(file, 'rb')
+ h = f.read(32)
+ else:
+ location = file.tell()
+ h = file.read(32)
+ file.seek(location)
for tf in tests:
res = tf(h, f)
if res:
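The imghdr.what() rewrite moves the header read inside the try block so that the file handle is cleaned up even if reading fails; only a file the function opened itself gets closed. A hedged sketch of the same shape (not the imghdr code, just the pattern):

    def read_header(path_or_file):
        """Return the first 32 bytes, closing the file only if we opened it."""
        f = None
        try:
            if isinstance(path_or_file, basestring):
                f = open(path_or_file, 'rb')     # we own this handle
                header = f.read(32)
            else:
                pos = path_or_file.tell()        # caller's file: restore position
                header = path_or_file.read(32)
                path_or_file.seek(pos)
            return header
        finally:
            if f:
                f.close()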
diff --git a/Lib/inspect.py b/Lib/inspect.py
index 66d5186..9336943 100644
--- a/Lib/inspect.py
+++ b/Lib/inspect.py
@@ -165,7 +165,7 @@ def isgenerator(object):
"""Return true if the object is a generator.
Generator objects provide these attributes:
- __iter__ defined to support interation over container
+ __iter__ defined to support iteration over container
close raises a new GeneratorExit exception inside the
generator to terminate the iteration
gi_code code object
@@ -525,7 +525,7 @@ def findsource(object):
file = getfile(object)
sourcefile = getsourcefile(object)
- if not sourcefile and file[0] + file[-1] != '<>':
+ if not sourcefile and file[:1] + file[-1:] != '<>':
raise IOError('source code not available')
file = sourcefile if sourcefile else file
@@ -728,7 +728,8 @@ def getclasstree(classes, unique=0):
for parent in c.__bases__:
if not parent in children:
children[parent] = []
- children[parent].append(c)
+ if c not in children[parent]:
+ children[parent].append(c)
if unique and parent in classes: break
elif c not in roots:
roots.append(c)
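The findsource() change swaps indexing for slicing so that an empty filename cannot raise IndexError before the intended IOError is raised. A two-line illustration:

    fname = ''
    # fname[0] or fname[-1] raises IndexError on an empty string;
    # slices just yield '', so the '<>' comparison is always safe.
    print fname[:1] + fname[-1:] != '<>'     # True, no exception
    fname = '<stdin>'
    print fname[:1] + fname[-1:] == '<>'     # True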
diff --git a/Lib/io.py b/Lib/io.py
index 5c429c6..1438493 100644
--- a/Lib/io.py
+++ b/Lib/io.py
@@ -4,7 +4,7 @@ builtin open function is defined in this module.
At the top of the I/O hierarchy is the abstract base class IOBase. It
defines the basic interface to a stream. Note, however, that there is no
separation between reading and writing to streams; implementations are
-allowed to throw an IOError if they do not support a given operation.
+allowed to raise an IOError if they do not support a given operation.
Extending IOBase is RawIOBase which deals simply with the reading and
writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide
@@ -34,15 +34,6 @@ DEFAULT_BUFFER_SIZE
"""
# New I/O library conforming to PEP 3116.
-# XXX edge cases when switching between reading/writing
-# XXX need to support 1 meaning line-buffered
-# XXX whenever an argument is None, use the default value
-# XXX read/write ops should check readable/writable
-# XXX buffered readinto should work with arbitrary buffer objects
-# XXX use incremental encoder for text output, at least for UTF-16 and UTF-8-SIG
-# XXX check writable, readable and seekable in appropriate places
-
-
__author__ = ("Guido van Rossum <guido@python.org>, "
"Mike Verdone <mike.verdone@gmail.com>, "
"Mark Russell <mark.russell@zen.co.uk>, "
@@ -77,15 +68,16 @@ SEEK_END = 2
# version however.
class IOBase(_io._IOBase):
__metaclass__ = abc.ABCMeta
+ __doc__ = _io._IOBase.__doc__
class RawIOBase(_io._RawIOBase, IOBase):
- pass
+ __doc__ = _io._RawIOBase.__doc__
class BufferedIOBase(_io._BufferedIOBase, IOBase):
- pass
+ __doc__ = _io._BufferedIOBase.__doc__
class TextIOBase(_io._TextIOBase, IOBase):
- pass
+ __doc__ = _io._TextIOBase.__doc__
RawIOBase.register(FileIO)
diff --git a/Lib/json/__init__.py b/Lib/json/__init__.py
index ccbe9f4..0be85da 100644
--- a/Lib/json/__init__.py
+++ b/Lib/json/__init__.py
@@ -37,8 +37,8 @@ Compact encoding::
Pretty printing::
>>> import json
- >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
- >>> print '\n'.join([l.rstrip() for l in s.splitlines()])
+ >>> print json.dumps({'4': 5, '6': 7}, sort_keys=True,
+ ... indent=4, separators=(',', ': '))
{
"4": 5,
"6": 7
@@ -95,7 +95,7 @@ Using json.tool from the shell to validate and pretty-print::
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m json.tool
- Expecting property name: line 1 column 2 (char 2)
+ Expecting property name enclosed in double quotes: line 1 column 3 (char 2)
"""
__version__ = '2.0.9'
__all__ = [
@@ -121,7 +121,7 @@ _default_encoder = JSONEncoder(
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
- encoding='utf-8', default=None, **kw):
+ encoding='utf-8', default=None, sort_keys=False, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
@@ -129,11 +129,14 @@ def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
- If ``ensure_ascii`` is false, then the some chunks written to ``fp``
- may be ``unicode`` instances, subject to normal Python ``str`` to
- ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
- understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
- to cause an error.
+ If ``ensure_ascii`` is true (the default), all non-ASCII characters in the
+ output are escaped with ``\uXXXX`` sequences, and the result is a ``str``
+ instance consisting of ASCII characters only. If ``ensure_ascii`` is
+ ``False``, some chunks written to ``fp`` may be ``unicode`` instances.
+ This usually happens because the input contains unicode strings or the
+ ``encoding`` parameter is used. Unless ``fp.write()`` explicitly
+ understands ``unicode`` (as in ``codecs.getwriter``) this is likely to
+ cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
@@ -147,7 +150,9 @@ def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
- representation.
+ representation. Since the default item separator is ``', '``, the
+ output might include trailing whitespace when ``indent`` is specified.
+ You can use ``separators=(',', ': ')`` to avoid this.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
@@ -158,6 +163,9 @@ def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
+ If *sort_keys* is ``True`` (default: ``False``), then the output of
+ dictionaries will be sorted by key.
+
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
@@ -167,7 +175,7 @@ def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
- encoding == 'utf-8' and default is None and not kw):
+ encoding == 'utf-8' and default is None and not sort_keys and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
@@ -175,7 +183,7 @@ def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
- default=default, **kw).iterencode(obj)
+ default=default, sort_keys=sort_keys, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
@@ -184,16 +192,15 @@ def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
- encoding='utf-8', default=None, **kw):
+ encoding='utf-8', default=None, sort_keys=False, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is false then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
- If ``ensure_ascii`` is false, then the return value will be a
- ``unicode`` instance subject to normal Python ``str`` to ``unicode``
- coercion rules instead of being escaped to an ASCII ``str``.
+ If ``ensure_ascii`` is false, all non-ASCII characters are not escaped, and
+ the return value may be a ``unicode`` instance. See ``dump`` for details.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
@@ -207,7 +214,9 @@ def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
- representation.
+ representation. Since the default item separator is ``', '``, the
+ output might include trailing whitespace when ``indent`` is specified.
+ You can use ``separators=(',', ': ')`` to avoid this.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
@@ -218,6 +227,9 @@ def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
+ If *sort_keys* is ``True`` (default: ``False``), then the output of
+ dictionaries will be sorted by key.
+
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
@@ -227,7 +239,7 @@ def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
- encoding == 'utf-8' and default is None and not kw):
+ encoding == 'utf-8' and default is None and not sort_keys and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
@@ -235,7 +247,7 @@ def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
- **kw).encode(obj)
+ sort_keys=sort_keys, **kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
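The docstring changes above describe the new sort_keys keyword and the separators=(',', ': ') trick for avoiding trailing whitespace when indent is used. A short usage illustration (expected output shown as comments):

    import json

    data = {'b': 1, 'a': [1, 2]}

    # The default item separator is ', ', which leaves a trailing space at
    # the end of every indented line; (',', ': ') avoids that.
    print json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
    # {
    #     "a": [
    #         1,
    #         2
    #     ],
    #     "b": 1
    # }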
diff --git a/Lib/json/decoder.py b/Lib/json/decoder.py
index 1f2da72..1b43238 100644
--- a/Lib/json/decoder.py
+++ b/Lib/json/decoder.py
@@ -27,7 +27,7 @@ NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
- colno = pos
+ colno = pos + 1
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
@@ -62,6 +62,16 @@ BACKSLASH = {
DEFAULT_ENCODING = "utf-8"
+def _decode_uXXXX(s, pos):
+ esc = s[pos + 1:pos + 5]
+ if len(esc) == 4 and esc[1] not in 'xX':
+ try:
+ return int(esc, 16)
+ except ValueError:
+ pass
+ msg = "Invalid \\uXXXX escape"
+ raise ValueError(errmsg(msg, s, pos))
+
def py_scanstring(s, end, encoding=None, strict=True,
_b=BACKSLASH, _m=STRINGCHUNK.match):
"""Scan the string s for a JSON string. End is the index of the
@@ -116,25 +126,16 @@ def py_scanstring(s, end, encoding=None, strict=True,
end += 1
else:
# Unicode escape sequence
- esc = s[end + 1:end + 5]
- next_end = end + 5
- if len(esc) != 4:
- msg = "Invalid \\uXXXX escape"
- raise ValueError(errmsg(msg, s, end))
- uni = int(esc, 16)
+ uni = _decode_uXXXX(s, end)
+ end += 5
# Check for surrogate pair on UCS-4 systems
- if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
- msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
- if not s[end + 5:end + 7] == '\\u':
- raise ValueError(errmsg(msg, s, end))
- esc2 = s[end + 7:end + 11]
- if len(esc2) != 4:
- raise ValueError(errmsg(msg, s, end))
- uni2 = int(esc2, 16)
- uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
- next_end += 6
+ if sys.maxunicode > 65535 and \
+ 0xd800 <= uni <= 0xdbff and s[end:end + 2] == '\\u':
+ uni2 = _decode_uXXXX(s, end + 1)
+ if 0xdc00 <= uni2 <= 0xdfff:
+ uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
+ end += 6
char = unichr(uni)
- end = next_end
# Append the unescaped character
_append(char)
return u''.join(chunks), end
@@ -163,13 +164,14 @@ def JSONObject(s_and_end, encoding, strict, scan_once, object_hook,
if nextchar == '}':
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
- return result, end
+ return result, end + 1
pairs = {}
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end + 1
elif nextchar != '"':
- raise ValueError(errmsg("Expecting property name", s, end))
+ raise ValueError(errmsg(
+ "Expecting property name enclosed in double quotes", s, end))
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
@@ -179,8 +181,7 @@ def JSONObject(s_and_end, encoding, strict, scan_once, object_hook,
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
- raise ValueError(errmsg("Expecting : delimiter", s, end))
-
+ raise ValueError(errmsg("Expecting ':' delimiter", s, end))
end += 1
try:
@@ -209,7 +210,7 @@ def JSONObject(s_and_end, encoding, strict, scan_once, object_hook,
if nextchar == '}':
break
elif nextchar != ',':
- raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
+ raise ValueError(errmsg("Expecting ',' delimiter", s, end - 1))
try:
nextchar = s[end]
@@ -224,8 +225,8 @@ def JSONObject(s_and_end, encoding, strict, scan_once, object_hook,
end += 1
if nextchar != '"':
- raise ValueError(errmsg("Expecting property name", s, end - 1))
-
+ raise ValueError(errmsg(
+ "Expecting property name enclosed in double quotes", s, end - 1))
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end
@@ -259,8 +260,7 @@ def JSONArray(s_and_end, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
if nextchar == ']':
break
elif nextchar != ',':
- raise ValueError(errmsg("Expecting , delimiter", s, end))
-
+ raise ValueError(errmsg("Expecting ',' delimiter", s, end))
try:
if s[end] in _ws:
end += 1
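The decoder changes above tighten \uXXXX validation, pair high and low surrogates, and reword the error messages. A small illustration of the behaviour the hunks describe:

    import json

    # A correctly paired surrogate escape decodes to one astral character
    # on wide (UCS-4) builds; narrow builds keep the two surrogates.
    print repr(json.loads('"\\ud834\\udd20"'))   # u'\U0001d120' on wide builds

    # Property names must be double-quoted strings.
    try:
        json.loads('{ 1.2:3.4}')
    except ValueError as e:
        print e   # Expecting property name enclosed in double quotes: line 1 column 3 (char 2)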
diff --git a/Lib/json/encoder.py b/Lib/json/encoder.py
index 906c462..f5eeed7 100644
--- a/Lib/json/encoder.py
+++ b/Lib/json/encoder.py
@@ -27,8 +27,7 @@ for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
#ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
-# Assume this produces an infinity on all machines (probably not guaranteed)
-INFINITY = float('1e66666')
+INFINITY = float('inf')
FLOAT_REPR = repr
def encode_basestring(s):
@@ -108,9 +107,12 @@ class JSONEncoder(object):
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
- If ensure_ascii is true, the output is guaranteed to be str
- objects with all incoming unicode characters escaped. If
- ensure_ascii is false, the output will be unicode object.
+ If *ensure_ascii* is true (the default), all non-ASCII
+ characters in the output are escaped with \uXXXX sequences,
+ and the results are str instances consisting of ASCII
+ characters only. If ensure_ascii is False, a result may be a
+ unicode instance. This usually happens if the input contains
+ unicode strings or the *encoding* parameter is used.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
@@ -129,7 +131,10 @@ class JSONEncoder(object):
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
- None is the most compact representation.
+ None is the most compact representation. Since the default
+ item separator is ', ', the output might include trailing
+ whitespace when indent is specified. You can use
+ separators=(',', ': ') to avoid this.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
@@ -172,6 +177,7 @@ class JSONEncoder(object):
pass
else:
return list(iterable)
+ # Let the base class default method raise the TypeError
return JSONEncoder.default(self, o)
"""
diff --git a/Lib/json/tests/test_decode.py b/Lib/json/tests/test_decode.py
index aa8bbe9..78d7fbb 100644
--- a/Lib/json/tests/test_decode.py
+++ b/Lib/json/tests/test_decode.py
@@ -40,11 +40,30 @@ class TestDecode(object):
self.assertEqual(od, OrderedDict(p))
self.assertEqual(type(od), OrderedDict)
# the object_pairs_hook takes priority over the object_hook
- self.assertEqual(self.loads(s,
- object_pairs_hook=OrderedDict,
+ self.assertEqual(self.loads(s, object_pairs_hook=OrderedDict,
object_hook=lambda x: None),
OrderedDict(p))
+ # check that empty objects literals work (see #17368)
+ self.assertEqual(self.loads('{}', object_pairs_hook=OrderedDict),
+ OrderedDict())
+ self.assertEqual(self.loads('{"empty": {}}',
+ object_pairs_hook=OrderedDict),
+ OrderedDict([('empty', OrderedDict())]))
+ def test_extra_data(self):
+ s = '[1, 2, 3]5'
+ msg = 'Extra data'
+ self.assertRaisesRegexp(ValueError, msg, self.loads, s)
+
+ def test_invalid_escape(self):
+ s = '["abc\\y"]'
+ msg = 'escape'
+ self.assertRaisesRegexp(ValueError, msg, self.loads, s)
+
+ def test_negative_index(self):
+ d = self.json.JSONDecoder()
+ self.assertRaises(ValueError, d.raw_decode, 'a'*42, -50000)
+ self.assertRaises(ValueError, d.raw_decode, u'a'*42, -50000)
class TestPyDecode(TestDecode, PyTest): pass
class TestCDecode(TestDecode, CTest): pass
diff --git a/Lib/json/tests/test_dump.py b/Lib/json/tests/test_dump.py
index 9a7c8cc..cd92569 100644
--- a/Lib/json/tests/test_dump.py
+++ b/Lib/json/tests/test_dump.py
@@ -19,5 +19,14 @@ class TestDump(object):
{2: 3.0, 4.0: 5L, False: 1, 6L: True}, sort_keys=True),
'{"false": 1, "2": 3.0, "4.0": 5, "6": true}')
+ # Issue 16228: Crash on encoding resized list
+ def test_encode_mutated(self):
+ a = [object()] * 10
+ def crasher(obj):
+ del a[-1]
+ self.assertEqual(self.dumps(a, default=crasher),
+ '[null, null, null, null, null]')
+
+
class TestPyDump(TestDump, PyTest): pass
class TestCDump(TestDump, CTest): pass
diff --git a/Lib/json/tests/test_fail.py b/Lib/json/tests/test_fail.py
index ae962c8..e31b379 100644
--- a/Lib/json/tests/test_fail.py
+++ b/Lib/json/tests/test_fail.py
@@ -1,13 +1,13 @@
from json.tests import PyTest, CTest
-# Fri Dec 30 18:57:26 2005
+# 2007-10-05
JSONDOCS = [
# http://json.org/JSON_checker/test/fail1.json
'"A JSON payload should be an object or array, not a string."',
# http://json.org/JSON_checker/test/fail2.json
'["Unclosed array"',
# http://json.org/JSON_checker/test/fail3.json
- '{unquoted_key: "keys must be quoted}',
+ '{unquoted_key: "keys must be quoted"}',
# http://json.org/JSON_checker/test/fail4.json
'["extra comma",]',
# http://json.org/JSON_checker/test/fail5.json
@@ -33,7 +33,7 @@ JSONDOCS = [
# http://json.org/JSON_checker/test/fail15.json
'["Illegal backslash escape: \\x15"]',
# http://json.org/JSON_checker/test/fail16.json
- '["Illegal backslash escape: \\\'"]',
+ '[\\naked]',
# http://json.org/JSON_checker/test/fail17.json
'["Illegal backslash escape: \\017"]',
# http://json.org/JSON_checker/test/fail18.json
@@ -50,6 +50,24 @@ JSONDOCS = [
'["Bad value", truth]',
# http://json.org/JSON_checker/test/fail24.json
"['single quote']",
+ # http://json.org/JSON_checker/test/fail25.json
+ '["\ttab\tcharacter\tin\tstring\t"]',
+ # http://json.org/JSON_checker/test/fail26.json
+ '["tab\\ character\\ in\\ string\\ "]',
+ # http://json.org/JSON_checker/test/fail27.json
+ '["line\nbreak"]',
+ # http://json.org/JSON_checker/test/fail28.json
+ '["line\\\nbreak"]',
+ # http://json.org/JSON_checker/test/fail29.json
+ '[0e]',
+ # http://json.org/JSON_checker/test/fail30.json
+ '[0e+]',
+ # http://json.org/JSON_checker/test/fail31.json
+ '[0e+-1]',
+ # http://json.org/JSON_checker/test/fail32.json
+ '{"Comma instead if closing brace": true,',
+ # http://json.org/JSON_checker/test/fail33.json
+ '["mismatch"}',
# http://code.google.com/p/simplejson/issues/detail?id=3
u'["A\u001FZ control characters in string"]',
]
diff --git a/Lib/json/tests/test_float.py b/Lib/json/tests/test_float.py
index 12d3507..049f9ae 100644
--- a/Lib/json/tests/test_float.py
+++ b/Lib/json/tests/test_float.py
@@ -17,6 +17,21 @@ class TestFloat(object):
self.assertEqual(self.loads(self.dumps(num)), num)
self.assertEqual(self.loads(unicode(self.dumps(num))), num)
+ def test_out_of_range(self):
+ self.assertEqual(self.loads('[23456789012E666]'), [float('inf')])
+ self.assertEqual(self.loads('[-23456789012E666]'), [float('-inf')])
+
+ def test_allow_nan(self):
+ for val in (float('inf'), float('-inf'), float('nan')):
+ out = self.dumps([val])
+ if val == val: # inf
+ self.assertEqual(self.loads(out), [val])
+ else: # nan
+ res = self.loads(out)
+ self.assertEqual(len(res), 1)
+ self.assertNotEqual(res[0], res[0])
+ self.assertRaises(ValueError, self.dumps, [val], allow_nan=False)
+
class TestPyFloat(TestFloat, PyTest): pass
class TestCFloat(TestFloat, CTest): pass
diff --git a/Lib/json/tests/test_pass1.py b/Lib/json/tests/test_pass1.py
index 82d7154..df8259b 100644
--- a/Lib/json/tests/test_pass1.py
+++ b/Lib/json/tests/test_pass1.py
@@ -17,7 +17,7 @@ JSON = r'''
"real": -9876.543210,
"e": 0.123456789e-12,
"E": 1.234567890E+34,
- "": 23456789012E666,
+ "": 23456789012E66,
"zero": 0,
"one": 1,
"space": " ",
@@ -28,6 +28,7 @@ JSON = r'''
"alpha": "abcdefghijklmnopqrstuvwyz",
"ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
"digit": "0123456789",
+ "0123456789": "digit",
"special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
"hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
"true": true,
@@ -43,8 +44,7 @@ JSON = r'''
,
-4 , 5 , 6 ,7 ],
- "compact": [1,2,3,4,5,6,7],
+4 , 5 , 6 ,7 ],"compact":[1,2,3,4,5,6,7],
"jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
"quotes": "&#34; \u0022 %22 0x22 034 &#x22;",
"\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
@@ -55,9 +55,11 @@ JSON = r'''
99.44
,
-1066
-
-
+1066,
+1e1,
+0.1e1,
+1e-1,
+1e00,2e+00,2e-00
,"rosebud"]
'''
@@ -67,12 +69,6 @@ class TestPass1(object):
res = self.loads(JSON)
out = self.dumps(res)
self.assertEqual(res, self.loads(out))
- try:
- self.dumps(res, allow_nan=False)
- except ValueError:
- pass
- else:
- self.fail("23456789012E666 should be out of range")
class TestPyPass1(TestPass1, PyTest): pass
diff --git a/Lib/json/tests/test_scanstring.py b/Lib/json/tests/test_scanstring.py
index 4fef8cb..ed80a41 100644
--- a/Lib/json/tests/test_scanstring.py
+++ b/Lib/json/tests/test_scanstring.py
@@ -5,10 +5,6 @@ from json.tests import PyTest, CTest
class TestScanstring(object):
def test_scanstring(self):
scanstring = self.json.decoder.scanstring
- self.assertEqual(
- scanstring('"z\\ud834\\udd20x"', 1, None, True),
- (u'z\U0001d120x', 16))
-
if sys.maxunicode == 65535:
self.assertEqual(
scanstring(u'"z\U0001d120x"', 1, None, True),
@@ -94,6 +90,58 @@ class TestScanstring(object):
scanstring('["Bad value", truth]', 2, None, True),
(u'Bad value', 12))
+ def test_surrogates(self):
+ scanstring = self.json.decoder.scanstring
+ def assertScan(given, expect):
+ self.assertEqual(scanstring(given, 1, None, True),
+ (expect, len(given)))
+ if not isinstance(given, unicode):
+ given = unicode(given)
+ self.assertEqual(scanstring(given, 1, None, True),
+ (expect, len(given)))
+
+ surrogates = unichr(0xd834) + unichr(0xdd20)
+ assertScan('"z\\ud834\\u0079x"', u'z\ud834yx')
+ assertScan('"z\\ud834\\udd20x"', u'z\U0001d120x')
+ assertScan('"z\\ud834\\ud834\\udd20x"', u'z\ud834\U0001d120x')
+ assertScan('"z\\ud834x"', u'z\ud834x')
+ assertScan(u'"z\\ud834\udd20x12345"', u'z%sx12345' % surrogates)
+ assertScan('"z\\udd20x"', u'z\udd20x')
+ assertScan(u'"z\ud834\udd20x"', u'z\ud834\udd20x')
+ assertScan(u'"z\ud834\\udd20x"', u'z%sx' % surrogates)
+ assertScan(u'"z\ud834x"', u'z\ud834x')
+
+ def test_bad_escapes(self):
+ scanstring = self.json.decoder.scanstring
+ bad_escapes = [
+ '"\\"',
+ '"\\x"',
+ '"\\u"',
+ '"\\u0"',
+ '"\\u01"',
+ '"\\u012"',
+ '"\\uz012"',
+ '"\\u0z12"',
+ '"\\u01z2"',
+ '"\\u012z"',
+ '"\\u0x12"',
+ '"\\u0X12"',
+ '"\\ud834\\"',
+ '"\\ud834\\u"',
+ '"\\ud834\\ud"',
+ '"\\ud834\\udd"',
+ '"\\ud834\\udd2"',
+ '"\\ud834\\uzdd2"',
+ '"\\ud834\\udzd2"',
+ '"\\ud834\\uddz2"',
+ '"\\ud834\\udd2z"',
+ '"\\ud834\\u0x20"',
+ '"\\ud834\\u0X20"',
+ ]
+ for s in bad_escapes:
+ with self.assertRaises(ValueError):
+ scanstring(s, 1, None, True)
+
def test_issue3623(self):
self.assertRaises(ValueError, self.json.decoder.scanstring, b"xxx", 1,
"xxx")
diff --git a/Lib/json/tests/test_tool.py b/Lib/json/tests/test_tool.py
new file mode 100644
index 0000000..27dfb84
--- /dev/null
+++ b/Lib/json/tests/test_tool.py
@@ -0,0 +1,69 @@
+import os
+import sys
+import textwrap
+import unittest
+import subprocess
+from test import test_support
+from test.script_helper import assert_python_ok
+
+class TestTool(unittest.TestCase):
+ data = """
+
+ [["blorpie"],[ "whoops" ] , [
+ ],\t"d-shtaeou",\r"d-nthiouh",
+ "i-vhbjkhnth", {"nifty":87}, {"morefield" :\tfalse,"field"
+ :"yes"} ]
+ """
+
+ expect = textwrap.dedent("""\
+ [
+ [
+ "blorpie"
+ ],
+ [
+ "whoops"
+ ],
+ [],
+ "d-shtaeou",
+ "d-nthiouh",
+ "i-vhbjkhnth",
+ {
+ "nifty": 87
+ },
+ {
+ "field": "yes",
+ "morefield": false
+ }
+ ]
+ """)
+
+ def test_stdin_stdout(self):
+ proc = subprocess.Popen(
+ (sys.executable, '-m', 'json.tool'),
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ out, err = proc.communicate(self.data.encode())
+ self.assertEqual(out.splitlines(), self.expect.encode().splitlines())
+ self.assertEqual(err, None)
+
+ def _create_infile(self):
+ infile = test_support.TESTFN
+ with open(infile, "w") as fp:
+ self.addCleanup(os.remove, infile)
+ fp.write(self.data)
+ return infile
+
+ def test_infile_stdout(self):
+ infile = self._create_infile()
+ rc, out, err = assert_python_ok('-m', 'json.tool', infile)
+ self.assertEqual(out.splitlines(), self.expect.encode().splitlines())
+ self.assertEqual(err, b'')
+
+ def test_infile_outfile(self):
+ infile = self._create_infile()
+ outfile = test_support.TESTFN + '.out'
+ rc, out, err = assert_python_ok('-m', 'json.tool', infile, outfile)
+ self.addCleanup(os.remove, outfile)
+ with open(outfile, "r") as fp:
+ self.assertEqual(fp.read(), self.expect)
+ self.assertEqual(out, b'')
+ self.assertEqual(err, b'')
diff --git a/Lib/json/tool.py b/Lib/json/tool.py
index c37bb77..fc5d749 100644
--- a/Lib/json/tool.py
+++ b/Lib/json/tool.py
@@ -7,7 +7,7 @@ Usage::
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m json.tool
- Expecting property name: line 1 column 2 (char 2)
+ Expecting property name enclosed in double quotes: line 1 column 3 (char 2)
"""
import sys
@@ -25,12 +25,15 @@ def main():
outfile = open(sys.argv[2], 'wb')
else:
raise SystemExit(sys.argv[0] + " [infile [outfile]]")
- try:
- obj = json.load(infile)
- except ValueError, e:
- raise SystemExit(e)
- json.dump(obj, outfile, sort_keys=True, indent=4)
- outfile.write('\n')
+ with infile:
+ try:
+ obj = json.load(infile)
+ except ValueError, e:
+ raise SystemExit(e)
+ with outfile:
+ json.dump(obj, outfile, sort_keys=True,
+ indent=4, separators=(',', ': '))
+ outfile.write('\n')
if __name__ == '__main__':
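The with-statements added above make sure the input and output files are closed even if json.load or json.dump raises. A self-contained mini version of the same flow (file names are made up for the demo):

    import json

    with open('in.json', 'w') as fp:              # create a small input file
        fp.write('{"b": 1, "a": [1, 2]}')

    with open('in.json') as infile:               # closed even if load raises
        obj = json.load(infile)
    with open('out.json', 'w') as outfile:        # closed even if dump raises
        json.dump(obj, outfile, sort_keys=True,
                  indent=4, separators=(',', ': '))
        outfile.write('\n')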
diff --git a/Lib/keyword.py b/Lib/keyword.py
index 8eb2860..69794bd 100755
--- a/Lib/keyword.py
+++ b/Lib/keyword.py
@@ -7,7 +7,7 @@ This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
- python Lib/keyword.py
+ ./python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]
diff --git a/Lib/lib-tk/Tix.py b/Lib/lib-tk/Tix.py
index d474235..1f28e56 100644
--- a/Lib/lib-tk/Tix.py
+++ b/Lib/lib-tk/Tix.py
@@ -122,13 +122,9 @@ class tixCommand:
elif cnf:
cnf = _cnfmerge(cnf)
if cnf is None:
- cnf = {}
- for x in self.tk.split(self.tk.call('tix', 'configure')):
- cnf[x[0][1:]] = (x[0][1:],) + x[1:]
- return cnf
+ return self._getconfigure('tix', 'configure')
if isinstance(cnf, StringType):
- x = self.tk.split(self.tk.call('tix', 'configure', '-'+cnf))
- return (x[0][1:],) + x[1:]
+ return self._getconfigure1('tix', 'configure', '-'+cnf)
return self.tk.call(('tix', 'configure') + self._options(cnf))
def tix_filedialog(self, dlgclass=None):
@@ -380,7 +376,7 @@ class TixWidget(Tkinter.Widget):
"""Return the name of all subwidgets."""
try:
x = self.tk.call(self._w, 'subwidgets', '-all')
- return self.tk.split(x)
+ return self.tk.splitlist(x)
except TclError:
return None
@@ -473,13 +469,6 @@ class TixSubWidget(TixWidget):
self.tk.call('destroy', self._w)
-# Useful func. to split Tcl lists and return as a dict. From Tkinter.py
-def _lst2dict(lst):
- dict = {}
- for x in lst:
- dict[x[0][1:]] = (x[0][1:],) + x[1:]
- return dict
-
# Useful class to create a display style - later shared by many items.
# Contributed by Steffen Kremser
class DisplayStyle:
@@ -515,10 +504,8 @@ class DisplayStyle:
self.tk.call(self.stylename, 'configure', '-%s'%key, value)
def config(self, cnf={}, **kw):
- return _lst2dict(
- self.tk.split(
- self.tk.call(
- self.stylename, 'configure', *self._options(cnf,kw))))
+ return self._getconfigure(
+ self.stylename, 'configure', *self._options(cnf,kw))
def __getitem__(self,key):
return self.tk.call(self.stylename, 'cget', '-%s'%key)
@@ -928,9 +915,7 @@ class HList(TixWidget, XView, YView):
def header_configure(self, col, cnf={}, **kw):
if cnf is None:
- return _lst2dict(
- self.tk.split(
- self.tk.call(self._w, 'header', 'configure', col)))
+ return self._getconfigure(self._w, 'header', 'configure', col)
self.tk.call(self._w, 'header', 'configure', col,
*self._options(cnf, kw))
@@ -955,9 +940,8 @@ class HList(TixWidget, XView, YView):
def indicator_configure(self, entry, cnf={}, **kw):
if cnf is None:
- return _lst2dict(
- self.tk.split(
- self.tk.call(self._w, 'indicator', 'configure', entry)))
+ return self._getconfigure(
+ self._w, 'indicator', 'configure', entry)
self.tk.call(
self._w, 'indicator', 'configure', entry, *self._options(cnf, kw))
@@ -1017,9 +1001,7 @@ class HList(TixWidget, XView, YView):
def item_configure(self, entry, col, cnf={}, **kw):
if cnf is None:
- return _lst2dict(
- self.tk.split(
- self.tk.call(self._w, 'item', 'configure', entry, col)))
+ return self._getconfigure(self._w, 'item', 'configure', entry, col)
self.tk.call(self._w, 'item', 'configure', entry, col,
*self._options(cnf, kw))
@@ -1038,9 +1020,7 @@ class HList(TixWidget, XView, YView):
def entryconfigure(self, entry, cnf={}, **kw):
if cnf is None:
- return _lst2dict(
- self.tk.split(
- self.tk.call(self._w, 'entryconfigure', entry)))
+ return self._getconfigure(self._w, 'entryconfigure', entry)
self.tk.call(self._w, 'entryconfigure', entry,
*self._options(cnf, kw))
@@ -1255,9 +1235,7 @@ class PanedWindow(TixWidget):
def paneconfigure(self, entry, cnf={}, **kw):
if cnf is None:
- return _lst2dict(
- self.tk.split(
- self.tk.call(self._w, 'paneconfigure', entry)))
+ return self._getconfigure(self._w, 'paneconfigure', entry)
self.tk.call(self._w, 'paneconfigure', entry, *self._options(cnf, kw))
def panes(self):
@@ -1902,38 +1880,39 @@ class Grid(TixWidget, XView, YView):
self.tk.call(self, 'set', x, y, *args)
def size_column(self, index, **kw):
- """Queries or sets the size of the column given by
- INDEX. INDEX may be any non-negative
- integer that gives the position of a given column.
+ """Queries or sets the size of the column given by
+ INDEX. INDEX may be any non-negative
+ integer that gives the position of a given column.
INDEX can also be the string "default"; in this case, this command
queries or sets the default size of all columns.
- When no option-value pair is given, this command returns a tuple
- containing the current size setting of the given column. When
- option-value pairs are given, the corresponding options of the
+ When no option-value pair is given, this command returns a tuple
+ containing the current size setting of the given column. When
+ option-value pairs are given, the corresponding options of the
size setting of the given column are changed. Options may be one
- of the follwing:
+ of the follwing:
pad0 pixels
Specifies the paddings to the left of a column.
pad1 pixels
- Specifies the paddings to the right of a column.
+ Specifies the paddings to the right of a column.
size val
- Specifies the width of a column .
- Val may be: "auto" -- the width of the column is set the
- the widest cell in the column; a valid Tk screen distance
- unit; or a real number following by the word chars
+ Specifies the width of a column. Val may be:
+ "auto" -- the width of the column is set to the
+ width of the widest cell in the column;
+ a valid Tk screen distance unit;
+ or a real number following by the word chars
(e.g. 3.4chars) that sets the width of the column to the
given number of characters."""
return self.tk.split(self.tk.call(self._w, 'size', 'column', index,
*self._options({}, kw)))
def size_row(self, index, **kw):
- """Queries or sets the size of the row given by
- INDEX. INDEX may be any non-negative
- integer that gives the position of a given row .
+ """Queries or sets the size of the row given by
+ INDEX. INDEX may be any non-negative
+ integer that gives the position of a given row .
INDEX can also be the string "default"; in this case, this command
queries or sets the default size of all rows.
- When no option-value pair is given, this command returns a list con-
- taining the current size setting of the given row . When option-value
+ When no option-value pair is given, this command returns a list con-
+ taining the current size setting of the given row . When option-value
pairs are given, the corresponding options of the size setting of the
given row are changed. Options may be one of the follwing:
pad0 pixels
@@ -1941,10 +1920,11 @@ class Grid(TixWidget, XView, YView):
pad1 pixels
Specifies the paddings to the bottom of a row.
size val
- Specifies the height of a row.
- Val may be: "auto" -- the height of the row is set the
- the highest cell in the row; a valid Tk screen distance
- unit; or a real number following by the word chars
+ Specifies the height of a row. Val may be:
+ "auto" -- the height of the row is set to the
+ height of the highest cell in the row;
+ a valid Tk screen distance unit;
+ or a real number following by the word chars
(e.g. 3.4chars) that sets the height of the row to the
given number of characters."""
return self.tk.split(self.tk.call(
diff --git a/Lib/lib-tk/Tkinter.py b/Lib/lib-tk/Tkinter.py
index 586129a..d3d7d83 100644
--- a/Lib/lib-tk/Tkinter.py
+++ b/Lib/lib-tk/Tkinter.py
@@ -41,6 +41,7 @@ tkinter = _tkinter # b/w compat for export
TclError = _tkinter.TclError
from types import *
from Tkconstants import *
+import re
wantobjects = 1
@@ -58,6 +59,37 @@ try: _tkinter.deletefilehandler
except AttributeError: _tkinter.deletefilehandler = None
+_magic_re = re.compile(r'([\\{}])')
+_space_re = re.compile(r'([\s])')
+
+def _join(value):
+ """Internal function."""
+ return ' '.join(map(_stringify, value))
+
+def _stringify(value):
+ """Internal function."""
+ if isinstance(value, (list, tuple)):
+ if len(value) == 1:
+ value = _stringify(value[0])
+ if value[0] == '{':
+ value = '{%s}' % value
+ else:
+ value = '{%s}' % _join(value)
+ else:
+ if isinstance(value, str):
+ value = unicode(value, 'utf-8')
+ elif not isinstance(value, unicode):
+ value = str(value)
+ if not value:
+ value = '{}'
+ elif _magic_re.search(value):
+ # add '\' before special characters and spaces
+ value = _magic_re.sub(r'\\\1', value)
+ value = _space_re.sub(r'\\\1', value)
+ elif value[0] == '"' or _space_re.search(value):
+ value = '{%s}' % value
+ return value
+
def _flatten(tuple):
"""Internal function."""
res = ()
@@ -154,8 +186,12 @@ def _tkerror(err):
"""Internal function."""
pass
-def _exit(code='0'):
- """Internal function. Calling it will throw the exception SystemExit."""
+def _exit(code=0):
+ """Internal function. Calling it will raise the exception SystemExit."""
+ try:
+ code = int(code)
+ except ValueError:
+ pass
raise SystemExit, code
_varnum = 0
@@ -187,11 +223,13 @@ class Variable:
_varnum += 1
if value is not None:
self.set(value)
- elif not self._tk.call("info", "exists", self._name):
+ elif not self._tk.getboolean(self._tk.call("info", "exists", self._name)):
self.set(self._default)
def __del__(self):
"""Unset the variable in Tcl."""
- self._tk.globalunsetvar(self._name)
+ if (self._tk is not None and
+ self._tk.getboolean(self._tk.call("info", "exists", self._name))):
+ self._tk.globalunsetvar(self._name)
def __str__(self):
"""Return the name of the variable in Tcl."""
return self._name
@@ -534,12 +572,19 @@ class Misc:
The type keyword specifies the form in which the data is
to be returned and should be an atom name such as STRING
- or FILE_NAME. Type defaults to STRING.
+ or FILE_NAME. Type defaults to STRING, except on X11, where the default
+ is to try UTF8_STRING and fall back to STRING.
This command is equivalent to:
selection_get(CLIPBOARD)
"""
+ if 'type' not in kw and self._windowingsystem == 'x11':
+ try:
+ kw['type'] = 'UTF8_STRING'
+ return self.tk.call(('clipboard', 'get') + self._options(kw))
+ except TclError:
+ del kw['type']
return self.tk.call(('clipboard', 'get') + self._options(kw))
def clipboard_clear(self, **kw):
@@ -621,8 +666,16 @@ class Misc:
A keyword parameter selection specifies the name of
the selection and defaults to PRIMARY. A keyword
parameter displayof specifies a widget on the display
- to use."""
+ to use. A keyword parameter type specifies the form of data to be
+ fetched, defaulting to STRING except on X11, where UTF8_STRING is tried
+ before STRING."""
if 'displayof' not in kw: kw['displayof'] = self._w
+ if 'type' not in kw and self._windowingsystem == 'x11':
+ try:
+ kw['type'] = 'UTF8_STRING'
+ return self.tk.call(('selection', 'get') + self._options(kw))
+ except TclError:
+ del kw['type']
return self.tk.call(('selection', 'get') + self._options(kw))
def selection_handle(self, command, **kw):
"""Specify a function COMMAND to call if the X
@@ -1037,6 +1090,15 @@ class Misc:
if displayof is None:
return ('-displayof', self._w)
return ()
+ @property
+ def _windowingsystem(self):
+ """Internal function."""
+ try:
+ return self._root()._windowingsystem_cached
+ except AttributeError:
+ ws = self._root()._windowingsystem_cached = \
+ self.tk.call('tk', 'windowingsystem')
+ return ws
def _options(self, cnf, kw = None):
"""Internal function."""
if kw:
@@ -1058,7 +1120,7 @@ class Misc:
nv.append('%d' % item)
else:
# format it to proper Tcl code if it contains space
- nv.append(('{%s}' if ' ' in item else '%s') % item)
+ nv.append(_stringify(item))
else:
v = ' '.join(nv)
res = res + ('-'+k, v)
@@ -1174,6 +1236,19 @@ class Misc:
exc, val, tb = sys.exc_type, sys.exc_value, sys.exc_traceback
root = self._root()
root.report_callback_exception(exc, val, tb)
+
+ def _getconfigure(self, *args):
+ """Call Tcl configure command and return the result as a dict."""
+ cnf = {}
+ for x in self.tk.splitlist(self.tk.call(*args)):
+ x = self.tk.splitlist(x)
+ cnf[x[0][1:]] = (x[0][1:],) + x[1:]
+ return cnf
+
+ def _getconfigure1(self, *args):
+ x = self.tk.splitlist(self.tk.call(*args))
+ return (x[0][1:],) + x[1:]
+
def _configure(self, cmd, cnf, kw):
"""Internal function."""
if kw:
@@ -1181,15 +1256,9 @@ class Misc:
elif cnf:
cnf = _cnfmerge(cnf)
if cnf is None:
- cnf = {}
- for x in self.tk.split(
- self.tk.call(_flatten((self._w, cmd)))):
- cnf[x[0][1:]] = (x[0][1:],) + x[1:]
- return cnf
+ return self._getconfigure(_flatten((self._w, cmd)))
if type(cnf) is StringType:
- x = self.tk.split(
- self.tk.call(_flatten((self._w, cmd, '-'+cnf))))
- return (x[0][1:],) + x[1:]
+ return self._getconfigure1(_flatten((self._w, cmd, '-'+cnf)))
self.tk.call(_flatten((self._w, cmd)) + self._options(cnf))
# These used to be defined in Widget:
def configure(self, cnf=None, **kw):
@@ -1211,8 +1280,8 @@ class Misc:
raise TypeError("Tkinter objects don't support 'in' tests.")
def keys(self):
"""Return a list of all resource names of this widget."""
- return map(lambda x: x[0][1:],
- self.tk.split(self.tk.call(self._w, 'configure')))
+ return [x[0][1:] for x in
+ self.tk.splitlist(self.tk.call(self._w, 'configure'))]
def __str__(self):
"""Return the window path name of this widget."""
return self._w
@@ -1267,6 +1336,21 @@ class Misc:
return self._getints(self.tk.call(*args)) or None
bbox = grid_bbox
+
+ def _gridconvvalue(self, value):
+ if isinstance(value, (str, _tkinter.Tcl_Obj)):
+ try:
+ svalue = str(value)
+ if not svalue:
+ return None
+ elif '.' in svalue:
+ return getdouble(svalue)
+ else:
+ return getint(svalue)
+ except ValueError:
+ pass
+ return value
+
def _grid_configure(self, command, index, cnf, kw):
"""Internal function."""
if type(cnf) is StringType and not kw:
@@ -1285,22 +1369,14 @@ class Misc:
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
- if not value:
- value = None
- elif '.' in value:
- value = getdouble(value)
- else:
- value = getint(value)
- dict[key] = value
+ dict[key] = self._gridconvvalue(value)
return dict
res = self.tk.call(
('grid', command, self._w, index)
+ options)
if len(options) == 1:
- if not res: return None
- # In Tk 7.5, -width can be a float
- if '.' in res: return getdouble(res)
- return getint(res)
+ return self._gridconvvalue(res)
+
def grid_columnconfigure(self, index, cnf={}, **kw):
"""Configure column INDEX of a grid.
@@ -1387,11 +1463,11 @@ class Misc:
def image_names(self):
"""Return a list of all existing image names."""
- return self.tk.call('image', 'names')
+ return self.tk.splitlist(self.tk.call('image', 'names'))
def image_types(self):
"""Return a list of all available image types (e.g. phote bitmap)."""
- return self.tk.call('image', 'types')
+ return self.tk.splitlist(self.tk.call('image', 'types'))
class CallWrapper:
@@ -1505,7 +1581,10 @@ class Wm:
if len(wlist) > 1:
wlist = (wlist,) # Tk needs a list of windows here
args = ('wm', 'colormapwindows', self._w) + wlist
- return map(self._nametowidget, self.tk.call(args))
+ if wlist:
+ self.tk.call(args)
+ else:
+ return map(self._nametowidget, self.tk.splitlist(self.tk.call(args)))
colormapwindows = wm_colormapwindows
def wm_command(self, value=None):
"""Store VALUE in WM_COMMAND property. It is the command
@@ -1676,7 +1755,7 @@ class Tk(Misc, Wm):
# ensure that self.tk is always _something_.
self.tk = None
if baseName is None:
- import sys, os
+ import os
baseName = os.path.basename(sys.argv[0])
baseName, ext = os.path.splitext(baseName)
if ext not in ('.py', '.pyc', '.pyo'):
@@ -1685,7 +1764,9 @@ class Tk(Misc, Wm):
self.tk = _tkinter.create(screenName, baseName, className, interactive, wantobjects, useTk, sync, use)
if useTk:
self._loadtk()
- self.readprofile(baseName, className)
+ if not sys.flags.ignore_environment:
+ # Issue #16248: Honor the -E flag to avoid code injection.
+ self.readprofile(baseName, className)
def loadtk(self):
if not self._tkloaded:
self.tk.loadtk()
@@ -1818,7 +1899,7 @@ class Pack:
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
- if value[:1] == '.':
+ if str(value)[:1] == '.':
value = self._nametowidget(value)
dict[key] = value
return dict
@@ -1869,7 +1950,7 @@ class Place:
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
- if value[:1] == '.':
+ if str(value)[:1] == '.':
value = self._nametowidget(value)
dict[key] = value
return dict
@@ -1918,7 +1999,7 @@ class Grid:
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
- if value[:1] == '.':
+ if str(value)[:1] == '.':
value = self._nametowidget(value)
dict[key] = value
return dict
@@ -2488,22 +2569,19 @@ class Listbox(Widget, XView, YView):
def activate(self, index):
"""Activate item identified by INDEX."""
self.tk.call(self._w, 'activate', index)
- def bbox(self, *args):
+ def bbox(self, index):
"""Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle
- which encloses the item identified by index in ARGS."""
- return self._getints(
- self.tk.call((self._w, 'bbox') + args)) or None
+ which encloses the item identified by the given index."""
+ return self._getints(self.tk.call(self._w, 'bbox', index)) or None
def curselection(self):
- """Return list of indices of currently selected item."""
- # XXX Ought to apply self._getints()...
- return self.tk.splitlist(self.tk.call(
- self._w, 'curselection'))
+ """Return the indices of currently selected item."""
+ return self._getints(self.tk.call(self._w, 'curselection')) or ()
def delete(self, first, last=None):
- """Delete items from FIRST to LAST (not included)."""
+ """Delete items from FIRST to LAST (included)."""
self.tk.call(self._w, 'delete', first, last)
def get(self, first, last=None):
- """Get list of items from FIRST to LAST (not included)."""
- if last:
+ """Get list of items from FIRST to LAST (included)."""
+ if last is not None:
return self.tk.splitlist(self.tk.call(
self._w, 'get', first, last))
else:
@@ -2536,7 +2614,7 @@ class Listbox(Widget, XView, YView):
self.tk.call(self._w, 'selection', 'anchor', index)
select_anchor = selection_anchor
def selection_clear(self, first, last=None):
- """Clear the selection from FIRST to LAST (not included)."""
+ """Clear the selection from FIRST to LAST (included)."""
self.tk.call(self._w,
'selection', 'clear', first, last)
select_clear = selection_clear
@@ -2546,7 +2624,7 @@ class Listbox(Widget, XView, YView):
self._w, 'selection', 'includes', index))
select_includes = selection_includes
def selection_set(self, first, last=None):
- """Set the selection from FIRST to LAST (not included) without
+ """Set the selection from FIRST to LAST (included) without
changing the currently selected elements."""
self.tk.call(self._w, 'selection', 'set', first, last)
select_set = selection_set
@@ -2846,8 +2924,9 @@ class Text(Widget, XView, YView):
def debug(self, boolean=None):
"""Turn on the internal consistency checks of the B-Tree inside the text
widget according to BOOLEAN."""
- return self.tk.getboolean(self.tk.call(
- self._w, 'debug', boolean))
+ if boolean is None:
+ return self.tk.getboolean(self.tk.call(self._w, 'debug'))
+ self.tk.call(self._w, 'debug', boolean)
def delete(self, index1, index2=None):
"""Delete the characters between INDEX1 and INDEX2 (not included)."""
self.tk.call(self._w, 'delete', index1, index2)
@@ -3302,8 +3381,11 @@ class BitmapImage(Image):
Valid resource names: background, data, file, foreground, maskdata, maskfile."""
Image.__init__(self, 'bitmap', name, cnf, master, **kw)
-def image_names(): return _default_root.tk.call('image', 'names')
-def image_types(): return _default_root.tk.call('image', 'types')
+def image_names():
+ return _default_root.tk.splitlist(_default_root.tk.call('image', 'names'))
+
+def image_types():
+ return _default_root.tk.splitlist(_default_root.tk.call('image', 'types'))
class Spinbox(Widget, XView):
@@ -3349,7 +3431,7 @@ class Spinbox(Widget, XView):
bounding box may refer to a region outside the
visible area of the window.
"""
- return self.tk.call(self._w, 'bbox', index)
+ return self._getints(self.tk.call(self._w, 'bbox', index)) or None
def delete(self, first, last=None):
"""Delete one or more elements of the spinbox.
@@ -3662,23 +3744,17 @@ class PanedWindow(Widget):
"""
if cnf is None and not kw:
- cnf = {}
- for x in self.tk.split(
- self.tk.call(self._w,
- 'paneconfigure', tagOrId)):
- cnf[x[0][1:]] = (x[0][1:],) + x[1:]
- return cnf
+ return self._getconfigure(self._w, 'paneconfigure', tagOrId)
if type(cnf) == StringType and not kw:
- x = self.tk.split(self.tk.call(
- self._w, 'paneconfigure', tagOrId, '-'+cnf))
- return (x[0][1:],) + x[1:]
+ return self._getconfigure1(
+ self._w, 'paneconfigure', tagOrId, '-'+cnf)
self.tk.call((self._w, 'paneconfigure', tagOrId) +
self._options(cnf, kw))
paneconfig = paneconfigure
def panes(self):
"""Returns an ordered list of the child panes."""
- return self.tk.call(self._w, 'panes')
+ return self.tk.splitlist(self.tk.call(self._w, 'panes'))
######################################################################
# Extensions:
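The new Misc._getconfigure() above turns the Tcl 'configure' output (a list of option tuples) into a dict keyed on the option name with the leading dash stripped, replacing the old tk.split() loops. A standalone illustration of the same transformation on a literal Tcl-style structure:

    # Each entry from 'widget configure' looks roughly like
    # ('-background', 'background', 'Background', '#d9d9d9', 'red').
    raw = (
        ('-background', 'background', 'Background', '#d9d9d9', 'red'),
        ('-width', 'width', 'Width', '0', '15'),
    )
    cnf = {}
    for x in raw:
        cnf[x[0][1:]] = (x[0][1:],) + x[1:]
    print cnf['width']    # ('width', 'width', 'Width', '0', '15')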
diff --git a/Lib/lib-tk/test/runtktests.py b/Lib/lib-tk/test/runtktests.py
index 95e1728..d4b1893 100644
--- a/Lib/lib-tk/test/runtktests.py
+++ b/Lib/lib-tk/test/runtktests.py
@@ -14,49 +14,6 @@ import test.test_support
this_dir_path = os.path.abspath(os.path.dirname(__file__))
-_tk_unavailable = None
-
-def check_tk_availability():
- """Check that Tk is installed and available."""
- global _tk_unavailable
-
- if _tk_unavailable is None:
- _tk_unavailable = False
- if sys.platform == 'darwin':
- # The Aqua Tk implementations on OS X can abort the process if
- # being called in an environment where a window server connection
- # cannot be made, for instance when invoked by a buildbot or ssh
- # process not running under the same user id as the current console
- # user. To avoid that, raise an exception if the window manager
- # connection is not available.
- from ctypes import cdll, c_int, pointer, Structure
- from ctypes.util import find_library
-
- app_services = cdll.LoadLibrary(find_library("ApplicationServices"))
-
- if app_services.CGMainDisplayID() == 0:
- _tk_unavailable = "cannot run without OS X window manager"
- else:
- class ProcessSerialNumber(Structure):
- _fields_ = [("highLongOfPSN", c_int),
- ("lowLongOfPSN", c_int)]
- psn = ProcessSerialNumber()
- psn_p = pointer(psn)
- if ( (app_services.GetCurrentProcess(psn_p) < 0) or
- (app_services.SetFrontProcess(psn_p) < 0) ):
- _tk_unavailable = "cannot run without OS X gui process"
- else: # not OS X
- import Tkinter
- try:
- Tkinter.Button()
- except Tkinter.TclError as msg:
- # assuming tk is not available
- _tk_unavailable = "tk not available: %s" % msg
-
- if _tk_unavailable:
- raise unittest.SkipTest(_tk_unavailable)
- return
-
def is_package(path):
for name in os.listdir(path):
if name in ('__init__.py', '__init__.pyc', '__init.pyo'):
@@ -68,7 +25,7 @@ def get_tests_modules(basepath=this_dir_path, gui=True, packages=None):
and are inside packages found in the path starting at basepath.
If packages is specified it should contain package names that want
- their tests colleted.
+ their tests collected.
"""
py_ext = '.py'
@@ -110,5 +67,4 @@ def get_tests(text=True, gui=True, packages=None):
yield test
if __name__ == "__main__":
- test.test_support.use_resources = ['gui']
test.test_support.run_unittest(*get_tests())
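With the availability probe and the implicit use_resources = ['gui'] assignment removed, each new test module declares its own requirement and the caller decides whether GUI tests run, typically by enabling the 'gui' resource through regrtest's -u option or by setting test_support.use_resources before importing the modules. The pattern used by the files added below is roughly:

    # at the top of each GUI test module
    from test.test_support import requires, run_unittest
    requires('gui')    # the whole module is skipped unless the 'gui' resource is enabled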
diff --git a/Lib/lib-tk/test/test_tkinter/test_geometry_managers.py b/Lib/lib-tk/test/test_tkinter/test_geometry_managers.py
new file mode 100644
index 0000000..876039f
--- /dev/null
+++ b/Lib/lib-tk/test/test_tkinter/test_geometry_managers.py
@@ -0,0 +1,889 @@
+import unittest
+import re
+import Tkinter
+from Tkinter import TclError
+from test.test_support import requires, run_unittest
+
+from test_ttk.support import pixels_conv, tcl_version, requires_tcl
+from widget_tests import AbstractWidgetTest, int_round
+
+requires('gui')
+
+
+class PackTest(AbstractWidgetTest, unittest.TestCase):
+
+ def create2(self):
+ pack = Tkinter.Toplevel(self.root, name='pack')
+ pack.wm_geometry('300x200+0+0')
+ pack.wm_minsize(1, 1)
+ a = Tkinter.Frame(pack, name='a', width=20, height=40, bg='red')
+ b = Tkinter.Frame(pack, name='b', width=50, height=30, bg='blue')
+ c = Tkinter.Frame(pack, name='c', width=80, height=80, bg='green')
+ d = Tkinter.Frame(pack, name='d', width=40, height=30, bg='yellow')
+ return pack, a, b, c, d
+
+ def test_pack_configure_after(self):
+ pack, a, b, c, d = self.create2()
+ with self.assertRaisesRegexp(TclError, 'window "%s" isn\'t packed' % b):
+ a.pack_configure(after=b)
+ with self.assertRaisesRegexp(TclError, 'bad window path name ".foo"'):
+ a.pack_configure(after='.foo')
+ a.pack_configure(side='top')
+ b.pack_configure(side='top')
+ c.pack_configure(side='top')
+ d.pack_configure(side='top')
+ self.assertEqual(pack.pack_slaves(), [a, b, c, d])
+ a.pack_configure(after=b)
+ self.assertEqual(pack.pack_slaves(), [b, a, c, d])
+ a.pack_configure(after=a)
+ self.assertEqual(pack.pack_slaves(), [b, a, c, d])
+
+ def test_pack_configure_anchor(self):
+ pack, a, b, c, d = self.create2()
+ def check(anchor, geom):
+ a.pack_configure(side='top', ipadx=5, padx=10, ipady=15, pady=20,
+ expand=True, anchor=anchor)
+ self.root.update()
+ self.assertEqual(a.winfo_geometry(), geom)
+ check('n', '30x70+135+20')
+ check('ne', '30x70+260+20')
+ check('e', '30x70+260+65')
+ check('se', '30x70+260+110')
+ check('s', '30x70+135+110')
+ check('sw', '30x70+10+110')
+ check('w', '30x70+10+65')
+ check('nw', '30x70+10+20')
+ check('center', '30x70+135+65')
+
+ def test_pack_configure_before(self):
+ pack, a, b, c, d = self.create2()
+ with self.assertRaisesRegexp(TclError, 'window "%s" isn\'t packed' % b):
+ a.pack_configure(before=b)
+ with self.assertRaisesRegexp(TclError, 'bad window path name ".foo"'):
+ a.pack_configure(before='.foo')
+ a.pack_configure(side='top')
+ b.pack_configure(side='top')
+ c.pack_configure(side='top')
+ d.pack_configure(side='top')
+ self.assertEqual(pack.pack_slaves(), [a, b, c, d])
+ a.pack_configure(before=d)
+ self.assertEqual(pack.pack_slaves(), [b, c, a, d])
+ a.pack_configure(before=a)
+ self.assertEqual(pack.pack_slaves(), [b, c, a, d])
+
+ def test_pack_configure_expand(self):
+ pack, a, b, c, d = self.create2()
+ def check(*geoms):
+ self.root.update()
+ self.assertEqual(a.winfo_geometry(), geoms[0])
+ self.assertEqual(b.winfo_geometry(), geoms[1])
+ self.assertEqual(c.winfo_geometry(), geoms[2])
+ self.assertEqual(d.winfo_geometry(), geoms[3])
+ a.pack_configure(side='left')
+ b.pack_configure(side='top')
+ c.pack_configure(side='right')
+ d.pack_configure(side='bottom')
+ check('20x40+0+80', '50x30+135+0', '80x80+220+75', '40x30+100+170')
+ a.pack_configure(side='left', expand='yes')
+ b.pack_configure(side='top', expand='on')
+ c.pack_configure(side='right', expand=True)
+ d.pack_configure(side='bottom', expand=1)
+ check('20x40+40+80', '50x30+175+35', '80x80+180+110', '40x30+100+135')
+ a.pack_configure(side='left', expand='yes', fill='both')
+ b.pack_configure(side='top', expand='on', fill='both')
+ c.pack_configure(side='right', expand=True, fill='both')
+ d.pack_configure(side='bottom', expand=1, fill='both')
+ check('100x200+0+0', '200x100+100+0', '160x100+140+100', '40x100+100+100')
+
+ def test_pack_configure_in(self):
+ pack, a, b, c, d = self.create2()
+ a.pack_configure(side='top')
+ b.pack_configure(side='top')
+ c.pack_configure(side='top')
+ d.pack_configure(side='top')
+ a.pack_configure(in_=pack)
+ self.assertEqual(pack.pack_slaves(), [b, c, d, a])
+ a.pack_configure(in_=c)
+ self.assertEqual(pack.pack_slaves(), [b, c, d])
+ self.assertEqual(c.pack_slaves(), [a])
+ with self.assertRaisesRegexp(TclError,
+ 'can\'t pack %s inside itself' % (a,)):
+ a.pack_configure(in_=a)
+ with self.assertRaisesRegexp(TclError, 'bad window path name ".foo"'):
+ a.pack_configure(in_='.foo')
+
+ def test_pack_configure_padx_ipadx_fill(self):
+ pack, a, b, c, d = self.create2()
+ def check(geom1, geom2, **kwargs):
+ a.pack_forget()
+ b.pack_forget()
+ a.pack_configure(**kwargs)
+ b.pack_configure(expand=True, fill='both')
+ self.root.update()
+ self.assertEqual(a.winfo_geometry(), geom1)
+ self.assertEqual(b.winfo_geometry(), geom2)
+ check('20x40+260+80', '240x200+0+0', side='right', padx=20)
+ check('20x40+250+80', '240x200+0+0', side='right', padx=(10, 30))
+ check('60x40+240+80', '240x200+0+0', side='right', ipadx=20)
+ check('30x40+260+80', '250x200+0+0', side='right', ipadx=5, padx=10)
+ check('20x40+260+80', '240x200+0+0', side='right', padx=20, fill='x')
+ check('20x40+249+80', '240x200+0+0',
+ side='right', padx=(9, 31), fill='x')
+ check('60x40+240+80', '240x200+0+0', side='right', ipadx=20, fill='x')
+ check('30x40+260+80', '250x200+0+0',
+ side='right', ipadx=5, padx=10, fill='x')
+ check('30x40+255+80', '250x200+0+0',
+ side='right', ipadx=5, padx=(5, 15), fill='x')
+ check('20x40+140+0', '300x160+0+40', side='top', padx=20)
+ check('20x40+120+0', '300x160+0+40', side='top', padx=(0, 40))
+ check('60x40+120+0', '300x160+0+40', side='top', ipadx=20)
+ check('30x40+135+0', '300x160+0+40', side='top', ipadx=5, padx=10)
+ check('30x40+130+0', '300x160+0+40', side='top', ipadx=5, padx=(5, 15))
+ check('260x40+20+0', '300x160+0+40', side='top', padx=20, fill='x')
+ check('260x40+25+0', '300x160+0+40',
+ side='top', padx=(25, 15), fill='x')
+ check('300x40+0+0', '300x160+0+40', side='top', ipadx=20, fill='x')
+ check('280x40+10+0', '300x160+0+40',
+ side='top', ipadx=5, padx=10, fill='x')
+ check('280x40+5+0', '300x160+0+40',
+ side='top', ipadx=5, padx=(5, 15), fill='x')
+ a.pack_configure(padx='1c')
+ self.assertEqual(a.pack_info()['padx'],
+ self._str(pack.winfo_pixels('1c')))
+ a.pack_configure(ipadx='1c')
+ self.assertEqual(a.pack_info()['ipadx'],
+ self._str(pack.winfo_pixels('1c')))
+
+ def test_pack_configure_pady_ipady_fill(self):
+ pack, a, b, c, d = self.create2()
+ def check(geom1, geom2, **kwargs):
+ a.pack_forget()
+ b.pack_forget()
+ a.pack_configure(**kwargs)
+ b.pack_configure(expand=True, fill='both')
+ self.root.update()
+ self.assertEqual(a.winfo_geometry(), geom1)
+ self.assertEqual(b.winfo_geometry(), geom2)
+ check('20x40+280+80', '280x200+0+0', side='right', pady=20)
+ check('20x40+280+70', '280x200+0+0', side='right', pady=(10, 30))
+ check('20x80+280+60', '280x200+0+0', side='right', ipady=20)
+ check('20x50+280+75', '280x200+0+0', side='right', ipady=5, pady=10)
+ check('20x40+280+80', '280x200+0+0', side='right', pady=20, fill='x')
+ check('20x40+280+69', '280x200+0+0',
+ side='right', pady=(9, 31), fill='x')
+ check('20x80+280+60', '280x200+0+0', side='right', ipady=20, fill='x')
+ check('20x50+280+75', '280x200+0+0',
+ side='right', ipady=5, pady=10, fill='x')
+ check('20x50+280+70', '280x200+0+0',
+ side='right', ipady=5, pady=(5, 15), fill='x')
+ check('20x40+140+20', '300x120+0+80', side='top', pady=20)
+ check('20x40+140+0', '300x120+0+80', side='top', pady=(0, 40))
+ check('20x80+140+0', '300x120+0+80', side='top', ipady=20)
+ check('20x50+140+10', '300x130+0+70', side='top', ipady=5, pady=10)
+ check('20x50+140+5', '300x130+0+70', side='top', ipady=5, pady=(5, 15))
+ check('300x40+0+20', '300x120+0+80', side='top', pady=20, fill='x')
+ check('300x40+0+25', '300x120+0+80',
+ side='top', pady=(25, 15), fill='x')
+ check('300x80+0+0', '300x120+0+80', side='top', ipady=20, fill='x')
+ check('300x50+0+10', '300x130+0+70',
+ side='top', ipady=5, pady=10, fill='x')
+ check('300x50+0+5', '300x130+0+70',
+ side='top', ipady=5, pady=(5, 15), fill='x')
+ a.pack_configure(pady='1c')
+ self.assertEqual(a.pack_info()['pady'],
+ self._str(pack.winfo_pixels('1c')))
+ a.pack_configure(ipady='1c')
+ self.assertEqual(a.pack_info()['ipady'],
+ self._str(pack.winfo_pixels('1c')))
+
+ def test_pack_configure_side(self):
+ pack, a, b, c, d = self.create2()
+ def check(side, geom1, geom2):
+ a.pack_configure(side=side)
+ self.assertEqual(a.pack_info()['side'], side)
+ b.pack_configure(expand=True, fill='both')
+ self.root.update()
+ self.assertEqual(a.winfo_geometry(), geom1)
+ self.assertEqual(b.winfo_geometry(), geom2)
+ check('top', '20x40+140+0', '300x160+0+40')
+ check('bottom', '20x40+140+160', '300x160+0+0')
+ check('left', '20x40+0+80', '280x200+20+0')
+ check('right', '20x40+280+80', '280x200+0+0')
+
+ def test_pack_forget(self):
+ pack, a, b, c, d = self.create2()
+ a.pack_configure()
+ b.pack_configure()
+ c.pack_configure()
+ self.assertEqual(pack.pack_slaves(), [a, b, c])
+ b.pack_forget()
+ self.assertEqual(pack.pack_slaves(), [a, c])
+ b.pack_forget()
+ self.assertEqual(pack.pack_slaves(), [a, c])
+ d.pack_forget()
+
+ def test_pack_info(self):
+ pack, a, b, c, d = self.create2()
+ with self.assertRaisesRegexp(TclError, 'window "%s" isn\'t packed' % a):
+ a.pack_info()
+ a.pack_configure()
+ b.pack_configure(side='right', in_=a, anchor='s', expand=True, fill='x',
+ ipadx=5, padx=10, ipady=2, pady=(5, 15))
+ info = a.pack_info()
+ self.assertIsInstance(info, dict)
+ self.assertEqual(info['anchor'], 'center')
+ self.assertEqual(info['expand'], self._str(0))
+ self.assertEqual(info['fill'], 'none')
+ self.assertEqual(info['in'], pack)
+ self.assertEqual(info['ipadx'], self._str(0))
+ self.assertEqual(info['ipady'], self._str(0))
+ self.assertEqual(info['padx'], self._str(0))
+ self.assertEqual(info['pady'], self._str(0))
+ self.assertEqual(info['side'], 'top')
+ info = b.pack_info()
+ self.assertIsInstance(info, dict)
+ self.assertEqual(info['anchor'], 's')
+ self.assertEqual(info['expand'], self._str(1))
+ self.assertEqual(info['fill'], 'x')
+ self.assertEqual(info['in'], a)
+ self.assertEqual(info['ipadx'], self._str(5))
+ self.assertEqual(info['ipady'], self._str(2))
+ self.assertEqual(info['padx'], self._str(10))
+ self.assertEqual(info['pady'], self._str((5, 15)))
+ self.assertEqual(info['side'], 'right')
+
+ def test_pack_propagate(self):
+ pack, a, b, c, d = self.create2()
+ pack.configure(width=300, height=200)
+ a.pack_configure()
+ pack.pack_propagate(False)
+ self.root.update()
+ self.assertEqual(pack.winfo_reqwidth(), 300)
+ self.assertEqual(pack.winfo_reqheight(), 200)
+ pack.pack_propagate(True)
+ self.root.update()
+ self.assertEqual(pack.winfo_reqwidth(), 20)
+ self.assertEqual(pack.winfo_reqheight(), 40)
+
+ def test_pack_slaves(self):
+ pack, a, b, c, d = self.create2()
+ self.assertEqual(pack.pack_slaves(), [])
+ a.pack_configure()
+ self.assertEqual(pack.pack_slaves(), [a])
+ b.pack_configure()
+ self.assertEqual(pack.pack_slaves(), [a, b])
+
+
+class PlaceTest(AbstractWidgetTest, unittest.TestCase):
+
+ def create2(self):
+ t = Tkinter.Toplevel(self.root, width=300, height=200, bd=0)
+ t.wm_geometry('+0+0')
+ f = Tkinter.Frame(t, width=154, height=84, bd=2, relief='raised')
+ f.place_configure(x=48, y=38)
+ f2 = Tkinter.Frame(t, width=30, height=60, bd=2, relief='raised')
+ self.root.update()
+ return t, f, f2
+
+ def test_place_configure_in(self):
+ t, f, f2 = self.create2()
+ self.assertEqual(f2.winfo_manager(), '')
+ with self.assertRaisesRegexp(TclError, "can't place %s relative to "
+ "itself" % re.escape(str(f2))):
+ f2.place_configure(in_=f2)
+ if tcl_version >= (8, 5):
+ self.assertEqual(f2.winfo_manager(), '')
+ with self.assertRaisesRegexp(TclError, 'bad window path name'):
+ f2.place_configure(in_='spam')
+ f2.place_configure(in_=f)
+ self.assertEqual(f2.winfo_manager(), 'place')
+
+ def test_place_configure_x(self):
+ t, f, f2 = self.create2()
+ f2.place_configure(in_=f)
+ self.assertEqual(f2.place_info()['x'], '0')
+ self.root.update()
+ self.assertEqual(f2.winfo_x(), 50)
+ f2.place_configure(x=100)
+ self.assertEqual(f2.place_info()['x'], '100')
+ self.root.update()
+ self.assertEqual(f2.winfo_x(), 150)
+ f2.place_configure(x=-10, relx=1)
+ self.assertEqual(f2.place_info()['x'], '-10')
+ self.root.update()
+ self.assertEqual(f2.winfo_x(), 190)
+ with self.assertRaisesRegexp(TclError, 'bad screen distance "spam"'):
+ f2.place_configure(in_=f, x='spam')
+
+ def test_place_configure_y(self):
+ t, f, f2 = self.create2()
+ f2.place_configure(in_=f)
+ self.assertEqual(f2.place_info()['y'], '0')
+ self.root.update()
+ self.assertEqual(f2.winfo_y(), 40)
+ f2.place_configure(y=50)
+ self.assertEqual(f2.place_info()['y'], '50')
+ self.root.update()
+ self.assertEqual(f2.winfo_y(), 90)
+ f2.place_configure(y=-10, rely=1)
+ self.assertEqual(f2.place_info()['y'], '-10')
+ self.root.update()
+ self.assertEqual(f2.winfo_y(), 110)
+ with self.assertRaisesRegexp(TclError, 'bad screen distance "spam"'):
+ f2.place_configure(in_=f, y='spam')
+
+ def test_place_configure_relx(self):
+ t, f, f2 = self.create2()
+ f2.place_configure(in_=f)
+ self.assertEqual(f2.place_info()['relx'], '0')
+ self.root.update()
+ self.assertEqual(f2.winfo_x(), 50)
+ f2.place_configure(relx=0.5)
+ self.assertEqual(f2.place_info()['relx'], '0.5')
+ self.root.update()
+ self.assertEqual(f2.winfo_x(), 125)
+ f2.place_configure(relx=1)
+ self.assertEqual(f2.place_info()['relx'], '1')
+ self.root.update()
+ self.assertEqual(f2.winfo_x(), 200)
+ with self.assertRaisesRegexp(TclError, 'expected floating-point number '
+ 'but got "spam"'):
+ f2.place_configure(in_=f, relx='spam')
+
+ def test_place_configure_rely(self):
+ t, f, f2 = self.create2()
+ f2.place_configure(in_=f)
+ self.assertEqual(f2.place_info()['rely'], '0')
+ self.root.update()
+ self.assertEqual(f2.winfo_y(), 40)
+ f2.place_configure(rely=0.5)
+ self.assertEqual(f2.place_info()['rely'], '0.5')
+ self.root.update()
+ self.assertEqual(f2.winfo_y(), 80)
+ f2.place_configure(rely=1)
+ self.assertEqual(f2.place_info()['rely'], '1')
+ self.root.update()
+ self.assertEqual(f2.winfo_y(), 120)
+ with self.assertRaisesRegexp(TclError, 'expected floating-point number '
+ 'but got "spam"'):
+ f2.place_configure(in_=f, rely='spam')
+
+ def test_place_configure_anchor(self):
+ f = Tkinter.Frame(self.root)
+ with self.assertRaisesRegexp(TclError, 'bad anchor "j"'):
+ f.place_configure(anchor='j')
+ with self.assertRaisesRegexp(TclError, 'ambiguous anchor ""'):
+ f.place_configure(anchor='')
+ for value in 'n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw', 'center':
+ f.place_configure(anchor=value)
+ self.assertEqual(f.place_info()['anchor'], value)
+
+ def test_place_configure_width(self):
+ t, f, f2 = self.create2()
+ f2.place_configure(in_=f, width=120)
+ self.root.update()
+ self.assertEqual(f2.winfo_width(), 120)
+ f2.place_configure(width='')
+ self.root.update()
+ self.assertEqual(f2.winfo_width(), 30)
+ with self.assertRaisesRegexp(TclError, 'bad screen distance "abcd"'):
+ f2.place_configure(width='abcd')
+
+ def test_place_configure_height(self):
+ t, f, f2 = self.create2()
+ f2.place_configure(in_=f, height=120)
+ self.root.update()
+ self.assertEqual(f2.winfo_height(), 120)
+ f2.place_configure(height='')
+ self.root.update()
+ self.assertEqual(f2.winfo_height(), 60)
+ with self.assertRaisesRegexp(TclError, 'bad screen distance "abcd"'):
+ f2.place_configure(height='abcd')
+
+ def test_place_configure_relwidth(self):
+ t, f, f2 = self.create2()
+ f2.place_configure(in_=f, relwidth=0.5)
+ self.root.update()
+ self.assertEqual(f2.winfo_width(), 75)
+ f2.place_configure(relwidth='')
+ self.root.update()
+ self.assertEqual(f2.winfo_width(), 30)
+ with self.assertRaisesRegexp(TclError, 'expected floating-point number '
+ 'but got "abcd"'):
+ f2.place_configure(relwidth='abcd')
+
+ def test_place_configure_relheight(self):
+ t, f, f2 = self.create2()
+ f2.place_configure(in_=f, relheight=0.5)
+ self.root.update()
+ self.assertEqual(f2.winfo_height(), 40)
+ f2.place_configure(relheight='')
+ self.root.update()
+ self.assertEqual(f2.winfo_height(), 60)
+ with self.assertRaisesRegexp(TclError, 'expected floating-point number '
+ 'but got "abcd"'):
+ f2.place_configure(relheight='abcd')
+
+ def test_place_configure_bordermode(self):
+ f = Tkinter.Frame(self.root)
+ with self.assertRaisesRegexp(TclError, 'bad bordermode "j"'):
+ f.place_configure(bordermode='j')
+ with self.assertRaisesRegexp(TclError, 'ambiguous bordermode ""'):
+ f.place_configure(bordermode='')
+ for value in 'inside', 'outside', 'ignore':
+ f.place_configure(bordermode=value)
+ self.assertEqual(f.place_info()['bordermode'], value)
+
+ def test_place_forget(self):
+ foo = Tkinter.Frame(self.root)
+ foo.place_configure(width=50, height=50)
+ self.root.update()
+ foo.place_forget()
+ self.root.update()
+ self.assertFalse(foo.winfo_ismapped())
+ with self.assertRaises(TypeError):
+ foo.place_forget(0)
+
+ def test_place_info(self):
+ t, f, f2 = self.create2()
+ f2.place_configure(in_=f, x=1, y=2, width=3, height=4,
+ relx=0.1, rely=0.2, relwidth=0.3, relheight=0.4,
+ anchor='se', bordermode='outside')
+ info = f2.place_info()
+ self.assertIsInstance(info, dict)
+ self.assertEqual(info['x'], '1')
+ self.assertEqual(info['y'], '2')
+ self.assertEqual(info['width'], '3')
+ self.assertEqual(info['height'], '4')
+ self.assertEqual(info['relx'], '0.1')
+ self.assertEqual(info['rely'], '0.2')
+ self.assertEqual(info['relwidth'], '0.3')
+ self.assertEqual(info['relheight'], '0.4')
+ self.assertEqual(info['anchor'], 'se')
+ self.assertEqual(info['bordermode'], 'outside')
+ self.assertEqual(info['x'], '1')
+ self.assertEqual(info['x'], '1')
+ with self.assertRaises(TypeError):
+ f2.place_info(0)
+
+ def test_place_slaves(self):
+ foo = Tkinter.Frame(self.root)
+ bar = Tkinter.Frame(self.root)
+ self.assertEqual(foo.place_slaves(), [])
+ bar.place_configure(in_=foo)
+ self.assertEqual(foo.place_slaves(), [bar])
+ with self.assertRaises(TypeError):
+ foo.place_slaves(0)
+
+
+class GridTest(AbstractWidgetTest, unittest.TestCase):
+
+ def tearDown(self):
+ super(GridTest, self).tearDown()
+ cols, rows = self.root.grid_size()
+ for i in range(cols + 1):
+ self.root.grid_columnconfigure(i, weight=0, minsize=0, pad=0, uniform='')
+ for i in range(rows + 1):
+ self.root.grid_rowconfigure(i, weight=0, minsize=0, pad=0, uniform='')
+ self.root.grid_propagate(1)
+
+ def test_grid_configure(self):
+ b = Tkinter.Button(self.root)
+ self.addCleanup(b.destroy)
+ self.assertEqual(b.grid_info(), {})
+ b.grid_configure()
+ self.assertEqual(b.grid_info()['in'], self.root)
+ self.assertEqual(b.grid_info()['column'], self._str(0))
+ self.assertEqual(b.grid_info()['row'], self._str(0))
+ b.grid_configure({'column': 1}, row=2)
+ self.assertEqual(b.grid_info()['column'], self._str(1))
+ self.assertEqual(b.grid_info()['row'], self._str(2))
+
+ def test_grid_configure_column(self):
+ b = Tkinter.Button(self.root)
+ with self.assertRaisesRegexp(TclError, 'bad column value "-1": '
+ 'must be a non-negative integer'):
+ b.grid_configure(column=-1)
+ b.grid_configure(column=2)
+ self.assertEqual(b.grid_info()['column'], self._str(2))
+
+ def test_grid_configure_columnspan(self):
+ b = Tkinter.Button(self.root)
+ with self.assertRaisesRegexp(TclError, 'bad columnspan value "0": '
+ 'must be a positive integer'):
+ b.grid_configure(columnspan=0)
+ b.grid_configure(columnspan=2)
+ self.assertEqual(b.grid_info()['columnspan'], self._str(2))
+
+ def test_grid_configure_in(self):
+ f = Tkinter.Frame(self.root)
+ b = Tkinter.Button(self.root)
+ self.assertEqual(b.grid_info(), {})
+ b.grid_configure()
+ self.assertEqual(b.grid_info()['in'], self.root)
+ b.grid_configure(in_=f)
+ self.assertEqual(b.grid_info()['in'], f)
+ b.grid_configure({'in': self.root})
+ self.assertEqual(b.grid_info()['in'], self.root)
+
+ def test_grid_configure_ipadx(self):
+ b = Tkinter.Button(self.root)
+ with self.assertRaisesRegexp(TclError, 'bad ipadx value "-1": '
+ 'must be positive screen distance'):
+ b.grid_configure(ipadx=-1)
+ b.grid_configure(ipadx=1)
+ self.assertEqual(b.grid_info()['ipadx'], self._str(1))
+ b.grid_configure(ipadx='.5c')
+ self.assertEqual(b.grid_info()['ipadx'],
+ self._str(int_round(pixels_conv('.5c') * self.scaling)))
+
+ def test_grid_configure_ipady(self):
+ b = Tkinter.Button(self.root)
+ with self.assertRaisesRegexp(TclError, 'bad ipady value "-1": '
+ 'must be positive screen distance'):
+ b.grid_configure(ipady=-1)
+ b.grid_configure(ipady=1)
+ self.assertEqual(b.grid_info()['ipady'], self._str(1))
+ b.grid_configure(ipady='.5c')
+ self.assertEqual(b.grid_info()['ipady'],
+ self._str(int_round(pixels_conv('.5c') * self.scaling)))
+
+ def test_grid_configure_padx(self):
+ b = Tkinter.Button(self.root)
+ with self.assertRaisesRegexp(TclError, 'bad pad value "-1": '
+ 'must be positive screen distance'):
+ b.grid_configure(padx=-1)
+ b.grid_configure(padx=1)
+ self.assertEqual(b.grid_info()['padx'], self._str(1))
+ b.grid_configure(padx=(10, 5))
+ self.assertEqual(b.grid_info()['padx'], self._str((10, 5)))
+ b.grid_configure(padx='.5c')
+ self.assertEqual(b.grid_info()['padx'],
+ self._str(int_round(pixels_conv('.5c') * self.scaling)))
+
+ def test_grid_configure_pady(self):
+ b = Tkinter.Button(self.root)
+ with self.assertRaisesRegexp(TclError, 'bad pad value "-1": '
+ 'must be positive screen distance'):
+ b.grid_configure(pady=-1)
+ b.grid_configure(pady=1)
+ self.assertEqual(b.grid_info()['pady'], self._str(1))
+ b.grid_configure(pady=(10, 5))
+ self.assertEqual(b.grid_info()['pady'], self._str((10, 5)))
+ b.grid_configure(pady='.5c')
+ self.assertEqual(b.grid_info()['pady'],
+ self._str(int_round(pixels_conv('.5c') * self.scaling)))
+
+ def test_grid_configure_row(self):
+ b = Tkinter.Button(self.root)
+ self.addCleanup(b.destroy)
+ with self.assertRaisesRegexp(TclError, 'bad (row|grid) value "-1": '
+ 'must be a non-negative integer'):
+ b.grid_configure(row=-1)
+ b.grid_configure(row=2)
+ self.assertEqual(b.grid_info()['row'], self._str(2))
+
+    def test_grid_configure_rowspan(self):
+ b = Tkinter.Button(self.root)
+ with self.assertRaisesRegexp(TclError, 'bad rowspan value "0": '
+ 'must be a positive integer'):
+ b.grid_configure(rowspan=0)
+ b.grid_configure(rowspan=2)
+ self.assertEqual(b.grid_info()['rowspan'], self._str(2))
+
+ def test_grid_configure_sticky(self):
+ f = Tkinter.Frame(self.root, bg='red')
+ with self.assertRaisesRegexp(TclError, 'bad stickyness value "glue"'):
+ f.grid_configure(sticky='glue')
+ f.grid_configure(sticky='ne')
+ self.assertEqual(f.grid_info()['sticky'], 'ne')
+ f.grid_configure(sticky='n,s,e,w')
+ self.assertEqual(f.grid_info()['sticky'], 'nesw')
+
+ def test_grid_columnconfigure(self):
+ with self.assertRaises(TypeError):
+ self.root.grid_columnconfigure()
+ self.assertEqual(self.root.grid_columnconfigure(0),
+ {'minsize': 0, 'pad': 0, 'uniform': None, 'weight': 0})
+ with self.assertRaisesRegexp(TclError, 'bad option "-foo"'):
+ self.root.grid_columnconfigure(0, 'foo')
+ self.root.grid_columnconfigure((0, 3), weight=2)
+ with self.assertRaisesRegexp(TclError,
+ 'must specify a single element on retrieval'):
+ self.root.grid_columnconfigure((0, 3))
+ b = Tkinter.Button(self.root)
+ b.grid_configure(column=0, row=0)
+ if tcl_version >= (8, 5):
+ self.root.grid_columnconfigure('all', weight=3)
+ with self.assertRaisesRegexp(TclError, 'expected integer but got "all"'):
+ self.root.grid_columnconfigure('all')
+ self.assertEqual(self.root.grid_columnconfigure(0, 'weight'), 3)
+ self.assertEqual(self.root.grid_columnconfigure(3, 'weight'), 2)
+ self.assertEqual(self.root.grid_columnconfigure(265, 'weight'), 0)
+ if tcl_version >= (8, 5):
+ self.root.grid_columnconfigure(b, weight=4)
+ self.assertEqual(self.root.grid_columnconfigure(0, 'weight'), 4)
+
+ def test_grid_columnconfigure_minsize(self):
+ with self.assertRaisesRegexp(TclError, 'bad screen distance "foo"'):
+ self.root.grid_columnconfigure(0, minsize='foo')
+ self.root.grid_columnconfigure(0, minsize=10)
+ self.assertEqual(self.root.grid_columnconfigure(0, 'minsize'), 10)
+ self.assertEqual(self.root.grid_columnconfigure(0)['minsize'], 10)
+
+ def test_grid_columnconfigure_weight(self):
+ with self.assertRaisesRegexp(TclError, 'expected integer but got "bad"'):
+ self.root.grid_columnconfigure(0, weight='bad')
+ with self.assertRaisesRegexp(TclError, 'invalid arg "-weight": '
+ 'should be non-negative'):
+ self.root.grid_columnconfigure(0, weight=-3)
+ self.root.grid_columnconfigure(0, weight=3)
+ self.assertEqual(self.root.grid_columnconfigure(0, 'weight'), 3)
+ self.assertEqual(self.root.grid_columnconfigure(0)['weight'], 3)
+
+ def test_grid_columnconfigure_pad(self):
+ with self.assertRaisesRegexp(TclError, 'bad screen distance "foo"'):
+ self.root.grid_columnconfigure(0, pad='foo')
+ with self.assertRaisesRegexp(TclError, 'invalid arg "-pad": '
+ 'should be non-negative'):
+ self.root.grid_columnconfigure(0, pad=-3)
+ self.root.grid_columnconfigure(0, pad=3)
+ self.assertEqual(self.root.grid_columnconfigure(0, 'pad'), 3)
+ self.assertEqual(self.root.grid_columnconfigure(0)['pad'], 3)
+
+ def test_grid_columnconfigure_uniform(self):
+ self.root.grid_columnconfigure(0, uniform='foo')
+ self.assertEqual(self.root.grid_columnconfigure(0, 'uniform'), 'foo')
+ self.assertEqual(self.root.grid_columnconfigure(0)['uniform'], 'foo')
+
+ def test_grid_rowconfigure(self):
+ with self.assertRaises(TypeError):
+ self.root.grid_rowconfigure()
+ self.assertEqual(self.root.grid_rowconfigure(0),
+ {'minsize': 0, 'pad': 0, 'uniform': None, 'weight': 0})
+ with self.assertRaisesRegexp(TclError, 'bad option "-foo"'):
+ self.root.grid_rowconfigure(0, 'foo')
+ self.root.grid_rowconfigure((0, 3), weight=2)
+ with self.assertRaisesRegexp(TclError,
+ 'must specify a single element on retrieval'):
+ self.root.grid_rowconfigure((0, 3))
+ b = Tkinter.Button(self.root)
+ b.grid_configure(column=0, row=0)
+ if tcl_version >= (8, 5):
+ self.root.grid_rowconfigure('all', weight=3)
+ with self.assertRaisesRegexp(TclError, 'expected integer but got "all"'):
+ self.root.grid_rowconfigure('all')
+ self.assertEqual(self.root.grid_rowconfigure(0, 'weight'), 3)
+ self.assertEqual(self.root.grid_rowconfigure(3, 'weight'), 2)
+ self.assertEqual(self.root.grid_rowconfigure(265, 'weight'), 0)
+ if tcl_version >= (8, 5):
+ self.root.grid_rowconfigure(b, weight=4)
+ self.assertEqual(self.root.grid_rowconfigure(0, 'weight'), 4)
+
+ def test_grid_rowconfigure_minsize(self):
+ with self.assertRaisesRegexp(TclError, 'bad screen distance "foo"'):
+ self.root.grid_rowconfigure(0, minsize='foo')
+ self.root.grid_rowconfigure(0, minsize=10)
+ self.assertEqual(self.root.grid_rowconfigure(0, 'minsize'), 10)
+ self.assertEqual(self.root.grid_rowconfigure(0)['minsize'], 10)
+
+ def test_grid_rowconfigure_weight(self):
+ with self.assertRaisesRegexp(TclError, 'expected integer but got "bad"'):
+ self.root.grid_rowconfigure(0, weight='bad')
+ with self.assertRaisesRegexp(TclError, 'invalid arg "-weight": '
+ 'should be non-negative'):
+ self.root.grid_rowconfigure(0, weight=-3)
+ self.root.grid_rowconfigure(0, weight=3)
+ self.assertEqual(self.root.grid_rowconfigure(0, 'weight'), 3)
+ self.assertEqual(self.root.grid_rowconfigure(0)['weight'], 3)
+
+ def test_grid_rowconfigure_pad(self):
+ with self.assertRaisesRegexp(TclError, 'bad screen distance "foo"'):
+ self.root.grid_rowconfigure(0, pad='foo')
+ with self.assertRaisesRegexp(TclError, 'invalid arg "-pad": '
+ 'should be non-negative'):
+ self.root.grid_rowconfigure(0, pad=-3)
+ self.root.grid_rowconfigure(0, pad=3)
+ self.assertEqual(self.root.grid_rowconfigure(0, 'pad'), 3)
+ self.assertEqual(self.root.grid_rowconfigure(0)['pad'], 3)
+
+ def test_grid_rowconfigure_uniform(self):
+ self.root.grid_rowconfigure(0, uniform='foo')
+ self.assertEqual(self.root.grid_rowconfigure(0, 'uniform'), 'foo')
+ self.assertEqual(self.root.grid_rowconfigure(0)['uniform'], 'foo')
+
+ def test_grid_forget(self):
+ b = Tkinter.Button(self.root)
+ c = Tkinter.Button(self.root)
+ b.grid_configure(row=2, column=2, rowspan=2, columnspan=2,
+ padx=3, pady=4, sticky='ns')
+ self.assertEqual(self.root.grid_slaves(), [b])
+ b.grid_forget()
+ c.grid_forget()
+ self.assertEqual(self.root.grid_slaves(), [])
+ self.assertEqual(b.grid_info(), {})
+ b.grid_configure(row=0, column=0)
+ info = b.grid_info()
+ self.assertEqual(info['row'], self._str(0))
+ self.assertEqual(info['column'], self._str(0))
+ self.assertEqual(info['rowspan'], self._str(1))
+ self.assertEqual(info['columnspan'], self._str(1))
+ self.assertEqual(info['padx'], self._str(0))
+ self.assertEqual(info['pady'], self._str(0))
+ self.assertEqual(info['sticky'], '')
+
+ def test_grid_remove(self):
+ b = Tkinter.Button(self.root)
+ c = Tkinter.Button(self.root)
+ b.grid_configure(row=2, column=2, rowspan=2, columnspan=2,
+ padx=3, pady=4, sticky='ns')
+ self.assertEqual(self.root.grid_slaves(), [b])
+ b.grid_remove()
+ c.grid_remove()
+ self.assertEqual(self.root.grid_slaves(), [])
+ self.assertEqual(b.grid_info(), {})
+ b.grid_configure(row=0, column=0)
+ info = b.grid_info()
+ self.assertEqual(info['row'], self._str(0))
+ self.assertEqual(info['column'], self._str(0))
+ self.assertEqual(info['rowspan'], self._str(2))
+ self.assertEqual(info['columnspan'], self._str(2))
+ self.assertEqual(info['padx'], self._str(3))
+ self.assertEqual(info['pady'], self._str(4))
+ self.assertEqual(info['sticky'], 'ns')
+
+ def test_grid_info(self):
+ b = Tkinter.Button(self.root)
+ self.assertEqual(b.grid_info(), {})
+ b.grid_configure(row=2, column=2, rowspan=2, columnspan=2,
+ padx=3, pady=4, sticky='ns')
+ info = b.grid_info()
+ self.assertIsInstance(info, dict)
+ self.assertEqual(info['in'], self.root)
+ self.assertEqual(info['row'], self._str(2))
+ self.assertEqual(info['column'], self._str(2))
+ self.assertEqual(info['rowspan'], self._str(2))
+ self.assertEqual(info['columnspan'], self._str(2))
+ self.assertEqual(info['padx'], self._str(3))
+ self.assertEqual(info['pady'], self._str(4))
+ self.assertEqual(info['sticky'], 'ns')
+
+ def test_grid_bbox(self):
+ self.assertEqual(self.root.grid_bbox(), (0, 0, 0, 0))
+ self.assertEqual(self.root.grid_bbox(0, 0), (0, 0, 0, 0))
+ self.assertEqual(self.root.grid_bbox(0, 0, 1, 1), (0, 0, 0, 0))
+ with self.assertRaisesRegexp(TclError, 'expected integer but got "x"'):
+ self.root.grid_bbox('x', 0)
+ with self.assertRaisesRegexp(TclError, 'expected integer but got "x"'):
+ self.root.grid_bbox(0, 'x')
+ with self.assertRaisesRegexp(TclError, 'expected integer but got "x"'):
+ self.root.grid_bbox(0, 0, 'x', 0)
+ with self.assertRaisesRegexp(TclError, 'expected integer but got "x"'):
+ self.root.grid_bbox(0, 0, 0, 'x')
+ with self.assertRaises(TypeError):
+ self.root.grid_bbox(0, 0, 0, 0, 0)
+ t = Tkinter.Toplevel(self.root)
+ # de-maximize
+ t.wm_geometry('1x1+0+0')
+ t.wm_geometry('')
+ f1 = Tkinter.Frame(t, width=75, height=75, bg='red')
+ f2 = Tkinter.Frame(t, width=90, height=90, bg='blue')
+ f1.grid_configure(row=0, column=0)
+ f2.grid_configure(row=1, column=1)
+ self.root.update()
+ self.assertEqual(t.grid_bbox(), (0, 0, 165, 165))
+ self.assertEqual(t.grid_bbox(0, 0), (0, 0, 75, 75))
+ self.assertEqual(t.grid_bbox(0, 0, 1, 1), (0, 0, 165, 165))
+ self.assertEqual(t.grid_bbox(1, 1), (75, 75, 90, 90))
+ self.assertEqual(t.grid_bbox(10, 10, 0, 0), (0, 0, 165, 165))
+ self.assertEqual(t.grid_bbox(-2, -2, -1, -1), (0, 0, 0, 0))
+ self.assertEqual(t.grid_bbox(10, 10, 12, 12), (165, 165, 0, 0))
+
+ def test_grid_location(self):
+ with self.assertRaises(TypeError):
+ self.root.grid_location()
+ with self.assertRaises(TypeError):
+ self.root.grid_location(0)
+ with self.assertRaises(TypeError):
+ self.root.grid_location(0, 0, 0)
+ with self.assertRaisesRegexp(TclError, 'bad screen distance "x"'):
+ self.root.grid_location('x', 'y')
+ with self.assertRaisesRegexp(TclError, 'bad screen distance "y"'):
+ self.root.grid_location('1c', 'y')
+ t = Tkinter.Toplevel(self.root)
+ # de-maximize
+ t.wm_geometry('1x1+0+0')
+ t.wm_geometry('')
+ f = Tkinter.Frame(t, width=200, height=100,
+ highlightthickness=0, bg='red')
+ self.assertEqual(f.grid_location(10, 10), (-1, -1))
+ f.grid_configure()
+ self.root.update()
+ self.assertEqual(t.grid_location(-10, -10), (-1, -1))
+ self.assertEqual(t.grid_location(-10, 0), (-1, 0))
+ self.assertEqual(t.grid_location(-1, 0), (-1, 0))
+ self.assertEqual(t.grid_location(0, -10), (0, -1))
+ self.assertEqual(t.grid_location(0, -1), (0, -1))
+ self.assertEqual(t.grid_location(0, 0), (0, 0))
+ self.assertEqual(t.grid_location(200, 0), (0, 0))
+ self.assertEqual(t.grid_location(201, 0), (1, 0))
+ self.assertEqual(t.grid_location(0, 100), (0, 0))
+ self.assertEqual(t.grid_location(0, 101), (0, 1))
+ self.assertEqual(t.grid_location(201, 101), (1, 1))
+
+ def test_grid_propagate(self):
+ self.assertEqual(self.root.grid_propagate(), True)
+ with self.assertRaises(TypeError):
+ self.root.grid_propagate(False, False)
+ self.root.grid_propagate(False)
+ self.assertFalse(self.root.grid_propagate())
+ f = Tkinter.Frame(self.root, width=100, height=100, bg='red')
+ f.grid_configure(row=0, column=0)
+ self.root.update()
+ self.assertEqual(f.winfo_width(), 100)
+ self.assertEqual(f.winfo_height(), 100)
+ f.grid_propagate(False)
+ g = Tkinter.Frame(self.root, width=75, height=85, bg='green')
+ g.grid_configure(in_=f, row=0, column=0)
+ self.root.update()
+ self.assertEqual(f.winfo_width(), 100)
+ self.assertEqual(f.winfo_height(), 100)
+ f.grid_propagate(True)
+ self.root.update()
+ self.assertEqual(f.winfo_width(), 75)
+ self.assertEqual(f.winfo_height(), 85)
+
+ def test_grid_size(self):
+ with self.assertRaises(TypeError):
+ self.root.grid_size(0)
+ self.assertEqual(self.root.grid_size(), (0, 0))
+ f = Tkinter.Scale(self.root)
+ f.grid_configure(row=0, column=0)
+ self.assertEqual(self.root.grid_size(), (1, 1))
+ f.grid_configure(row=4, column=5)
+ self.assertEqual(self.root.grid_size(), (6, 5))
+
+ def test_grid_slaves(self):
+ self.assertEqual(self.root.grid_slaves(), [])
+ a = Tkinter.Label(self.root)
+ a.grid_configure(row=0, column=1)
+ b = Tkinter.Label(self.root)
+ b.grid_configure(row=1, column=0)
+ c = Tkinter.Label(self.root)
+ c.grid_configure(row=1, column=1)
+ d = Tkinter.Label(self.root)
+ d.grid_configure(row=1, column=1)
+ self.assertEqual(self.root.grid_slaves(), [d, c, b, a])
+ self.assertEqual(self.root.grid_slaves(row=0), [a])
+ self.assertEqual(self.root.grid_slaves(row=1), [d, c, b])
+ self.assertEqual(self.root.grid_slaves(column=0), [b])
+ self.assertEqual(self.root.grid_slaves(column=1), [d, c, a])
+ self.assertEqual(self.root.grid_slaves(row=1, column=1), [d, c])
+
+
+tests_gui = (
+ PackTest, PlaceTest, GridTest,
+)
+
+if __name__ == '__main__':
+ run_unittest(*tests_gui)
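One behavioural detail these tests pin down: pack_slaves() reports widgets in packing order, while grid_slaves() reports the most recently gridded widget first. A minimal sketch (requires a display; separate frames are used because pack and grid cannot manage slaves of the same master):

    import Tkinter
    root = Tkinter.Tk()
    packer = Tkinter.Frame(root)
    gridder = Tkinter.Frame(root)
    packer.pack()
    gridder.pack()
    a = Tkinter.Label(packer)
    b = Tkinter.Label(packer)
    a.pack()
    b.pack()
    assert packer.pack_slaves() == [a, b]    # packing order
    c = Tkinter.Label(gridder)
    d = Tkinter.Label(gridder)
    c.grid(row=0, column=0)
    d.grid(row=1, column=0)
    assert gridder.grid_slaves() == [d, c]   # most recently gridded first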
diff --git a/Lib/lib-tk/test/test_tkinter/test_images.py b/Lib/lib-tk/test/test_tkinter/test_images.py
new file mode 100644
index 0000000..a27b763
--- /dev/null
+++ b/Lib/lib-tk/test/test_tkinter/test_images.py
@@ -0,0 +1,339 @@
+import unittest
+import Tkinter as tkinter
+import ttk
+import test.test_support as support
+from test_ttk.support import requires_tcl
+
+support.requires('gui')
+
+
+class MiscTest(unittest.TestCase):
+
+ def setUp(self):
+ self.root = ttk.setup_master()
+
+ def test_image_types(self):
+ image_types = self.root.image_types()
+ self.assertIsInstance(image_types, tuple)
+ self.assertIn('photo', image_types)
+ self.assertIn('bitmap', image_types)
+
+ def test_image_names(self):
+ image_names = self.root.image_names()
+ self.assertIsInstance(image_names, tuple)
+
+
+class BitmapImageTest(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.testfile = support.findfile('python.xbm', subdir='imghdrdata')
+
+ def setUp(self):
+ self.root = ttk.setup_master()
+
+ def test_create_from_file(self):
+ image = tkinter.BitmapImage('::img::test', master=self.root,
+ foreground='yellow', background='blue',
+ file=self.testfile)
+ self.assertEqual(str(image), '::img::test')
+ self.assertEqual(image.type(), 'bitmap')
+ self.assertEqual(image.width(), 16)
+ self.assertEqual(image.height(), 16)
+ self.assertIn('::img::test', self.root.image_names())
+ del image
+ self.assertNotIn('::img::test', self.root.image_names())
+
+ def test_create_from_data(self):
+ with open(self.testfile, 'rb') as f:
+ data = f.read()
+ image = tkinter.BitmapImage('::img::test', master=self.root,
+ foreground='yellow', background='blue',
+ data=data)
+ self.assertEqual(str(image), '::img::test')
+ self.assertEqual(image.type(), 'bitmap')
+ self.assertEqual(image.width(), 16)
+ self.assertEqual(image.height(), 16)
+ self.assertIn('::img::test', self.root.image_names())
+ del image
+ self.assertNotIn('::img::test', self.root.image_names())
+
+ def assertEqualStrList(self, actual, expected):
+ self.assertIsInstance(actual, str)
+ self.assertEqual(self.root.splitlist(actual), expected)
+
+ def test_configure_data(self):
+ image = tkinter.BitmapImage('::img::test', master=self.root)
+ self.assertEqual(image['data'], '-data {} {} {} {}')
+ with open(self.testfile, 'rb') as f:
+ data = f.read()
+ image.configure(data=data)
+ self.assertEqualStrList(image['data'],
+ ('-data', '', '', '', data))
+ self.assertEqual(image.width(), 16)
+ self.assertEqual(image.height(), 16)
+
+ self.assertEqual(image['maskdata'], '-maskdata {} {} {} {}')
+ image.configure(maskdata=data)
+ self.assertEqualStrList(image['maskdata'],
+ ('-maskdata', '', '', '', data))
+
+ def test_configure_file(self):
+ image = tkinter.BitmapImage('::img::test', master=self.root)
+ self.assertEqual(image['file'], '-file {} {} {} {}')
+ image.configure(file=self.testfile)
+ self.assertEqualStrList(image['file'],
+                                ('-file', '', '', '', self.testfile))
+ self.assertEqual(image.width(), 16)
+ self.assertEqual(image.height(), 16)
+
+ self.assertEqual(image['maskfile'], '-maskfile {} {} {} {}')
+ image.configure(maskfile=self.testfile)
+ self.assertEqualStrList(image['maskfile'],
+ ('-maskfile', '', '', '', self.testfile))
+
+ def test_configure_background(self):
+ image = tkinter.BitmapImage('::img::test', master=self.root)
+ self.assertEqual(image['background'], '-background {} {} {} {}')
+ image.configure(background='blue')
+ self.assertEqual(image['background'], '-background {} {} {} blue')
+
+ def test_configure_foreground(self):
+ image = tkinter.BitmapImage('::img::test', master=self.root)
+ self.assertEqual(image['foreground'],
+ '-foreground {} {} #000000 #000000')
+ image.configure(foreground='yellow')
+ self.assertEqual(image['foreground'],
+ '-foreground {} {} #000000 yellow')
+
+
+class PhotoImageTest(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.testfile = support.findfile('python.gif', subdir='imghdrdata')
+
+ def setUp(self):
+ self.root = ttk.setup_master()
+ self.wantobjects = self.root.wantobjects()
+
+ def create(self):
+ return tkinter.PhotoImage('::img::test', master=self.root,
+ file=self.testfile)
+
+ def colorlist(self, *args):
+ if tkinter.TkVersion >= 8.6 and self.wantobjects:
+ return args
+ else:
+ return tkinter._join(args)
+
+ def check_create_from_file(self, ext):
+ testfile = support.findfile('python.' + ext, subdir='imghdrdata')
+ image = tkinter.PhotoImage('::img::test', master=self.root,
+ file=testfile)
+ self.assertEqual(str(image), '::img::test')
+ self.assertEqual(image.type(), 'photo')
+ self.assertEqual(image.width(), 16)
+ self.assertEqual(image.height(), 16)
+ self.assertEqual(image['data'], '')
+ self.assertEqual(image['file'], testfile)
+ self.assertIn('::img::test', self.root.image_names())
+ del image
+ self.assertNotIn('::img::test', self.root.image_names())
+
+ def check_create_from_data(self, ext):
+ testfile = support.findfile('python.' + ext, subdir='imghdrdata')
+ with open(testfile, 'rb') as f:
+ data = f.read()
+ image = tkinter.PhotoImage('::img::test', master=self.root,
+ data=data)
+ self.assertEqual(str(image), '::img::test')
+ self.assertEqual(image.type(), 'photo')
+ self.assertEqual(image.width(), 16)
+ self.assertEqual(image.height(), 16)
+ self.assertEqual(image['data'], data)
+ self.assertEqual(image['file'], '')
+ self.assertIn('::img::test', self.root.image_names())
+ del image
+ self.assertNotIn('::img::test', self.root.image_names())
+
+ def test_create_from_ppm_file(self):
+ self.check_create_from_file('ppm')
+
+ @unittest.skip('issue #21580')
+ def test_create_from_ppm_data(self):
+ self.check_create_from_data('ppm')
+
+ def test_create_from_pgm_file(self):
+ self.check_create_from_file('pgm')
+
+ @unittest.skip('issue #21580')
+ def test_create_from_pgm_data(self):
+ self.check_create_from_data('pgm')
+
+ def test_create_from_gif_file(self):
+ self.check_create_from_file('gif')
+
+ @unittest.skip('issue #21580')
+ def test_create_from_gif_data(self):
+ self.check_create_from_data('gif')
+
+ @requires_tcl(8, 6)
+ def test_create_from_png_file(self):
+ self.check_create_from_file('png')
+
+ @unittest.skip('issue #21580')
+ @requires_tcl(8, 6)
+ def test_create_from_png_data(self):
+ self.check_create_from_data('png')
+
+ @unittest.skip('issue #21580')
+ def test_configure_data(self):
+ image = tkinter.PhotoImage('::img::test', master=self.root)
+ self.assertEqual(image['data'], '')
+ with open(self.testfile, 'rb') as f:
+ data = f.read()
+ image.configure(data=data)
+ self.assertEqual(image['data'], data)
+ self.assertEqual(image.width(), 16)
+ self.assertEqual(image.height(), 16)
+
+ def test_configure_format(self):
+ image = tkinter.PhotoImage('::img::test', master=self.root)
+ self.assertEqual(image['format'], '')
+ image.configure(file=self.testfile, format='gif')
+ self.assertEqual(image['format'], ('gif',) if self.wantobjects
+ else 'gif')
+ self.assertEqual(image.width(), 16)
+ self.assertEqual(image.height(), 16)
+
+ def test_configure_file(self):
+ image = tkinter.PhotoImage('::img::test', master=self.root)
+ self.assertEqual(image['file'], '')
+ image.configure(file=self.testfile)
+ self.assertEqual(image['file'], self.testfile)
+ self.assertEqual(image.width(), 16)
+ self.assertEqual(image.height(), 16)
+
+ def test_configure_gamma(self):
+ image = tkinter.PhotoImage('::img::test', master=self.root)
+ self.assertEqual(image['gamma'], '1.0')
+ image.configure(gamma=2.0)
+ self.assertEqual(image['gamma'], '2.0')
+
+ def test_configure_width_height(self):
+ image = tkinter.PhotoImage('::img::test', master=self.root)
+ self.assertEqual(image['width'], '0')
+ self.assertEqual(image['height'], '0')
+ image.configure(width=20)
+ image.configure(height=10)
+ self.assertEqual(image['width'], '20')
+ self.assertEqual(image['height'], '10')
+ self.assertEqual(image.width(), 20)
+ self.assertEqual(image.height(), 10)
+
+ def test_configure_palette(self):
+ image = tkinter.PhotoImage('::img::test', master=self.root)
+ self.assertEqual(image['palette'], '')
+ image.configure(palette=256)
+ self.assertEqual(image['palette'], '256')
+ image.configure(palette='3/4/2')
+ self.assertEqual(image['palette'], '3/4/2')
+
+ def test_blank(self):
+ image = self.create()
+ image.blank()
+ self.assertEqual(image.width(), 16)
+ self.assertEqual(image.height(), 16)
+ self.assertEqual(image.get(4, 6), self.colorlist(0, 0, 0))
+
+ def test_copy(self):
+ image = self.create()
+ image2 = image.copy()
+ self.assertEqual(image2.width(), 16)
+ self.assertEqual(image2.height(), 16)
+        self.assertEqual(image2.get(4, 6), image.get(4, 6))
+
+ def test_subsample(self):
+ image = self.create()
+ image2 = image.subsample(2, 3)
+ self.assertEqual(image2.width(), 8)
+ self.assertEqual(image2.height(), 6)
+ self.assertEqual(image2.get(2, 2), image.get(4, 6))
+
+ image2 = image.subsample(2)
+ self.assertEqual(image2.width(), 8)
+ self.assertEqual(image2.height(), 8)
+ self.assertEqual(image2.get(2, 3), image.get(4, 6))
+
+ def test_zoom(self):
+ image = self.create()
+ image2 = image.zoom(2, 3)
+ self.assertEqual(image2.width(), 32)
+ self.assertEqual(image2.height(), 48)
+ self.assertEqual(image2.get(8, 18), image.get(4, 6))
+ self.assertEqual(image2.get(9, 20), image.get(4, 6))
+
+ image2 = image.zoom(2)
+ self.assertEqual(image2.width(), 32)
+ self.assertEqual(image2.height(), 32)
+ self.assertEqual(image2.get(8, 12), image.get(4, 6))
+ self.assertEqual(image2.get(9, 13), image.get(4, 6))
+
+ def test_put(self):
+ image = self.create()
+ image.put('{red green} {blue yellow}', to=(4, 6))
+ self.assertEqual(image.get(4, 6), self.colorlist(255, 0, 0))
+ self.assertEqual(image.get(5, 6),
+ self.colorlist(0, 128 if tkinter.TkVersion >= 8.6
+ else 255, 0))
+ self.assertEqual(image.get(4, 7), self.colorlist(0, 0, 255))
+ self.assertEqual(image.get(5, 7), self.colorlist(255, 255, 0))
+
+ image.put((('#f00', '#00ff00'), ('#000000fff', '#ffffffff0000')))
+ self.assertEqual(image.get(0, 0), self.colorlist(255, 0, 0))
+ self.assertEqual(image.get(1, 0), self.colorlist(0, 255, 0))
+ self.assertEqual(image.get(0, 1), self.colorlist(0, 0, 255))
+ self.assertEqual(image.get(1, 1), self.colorlist(255, 255, 0))
+
+ def test_get(self):
+ image = self.create()
+ self.assertEqual(image.get(4, 6), self.colorlist(62, 116, 162))
+ self.assertEqual(image.get(0, 0), self.colorlist(0, 0, 0))
+ self.assertEqual(image.get(15, 15), self.colorlist(0, 0, 0))
+ self.assertRaises(tkinter.TclError, image.get, -1, 0)
+ self.assertRaises(tkinter.TclError, image.get, 0, -1)
+ self.assertRaises(tkinter.TclError, image.get, 16, 15)
+ self.assertRaises(tkinter.TclError, image.get, 15, 16)
+
+ def test_write(self):
+ image = self.create()
+ self.addCleanup(support.unlink, support.TESTFN)
+
+ image.write(support.TESTFN)
+ image2 = tkinter.PhotoImage('::img::test2', master=self.root,
+ format='ppm',
+ file=support.TESTFN)
+ self.assertEqual(str(image2), '::img::test2')
+ self.assertEqual(image2.type(), 'photo')
+ self.assertEqual(image2.width(), 16)
+ self.assertEqual(image2.height(), 16)
+ self.assertEqual(image2.get(0, 0), image.get(0, 0))
+ self.assertEqual(image2.get(15, 8), image.get(15, 8))
+
+ image.write(support.TESTFN, format='gif', from_coords=(4, 6, 6, 9))
+ image3 = tkinter.PhotoImage('::img::test3', master=self.root,
+ format='gif',
+ file=support.TESTFN)
+ self.assertEqual(str(image3), '::img::test3')
+ self.assertEqual(image3.type(), 'photo')
+ self.assertEqual(image3.width(), 2)
+ self.assertEqual(image3.height(), 3)
+ self.assertEqual(image3.get(0, 0), image.get(4, 6))
+ self.assertEqual(image3.get(1, 2), image.get(5, 8))
+
+
+tests_gui = (MiscTest, BitmapImageTest, PhotoImageTest,)
+
+if __name__ == "__main__":
+ support.run_unittest(*tests_gui)
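The PhotoImage tests above encode the scaling rule for subsample() and zoom(): subsample divides each dimension, zoom multiplies it, and a single factor applies to both axes. A minimal sketch, with 'python.gif' standing in for any 16x16 image file:

    import Tkinter
    root = Tkinter.Tk()
    img = Tkinter.PhotoImage(master=root, file='python.gif')  # placeholder 16x16 image
    small = img.subsample(2)     # one factor covers both axes: 8x8
    big = img.zoom(2, 3)         # 2x wider, 3x taller: 32x48
    assert (small.width(), small.height()) == (8, 8)
    assert (big.width(), big.height()) == (32, 48)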
diff --git a/Lib/lib-tk/test/test_tkinter/test_text.py b/Lib/lib-tk/test/test_tkinter/test_text.py
index e6c08be..ca21b60 100644
--- a/Lib/lib-tk/test/test_tkinter/test_text.py
+++ b/Lib/lib-tk/test/test_tkinter/test_text.py
@@ -14,6 +14,17 @@ class TextTest(unittest.TestCase):
def tearDown(self):
self.text.destroy()
+ def test_debug(self):
+ text = self.text
+ olddebug = text.debug()
+ try:
+ text.debug(0)
+ self.assertEqual(text.debug(), 0)
+ text.debug(1)
+ self.assertEqual(text.debug(), 1)
+ finally:
+ text.debug(olddebug)
+ self.assertEqual(text.debug(), olddebug)
def test_search(self):
text = self.text
diff --git a/Lib/lib-tk/test/test_tkinter/test_variables.py b/Lib/lib-tk/test/test_tkinter/test_variables.py
new file mode 100644
index 0000000..a24ea38
--- /dev/null
+++ b/Lib/lib-tk/test/test_tkinter/test_variables.py
@@ -0,0 +1,166 @@
+import unittest
+
+from Tkinter import Variable, StringVar, IntVar, DoubleVar, BooleanVar, Tk, TclError
+
+
+class TestBase(unittest.TestCase):
+
+ def setUp(self):
+ self.root = Tk()
+
+ def tearDown(self):
+ self.root.destroy()
+
+
+class TestVariable(TestBase):
+
+ def info_exists(self, *args):
+ return self.root.getboolean(self.root.call("info", "exists", *args))
+
+ def test_default(self):
+ v = Variable(self.root)
+ self.assertEqual("", v.get())
+ self.assertRegexpMatches(str(v), r"^PY_VAR(\d+)$")
+
+ def test_name_and_value(self):
+ v = Variable(self.root, "sample string", "varname")
+ self.assertEqual("sample string", v.get())
+ self.assertEqual("varname", str(v))
+
+ def test___del__(self):
+ self.assertFalse(self.info_exists("varname"))
+ v = Variable(self.root, "sample string", "varname")
+ self.assertTrue(self.info_exists("varname"))
+ del v
+ self.assertFalse(self.info_exists("varname"))
+
+ def test_dont_unset_not_existing(self):
+ self.assertFalse(self.info_exists("varname"))
+ v1 = Variable(self.root, name="name")
+ v2 = Variable(self.root, name="name")
+ del v1
+ self.assertFalse(self.info_exists("name"))
+ # shouldn't raise exception
+ del v2
+ self.assertFalse(self.info_exists("name"))
+
+ def test___eq__(self):
+        # values don't matter, only class and name are checked
+ v1 = Variable(self.root, name="abc")
+ v2 = Variable(self.root, name="abc")
+ self.assertEqual(v1, v2)
+
+ v3 = Variable(self.root, name="abc")
+ v4 = StringVar(self.root, name="abc")
+ self.assertNotEqual(v3, v4)
+
+ def test_invalid_name(self):
+ with self.assertRaises(TypeError):
+ Variable(self.root, name=123)
+
+ def test_null_in_name(self):
+ with self.assertRaises(ValueError):
+ Variable(self.root, name='var\x00name')
+ with self.assertRaises(ValueError):
+ self.root.globalsetvar('var\x00name', "value")
+ with self.assertRaises(ValueError):
+ self.root.setvar('var\x00name', "value")
+
+
+class TestStringVar(TestBase):
+
+ def test_default(self):
+ v = StringVar(self.root)
+ self.assertEqual("", v.get())
+
+ def test_get(self):
+ v = StringVar(self.root, "abc", "name")
+ self.assertEqual("abc", v.get())
+ self.root.globalsetvar("name", "value")
+ self.assertEqual("value", v.get())
+
+ def test_get_null(self):
+ v = StringVar(self.root, "abc\x00def", "name")
+ self.assertEqual("abc\x00def", v.get())
+ self.root.globalsetvar("name", "val\x00ue")
+ self.assertEqual("val\x00ue", v.get())
+
+
+class TestIntVar(TestBase):
+
+ def test_default(self):
+ v = IntVar(self.root)
+ self.assertEqual(0, v.get())
+
+ def test_get(self):
+ v = IntVar(self.root, 123, "name")
+ self.assertEqual(123, v.get())
+ self.root.globalsetvar("name", "345")
+ self.assertEqual(345, v.get())
+
+ def test_invalid_value(self):
+ v = IntVar(self.root, name="name")
+ self.root.globalsetvar("name", "value")
+ with self.assertRaises(ValueError):
+ v.get()
+ self.root.globalsetvar("name", "345.0")
+ with self.assertRaises(ValueError):
+ v.get()
+
+
+class TestDoubleVar(TestBase):
+
+ def test_default(self):
+ v = DoubleVar(self.root)
+ self.assertEqual(0.0, v.get())
+
+ def test_get(self):
+ v = DoubleVar(self.root, 1.23, "name")
+ self.assertAlmostEqual(1.23, v.get())
+ self.root.globalsetvar("name", "3.45")
+ self.assertAlmostEqual(3.45, v.get())
+
+ def test_get_from_int(self):
+ v = DoubleVar(self.root, 1.23, "name")
+ self.assertAlmostEqual(1.23, v.get())
+ self.root.globalsetvar("name", "3.45")
+ self.assertAlmostEqual(3.45, v.get())
+ self.root.globalsetvar("name", "456")
+ self.assertAlmostEqual(456, v.get())
+
+ def test_invalid_value(self):
+ v = DoubleVar(self.root, name="name")
+ self.root.globalsetvar("name", "value")
+ with self.assertRaises(ValueError):
+ v.get()
+
+
+class TestBooleanVar(TestBase):
+
+ def test_default(self):
+ v = BooleanVar(self.root)
+ self.assertEqual(False, v.get())
+
+ def test_get(self):
+ v = BooleanVar(self.root, True, "name")
+ self.assertAlmostEqual(True, v.get())
+ self.root.globalsetvar("name", "0")
+ self.assertAlmostEqual(False, v.get())
+
+ def test_invalid_value_domain(self):
+ v = BooleanVar(self.root, name="name")
+ self.root.globalsetvar("name", "value")
+ with self.assertRaises(TclError):
+ v.get()
+ self.root.globalsetvar("name", "1.0")
+ with self.assertRaises(TclError):
+ v.get()
+
+
+tests_gui = (TestVariable, TestStringVar, TestIntVar,
+ TestDoubleVar, TestBooleanVar)
+
+
+if __name__ == "__main__":
+    from test.test_support import run_unittest
+ run_unittest(*tests_gui)
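The variable tests fix the defaults ("" for StringVar, 0 for IntVar, 0.0 for DoubleVar, False for BooleanVar) and the conversion behaviour of get(), which raises ValueError (TclError for BooleanVar) when the stored Tcl value cannot be converted. A minimal sketch (requires a display):

    from Tkinter import Tk, StringVar, IntVar
    root = Tk()
    s = StringVar(root)                  # defaults to ""
    assert s.get() == ""
    i = IntVar(root, 123, "name")        # explicit value and Tcl variable name
    assert i.get() == 123
    root.globalsetvar("name", "345")     # change it on the Tcl side
    assert i.get() == 345                # get() converts the stored string to int
    root.globalsetvar("name", "oops")
    try:
        i.get()
    except ValueError:
        pass                             # non-numeric content raises ValueError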
diff --git a/Lib/lib-tk/test/test_tkinter/test_widgets.py b/Lib/lib-tk/test/test_tkinter/test_widgets.py
new file mode 100644
index 0000000..3f5f0b9
--- /dev/null
+++ b/Lib/lib-tk/test/test_tkinter/test_widgets.py
@@ -0,0 +1,1172 @@
+import unittest
+import Tkinter
+from Tkinter import TclError
+import os
+import sys
+from test.test_support import requires, run_unittest
+
+from test_ttk.support import (tcl_version, requires_tcl, get_tk_patchlevel,
+ widget_eq)
+from widget_tests import (
+ add_standard_options, noconv, noconv_meth, int_round, pixels_round,
+ AbstractWidgetTest, StandardOptionsTests,
+ IntegerSizeTests, PixelSizeTests,
+ setUpModule)
+
+requires('gui')
+
+
+class AbstractToplevelTest(AbstractWidgetTest, PixelSizeTests):
+ _conv_pad_pixels = noconv_meth
+
+ def test_class(self):
+ widget = self.create()
+ self.assertEqual(widget['class'],
+ widget.__class__.__name__.title())
+ self.checkInvalidParam(widget, 'class', 'Foo',
+ errmsg="can't modify -class option after widget is created")
+ widget2 = self.create(class_='Foo')
+ self.assertEqual(widget2['class'], 'Foo')
+
+ def test_colormap(self):
+ widget = self.create()
+ self.assertEqual(widget['colormap'], '')
+ self.checkInvalidParam(widget, 'colormap', 'new',
+ errmsg="can't modify -colormap option after widget is created")
+ widget2 = self.create(colormap='new')
+ self.assertEqual(widget2['colormap'], 'new')
+
+ def test_container(self):
+ widget = self.create()
+ self.assertEqual(widget['container'], 0 if self.wantobjects else '0')
+ self.checkInvalidParam(widget, 'container', 1,
+ errmsg="can't modify -container option after widget is created")
+ widget2 = self.create(container=True)
+ self.assertEqual(widget2['container'], 1 if self.wantobjects else '1')
+
+ def test_visual(self):
+ widget = self.create()
+ self.assertEqual(widget['visual'], '')
+ self.checkInvalidParam(widget, 'visual', 'default',
+ errmsg="can't modify -visual option after widget is created")
+ widget2 = self.create(visual='default')
+ self.assertEqual(widget2['visual'], 'default')
+
+
+@add_standard_options(StandardOptionsTests)
+class ToplevelTest(AbstractToplevelTest, unittest.TestCase):
+ OPTIONS = (
+ 'background', 'borderwidth',
+ 'class', 'colormap', 'container', 'cursor', 'height',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'menu', 'padx', 'pady', 'relief', 'screen',
+ 'takefocus', 'use', 'visual', 'width',
+ )
+
+ def _create(self, **kwargs):
+ return Tkinter.Toplevel(self.root, **kwargs)
+
+ def test_menu(self):
+ widget = self.create()
+ menu = Tkinter.Menu(self.root)
+ self.checkParam(widget, 'menu', menu, eq=widget_eq)
+ self.checkParam(widget, 'menu', '')
+
+ def test_screen(self):
+ widget = self.create()
+ self.assertEqual(widget['screen'], '')
+ try:
+ display = os.environ['DISPLAY']
+ except KeyError:
+ self.skipTest('No $DISPLAY set.')
+ self.checkInvalidParam(widget, 'screen', display,
+ errmsg="can't modify -screen option after widget is created")
+ widget2 = self.create(screen=display)
+ self.assertEqual(widget2['screen'], display)
+
+ def test_use(self):
+ widget = self.create()
+ self.assertEqual(widget['use'], '')
+ parent = self.create(container=True)
+ wid = parent.winfo_id()
+ widget2 = self.create(use=wid)
+ self.assertEqual(int(widget2['use']), wid)
+
+
+@add_standard_options(StandardOptionsTests)
+class FrameTest(AbstractToplevelTest, unittest.TestCase):
+ OPTIONS = (
+ 'background', 'borderwidth',
+ 'class', 'colormap', 'container', 'cursor', 'height',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'relief', 'takefocus', 'visual', 'width',
+ )
+
+ def _create(self, **kwargs):
+ return Tkinter.Frame(self.root, **kwargs)
+
+
+@add_standard_options(StandardOptionsTests)
+class LabelFrameTest(AbstractToplevelTest, unittest.TestCase):
+ OPTIONS = (
+ 'background', 'borderwidth',
+ 'class', 'colormap', 'container', 'cursor',
+ 'font', 'foreground', 'height',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'labelanchor', 'labelwidget', 'padx', 'pady', 'relief',
+ 'takefocus', 'text', 'visual', 'width',
+ )
+
+ def _create(self, **kwargs):
+ return Tkinter.LabelFrame(self.root, **kwargs)
+
+ def test_labelanchor(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'labelanchor',
+ 'e', 'en', 'es', 'n', 'ne', 'nw',
+ 's', 'se', 'sw', 'w', 'wn', 'ws')
+ self.checkInvalidParam(widget, 'labelanchor', 'center')
+
+ def test_labelwidget(self):
+ widget = self.create()
+ label = Tkinter.Label(self.root, text='Mupp', name='foo')
+ self.checkParam(widget, 'labelwidget', label, expected='.foo')
+ label.destroy()
+
+
+class AbstractLabelTest(AbstractWidgetTest, IntegerSizeTests):
+ _conv_pixels = noconv_meth
+
+ def test_highlightthickness(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'highlightthickness',
+ 0, 1.3, 2.6, 6, -2, '10p')
+
+
+@add_standard_options(StandardOptionsTests)
+class LabelTest(AbstractLabelTest, unittest.TestCase):
+ OPTIONS = (
+ 'activebackground', 'activeforeground', 'anchor',
+ 'background', 'bitmap', 'borderwidth', 'compound', 'cursor',
+ 'disabledforeground', 'font', 'foreground', 'height',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'image', 'justify', 'padx', 'pady', 'relief', 'state',
+ 'takefocus', 'text', 'textvariable',
+ 'underline', 'width', 'wraplength',
+ )
+
+ def _create(self, **kwargs):
+ return Tkinter.Label(self.root, **kwargs)
+
+
+@add_standard_options(StandardOptionsTests)
+class ButtonTest(AbstractLabelTest, unittest.TestCase):
+ OPTIONS = (
+ 'activebackground', 'activeforeground', 'anchor',
+ 'background', 'bitmap', 'borderwidth',
+ 'command', 'compound', 'cursor', 'default',
+ 'disabledforeground', 'font', 'foreground', 'height',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'image', 'justify', 'overrelief', 'padx', 'pady', 'relief',
+ 'repeatdelay', 'repeatinterval',
+ 'state', 'takefocus', 'text', 'textvariable',
+ 'underline', 'width', 'wraplength')
+
+ def _create(self, **kwargs):
+ return Tkinter.Button(self.root, **kwargs)
+
+ def test_default(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'default', 'active', 'disabled', 'normal')
+
+
+@add_standard_options(StandardOptionsTests)
+class CheckbuttonTest(AbstractLabelTest, unittest.TestCase):
+ OPTIONS = (
+ 'activebackground', 'activeforeground', 'anchor',
+ 'background', 'bitmap', 'borderwidth',
+ 'command', 'compound', 'cursor',
+ 'disabledforeground', 'font', 'foreground', 'height',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'image', 'indicatoron', 'justify',
+ 'offrelief', 'offvalue', 'onvalue', 'overrelief',
+ 'padx', 'pady', 'relief', 'selectcolor', 'selectimage', 'state',
+ 'takefocus', 'text', 'textvariable',
+ 'tristateimage', 'tristatevalue',
+ 'underline', 'variable', 'width', 'wraplength',
+ )
+
+ def _create(self, **kwargs):
+ return Tkinter.Checkbutton(self.root, **kwargs)
+
+ def test_offvalue(self):
+ widget = self.create()
+ self.checkParams(widget, 'offvalue', 1, 2.3, '', 'any string')
+
+ def test_onvalue(self):
+ widget = self.create()
+ self.checkParams(widget, 'onvalue', 1, 2.3, '', 'any string')
+
+
+@add_standard_options(StandardOptionsTests)
+class RadiobuttonTest(AbstractLabelTest, unittest.TestCase):
+ OPTIONS = (
+ 'activebackground', 'activeforeground', 'anchor',
+ 'background', 'bitmap', 'borderwidth',
+ 'command', 'compound', 'cursor',
+ 'disabledforeground', 'font', 'foreground', 'height',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'image', 'indicatoron', 'justify', 'offrelief', 'overrelief',
+ 'padx', 'pady', 'relief', 'selectcolor', 'selectimage', 'state',
+ 'takefocus', 'text', 'textvariable',
+ 'tristateimage', 'tristatevalue',
+ 'underline', 'value', 'variable', 'width', 'wraplength',
+ )
+
+ def _create(self, **kwargs):
+ return Tkinter.Radiobutton(self.root, **kwargs)
+
+ def test_value(self):
+ widget = self.create()
+ self.checkParams(widget, 'value', 1, 2.3, '', 'any string')
+
+
+@add_standard_options(StandardOptionsTests)
+class MenubuttonTest(AbstractLabelTest, unittest.TestCase):
+ OPTIONS = (
+ 'activebackground', 'activeforeground', 'anchor',
+ 'background', 'bitmap', 'borderwidth',
+ 'compound', 'cursor', 'direction',
+ 'disabledforeground', 'font', 'foreground', 'height',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'image', 'indicatoron', 'justify', 'menu',
+ 'padx', 'pady', 'relief', 'state',
+ 'takefocus', 'text', 'textvariable',
+ 'underline', 'width', 'wraplength',
+ )
+ _conv_pixels = staticmethod(pixels_round)
+
+ def _create(self, **kwargs):
+ return Tkinter.Menubutton(self.root, **kwargs)
+
+ def test_direction(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'direction',
+ 'above', 'below', 'flush', 'left', 'right')
+
+ def test_height(self):
+ widget = self.create()
+ self.checkIntegerParam(widget, 'height', 100, -100, 0, conv=str)
+
+ test_highlightthickness = StandardOptionsTests.test_highlightthickness.im_func
+
+ @unittest.skipIf(sys.platform == 'darwin',
+ 'crashes with Cocoa Tk (issue19733)')
+ def test_image(self):
+ widget = self.create()
+ image = Tkinter.PhotoImage('image1')
+ self.checkParam(widget, 'image', image, conv=str)
+ errmsg = 'image "spam" doesn\'t exist'
+ with self.assertRaises(Tkinter.TclError) as cm:
+ widget['image'] = 'spam'
+ if errmsg is not None:
+ self.assertEqual(str(cm.exception), errmsg)
+ with self.assertRaises(Tkinter.TclError) as cm:
+ widget.configure({'image': 'spam'})
+ if errmsg is not None:
+ self.assertEqual(str(cm.exception), errmsg)
+
+ def test_menu(self):
+ widget = self.create()
+ menu = Tkinter.Menu(widget, name='menu')
+ self.checkParam(widget, 'menu', menu, eq=widget_eq)
+ menu.destroy()
+
+ def test_padx(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'padx', 3, 4.4, 5.6, '12m')
+ self.checkParam(widget, 'padx', -2, expected=0)
+
+ def test_pady(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'pady', 3, 4.4, 5.6, '12m')
+ self.checkParam(widget, 'pady', -2, expected=0)
+
+ def test_width(self):
+ widget = self.create()
+ self.checkIntegerParam(widget, 'width', 402, -402, 0, conv=str)
+
+
+class OptionMenuTest(MenubuttonTest, unittest.TestCase):
+
+ def _create(self, default='b', values=('a', 'b', 'c'), **kwargs):
+ return Tkinter.OptionMenu(self.root, None, default, *values, **kwargs)
+
+
+@add_standard_options(IntegerSizeTests, StandardOptionsTests)
+class EntryTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'background', 'borderwidth', 'cursor',
+ 'disabledbackground', 'disabledforeground',
+ 'exportselection', 'font', 'foreground',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'insertbackground', 'insertborderwidth',
+ 'insertofftime', 'insertontime', 'insertwidth',
+ 'invalidcommand', 'justify', 'readonlybackground', 'relief',
+ 'selectbackground', 'selectborderwidth', 'selectforeground',
+ 'show', 'state', 'takefocus', 'textvariable',
+ 'validate', 'validatecommand', 'width', 'xscrollcommand',
+ )
+
+ def _create(self, **kwargs):
+ return Tkinter.Entry(self.root, **kwargs)
+
+ def test_disabledbackground(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'disabledbackground')
+
+ def test_insertborderwidth(self):
+ widget = self.create(insertwidth=100)
+ self.checkPixelsParam(widget, 'insertborderwidth',
+ 0, 1.3, 2.6, 6, -2, '10p')
+        # insertborderwidth is bounded above by half of insertwidth.
+ self.checkParam(widget, 'insertborderwidth', 60, expected=100//2)
+
+ def test_insertwidth(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'insertwidth', 1.3, 3.6, '10p')
+ self.checkParam(widget, 'insertwidth', 0.1, expected=2)
+ self.checkParam(widget, 'insertwidth', -2, expected=2)
+ if pixels_round(0.9) <= 0:
+ self.checkParam(widget, 'insertwidth', 0.9, expected=2)
+ else:
+ self.checkParam(widget, 'insertwidth', 0.9, expected=1)
+
+ def test_invalidcommand(self):
+ widget = self.create()
+ self.checkCommandParam(widget, 'invalidcommand')
+ self.checkCommandParam(widget, 'invcmd')
+
+ def test_readonlybackground(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'readonlybackground')
+
+ def test_show(self):
+ widget = self.create()
+ self.checkParam(widget, 'show', '*')
+ self.checkParam(widget, 'show', '')
+ self.checkParam(widget, 'show', ' ')
+
+ def test_state(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'state',
+ 'disabled', 'normal', 'readonly')
+
+ def test_validate(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'validate',
+ 'all', 'key', 'focus', 'focusin', 'focusout', 'none')
+
+ def test_validatecommand(self):
+ widget = self.create()
+ self.checkCommandParam(widget, 'validatecommand')
+ self.checkCommandParam(widget, 'vcmd')
+
+
+@add_standard_options(StandardOptionsTests)
+class SpinboxTest(EntryTest, unittest.TestCase):
+ OPTIONS = (
+ 'activebackground', 'background', 'borderwidth',
+ 'buttonbackground', 'buttoncursor', 'buttondownrelief', 'buttonuprelief',
+ 'command', 'cursor', 'disabledbackground', 'disabledforeground',
+ 'exportselection', 'font', 'foreground', 'format', 'from',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'increment',
+ 'insertbackground', 'insertborderwidth',
+ 'insertofftime', 'insertontime', 'insertwidth',
+ 'invalidcommand', 'justify', 'relief', 'readonlybackground',
+ 'repeatdelay', 'repeatinterval',
+ 'selectbackground', 'selectborderwidth', 'selectforeground',
+ 'state', 'takefocus', 'textvariable', 'to',
+ 'validate', 'validatecommand', 'values',
+ 'width', 'wrap', 'xscrollcommand',
+ )
+
+ def _create(self, **kwargs):
+ return Tkinter.Spinbox(self.root, **kwargs)
+
+ test_show = None
+
+ def test_buttonbackground(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'buttonbackground')
+
+ def test_buttoncursor(self):
+ widget = self.create()
+ self.checkCursorParam(widget, 'buttoncursor')
+
+ def test_buttondownrelief(self):
+ widget = self.create()
+ self.checkReliefParam(widget, 'buttondownrelief')
+
+ def test_buttonuprelief(self):
+ widget = self.create()
+ self.checkReliefParam(widget, 'buttonuprelief')
+
+ def test_format(self):
+ widget = self.create()
+ self.checkParam(widget, 'format', '%2f')
+ self.checkParam(widget, 'format', '%2.2f')
+ self.checkParam(widget, 'format', '%.2f')
+ self.checkParam(widget, 'format', '%2.f')
+ self.checkInvalidParam(widget, 'format', '%2e-1f')
+ self.checkInvalidParam(widget, 'format', '2.2')
+ self.checkInvalidParam(widget, 'format', '%2.-2f')
+ self.checkParam(widget, 'format', '%-2.02f')
+ self.checkParam(widget, 'format', '% 2.02f')
+ self.checkParam(widget, 'format', '% -2.200f')
+ self.checkParam(widget, 'format', '%09.200f')
+ self.checkInvalidParam(widget, 'format', '%d')
+
+ def test_from(self):
+ widget = self.create()
+ self.checkParam(widget, 'to', 100.0)
+ self.checkFloatParam(widget, 'from', -10, 10.2, 11.7)
+ self.checkInvalidParam(widget, 'from', 200,
+ errmsg='-to value must be greater than -from value')
+
+ def test_increment(self):
+ widget = self.create()
+ self.checkFloatParam(widget, 'increment', -1, 1, 10.2, 12.8, 0)
+
+ def test_to(self):
+ widget = self.create()
+ self.checkParam(widget, 'from', -100.0)
+ self.checkFloatParam(widget, 'to', -10, 10.2, 11.7)
+ self.checkInvalidParam(widget, 'to', -200,
+ errmsg='-to value must be greater than -from value')
+
+ def test_values(self):
+ # XXX
+ widget = self.create()
+ self.assertEqual(widget['values'], '')
+ self.checkParam(widget, 'values', 'mon tue wed thur')
+ self.checkParam(widget, 'values', ('mon', 'tue', 'wed', 'thur'),
+ expected='mon tue wed thur')
+ self.checkParam(widget, 'values', (42, 3.14, '', 'any string'),
+ expected='42 3.14 {} {any string}')
+ self.checkParam(widget, 'values', '')
+
+ def test_wrap(self):
+ widget = self.create()
+ self.checkBooleanParam(widget, 'wrap')
+
+ def test_bbox(self):
+ widget = self.create()
+ self.assertIsBoundingBox(widget.bbox(0))
+ self.assertRaises(Tkinter.TclError, widget.bbox, 'noindex')
+ self.assertRaises(Tkinter.TclError, widget.bbox, None)
+ self.assertRaises(TypeError, widget.bbox)
+ self.assertRaises(TypeError, widget.bbox, 0, 1)
+
+
+@add_standard_options(StandardOptionsTests)
+class TextTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'autoseparators', 'background', 'blockcursor', 'borderwidth',
+ 'cursor', 'endline', 'exportselection',
+ 'font', 'foreground', 'height',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'inactiveselectbackground', 'insertbackground', 'insertborderwidth',
+ 'insertofftime', 'insertontime', 'insertunfocussed', 'insertwidth',
+ 'maxundo', 'padx', 'pady', 'relief',
+ 'selectbackground', 'selectborderwidth', 'selectforeground',
+ 'setgrid', 'spacing1', 'spacing2', 'spacing3', 'startline', 'state',
+ 'tabs', 'tabstyle', 'takefocus', 'undo', 'width', 'wrap',
+ 'xscrollcommand', 'yscrollcommand',
+ )
+ if tcl_version < (8, 5):
+ wantobjects = False
+
+ def _create(self, **kwargs):
+ return Tkinter.Text(self.root, **kwargs)
+
+ def test_autoseparators(self):
+ widget = self.create()
+ self.checkBooleanParam(widget, 'autoseparators')
+
+ @requires_tcl(8, 5)
+ def test_blockcursor(self):
+ widget = self.create()
+ self.checkBooleanParam(widget, 'blockcursor')
+
+ @requires_tcl(8, 5)
+ def test_endline(self):
+ widget = self.create()
+        text = '\n'.join('Line %d' % i for i in range(100))
+ widget.insert('end', text)
+ self.checkParam(widget, 'endline', 200, expected='')
+ self.checkParam(widget, 'endline', -10, expected='')
+ self.checkInvalidParam(widget, 'endline', 'spam',
+ errmsg='expected integer but got "spam"')
+ self.checkParam(widget, 'endline', 50)
+ self.checkParam(widget, 'startline', 15)
+ self.checkInvalidParam(widget, 'endline', 10,
+ errmsg='-startline must be less than or equal to -endline')
+
+ def test_height(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'height', 100, 101.2, 102.6, '3c')
+ self.checkParam(widget, 'height', -100, expected=1)
+ self.checkParam(widget, 'height', 0, expected=1)
+
+ def test_maxundo(self):
+ widget = self.create()
+ self.checkIntegerParam(widget, 'maxundo', 0, 5, -1)
+
+ @requires_tcl(8, 5)
+ def test_inactiveselectbackground(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'inactiveselectbackground')
+
+ @requires_tcl(8, 6)
+ def test_insertunfocussed(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'insertunfocussed',
+ 'hollow', 'none', 'solid')
+
+ def test_selectborderwidth(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'selectborderwidth',
+ 1.3, 2.6, -2, '10p', conv=noconv,
+ keep_orig=tcl_version >= (8, 5))
+
+ def test_spacing1(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'spacing1', 20, 21.4, 22.6, '0.5c')
+ self.checkParam(widget, 'spacing1', -5, expected=0)
+
+ def test_spacing2(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'spacing2', 5, 6.4, 7.6, '0.1c')
+ self.checkParam(widget, 'spacing2', -1, expected=0)
+
+ def test_spacing3(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'spacing3', 20, 21.4, 22.6, '0.5c')
+ self.checkParam(widget, 'spacing3', -10, expected=0)
+
+ @requires_tcl(8, 5)
+ def test_startline(self):
+ widget = self.create()
+        text = '\n'.join('Line %d' % i for i in range(100))
+ widget.insert('end', text)
+ self.checkParam(widget, 'startline', 200, expected='')
+ self.checkParam(widget, 'startline', -10, expected='')
+ self.checkInvalidParam(widget, 'startline', 'spam',
+ errmsg='expected integer but got "spam"')
+ self.checkParam(widget, 'startline', 10)
+ self.checkParam(widget, 'endline', 50)
+ self.checkInvalidParam(widget, 'startline', 70,
+ errmsg='-startline must be less than or equal to -endline')
+
+ def test_state(self):
+ widget = self.create()
+ if tcl_version < (8, 5):
+ self.checkParams(widget, 'state', 'disabled', 'normal')
+ else:
+ self.checkEnumParam(widget, 'state', 'disabled', 'normal')
+
+ def test_tabs(self):
+ widget = self.create()
+ if get_tk_patchlevel() < (8, 5, 11):
+ self.checkParam(widget, 'tabs', (10.2, 20.7, '1i', '2i'),
+ expected=('10.2', '20.7', '1i', '2i'))
+ else:
+ self.checkParam(widget, 'tabs', (10.2, 20.7, '1i', '2i'))
+ self.checkParam(widget, 'tabs', '10.2 20.7 1i 2i',
+ expected=('10.2', '20.7', '1i', '2i'))
+ self.checkParam(widget, 'tabs', '2c left 4c 6c center',
+ expected=('2c', 'left', '4c', '6c', 'center'))
+ self.checkInvalidParam(widget, 'tabs', 'spam',
+ errmsg='bad screen distance "spam"',
+ keep_orig=tcl_version >= (8, 5))
+
+ @requires_tcl(8, 5)
+ def test_tabstyle(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'tabstyle', 'tabular', 'wordprocessor')
+
+ def test_undo(self):
+ widget = self.create()
+ self.checkBooleanParam(widget, 'undo')
+
+ def test_width(self):
+ widget = self.create()
+ self.checkIntegerParam(widget, 'width', 402)
+ self.checkParam(widget, 'width', -402, expected=1)
+ self.checkParam(widget, 'width', 0, expected=1)
+
+ def test_wrap(self):
+ widget = self.create()
+ if tcl_version < (8, 5):
+ self.checkParams(widget, 'wrap', 'char', 'none', 'word')
+ else:
+ self.checkEnumParam(widget, 'wrap', 'char', 'none', 'word')
+
+ def test_bbox(self):
+ widget = self.create()
+ self.assertIsBoundingBox(widget.bbox('1.1'))
+ self.assertIsNone(widget.bbox('end'))
+ self.assertRaises(Tkinter.TclError, widget.bbox, 'noindex')
+ self.assertRaises(Tkinter.TclError, widget.bbox, None)
+ self.assertRaises(Tkinter.TclError, widget.bbox)
+ self.assertRaises(Tkinter.TclError, widget.bbox, '1.1', 'end')
+
+
+@add_standard_options(PixelSizeTests, StandardOptionsTests)
+class CanvasTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'background', 'borderwidth',
+ 'closeenough', 'confine', 'cursor', 'height',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'insertbackground', 'insertborderwidth',
+ 'insertofftime', 'insertontime', 'insertwidth',
+ 'relief', 'scrollregion',
+ 'selectbackground', 'selectborderwidth', 'selectforeground',
+ 'state', 'takefocus',
+ 'xscrollcommand', 'xscrollincrement',
+ 'yscrollcommand', 'yscrollincrement', 'width',
+ )
+
+ _conv_pixels = staticmethod(int_round)
+ wantobjects = False
+
+ def _create(self, **kwargs):
+ return Tkinter.Canvas(self.root, **kwargs)
+
+ def test_closeenough(self):
+ widget = self.create()
+ self.checkFloatParam(widget, 'closeenough', 24, 2.4, 3.6, -3,
+ conv=float)
+
+ def test_confine(self):
+ widget = self.create()
+ self.checkBooleanParam(widget, 'confine')
+
+ def test_scrollregion(self):
+ widget = self.create()
+ self.checkParam(widget, 'scrollregion', '0 0 200 150')
+ self.checkParam(widget, 'scrollregion', (0, 0, 200, 150),
+ expected='0 0 200 150')
+ self.checkParam(widget, 'scrollregion', '')
+ self.checkInvalidParam(widget, 'scrollregion', 'spam',
+ errmsg='bad scrollRegion "spam"')
+ self.checkInvalidParam(widget, 'scrollregion', (0, 0, 200, 'spam'))
+ self.checkInvalidParam(widget, 'scrollregion', (0, 0, 200))
+ self.checkInvalidParam(widget, 'scrollregion', (0, 0, 200, 150, 0))
+
+ def test_state(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'state', 'disabled', 'normal',
+ errmsg='bad state value "{}": must be normal or disabled')
+
+ def test_xscrollincrement(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'xscrollincrement',
+ 40, 0, 41.2, 43.6, -40, '0.5i')
+
+ def test_yscrollincrement(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'yscrollincrement',
+ 10, 0, 11.2, 13.6, -10, '0.1i')
+
+
+@add_standard_options(IntegerSizeTests, StandardOptionsTests)
+class ListboxTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'activestyle', 'background', 'borderwidth', 'cursor',
+ 'disabledforeground', 'exportselection',
+ 'font', 'foreground', 'height',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'listvariable', 'relief',
+ 'selectbackground', 'selectborderwidth', 'selectforeground',
+ 'selectmode', 'setgrid', 'state',
+ 'takefocus', 'width', 'xscrollcommand', 'yscrollcommand',
+ )
+
+ def _create(self, **kwargs):
+ return Tkinter.Listbox(self.root, **kwargs)
+
+ def test_activestyle(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'activestyle',
+ 'dotbox', 'none', 'underline')
+
+ def test_listvariable(self):
+ widget = self.create()
+ var = Tkinter.DoubleVar()
+ self.checkVariableParam(widget, 'listvariable', var)
+
+ def test_selectmode(self):
+ widget = self.create()
+ self.checkParam(widget, 'selectmode', 'single')
+ self.checkParam(widget, 'selectmode', 'browse')
+ self.checkParam(widget, 'selectmode', 'multiple')
+ self.checkParam(widget, 'selectmode', 'extended')
+
+ def test_state(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'state', 'disabled', 'normal')
+
+ def test_itemconfigure(self):
+ widget = self.create()
+ with self.assertRaisesRegexp(TclError, 'item number "0" out of range'):
+ widget.itemconfigure(0)
+ colors = 'red orange yellow green blue white violet'.split()
+ widget.insert('end', *colors)
+ for i, color in enumerate(colors):
+ widget.itemconfigure(i, background=color)
+ with self.assertRaises(TypeError):
+ widget.itemconfigure()
+ with self.assertRaisesRegexp(TclError, 'bad listbox index "red"'):
+ widget.itemconfigure('red')
+ self.assertEqual(widget.itemconfigure(0, 'background'),
+ ('background', 'background', 'Background', '', 'red'))
+ self.assertEqual(widget.itemconfigure('end', 'background'),
+ ('background', 'background', 'Background', '', 'violet'))
+ self.assertEqual(widget.itemconfigure('@0,0', 'background'),
+ ('background', 'background', 'Background', '', 'red'))
+
+ d = widget.itemconfigure(0)
+ self.assertIsInstance(d, dict)
+ for k, v in d.items():
+ self.assertIn(len(v), (2, 5))
+ if len(v) == 5:
+ self.assertEqual(v, widget.itemconfigure(0, k))
+ self.assertEqual(v[4], widget.itemcget(0, k))
+
+ def check_itemconfigure(self, name, value):
+ widget = self.create()
+ widget.insert('end', 'a', 'b', 'c', 'd')
+ widget.itemconfigure(0, **{name: value})
+ self.assertEqual(widget.itemconfigure(0, name)[4], value)
+ self.assertEqual(widget.itemcget(0, name), value)
+ with self.assertRaisesRegexp(TclError, 'unknown color name "spam"'):
+ widget.itemconfigure(0, **{name: 'spam'})
+
+ def test_itemconfigure_background(self):
+ self.check_itemconfigure('background', '#ff0000')
+
+ def test_itemconfigure_bg(self):
+ self.check_itemconfigure('bg', '#ff0000')
+
+ def test_itemconfigure_fg(self):
+ self.check_itemconfigure('fg', '#110022')
+
+ def test_itemconfigure_foreground(self):
+ self.check_itemconfigure('foreground', '#110022')
+
+ def test_itemconfigure_selectbackground(self):
+ self.check_itemconfigure('selectbackground', '#110022')
+
+ def test_itemconfigure_selectforeground(self):
+ self.check_itemconfigure('selectforeground', '#654321')
+
+ def test_box(self):
+ lb = self.create()
+ lb.insert(0, *('el%d' % i for i in range(8)))
+ lb.pack()
+ self.assertIsBoundingBox(lb.bbox(0))
+ self.assertIsNone(lb.bbox(-1))
+ self.assertIsNone(lb.bbox(10))
+ self.assertRaises(TclError, lb.bbox, 'noindex')
+ self.assertRaises(TclError, lb.bbox, None)
+ self.assertRaises(TypeError, lb.bbox)
+ self.assertRaises(TypeError, lb.bbox, 0, 1)
+
+ def test_curselection(self):
+ lb = self.create()
+ lb.insert(0, *('el%d' % i for i in range(8)))
+ lb.selection_clear(0, Tkinter.END)
+ lb.selection_set(2, 4)
+ lb.selection_set(6)
+ self.assertEqual(lb.curselection(), (2, 3, 4, 6))
+ self.assertRaises(TypeError, lb.curselection, 0)
+
+ def test_get(self):
+ lb = self.create()
+ lb.insert(0, *('el%d' % i for i in range(8)))
+ self.assertEqual(lb.get(0), 'el0')
+ self.assertEqual(lb.get(3), 'el3')
+ self.assertEqual(lb.get('end'), 'el7')
+ self.assertEqual(lb.get(8), '')
+ self.assertEqual(lb.get(-1), '')
+ self.assertEqual(lb.get(3, 5), ('el3', 'el4', 'el5'))
+ self.assertEqual(lb.get(5, 'end'), ('el5', 'el6', 'el7'))
+ self.assertEqual(lb.get(5, 0), ())
+ self.assertEqual(lb.get(0, 0), ('el0',))
+ self.assertRaises(TclError, lb.get, 'noindex')
+ self.assertRaises(TclError, lb.get, None)
+ self.assertRaises(TypeError, lb.get)
+ self.assertRaises(TclError, lb.get, 'end', 'noindex')
+ self.assertRaises(TypeError, lb.get, 1, 2, 3)
+ self.assertRaises(TclError, lb.get, 2.4)
+
+
+@add_standard_options(PixelSizeTests, StandardOptionsTests)
+class ScaleTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'activebackground', 'background', 'bigincrement', 'borderwidth',
+ 'command', 'cursor', 'digits', 'font', 'foreground', 'from',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'label', 'length', 'orient', 'relief',
+ 'repeatdelay', 'repeatinterval',
+ 'resolution', 'showvalue', 'sliderlength', 'sliderrelief', 'state',
+ 'takefocus', 'tickinterval', 'to', 'troughcolor', 'variable', 'width',
+ )
+ default_orient = 'vertical'
+
+ def _create(self, **kwargs):
+ return Tkinter.Scale(self.root, **kwargs)
+
+ def test_bigincrement(self):
+ widget = self.create()
+ self.checkFloatParam(widget, 'bigincrement', 12.4, 23.6, -5)
+
+ def test_digits(self):
+ widget = self.create()
+ self.checkIntegerParam(widget, 'digits', 5, 0)
+
+ def test_from(self):
+ widget = self.create()
+ self.checkFloatParam(widget, 'from', 100, 14.9, 15.1, conv=round)
+
+ def test_label(self):
+ widget = self.create()
+ self.checkParam(widget, 'label', 'any string')
+ self.checkParam(widget, 'label', '')
+
+ def test_length(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'length', 130, 131.2, 135.6, '5i')
+
+ def test_resolution(self):
+ widget = self.create()
+ self.checkFloatParam(widget, 'resolution', 4.2, 0, 6.7, -2)
+
+ def test_showvalue(self):
+ widget = self.create()
+ self.checkBooleanParam(widget, 'showvalue')
+
+ def test_sliderlength(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'sliderlength',
+ 10, 11.2, 15.6, -3, '3m')
+
+ def test_sliderrelief(self):
+ widget = self.create()
+ self.checkReliefParam(widget, 'sliderrelief')
+
+ def test_tickinterval(self):
+ widget = self.create()
+ self.checkFloatParam(widget, 'tickinterval', 1, 4.3, 7.6, 0,
+ conv=round)
+ self.checkParam(widget, 'tickinterval', -2, expected=2,
+ conv=round)
+
+ def test_to(self):
+ widget = self.create()
+ self.checkFloatParam(widget, 'to', 300, 14.9, 15.1, -10,
+ conv=round)
+
+
+@add_standard_options(PixelSizeTests, StandardOptionsTests)
+class ScrollbarTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'activebackground', 'activerelief',
+ 'background', 'borderwidth',
+ 'command', 'cursor', 'elementborderwidth',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'jump', 'orient', 'relief',
+ 'repeatdelay', 'repeatinterval',
+ 'takefocus', 'troughcolor', 'width',
+ )
+ _conv_pixels = staticmethod(int_round)
+ wantobjects = False
+ default_orient = 'vertical'
+
+ def _create(self, **kwargs):
+ return Tkinter.Scrollbar(self.root, **kwargs)
+
+ def test_activerelief(self):
+ widget = self.create()
+ self.checkReliefParam(widget, 'activerelief')
+
+ def test_elementborderwidth(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'elementborderwidth', 4.3, 5.6, -2, '1m')
+
+ def test_orient(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'orient', 'vertical', 'horizontal',
+ errmsg='bad orientation "{}": must be vertical or horizontal')
+
+
+@add_standard_options(StandardOptionsTests)
+class PanedWindowTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'background', 'borderwidth', 'cursor',
+ 'handlepad', 'handlesize', 'height',
+ 'opaqueresize', 'orient', 'relief',
+ 'sashcursor', 'sashpad', 'sashrelief', 'sashwidth',
+ 'showhandle', 'width',
+ )
+ default_orient = 'horizontal'
+
+ def _create(self, **kwargs):
+ return Tkinter.PanedWindow(self.root, **kwargs)
+
+ def test_handlepad(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'handlepad', 5, 6.4, 7.6, -3, '1m')
+
+ def test_handlesize(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'handlesize', 8, 9.4, 10.6, -3, '2m',
+ conv=noconv)
+
+ def test_height(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'height', 100, 101.2, 102.6, -100, 0, '1i',
+ conv=noconv)
+
+ def test_opaqueresize(self):
+ widget = self.create()
+ self.checkBooleanParam(widget, 'opaqueresize')
+
+ def test_sashcursor(self):
+ widget = self.create()
+ self.checkCursorParam(widget, 'sashcursor')
+
+ def test_sashpad(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'sashpad', 8, 1.3, 2.6, -2, '2m')
+
+ def test_sashrelief(self):
+ widget = self.create()
+ self.checkReliefParam(widget, 'sashrelief')
+
+ def test_sashwidth(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'sashwidth', 10, 11.1, 15.6, -3, '1m',
+ conv=noconv)
+
+ def test_showhandle(self):
+ widget = self.create()
+ self.checkBooleanParam(widget, 'showhandle')
+
+ def test_width(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'width', 402, 403.4, 404.6, -402, 0, '5i',
+ conv=noconv)
+
+ def create2(self):
+ p = self.create()
+ b = Tkinter.Button(p)
+ c = Tkinter.Button(p)
+ p.add(b)
+ p.add(c)
+ return p, b, c
+
+ def test_paneconfigure(self):
+ p, b, c = self.create2()
+ self.assertRaises(TypeError, p.paneconfigure)
+ d = p.paneconfigure(b)
+ self.assertIsInstance(d, dict)
+ for k, v in d.items():
+ self.assertEqual(len(v), 5)
+ self.assertEqual(v, p.paneconfigure(b, k))
+ self.assertEqual(v[4], p.panecget(b, k))
+
+ def check_paneconfigure(self, p, b, name, value, expected, stringify=False):
+ conv = lambda x: x
+ if not self.wantobjects or stringify:
+ expected = str(expected)
+ if self.wantobjects and stringify:
+ conv = str
+ p.paneconfigure(b, **{name: value})
+ self.assertEqual(conv(p.paneconfigure(b, name)[4]), expected)
+ self.assertEqual(conv(p.panecget(b, name)), expected)
+
+ def check_paneconfigure_bad(self, p, b, name, msg):
+ with self.assertRaisesRegexp(TclError, msg):
+ p.paneconfigure(b, **{name: 'badValue'})
+
+ def test_paneconfigure_after(self):
+ p, b, c = self.create2()
+ self.check_paneconfigure(p, b, 'after', c, str(c))
+ self.check_paneconfigure_bad(p, b, 'after',
+ 'bad window path name "badValue"')
+
+ def test_paneconfigure_before(self):
+ p, b, c = self.create2()
+ self.check_paneconfigure(p, b, 'before', c, str(c))
+ self.check_paneconfigure_bad(p, b, 'before',
+ 'bad window path name "badValue"')
+
+ def test_paneconfigure_height(self):
+ p, b, c = self.create2()
+ self.check_paneconfigure(p, b, 'height', 10, 10,
+ stringify=tcl_version < (8, 5))
+ self.check_paneconfigure_bad(p, b, 'height',
+ 'bad screen distance "badValue"')
+
+ @requires_tcl(8, 5)
+ def test_paneconfigure_hide(self):
+ p, b, c = self.create2()
+ self.check_paneconfigure(p, b, 'hide', False, 0)
+ self.check_paneconfigure_bad(p, b, 'hide',
+ 'expected boolean value but got "badValue"')
+
+ def test_paneconfigure_minsize(self):
+ p, b, c = self.create2()
+ self.check_paneconfigure(p, b, 'minsize', 10, 10)
+ self.check_paneconfigure_bad(p, b, 'minsize',
+ 'bad screen distance "badValue"')
+
+ def test_paneconfigure_padx(self):
+ p, b, c = self.create2()
+ self.check_paneconfigure(p, b, 'padx', 1.3, 1)
+ self.check_paneconfigure_bad(p, b, 'padx',
+ 'bad screen distance "badValue"')
+
+ def test_paneconfigure_pady(self):
+ p, b, c = self.create2()
+ self.check_paneconfigure(p, b, 'pady', 1.3, 1)
+ self.check_paneconfigure_bad(p, b, 'pady',
+ 'bad screen distance "badValue"')
+
+ def test_paneconfigure_sticky(self):
+ p, b, c = self.create2()
+ self.check_paneconfigure(p, b, 'sticky', 'nsew', 'nesw')
+ self.check_paneconfigure_bad(p, b, 'sticky',
+ 'bad stickyness value "badValue": must '
+ 'be a string containing zero or more of '
+ 'n, e, s, and w')
+
+ @requires_tcl(8, 5)
+ def test_paneconfigure_stretch(self):
+ p, b, c = self.create2()
+ self.check_paneconfigure(p, b, 'stretch', 'alw', 'always')
+ self.check_paneconfigure_bad(p, b, 'stretch',
+ 'bad stretch "badValue": must be '
+ 'always, first, last, middle, or never')
+
+ def test_paneconfigure_width(self):
+ p, b, c = self.create2()
+ self.check_paneconfigure(p, b, 'width', 10, 10,
+ stringify=tcl_version < (8, 5))
+ self.check_paneconfigure_bad(p, b, 'width',
+ 'bad screen distance "badValue"')
+
+
+@add_standard_options(StandardOptionsTests)
+class MenuTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'activebackground', 'activeborderwidth', 'activeforeground',
+ 'background', 'borderwidth', 'cursor',
+ 'disabledforeground', 'font', 'foreground',
+ 'postcommand', 'relief', 'selectcolor', 'takefocus',
+ 'tearoff', 'tearoffcommand', 'title', 'type',
+ )
+ _conv_pixels = noconv_meth
+
+ def _create(self, **kwargs):
+ return Tkinter.Menu(self.root, **kwargs)
+
+ def test_postcommand(self):
+ widget = self.create()
+ self.checkCommandParam(widget, 'postcommand')
+
+ def test_tearoff(self):
+ widget = self.create()
+ self.checkBooleanParam(widget, 'tearoff')
+
+ def test_tearoffcommand(self):
+ widget = self.create()
+ self.checkCommandParam(widget, 'tearoffcommand')
+
+ def test_title(self):
+ widget = self.create()
+ self.checkParam(widget, 'title', 'any string')
+
+ def test_type(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'type',
+ 'normal', 'tearoff', 'menubar')
+
+ def test_entryconfigure(self):
+ m1 = self.create()
+ m1.add_command(label='test')
+ self.assertRaises(TypeError, m1.entryconfigure)
+ with self.assertRaisesRegexp(TclError, 'bad menu entry index "foo"'):
+ m1.entryconfigure('foo')
+ d = m1.entryconfigure(1)
+ self.assertIsInstance(d, dict)
+ for k, v in d.items():
+ self.assertIsInstance(k, str)
+ self.assertIsInstance(v, tuple)
+ self.assertEqual(len(v), 5)
+ self.assertEqual(v[0], k)
+ self.assertEqual(m1.entrycget(1, k), v[4])
+ m1.destroy()
+
+ def test_entryconfigure_label(self):
+ m1 = self.create()
+ m1.add_command(label='test')
+ self.assertEqual(m1.entrycget(1, 'label'), 'test')
+ m1.entryconfigure(1, label='changed')
+ self.assertEqual(m1.entrycget(1, 'label'), 'changed')
+
+ def test_entryconfigure_variable(self):
+ m1 = self.create()
+ v1 = Tkinter.BooleanVar(self.root)
+ v2 = Tkinter.BooleanVar(self.root)
+ m1.add_checkbutton(variable=v1, onvalue=True, offvalue=False,
+ label='Nonsense')
+ self.assertEqual(str(m1.entrycget(1, 'variable')), str(v1))
+ m1.entryconfigure(1, variable=v2)
+ self.assertEqual(str(m1.entrycget(1, 'variable')), str(v2))
+
+
+@add_standard_options(PixelSizeTests, StandardOptionsTests)
+class MessageTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'anchor', 'aspect', 'background', 'borderwidth',
+ 'cursor', 'font', 'foreground',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'justify', 'padx', 'pady', 'relief',
+ 'takefocus', 'text', 'textvariable', 'width',
+ )
+ _conv_pad_pixels = noconv_meth
+
+ def _create(self, **kwargs):
+ return Tkinter.Message(self.root, **kwargs)
+
+ def test_aspect(self):
+ widget = self.create()
+ self.checkIntegerParam(widget, 'aspect', 250, 0, -300)
+
+
+tests_gui = [
+ ButtonTest, CanvasTest, CheckbuttonTest, EntryTest,
+    FrameTest, LabelFrameTest, LabelTest, ListboxTest,
+ MenubuttonTest, MenuTest, MessageTest, OptionMenuTest,
+ PanedWindowTest, RadiobuttonTest, ScaleTest, ScrollbarTest,
+ SpinboxTest, TextTest, ToplevelTest,
+]
+
+if __name__ == '__main__':
+ run_unittest(*tests_gui)
diff --git a/Lib/lib-tk/test/test_ttk/support.py b/Lib/lib-tk/test/test_ttk/support.py
index 57e7803..e10f80c 100644
--- a/Lib/lib-tk/test/test_ttk/support.py
+++ b/Lib/lib-tk/test/test_ttk/support.py
@@ -1,3 +1,4 @@
+import unittest
import Tkinter
def get_tk_root():
@@ -31,3 +32,57 @@ def simulate_mouse_click(widget, x, y):
widget.event_generate('<Motion>', x=x, y=y)
widget.event_generate('<ButtonPress-1>', x=x, y=y)
widget.event_generate('<ButtonRelease-1>', x=x, y=y)
+
+
+import _tkinter
+tcl_version = tuple(map(int, _tkinter.TCL_VERSION.split('.')))
+
+def requires_tcl(*version):
+ return unittest.skipUnless(tcl_version >= version,
+ 'requires Tcl version >= ' + '.'.join(map(str, version)))
+
+_tk_patchlevel = None
+def get_tk_patchlevel():
+ global _tk_patchlevel
+ if _tk_patchlevel is None:
+ tcl = Tkinter.Tcl()
+ patchlevel = []
+ for x in tcl.call('info', 'patchlevel').split('.'):
+ try:
+ x = int(x, 10)
+ except ValueError:
+ x = -1
+ patchlevel.append(x)
+ _tk_patchlevel = tuple(patchlevel)
+ return _tk_patchlevel
+
+units = {
+ 'c': 72 / 2.54, # centimeters
+ 'i': 72, # inches
+ 'm': 72 / 25.4, # millimeters
+ 'p': 1, # points
+}
+
+def pixels_conv(value):
+ return float(value[:-1]) * units[value[-1:]]
+
+def tcl_obj_eq(actual, expected):
+ if actual == expected:
+ return True
+ if isinstance(actual, _tkinter.Tcl_Obj):
+ if isinstance(expected, str):
+ return str(actual) == expected
+ if isinstance(actual, tuple):
+ if isinstance(expected, tuple):
+ return (len(actual) == len(expected) and
+ all(tcl_obj_eq(act, exp)
+ for act, exp in zip(actual, expected)))
+ return False
+
+def widget_eq(actual, expected):
+ if actual == expected:
+ return True
+ if isinstance(actual, (str, Tkinter.Widget)):
+ if isinstance(expected, (str, Tkinter.Widget)):
+ return str(actual) == str(expected)
+ return False
diff --git a/Lib/lib-tk/test/test_ttk/test_extensions.py b/Lib/lib-tk/test/test_ttk/test_extensions.py
index 6e46cbc..8301f87 100644
--- a/Lib/lib-tk/test/test_ttk/test_extensions.py
+++ b/Lib/lib-tk/test/test_ttk/test_extensions.py
@@ -29,7 +29,10 @@ class LabeledScaleTest(unittest.TestCase):
name = myvar._name
x = ttk.LabeledScale(variable=myvar)
x.destroy()
- self.assertEqual(x.tk.globalgetvar(name), myvar.get())
+ if x.tk.wantobjects():
+ self.assertEqual(x.tk.globalgetvar(name), myvar.get())
+ else:
+ self.assertEqual(float(x.tk.globalgetvar(name)), myvar.get())
del myvar
self.assertRaises(Tkinter.TclError, x.tk.globalgetvar, name)
@@ -45,7 +48,7 @@ class LabeledScaleTest(unittest.TestCase):
# it tries calling instance attributes not yet defined.
ttk.LabeledScale(variable=myvar)
if hasattr(sys, 'last_type'):
- self.assertFalse(sys.last_type == Tkinter.TclError)
+ self.assertNotEqual(sys.last_type, Tkinter.TclError)
def test_initialization(self):
@@ -59,8 +62,10 @@ class LabeledScaleTest(unittest.TestCase):
x.destroy()
# variable initialization/passing
- passed_expected = ((2.5, 2), ('0', 0), (0, 0), (10, 10),
+ passed_expected = (('0', 0), (0, 0), (10, 10),
(-1, -1), (sys.maxint + 1, sys.maxint + 1))
+ if x.tk.wantobjects():
+ passed_expected += ((2.5, 2),)
for pair in passed_expected:
x = ttk.LabeledScale(from_=pair[0])
self.assertEqual(x.value, pair[1])
@@ -120,14 +125,14 @@ class LabeledScaleTest(unittest.TestCase):
# at the same time this shouldn't affect test outcome
lscale.update()
curr_xcoord = lscale.scale.coords()[0]
- self.assertTrue(prev_xcoord != curr_xcoord)
+ self.assertNotEqual(prev_xcoord, curr_xcoord)
# the label widget should have been repositioned too
linfo_2 = lscale.label.place_info()
- self.assertEqual(lscale.label['text'], 0)
+ self.assertEqual(lscale.label['text'], 0 if lscale.tk.wantobjects() else '0')
self.assertEqual(curr_xcoord, int(linfo_2['x']))
# change the range back
lscale.scale.configure(from_=0, to=10)
- self.assertTrue(prev_xcoord != curr_xcoord)
+ self.assertNotEqual(prev_xcoord, curr_xcoord)
self.assertEqual(prev_xcoord, int(linfo_1['x']))
lscale.destroy()
@@ -145,15 +150,20 @@ class LabeledScaleTest(unittest.TestCase):
# The following update is needed since the test doesn't use mainloop,
# at the same time this shouldn't affect test outcome
x.update()
- self.assertEqual(x.label['text'], newval)
- self.assertTrue(x.scale.coords()[0] > curr_xcoord)
+ self.assertEqual(x.label['text'],
+ newval if x.tk.wantobjects() else str(newval))
+ self.assertGreater(x.scale.coords()[0], curr_xcoord)
self.assertEqual(x.scale.coords()[0],
int(x.label.place_info()['x']))
# value outside range
- x.value = x.scale['to'] + 1 # no changes shouldn't happen
+ if x.tk.wantobjects():
+ conv = lambda x: x
+ else:
+ conv = int
+        x.value = conv(x.scale['to']) + 1  # no change should happen
x.update()
- self.assertEqual(x.label['text'], newval)
+ self.assertEqual(conv(x.label['text']), newval)
self.assertEqual(x.scale.coords()[0],
int(x.label.place_info()['x']))
@@ -238,7 +248,7 @@ class OptionMenuTest(unittest.TestCase):
if last == curr:
# no more menu entries
break
- self.assertFalse(curr == default)
+ self.assertNotEqual(curr, default)
i += 1
self.assertEqual(i, len(items))
diff --git a/Lib/lib-tk/test/test_ttk/test_functions.py b/Lib/lib-tk/test/test_ttk/test_functions.py
index 15e76c1..50c5aeb 100644
--- a/Lib/lib-tk/test/test_ttk/test_functions.py
+++ b/Lib/lib-tk/test/test_ttk/test_functions.py
@@ -3,6 +3,17 @@ import sys
import unittest
import ttk
+class MockTkApp:
+
+ def splitlist(self, arg):
+ if isinstance(arg, tuple):
+ return arg
+ return arg.split(':')
+
+ def wantobjects(self):
+ return True
+
+
class MockTclObj(object):
typename = 'test'
@@ -50,13 +61,17 @@ class InternalFunctionsTest(unittest.TestCase):
ttk._format_optdict({'test': {'left': 'as is'}}),
{'-test': {'left': 'as is'}})
- # check script formatting and untouched value(s)
+ # check script formatting
check_against(
ttk._format_optdict(
- {'test': [1, -1, '', '2m', 0], 'nochange1': 3,
- 'nochange2': 'abc def'}, script=True),
- {'-test': '{1 -1 {} 2m 0}', '-nochange1': 3,
- '-nochange2': 'abc def' })
+ {'test': [1, -1, '', '2m', 0], 'test2': 3,
+ 'test3': '', 'test4': 'abc def',
+ 'test5': '"abc"', 'test6': '{}',
+ 'test7': '} -spam {'}, script=True),
+ {'-test': '{1 -1 {} 2m 0}', '-test2': '3',
+ '-test3': '{}', '-test4': '{abc def}',
+ '-test5': '{"abc"}', '-test6': r'\{\}',
+ '-test7': r'\}\ -spam\ \{'})
opts = {u'αβγ': True, u'á': False}
orig_opts = opts.copy()
@@ -70,6 +85,32 @@ class InternalFunctionsTest(unittest.TestCase):
ttk._format_optdict(
{'option': ('one two', 'three')}),
{'-option': '{one two} three'})
+ check_against(
+ ttk._format_optdict(
+ {'option': ('one\ttwo', 'three')}),
+ {'-option': '{one\ttwo} three'})
+
+ # passing empty strings inside a tuple/list
+ check_against(
+ ttk._format_optdict(
+ {'option': ('', 'one')}),
+ {'-option': '{} one'})
+
+ # passing values with braces inside a tuple/list
+ check_against(
+ ttk._format_optdict(
+ {'option': ('one} {two', 'three')}),
+ {'-option': r'one\}\ \{two three'})
+
+ # passing quoted strings inside a tuple/list
+ check_against(
+ ttk._format_optdict(
+ {'option': ('"one"', 'two')}),
+ {'-option': '{"one"} two'})
+ check_against(
+ ttk._format_optdict(
+ {'option': ('{one}', 'two')}),
+ {'-option': r'\{one\} two'})
# ignore an option
amount_opts = len(ttk._format_optdict(opts, ignore=(u'á'))) // 2
@@ -323,20 +364,22 @@ class InternalFunctionsTest(unittest.TestCase):
def test_list_from_layouttuple(self):
+ tk = MockTkApp()
+
# empty layout tuple
- self.assertFalse(ttk._list_from_layouttuple(()))
+ self.assertFalse(ttk._list_from_layouttuple(tk, ()))
# shortest layout tuple
- self.assertEqual(ttk._list_from_layouttuple(('name', )),
+ self.assertEqual(ttk._list_from_layouttuple(tk, ('name', )),
[('name', {})])
# not so interesting ltuple
sample_ltuple = ('name', '-option', 'value')
- self.assertEqual(ttk._list_from_layouttuple(sample_ltuple),
+ self.assertEqual(ttk._list_from_layouttuple(tk, sample_ltuple),
[('name', {'option': 'value'})])
# empty children
- self.assertEqual(ttk._list_from_layouttuple(
+ self.assertEqual(ttk._list_from_layouttuple(tk,
('something', '-children', ())),
[('something', {'children': []})]
)
@@ -349,7 +392,7 @@ class InternalFunctionsTest(unittest.TestCase):
)
)
)
- self.assertEqual(ttk._list_from_layouttuple(ltuple),
+ self.assertEqual(ttk._list_from_layouttuple(tk, ltuple),
[('name', {'option': 'niceone', 'children':
[('otherone', {'otheropt': 'othervalue', 'children':
[('child', {})]
@@ -358,27 +401,35 @@ class InternalFunctionsTest(unittest.TestCase):
)
# bad tuples
- self.assertRaises(ValueError, ttk._list_from_layouttuple,
+ self.assertRaises(ValueError, ttk._list_from_layouttuple, tk,
('name', 'no_minus'))
- self.assertRaises(ValueError, ttk._list_from_layouttuple,
+ self.assertRaises(ValueError, ttk._list_from_layouttuple, tk,
('name', 'no_minus', 'value'))
- self.assertRaises(ValueError, ttk._list_from_layouttuple,
+ self.assertRaises(ValueError, ttk._list_from_layouttuple, tk,
('something', '-children')) # no children
- self.assertRaises(ValueError, ttk._list_from_layouttuple,
- ('something', '-children', 'value')) # invalid children
def test_val_or_dict(self):
- def func(opt, val=None):
+ def func(res, opt=None, val=None):
+ if opt is None:
+ return res
if val is None:
return "test val"
return (opt, val)
- options = {'test': None}
- self.assertEqual(ttk._val_or_dict(options, func), "test val")
+ tk = MockTkApp()
+ tk.call = func
+
+ self.assertEqual(ttk._val_or_dict(tk, {}, '-test:3'),
+ {'test': '3'})
+ self.assertEqual(ttk._val_or_dict(tk, {}, ('-test', 3)),
+ {'test': 3})
+
+ self.assertEqual(ttk._val_or_dict(tk, {'test': None}, 'x:y'),
+ 'test val')
- options = {'test': 3}
- self.assertEqual(ttk._val_or_dict(options, func), options)
+ self.assertEqual(ttk._val_or_dict(tk, {'test': 3}, 'x:y'),
+ {'test': 3})
def test_convert_stringval(self):
diff --git a/Lib/lib-tk/test/test_ttk/test_style.py b/Lib/lib-tk/test/test_ttk/test_style.py
index 630e075..e90eeea 100644
--- a/Lib/lib-tk/test/test_ttk/test_style.py
+++ b/Lib/lib-tk/test/test_ttk/test_style.py
@@ -18,15 +18,16 @@ class StyleTest(unittest.TestCase):
style.configure('TButton', background='yellow')
self.assertEqual(style.configure('TButton', 'background'),
'yellow')
- self.assertTrue(isinstance(style.configure('TButton'), dict))
+ self.assertIsInstance(style.configure('TButton'), dict)
def test_map(self):
style = self.style
style.map('TButton', background=[('active', 'background', 'blue')])
self.assertEqual(style.map('TButton', 'background'),
- [('active', 'background', 'blue')])
- self.assertTrue(isinstance(style.map('TButton'), dict))
+ [('active', 'background', 'blue')] if style.tk.wantobjects() else
+ [('active background', 'blue')])
+ self.assertIsInstance(style.map('TButton'), dict)
def test_lookup(self):
@@ -57,7 +58,7 @@ class StyleTest(unittest.TestCase):
self.assertEqual(style.layout('Treeview'), tv_style)
# should return a list
- self.assertTrue(isinstance(style.layout('TButton'), list))
+ self.assertIsInstance(style.layout('TButton'), list)
# correct layout, but "option" doesn't exist as option
self.assertRaises(Tkinter.TclError, style.layout, 'Treeview',
diff --git a/Lib/lib-tk/test/test_ttk/test_widgets.py b/Lib/lib-tk/test/test_ttk/test_widgets.py
index aca90cf..4b01364 100644
--- a/Lib/lib-tk/test/test_ttk/test_widgets.py
+++ b/Lib/lib-tk/test/test_ttk/test_widgets.py
@@ -6,9 +6,54 @@ import sys
import support
from test_functions import MockTclObj, MockStateSpec
+from support import tcl_version, get_tk_patchlevel
+from widget_tests import (add_standard_options, noconv, noconv_meth,
+ AbstractWidgetTest, StandardOptionsTests,
+ IntegerSizeTests, PixelSizeTests,
+ setUpModule)
requires('gui')
+
+class StandardTtkOptionsTests(StandardOptionsTests):
+
+ def test_class(self):
+ widget = self.create()
+ self.assertEqual(widget['class'], '')
+        errmsg = 'attempt to change read-only option'
+        if get_tk_patchlevel() < (8, 6, 0):  # actually this was changed in 8.6b3
+            errmsg = 'Attempt to change read-only option'
+ self.checkInvalidParam(widget, 'class', 'Foo', errmsg=errmsg)
+ widget2 = self.create(class_='Foo')
+ self.assertEqual(widget2['class'], 'Foo')
+
+ def test_padding(self):
+ widget = self.create()
+ self.checkParam(widget, 'padding', 0, expected=('0',))
+ self.checkParam(widget, 'padding', 5, expected=('5',))
+ self.checkParam(widget, 'padding', (5, 6), expected=('5', '6'))
+ self.checkParam(widget, 'padding', (5, 6, 7),
+ expected=('5', '6', '7'))
+ self.checkParam(widget, 'padding', (5, 6, 7, 8),
+ expected=('5', '6', '7', '8'))
+ self.checkParam(widget, 'padding', ('5p', '6p', '7p', '8p'))
+ self.checkParam(widget, 'padding', (), expected='')
+
+ def test_style(self):
+ widget = self.create()
+ self.assertEqual(widget['style'], '')
+ errmsg = 'Layout Foo not found'
+ if hasattr(self, 'default_orient'):
+ errmsg = ('Layout %s.Foo not found' %
+ getattr(self, 'default_orient').title())
+ self.checkInvalidParam(widget, 'style', 'Foo',
+ errmsg=errmsg)
+ widget2 = self.create(class_='Foo')
+ self.assertEqual(widget2['class'], 'Foo')
+ # XXX
+ pass
+
+
class WidgetTest(unittest.TestCase):
"""Tests methods available in every ttk widget."""
@@ -26,8 +71,8 @@ class WidgetTest(unittest.TestCase):
def test_identify(self):
self.widget.update_idletasks()
self.assertEqual(self.widget.identify(
- int(self.widget.winfo_width() / 2),
- int(self.widget.winfo_height() / 2)
+ self.widget.winfo_width() // 2,
+ self.widget.winfo_height() // 2
), "label")
self.assertEqual(self.widget.identify(-1, -1), "")
@@ -72,7 +117,112 @@ class WidgetTest(unittest.TestCase):
self.assertEqual(self.widget.state(), ('active', ))
-class ButtonTest(unittest.TestCase):
+class AbstractToplevelTest(AbstractWidgetTest, PixelSizeTests):
+ _conv_pixels = noconv_meth
+
+
+@add_standard_options(StandardTtkOptionsTests)
+class FrameTest(AbstractToplevelTest, unittest.TestCase):
+ OPTIONS = (
+ 'borderwidth', 'class', 'cursor', 'height',
+ 'padding', 'relief', 'style', 'takefocus',
+ 'width',
+ )
+
+ def _create(self, **kwargs):
+ return ttk.Frame(self.root, **kwargs)
+
+
+@add_standard_options(StandardTtkOptionsTests)
+class LabelFrameTest(AbstractToplevelTest, unittest.TestCase):
+ OPTIONS = (
+ 'borderwidth', 'class', 'cursor', 'height',
+ 'labelanchor', 'labelwidget',
+ 'padding', 'relief', 'style', 'takefocus',
+ 'text', 'underline', 'width',
+ )
+
+ def _create(self, **kwargs):
+ return ttk.LabelFrame(self.root, **kwargs)
+
+ def test_labelanchor(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'labelanchor',
+ 'e', 'en', 'es', 'n', 'ne', 'nw', 's', 'se', 'sw', 'w', 'wn', 'ws',
+ errmsg='Bad label anchor specification {}')
+ self.checkInvalidParam(widget, 'labelanchor', 'center')
+
+ def test_labelwidget(self):
+ widget = self.create()
+ label = ttk.Label(self.root, text='Mupp', name='foo')
+ self.checkParam(widget, 'labelwidget', label, expected='.foo')
+ label.destroy()
+
+
+class AbstractLabelTest(AbstractWidgetTest):
+
+ def checkImageParam(self, widget, name):
+ image = Tkinter.PhotoImage('image1')
+ image2 = Tkinter.PhotoImage('image2')
+ self.checkParam(widget, name, image, expected=('image1',))
+ self.checkParam(widget, name, 'image1', expected=('image1',))
+ self.checkParam(widget, name, (image,), expected=('image1',))
+ self.checkParam(widget, name, (image, 'active', image2),
+ expected=('image1', 'active', 'image2'))
+ self.checkParam(widget, name, 'image1 active image2',
+ expected=('image1', 'active', 'image2'))
+ self.checkInvalidParam(widget, name, 'spam',
+ errmsg='image "spam" doesn\'t exist')
+
+ def test_compound(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'compound',
+ 'none', 'text', 'image', 'center',
+ 'top', 'bottom', 'left', 'right')
+
+ def test_state(self):
+ widget = self.create()
+ self.checkParams(widget, 'state', 'active', 'disabled', 'normal')
+
+ def test_width(self):
+ widget = self.create()
+ self.checkParams(widget, 'width', 402, -402, 0)
+
+
+@add_standard_options(StandardTtkOptionsTests)
+class LabelTest(AbstractLabelTest, unittest.TestCase):
+ OPTIONS = (
+ 'anchor', 'background',
+ 'class', 'compound', 'cursor', 'font', 'foreground',
+ 'image', 'justify', 'padding', 'relief', 'state', 'style',
+ 'takefocus', 'text', 'textvariable',
+ 'underline', 'width', 'wraplength',
+ )
+ _conv_pixels = noconv_meth
+
+ def _create(self, **kwargs):
+ return ttk.Label(self.root, **kwargs)
+
+ def test_font(self):
+ widget = self.create()
+ self.checkParam(widget, 'font',
+ '-Adobe-Helvetica-Medium-R-Normal--*-120-*-*-*-*-*-*')
+
+
+@add_standard_options(StandardTtkOptionsTests)
+class ButtonTest(AbstractLabelTest, unittest.TestCase):
+ OPTIONS = (
+ 'class', 'command', 'compound', 'cursor', 'default',
+ 'image', 'state', 'style', 'takefocus', 'text', 'textvariable',
+ 'underline', 'width',
+ )
+
+ def _create(self, **kwargs):
+ return ttk.Button(self.root, **kwargs)
+
+ def test_default(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'default', 'normal', 'active', 'disabled')
def test_invoke(self):
success = []
@@ -81,7 +231,27 @@ class ButtonTest(unittest.TestCase):
self.assertTrue(success)
-class CheckbuttonTest(unittest.TestCase):
+@add_standard_options(StandardTtkOptionsTests)
+class CheckbuttonTest(AbstractLabelTest, unittest.TestCase):
+ OPTIONS = (
+ 'class', 'command', 'compound', 'cursor',
+ 'image',
+ 'offvalue', 'onvalue',
+ 'state', 'style',
+ 'takefocus', 'text', 'textvariable',
+ 'underline', 'variable', 'width',
+ )
+
+ def _create(self, **kwargs):
+ return ttk.Checkbutton(self.root, **kwargs)
+
+ def test_offvalue(self):
+ widget = self.create()
+ self.checkParams(widget, 'offvalue', 1, 2.3, '', 'any string')
+
+ def test_onvalue(self):
+ widget = self.create()
+ self.checkParams(widget, 'onvalue', 1, 2.3, '', 'any string')
def test_invoke(self):
success = []
@@ -104,21 +274,40 @@ class CheckbuttonTest(unittest.TestCase):
cbtn['command'] = ''
res = cbtn.invoke()
- self.assertEqual(res, '')
- self.assertFalse(len(success) > 1)
+ self.assertFalse(str(res))
+ self.assertLessEqual(len(success), 1)
self.assertEqual(cbtn['offvalue'],
cbtn.tk.globalgetvar(cbtn['variable']))
-class ComboboxTest(unittest.TestCase):
+@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
+class ComboboxTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'class', 'cursor', 'exportselection', 'height',
+ 'justify', 'postcommand', 'state', 'style',
+ 'takefocus', 'textvariable', 'values', 'width',
+ )
def setUp(self):
+ super(ComboboxTest, self).setUp()
support.root_deiconify()
- self.combo = ttk.Combobox()
+ self.combo = self.create()
def tearDown(self):
self.combo.destroy()
support.root_withdraw()
+ super(ComboboxTest, self).tearDown()
+
+ def _create(self, **kwargs):
+ return ttk.Combobox(self.root, **kwargs)
+
+ def test_height(self):
+ widget = self.create()
+ self.checkParams(widget, 'height', 100, 101.2, 102.6, -100, 0, '1i')
+
+ def test_state(self):
+ widget = self.create()
+ self.checkParams(widget, 'state', 'active', 'disabled', 'normal')
def _show_drop_down_listbox(self):
width = self.combo.winfo_width()
@@ -166,8 +355,16 @@ class ComboboxTest(unittest.TestCase):
self.assertEqual(self.combo.get(), getval)
self.assertEqual(self.combo.current(), currval)
+ self.assertEqual(self.combo['values'],
+ () if tcl_version < (8, 5) else '')
check_get_current('', -1)
+ self.checkParam(self.combo, 'values', 'mon tue wed thur',
+ expected=('mon', 'tue', 'wed', 'thur'))
+ self.checkParam(self.combo, 'values', ('mon', 'tue', 'wed', 'thur'))
+ self.checkParam(self.combo, 'values', (42, 3.14, '', 'any string'))
+ self.checkParam(self.combo, 'values', () if tcl_version < (8, 5) else '')
+
self.combo['values'] = ['a', 1, 'c']
self.combo.set('c')
@@ -186,7 +383,21 @@ class ComboboxTest(unittest.TestCase):
# testing values with empty string set through configure
self.combo.configure(values=[1, '', 2])
- self.assertEqual(self.combo['values'], ('1', '', '2'))
+ self.assertEqual(self.combo['values'],
+ ('1', '', '2') if self.wantobjects else
+ '1 {} 2')
+
+ # testing values with spaces
+ self.combo['values'] = ['a b', 'a\tb', 'a\nb']
+ self.assertEqual(self.combo['values'],
+ ('a b', 'a\tb', 'a\nb') if self.wantobjects else
+ '{a b} {a\tb} {a\nb}')
+
+ # testing values with special characters
+ self.combo['values'] = [r'a\tb', '"a"', '} {']
+ self.assertEqual(self.combo['values'],
+ (r'a\tb', '"a"', '} {') if self.wantobjects else
+ r'a\\tb {"a"} \}\ \{')
# out of range
self.assertRaises(Tkinter.TclError, self.combo.current,
@@ -196,26 +407,61 @@ class ComboboxTest(unittest.TestCase):
# testing creating combobox with empty string in values
combo2 = ttk.Combobox(values=[1, 2, ''])
- self.assertEqual(combo2['values'], ('1', '2', ''))
+ self.assertEqual(combo2['values'],
+ ('1', '2', '') if self.wantobjects else '1 2 {}')
combo2.destroy()
-class EntryTest(unittest.TestCase):
+@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
+class EntryTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'background', 'class', 'cursor',
+ 'exportselection', 'font',
+ 'invalidcommand', 'justify',
+ 'show', 'state', 'style', 'takefocus', 'textvariable',
+ 'validate', 'validatecommand', 'width', 'xscrollcommand',
+ )
def setUp(self):
+ super(EntryTest, self).setUp()
support.root_deiconify()
- self.entry = ttk.Entry()
+ self.entry = self.create()
def tearDown(self):
self.entry.destroy()
support.root_withdraw()
+ super(EntryTest, self).tearDown()
+ def _create(self, **kwargs):
+ return ttk.Entry(self.root, **kwargs)
+
+ def test_invalidcommand(self):
+ widget = self.create()
+ self.checkCommandParam(widget, 'invalidcommand')
+
+ def test_show(self):
+ widget = self.create()
+ self.checkParam(widget, 'show', '*')
+ self.checkParam(widget, 'show', '')
+ self.checkParam(widget, 'show', ' ')
+
+ def test_state(self):
+ widget = self.create()
+ self.checkParams(widget, 'state',
+ 'disabled', 'normal', 'readonly')
+
+ def test_validate(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'validate',
+ 'all', 'key', 'focus', 'focusin', 'focusout', 'none')
+
+ def test_validatecommand(self):
+ widget = self.create()
+ self.checkCommandParam(widget, 'validatecommand')
- def test_bbox(self):
- self.assertEqual(len(self.entry.bbox(0)), 4)
- for item in self.entry.bbox(0):
- self.assertTrue(isinstance(item, int))
+ def test_bbox(self):
+ self.assertIsBoundingBox(self.entry.bbox(0))
self.assertRaises(Tkinter.TclError, self.entry.bbox, 'noindex')
self.assertRaises(Tkinter.TclError, self.entry.bbox, None)
@@ -304,16 +550,36 @@ class EntryTest(unittest.TestCase):
self.assertEqual(self.entry.state(), ())
-class PanedwindowTest(unittest.TestCase):
+@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
+class PanedWindowTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'class', 'cursor', 'height',
+ 'orient', 'style', 'takefocus', 'width',
+ )
def setUp(self):
+ super(PanedWindowTest, self).setUp()
support.root_deiconify()
- self.paned = ttk.Panedwindow()
+ self.paned = self.create()
def tearDown(self):
self.paned.destroy()
support.root_withdraw()
-
+ super(PanedWindowTest, self).tearDown()
+
+ def _create(self, **kwargs):
+ return ttk.PanedWindow(self.root, **kwargs)
+
+ def test_orient(self):
+ widget = self.create()
+ self.assertEqual(str(widget['orient']), 'vertical')
+ errmsg='attempt to change read-only option'
+ if get_tk_patchlevel() < (8, 6, 0): # actually this was changed in 8.6b3
+ errmsg='Attempt to change read-only option'
+ self.checkInvalidParam(widget, 'orient', 'horizontal',
+ errmsg=errmsg)
+ widget2 = self.create(orient='horizontal')
+ self.assertEqual(str(widget2['orient']), 'horizontal')
def test_add(self):
# attempt to add a child that is not a direct child of the paned window
@@ -392,10 +658,12 @@ class PanedwindowTest(unittest.TestCase):
child = ttk.Label()
self.paned.add(child)
- self.assertTrue(isinstance(self.paned.pane(0), dict))
- self.assertEqual(self.paned.pane(0, weight=None), 0)
+ self.assertIsInstance(self.paned.pane(0), dict)
+ self.assertEqual(self.paned.pane(0, weight=None),
+ 0 if self.wantobjects else '0')
# newer form for querying a single option
- self.assertEqual(self.paned.pane(0, 'weight'), 0)
+ self.assertEqual(self.paned.pane(0, 'weight'),
+ 0 if self.wantobjects else '0')
self.assertEqual(self.paned.pane(0), self.paned.pane(str(child)))
self.assertRaises(Tkinter.TclError, self.paned.pane, 0,
@@ -419,11 +687,26 @@ class PanedwindowTest(unittest.TestCase):
curr_pos = self.paned.sashpos(0)
self.paned.sashpos(0, 1000)
- self.assertTrue(curr_pos != self.paned.sashpos(0))
- self.assertTrue(isinstance(self.paned.sashpos(0), int))
+ self.assertNotEqual(curr_pos, self.paned.sashpos(0))
+ self.assertIsInstance(self.paned.sashpos(0), int)
+
+@add_standard_options(StandardTtkOptionsTests)
+class RadiobuttonTest(AbstractLabelTest, unittest.TestCase):
+ OPTIONS = (
+ 'class', 'command', 'compound', 'cursor',
+ 'image',
+ 'state', 'style',
+ 'takefocus', 'text', 'textvariable',
+ 'underline', 'value', 'variable', 'width',
+ )
-class RadiobuttonTest(unittest.TestCase):
+ def _create(self, **kwargs):
+ return ttk.Radiobutton(self.root, **kwargs)
+
+ def test_value(self):
+ widget = self.create()
+ self.checkParams(widget, 'value', 1, 2.3, '', 'any string')
def test_invoke(self):
success = []
@@ -435,37 +718,91 @@ class RadiobuttonTest(unittest.TestCase):
cbtn = ttk.Radiobutton(command=cb_test, variable=myvar, value=0)
cbtn2 = ttk.Radiobutton(command=cb_test, variable=myvar, value=1)
+ if self.wantobjects:
+ conv = lambda x: x
+ else:
+ conv = int
+
res = cbtn.invoke()
self.assertEqual(res, "cb test called")
- self.assertEqual(cbtn['value'], myvar.get())
+ self.assertEqual(conv(cbtn['value']), myvar.get())
self.assertEqual(myvar.get(),
- cbtn.tk.globalgetvar(cbtn['variable']))
+ conv(cbtn.tk.globalgetvar(cbtn['variable'])))
self.assertTrue(success)
cbtn2['command'] = ''
res = cbtn2.invoke()
- self.assertEqual(res, '')
- self.assertFalse(len(success) > 1)
- self.assertEqual(cbtn2['value'], myvar.get())
+ self.assertEqual(str(res), '')
+ self.assertLessEqual(len(success), 1)
+ self.assertEqual(conv(cbtn2['value']), myvar.get())
self.assertEqual(myvar.get(),
- cbtn.tk.globalgetvar(cbtn['variable']))
+ conv(cbtn.tk.globalgetvar(cbtn['variable'])))
self.assertEqual(str(cbtn['variable']), str(cbtn2['variable']))
+class MenubuttonTest(AbstractLabelTest, unittest.TestCase):
+ OPTIONS = (
+ 'class', 'compound', 'cursor', 'direction',
+ 'image', 'menu', 'state', 'style',
+ 'takefocus', 'text', 'textvariable',
+ 'underline', 'width',
+ )
+
+ def _create(self, **kwargs):
+ return ttk.Menubutton(self.root, **kwargs)
-class ScaleTest(unittest.TestCase):
+ def test_direction(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'direction',
+ 'above', 'below', 'left', 'right', 'flush')
+
+ def test_menu(self):
+ widget = self.create()
+ menu = Tkinter.Menu(widget, name='menu')
+ self.checkParam(widget, 'menu', menu, conv=str)
+ menu.destroy()
+
+
+@add_standard_options(StandardTtkOptionsTests)
+class ScaleTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'class', 'command', 'cursor', 'from', 'length',
+ 'orient', 'style', 'takefocus', 'to', 'value', 'variable',
+ )
+ _conv_pixels = noconv_meth
+ default_orient = 'horizontal'
def setUp(self):
+ super(ScaleTest, self).setUp()
support.root_deiconify()
- self.scale = ttk.Scale()
+ self.scale = self.create()
self.scale.pack()
self.scale.update()
def tearDown(self):
self.scale.destroy()
support.root_withdraw()
+ super(ScaleTest, self).tearDown()
+
+ def _create(self, **kwargs):
+ return ttk.Scale(self.root, **kwargs)
+
+ def test_from(self):
+ widget = self.create()
+ self.checkFloatParam(widget, 'from', 100, 14.9, 15.1, conv=False)
+ def test_length(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'length', 130, 131.2, 135.6, '5i')
+
+ def test_to(self):
+ widget = self.create()
+ self.checkFloatParam(widget, 'to', 300, 14.9, 15.1, -10, conv=False)
+
+ def test_value(self):
+ widget = self.create()
+ self.checkFloatParam(widget, 'value', 300, 14.9, 15.1, -10, conv=False)
def test_custom_event(self):
failure = [1, 1, 1] # will need to be empty
@@ -487,10 +824,15 @@ class ScaleTest(unittest.TestCase):
def test_get(self):
+ if self.wantobjects:
+ conv = lambda x: x
+ else:
+ conv = float
+
scale_width = self.scale.winfo_width()
self.assertEqual(self.scale.get(scale_width, 0), self.scale['to'])
- self.assertEqual(self.scale.get(0, 0), self.scale['from'])
+ self.assertEqual(conv(self.scale.get(0, 0)), conv(self.scale['from']))
self.assertEqual(self.scale.get(), self.scale['value'])
self.scale['value'] = 30
self.assertEqual(self.scale.get(), self.scale['value'])
@@ -500,41 +842,99 @@ class ScaleTest(unittest.TestCase):
def test_set(self):
+ if self.wantobjects:
+ conv = lambda x: x
+ else:
+ conv = float
+
# set restricts the max/min values according to the current range
- max = self.scale['to']
+ max = conv(self.scale['to'])
new_max = max + 10
self.scale.set(new_max)
- self.assertEqual(self.scale.get(), max)
- min = self.scale['from']
+ self.assertEqual(conv(self.scale.get()), max)
+ min = conv(self.scale['from'])
self.scale.set(min - 1)
- self.assertEqual(self.scale.get(), min)
+ self.assertEqual(conv(self.scale.get()), min)
# changing the variable directly doesn't impose this limitation, though
var = Tkinter.DoubleVar()
self.scale['variable'] = var
var.set(max + 5)
- self.assertEqual(self.scale.get(), var.get())
- self.assertEqual(self.scale.get(), max + 5)
+ self.assertEqual(conv(self.scale.get()), var.get())
+ self.assertEqual(conv(self.scale.get()), max + 5)
del var
# the same happens with the value option
self.scale['value'] = max + 10
- self.assertEqual(self.scale.get(), max + 10)
- self.assertEqual(self.scale.get(), self.scale['value'])
+ self.assertEqual(conv(self.scale.get()), max + 10)
+ self.assertEqual(conv(self.scale.get()), conv(self.scale['value']))
# nevertheless, note that the max/min values we can get specifying
# x, y coords are the ones according to the current range
- self.assertEqual(self.scale.get(0, 0), min)
- self.assertEqual(self.scale.get(self.scale.winfo_width(), 0), max)
+ self.assertEqual(conv(self.scale.get(0, 0)), min)
+ self.assertEqual(conv(self.scale.get(self.scale.winfo_width(), 0)), max)
self.assertRaises(Tkinter.TclError, self.scale.set, None)
-class NotebookTest(unittest.TestCase):
+@add_standard_options(StandardTtkOptionsTests)
+class ProgressbarTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'class', 'cursor', 'orient', 'length',
+ 'mode', 'maximum', 'phase',
+ 'style', 'takefocus', 'value', 'variable',
+ )
+ _conv_pixels = noconv_meth
+ default_orient = 'horizontal'
+
+ def _create(self, **kwargs):
+ return ttk.Progressbar(self.root, **kwargs)
+
+ def test_length(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'length', 100.1, 56.7, '2i')
+
+ def test_maximum(self):
+ widget = self.create()
+ self.checkFloatParam(widget, 'maximum', 150.2, 77.7, 0, -10, conv=False)
+
+ def test_mode(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'mode', 'determinate', 'indeterminate')
+
+ def test_phase(self):
+ # XXX
+ pass
+
+ def test_value(self):
+ widget = self.create()
+ self.checkFloatParam(widget, 'value', 150.2, 77.7, 0, -10,
+ conv=False)
+
+
+@unittest.skipIf(sys.platform == 'darwin',
+ 'ttk.Scrollbar is special on MacOSX')
+@add_standard_options(StandardTtkOptionsTests)
+class ScrollbarTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'class', 'command', 'cursor', 'orient', 'style', 'takefocus',
+ )
+ default_orient = 'vertical'
+
+ def _create(self, **kwargs):
+ return ttk.Scrollbar(self.root, **kwargs)
+
+
+@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
+class NotebookTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'class', 'cursor', 'height', 'padding', 'style', 'takefocus',
+ )
def setUp(self):
+ super(NotebookTest, self).setUp()
support.root_deiconify()
- self.nb = ttk.Notebook(padding=0)
+ self.nb = self.create(padding=0)
self.child1 = ttk.Label()
self.child2 = ttk.Label()
self.nb.add(self.child1, text='a')
@@ -545,7 +945,10 @@ class NotebookTest(unittest.TestCase):
self.child2.destroy()
self.nb.destroy()
support.root_withdraw()
+ super(NotebookTest, self).tearDown()
+ def _create(self, **kwargs):
+ return ttk.Notebook(self.root, **kwargs)
def test_tab_identifiers(self):
self.nb.forget(0)
@@ -602,7 +1005,7 @@ class NotebookTest(unittest.TestCase):
self.nb.add(self.child2)
self.assertEqual(self.nb.tabs(), tabs)
self.assertEqual(self.nb.index(self.child2), child2_index)
- self.assertTrue(str(self.child2) == self.nb.tabs()[child2_index])
+ self.assertEqual(str(self.child2), self.nb.tabs()[child2_index])
# but the tab next to it (not hidden) is the one selected now
self.assertEqual(self.nb.index('current'), curr + 1)
@@ -615,19 +1018,19 @@ class NotebookTest(unittest.TestCase):
tabs = self.nb.tabs()
child1_index = self.nb.index(self.child1)
self.nb.forget(self.child1)
- self.assertFalse(str(self.child1) in self.nb.tabs())
+ self.assertNotIn(str(self.child1), self.nb.tabs())
self.assertEqual(len(tabs) - 1, len(self.nb.tabs()))
self.nb.add(self.child1)
self.assertEqual(self.nb.index(self.child1), 1)
- self.assertFalse(child1_index == self.nb.index(self.child1))
+ self.assertNotEqual(child1_index, self.nb.index(self.child1))
def test_index(self):
self.assertRaises(Tkinter.TclError, self.nb.index, -1)
self.assertRaises(Tkinter.TclError, self.nb.index, None)
- self.assertTrue(isinstance(self.nb.index('end'), int))
+ self.assertIsInstance(self.nb.index('end'), int)
self.assertEqual(self.nb.index(self.child1), 0)
self.assertEqual(self.nb.index(self.child2), 1)
self.assertEqual(self.nb.index('end'), 2)
@@ -691,7 +1094,7 @@ class NotebookTest(unittest.TestCase):
self.assertRaises(Tkinter.TclError, self.nb.tab, 'notab')
self.assertRaises(Tkinter.TclError, self.nb.tab, None)
- self.assertTrue(isinstance(self.nb.tab(self.child1), dict))
+ self.assertIsInstance(self.nb.tab(self.child1), dict)
self.assertEqual(self.nb.tab(self.child1, text=None), 'a')
# newer form for querying a single option
self.assertEqual(self.nb.tab(self.child1, 'text'), 'a')
@@ -737,16 +1140,68 @@ class NotebookTest(unittest.TestCase):
self.assertEqual(self.nb.select(), str(self.child1))
-class TreeviewTest(unittest.TestCase):
+@add_standard_options(StandardTtkOptionsTests)
+class TreeviewTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'class', 'columns', 'cursor', 'displaycolumns',
+ 'height', 'padding', 'selectmode', 'show',
+ 'style', 'takefocus', 'xscrollcommand', 'yscrollcommand',
+ )
def setUp(self):
+ super(TreeviewTest, self).setUp()
support.root_deiconify()
- self.tv = ttk.Treeview(padding=0)
+ self.tv = self.create(padding=0)
def tearDown(self):
self.tv.destroy()
support.root_withdraw()
-
+ super(TreeviewTest, self).tearDown()
+
+ def _create(self, **kwargs):
+ return ttk.Treeview(self.root, **kwargs)
+
+ def test_columns(self):
+ widget = self.create()
+ self.checkParam(widget, 'columns', 'a b c',
+ expected=('a', 'b', 'c'))
+ self.checkParam(widget, 'columns', ('a', 'b', 'c'))
+ self.checkParam(widget, 'columns', () if tcl_version < (8, 5) else '')
+
+ def test_displaycolumns(self):
+ widget = self.create()
+ widget['columns'] = ('a', 'b', 'c')
+ self.checkParam(widget, 'displaycolumns', 'b a c',
+ expected=('b', 'a', 'c'))
+ self.checkParam(widget, 'displaycolumns', ('b', 'a', 'c'))
+ self.checkParam(widget, 'displaycolumns', '#all',
+ expected=('#all',))
+ self.checkParam(widget, 'displaycolumns', (2, 1, 0))
+ self.checkInvalidParam(widget, 'displaycolumns', ('a', 'b', 'd'),
+ errmsg='Invalid column index d')
+ self.checkInvalidParam(widget, 'displaycolumns', (1, 2, 3),
+ errmsg='Column index 3 out of bounds')
+ self.checkInvalidParam(widget, 'displaycolumns', (1, -2),
+ errmsg='Column index -2 out of bounds')
+
+ def test_height(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'height', 100, -100, 0, '3c', conv=False)
+ self.checkPixelsParam(widget, 'height', 101.2, 102.6, conv=noconv)
+
+ def test_selectmode(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'selectmode',
+ 'none', 'browse', 'extended')
+
+ def test_show(self):
+ widget = self.create()
+ self.checkParam(widget, 'show', 'tree headings',
+ expected=('tree', 'headings'))
+ self.checkParam(widget, 'show', ('tree', 'headings'))
+ self.checkParam(widget, 'show', ('headings', 'tree'))
+ self.checkParam(widget, 'show', 'tree', expected=('tree',))
+ self.checkParam(widget, 'show', 'headings', expected=('headings',))
def test_bbox(self):
self.tv.pack()
@@ -759,18 +1214,15 @@ class TreeviewTest(unittest.TestCase):
self.assertTrue(children)
bbox = self.tv.bbox(children[0])
- self.assertEqual(len(bbox), 4)
- self.assertTrue(isinstance(bbox, tuple))
- for item in bbox:
- if not isinstance(item, int):
- self.fail("Invalid bounding box: %s" % bbox)
- break
+ self.assertIsBoundingBox(bbox)
# compare width in bboxes
self.tv['columns'] = ['test']
self.tv.column('test', width=50)
bbox_column0 = self.tv.bbox(children[0], 0)
root_width = self.tv.column('#0', width=None)
+ if not self.wantobjects:
+ root_width = int(root_width)
self.assertEqual(bbox_column0[0], bbox[0] + root_width)
# verify that bbox of a closed item is the empty string
@@ -783,7 +1235,7 @@ class TreeviewTest(unittest.TestCase):
self.assertEqual(self.tv.get_children(), ())
item_id = self.tv.insert('', 'end')
- self.assertTrue(isinstance(self.tv.get_children(), tuple))
+ self.assertIsInstance(self.tv.get_children(), tuple)
self.assertEqual(self.tv.get_children()[0], item_id)
# add item_id and child3 as children of child2
@@ -808,14 +1260,17 @@ class TreeviewTest(unittest.TestCase):
def test_column(self):
# return a dict with all options/values
- self.assertTrue(isinstance(self.tv.column('#0'), dict))
+ self.assertIsInstance(self.tv.column('#0'), dict)
# return a single value of the given option
- self.assertTrue(isinstance(self.tv.column('#0', width=None), int))
+ if self.wantobjects:
+ self.assertIsInstance(self.tv.column('#0', width=None), int)
# set a new value for an option
self.tv.column('#0', width=10)
# testing new way to get option value
- self.assertEqual(self.tv.column('#0', 'width'), 10)
- self.assertEqual(self.tv.column('#0', width=None), 10)
+ self.assertEqual(self.tv.column('#0', 'width'),
+ 10 if self.wantobjects else '10')
+ self.assertEqual(self.tv.column('#0', width=None),
+ 10 if self.wantobjects else '10')
# check read-only option
self.assertRaises(Tkinter.TclError, self.tv.column, '#0', id='X')
@@ -923,7 +1378,7 @@ class TreeviewTest(unittest.TestCase):
def test_heading(self):
# check a dict is returned
- self.assertTrue(isinstance(self.tv.heading('#0'), dict))
+ self.assertIsInstance(self.tv.heading('#0'), dict)
# check a value is returned
self.tv.heading('#0', text='hi')
@@ -937,12 +1392,10 @@ class TreeviewTest(unittest.TestCase):
self.assertRaises(Tkinter.TclError, self.tv.heading, '#0',
anchor=1)
- # XXX skipping for now; should be fixed to work with newer ttk
- @unittest.skip("skipping pending resolution of Issue #10734")
def test_heading_callback(self):
def simulate_heading_click(x, y):
support.simulate_mouse_click(self.tv, x, y)
- self.tv.update_idletasks()
+ self.tv.update()
success = [] # no success for now
@@ -1030,13 +1483,16 @@ class TreeviewTest(unittest.TestCase):
# unicode values
value = u'\xe1ba'
item = self.tv.insert('', 'end', values=(value, ))
- self.assertEqual(self.tv.item(item, 'values'), (value, ))
- self.assertEqual(self.tv.item(item, values=None), (value, ))
+ self.assertEqual(self.tv.item(item, 'values'),
+ (value,) if self.wantobjects else value)
+ self.assertEqual(self.tv.item(item, values=None),
+ (value,) if self.wantobjects else value)
- self.tv.item(item, values=list(self.tv.item(item, values=None)))
- self.assertEqual(self.tv.item(item, values=None), (value, ))
+ self.tv.item(item, values=self.root.splitlist(self.tv.item(item, values=None)))
+ self.assertEqual(self.tv.item(item, values=None),
+ (value,) if self.wantobjects else value)
- self.assertTrue(isinstance(self.tv.item(item), dict))
+ self.assertIsInstance(self.tv.item(item), dict)
# erase item values
self.tv.item(item, values='')
@@ -1044,17 +1500,21 @@ class TreeviewTest(unittest.TestCase):
# item tags
item = self.tv.insert('', 'end', tags=[1, 2, value])
- self.assertEqual(self.tv.item(item, tags=None), ('1', '2', value))
+ self.assertEqual(self.tv.item(item, tags=None),
+ ('1', '2', value) if self.wantobjects else
+ '1 2 %s' % value)
self.tv.item(item, tags=[])
self.assertFalse(self.tv.item(item, tags=None))
self.tv.item(item, tags=(1, 2))
- self.assertEqual(self.tv.item(item, tags=None), ('1', '2'))
+ self.assertEqual(self.tv.item(item, tags=None),
+ ('1', '2') if self.wantobjects else '1 2')
# values with spaces
item = self.tv.insert('', 'end', values=('a b c',
'%s %s' % (value, value)))
self.assertEqual(self.tv.item(item, values=None),
- ('a b c', '%s %s' % (value, value)))
+ ('a b c', '%s %s' % (value, value)) if self.wantobjects else
+ '{a b c} {%s %s}' % (value, value))
# text
self.assertEqual(self.tv.item(
@@ -1071,19 +1531,24 @@ class TreeviewTest(unittest.TestCase):
self.assertEqual(self.tv.set(item), {'A': 'a', 'B': 'b'})
self.tv.set(item, 'B', 'a')
- self.assertEqual(self.tv.item(item, values=None), ('a', 'a'))
+ self.assertEqual(self.tv.item(item, values=None),
+ ('a', 'a') if self.wantobjects else 'a a')
self.tv['columns'] = ['B']
self.assertEqual(self.tv.set(item), {'B': 'a'})
self.tv.set(item, 'B', 'b')
self.assertEqual(self.tv.set(item, column='B'), 'b')
- self.assertEqual(self.tv.item(item, values=None), ('b', 'a'))
+ self.assertEqual(self.tv.item(item, values=None),
+ ('b', 'a') if self.wantobjects else 'b a')
self.tv.set(item, 'B', 123)
- self.assertEqual(self.tv.set(item, 'B'), 123)
- self.assertEqual(self.tv.item(item, values=None), (123, 'a'))
- self.assertEqual(self.tv.set(item), {'B': 123})
+ self.assertEqual(self.tv.set(item, 'B'),
+ 123 if self.wantobjects else '123')
+ self.assertEqual(self.tv.item(item, values=None),
+ (123, 'a') if self.wantobjects else '123 a')
+ self.assertEqual(self.tv.set(item),
+ {'B': 123} if self.wantobjects else {'B': '123'})
# inexistent column
self.assertRaises(Tkinter.TclError, self.tv.set, item, 'A')
@@ -1137,13 +1602,38 @@ class TreeviewTest(unittest.TestCase):
'blue')
self.assertEqual(str(self.tv.tag_configure('test', foreground=None)),
'blue')
- self.assertTrue(isinstance(self.tv.tag_configure('test'), dict))
+ self.assertIsInstance(self.tv.tag_configure('test'), dict)
+
+
+@add_standard_options(StandardTtkOptionsTests)
+class SeparatorTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'class', 'cursor', 'orient', 'style', 'takefocus',
+ # 'state'?
+ )
+ default_orient = 'horizontal'
+
+ def _create(self, **kwargs):
+ return ttk.Separator(self.root, **kwargs)
+
+
+@add_standard_options(StandardTtkOptionsTests)
+class SizegripTest(AbstractWidgetTest, unittest.TestCase):
+ OPTIONS = (
+ 'class', 'cursor', 'style', 'takefocus',
+ # 'state'?
+ )
+
+ def _create(self, **kwargs):
+ return ttk.Sizegrip(self.root, **kwargs)
tests_gui = (
- WidgetTest, ButtonTest, CheckbuttonTest, RadiobuttonTest,
- ComboboxTest, EntryTest, PanedwindowTest, ScaleTest, NotebookTest,
- TreeviewTest
+ ButtonTest, CheckbuttonTest, ComboboxTest, EntryTest,
+ FrameTest, LabelFrameTest, LabelTest, MenubuttonTest,
+ NotebookTest, PanedWindowTest, ProgressbarTest,
+ RadiobuttonTest, ScaleTest, ScrollbarTest, SeparatorTest,
+ SizegripTest, TreeviewTest, WidgetTest,
)
if __name__ == "__main__":
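A minimal, Tk-free sketch of the wantobjects-aware comparison pattern the rewritten tests above rely on (the conv = lambda/int pairs in test_invoke and test_get); make_conv is a helper name invented for this illustration only:

    def make_conv(wantobjects, pytype):
        # With wantobjects the Tcl value already arrives as the right Python
        # type; without it everything arrives as a string and has to be
        # converted before comparing against a Python number.
        return (lambda x: x) if wantobjects else pytype

    conv = make_conv(False, float)
    assert conv('2.5') == 2.5
    conv = make_conv(True, float)
    assert conv(2.5) == 2.5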
diff --git a/Lib/lib-tk/test/widget_tests.py b/Lib/lib-tk/test/widget_tests.py
new file mode 100644
index 0000000..28b245d
--- /dev/null
+++ b/Lib/lib-tk/test/widget_tests.py
@@ -0,0 +1,547 @@
+# Common tests for test_tkinter/test_widgets.py and test_ttk/test_widgets.py
+
+import unittest
+import sys
+import Tkinter
+from ttk import setup_master, Scale
+from test_ttk.support import (tcl_version, requires_tcl, get_tk_patchlevel,
+ pixels_conv, tcl_obj_eq)
+import test.test_support
+
+
+noconv = noconv_meth = False
+if get_tk_patchlevel() < (8, 5, 11):
+ noconv = str
+noconv_meth = noconv and staticmethod(noconv)
+
+def int_round(x):
+ return int(round(x))
+
+pixels_round = int_round
+if get_tk_patchlevel()[:3] == (8, 5, 11):
+ # Issue #19085: Workaround a bug in Tk
+ # http://core.tcl.tk/tk/info/3497848
+ pixels_round = int
+
+
+_sentinel = object()
+
+class AbstractWidgetTest(object):
+ _conv_pixels = staticmethod(pixels_round)
+ _conv_pad_pixels = None
+ wantobjects = True
+
+ def setUp(self):
+ self.root = setup_master()
+ self.scaling = float(self.root.call('tk', 'scaling'))
+ if not self.root.wantobjects():
+ self.wantobjects = False
+
+ def tearDown(self):
+ for w in self.root.winfo_children():
+ w.destroy()
+
+ def _str(self, value):
+ if self.wantobjects and tcl_version >= (8, 6):
+ return value
+ if isinstance(value, tuple):
+ return ' '.join(map(self._str, value))
+ return str(value)
+
+ def create(self, **kwargs):
+ widget = self._create(**kwargs)
+ self.addCleanup(widget.destroy)
+ return widget
+
+ def assertEqual2(self, actual, expected, msg=None, eq=object.__eq__):
+ if eq(actual, expected):
+ return
+ self.assertEqual(actual, expected, msg)
+
+ def checkParam(self, widget, name, value, expected=_sentinel,
+ conv=False, eq=None):
+ widget[name] = value
+ if expected is _sentinel:
+ expected = value
+ if conv:
+ expected = conv(expected)
+ if not self.wantobjects:
+ if isinstance(expected, tuple):
+ expected = Tkinter._join(expected)
+ else:
+ expected = str(expected)
+ if eq is None:
+ eq = tcl_obj_eq
+ self.assertEqual2(widget[name], expected, eq=eq)
+ self.assertEqual2(widget.cget(name), expected, eq=eq)
+ # XXX
+ if not isinstance(widget, Scale):
+ t = widget.configure(name)
+ self.assertEqual(len(t), 5)
+ self.assertEqual2(t[4], expected, eq=eq)
+
+ def checkInvalidParam(self, widget, name, value, errmsg=None,
+ keep_orig=True):
+ orig = widget[name]
+ if errmsg is not None:
+ errmsg = errmsg.format(value)
+ with self.assertRaises(Tkinter.TclError) as cm:
+ widget[name] = value
+ if errmsg is not None:
+ self.assertEqual(str(cm.exception), errmsg)
+ if keep_orig:
+ self.assertEqual(widget[name], orig)
+ else:
+ widget[name] = orig
+ with self.assertRaises(Tkinter.TclError) as cm:
+ widget.configure({name: value})
+ if errmsg is not None:
+ self.assertEqual(str(cm.exception), errmsg)
+ if keep_orig:
+ self.assertEqual(widget[name], orig)
+ else:
+ widget[name] = orig
+
+ def checkParams(self, widget, name, *values, **kwargs):
+ for value in values:
+ self.checkParam(widget, name, value, **kwargs)
+
+ def checkIntegerParam(self, widget, name, *values, **kwargs):
+ self.checkParams(widget, name, *values, **kwargs)
+ self.checkInvalidParam(widget, name, '',
+ errmsg='expected integer but got ""')
+ self.checkInvalidParam(widget, name, '10p',
+ errmsg='expected integer but got "10p"')
+ self.checkInvalidParam(widget, name, 3.2,
+ errmsg='expected integer but got "3.2"')
+
+ def checkFloatParam(self, widget, name, *values, **kwargs):
+ if 'conv' in kwargs:
+ conv = kwargs.pop('conv')
+ else:
+ conv = float
+ for value in values:
+ self.checkParam(widget, name, value, conv=conv, **kwargs)
+ self.checkInvalidParam(widget, name, '',
+ errmsg='expected floating-point number but got ""')
+ self.checkInvalidParam(widget, name, 'spam',
+ errmsg='expected floating-point number but got "spam"')
+
+ def checkBooleanParam(self, widget, name):
+ for value in (False, 0, 'false', 'no', 'off'):
+ self.checkParam(widget, name, value, expected=0)
+ for value in (True, 1, 'true', 'yes', 'on'):
+ self.checkParam(widget, name, value, expected=1)
+ self.checkInvalidParam(widget, name, '',
+ errmsg='expected boolean value but got ""')
+ self.checkInvalidParam(widget, name, 'spam',
+ errmsg='expected boolean value but got "spam"')
+
+ def checkColorParam(self, widget, name, allow_empty=None, **kwargs):
+ self.checkParams(widget, name,
+ '#ff0000', '#00ff00', '#0000ff', '#123456',
+ 'red', 'green', 'blue', 'white', 'black', 'grey',
+ **kwargs)
+ self.checkInvalidParam(widget, name, 'spam',
+ errmsg='unknown color name "spam"')
+
+ def checkCursorParam(self, widget, name, **kwargs):
+ self.checkParams(widget, name, 'arrow', 'watch', 'cross', '', **kwargs)
+ if tcl_version >= (8, 5):
+ self.checkParam(widget, name, 'none')
+ self.checkInvalidParam(widget, name, 'spam',
+ errmsg='bad cursor spec "spam"')
+
+ def checkCommandParam(self, widget, name):
+ def command(*args):
+ pass
+ widget[name] = command
+ self.assertTrue(widget[name])
+ self.checkParams(widget, name, '')
+
+ def checkEnumParam(self, widget, name, *values, **kwargs):
+ if 'errmsg' in kwargs:
+ errmsg = kwargs.pop('errmsg')
+ else:
+ errmsg = None
+ self.checkParams(widget, name, *values, **kwargs)
+ if errmsg is None:
+ errmsg2 = ' %s "{}": must be %s%s or %s' % (
+ name,
+ ', '.join(values[:-1]),
+ ',' if len(values) > 2 else '',
+ values[-1])
+ self.checkInvalidParam(widget, name, '',
+ errmsg='ambiguous' + errmsg2)
+ errmsg = 'bad' + errmsg2
+ self.checkInvalidParam(widget, name, 'spam', errmsg=errmsg)
+
+ def checkPixelsParam(self, widget, name, *values, **kwargs):
+ if 'conv' in kwargs:
+ conv = kwargs.pop('conv')
+ else:
+ conv = None
+ if conv is None:
+ conv = self._conv_pixels
+ if 'keep_orig' in kwargs:
+ keep_orig = kwargs.pop('keep_orig')
+ else:
+ keep_orig = True
+ for value in values:
+ expected = _sentinel
+ conv1 = conv
+ if isinstance(value, str):
+ if conv1 and conv1 is not str:
+ expected = pixels_conv(value) * self.scaling
+ conv1 = int_round
+ self.checkParam(widget, name, value, expected=expected,
+ conv=conv1, **kwargs)
+ self.checkInvalidParam(widget, name, '6x',
+ errmsg='bad screen distance "6x"', keep_orig=keep_orig)
+ self.checkInvalidParam(widget, name, 'spam',
+ errmsg='bad screen distance "spam"', keep_orig=keep_orig)
+
+ def checkReliefParam(self, widget, name):
+ self.checkParams(widget, name,
+ 'flat', 'groove', 'raised', 'ridge', 'solid', 'sunken')
+ errmsg='bad relief "spam": must be '\
+ 'flat, groove, raised, ridge, solid, or sunken'
+ if tcl_version < (8, 6):
+ errmsg = None
+ self.checkInvalidParam(widget, name, 'spam',
+ errmsg=errmsg)
+
+ def checkImageParam(self, widget, name):
+ image = Tkinter.PhotoImage('image1')
+ self.checkParam(widget, name, image, conv=str)
+ self.checkInvalidParam(widget, name, 'spam',
+ errmsg='image "spam" doesn\'t exist')
+ widget[name] = ''
+
+ def checkVariableParam(self, widget, name, var):
+ self.checkParam(widget, name, var, conv=str)
+
+ def assertIsBoundingBox(self, bbox):
+ self.assertIsNotNone(bbox)
+ self.assertIsInstance(bbox, tuple)
+ if len(bbox) != 4:
+ self.fail('Invalid bounding box: %r' % (bbox,))
+ for item in bbox:
+ if not isinstance(item, int):
+ self.fail('Invalid bounding box: %r' % (bbox,))
+ break
+
+
+class StandardOptionsTests(object):
+ STANDARD_OPTIONS = (
+ 'activebackground', 'activeborderwidth', 'activeforeground', 'anchor',
+ 'background', 'bitmap', 'borderwidth', 'compound', 'cursor',
+ 'disabledforeground', 'exportselection', 'font', 'foreground',
+ 'highlightbackground', 'highlightcolor', 'highlightthickness',
+ 'image', 'insertbackground', 'insertborderwidth',
+ 'insertofftime', 'insertontime', 'insertwidth',
+ 'jump', 'justify', 'orient', 'padx', 'pady', 'relief',
+ 'repeatdelay', 'repeatinterval',
+ 'selectbackground', 'selectborderwidth', 'selectforeground',
+ 'setgrid', 'takefocus', 'text', 'textvariable', 'troughcolor',
+ 'underline', 'wraplength', 'xscrollcommand', 'yscrollcommand',
+ )
+
+ def test_activebackground(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'activebackground')
+
+ def test_activeborderwidth(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'activeborderwidth',
+ 0, 1.3, 2.9, 6, -2, '10p')
+
+ def test_activeforeground(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'activeforeground')
+
+ def test_anchor(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'anchor',
+ 'n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw', 'center')
+
+ def test_background(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'background')
+ if 'bg' in self.OPTIONS:
+ self.checkColorParam(widget, 'bg')
+
+ def test_bitmap(self):
+ widget = self.create()
+ self.checkParam(widget, 'bitmap', 'questhead')
+ self.checkParam(widget, 'bitmap', 'gray50')
+ filename = test.test_support.findfile('python.xbm', subdir='imghdrdata')
+ self.checkParam(widget, 'bitmap', '@' + filename)
+ # Cocoa Tk widgets don't detect invalid -bitmap values
+ # See https://core.tcl.tk/tk/info/31cd33dbf0
+ if not ('aqua' in self.root.tk.call('tk', 'windowingsystem') and
+ 'AppKit' in self.root.winfo_server()):
+ self.checkInvalidParam(widget, 'bitmap', 'spam',
+ errmsg='bitmap "spam" not defined')
+
+ def test_borderwidth(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'borderwidth',
+ 0, 1.3, 2.6, 6, -2, '10p')
+ if 'bd' in self.OPTIONS:
+ self.checkPixelsParam(widget, 'bd', 0, 1.3, 2.6, 6, -2, '10p')
+
+ def test_compound(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'compound',
+ 'bottom', 'center', 'left', 'none', 'right', 'top')
+
+ def test_cursor(self):
+ widget = self.create()
+ self.checkCursorParam(widget, 'cursor')
+
+ def test_disabledforeground(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'disabledforeground')
+
+ def test_exportselection(self):
+ widget = self.create()
+ self.checkBooleanParam(widget, 'exportselection')
+
+ def test_font(self):
+ widget = self.create()
+ self.checkParam(widget, 'font',
+ '-Adobe-Helvetica-Medium-R-Normal--*-120-*-*-*-*-*-*')
+ self.checkInvalidParam(widget, 'font', '',
+ errmsg='font "" doesn\'t exist')
+
+ def test_foreground(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'foreground')
+ if 'fg' in self.OPTIONS:
+ self.checkColorParam(widget, 'fg')
+
+ def test_highlightbackground(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'highlightbackground')
+
+ def test_highlightcolor(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'highlightcolor')
+
+ def test_highlightthickness(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'highlightthickness',
+ 0, 1.3, 2.6, 6, '10p')
+ self.checkParam(widget, 'highlightthickness', -2, expected=0,
+ conv=self._conv_pixels)
+
+ @unittest.skipIf(sys.platform == 'darwin',
+ 'crashes with Cocoa Tk (issue19733)')
+ def test_image(self):
+ widget = self.create()
+ self.checkImageParam(widget, 'image')
+
+ def test_insertbackground(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'insertbackground')
+
+ def test_insertborderwidth(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'insertborderwidth',
+ 0, 1.3, 2.6, 6, -2, '10p')
+
+ def test_insertofftime(self):
+ widget = self.create()
+ self.checkIntegerParam(widget, 'insertofftime', 100)
+
+ def test_insertontime(self):
+ widget = self.create()
+ self.checkIntegerParam(widget, 'insertontime', 100)
+
+ def test_insertwidth(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'insertwidth', 1.3, 2.6, -2, '10p')
+
+ def test_jump(self):
+ widget = self.create()
+ self.checkBooleanParam(widget, 'jump')
+
+ def test_justify(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'justify', 'left', 'right', 'center',
+ errmsg='bad justification "{}": must be '
+ 'left, right, or center')
+ self.checkInvalidParam(widget, 'justify', '',
+ errmsg='ambiguous justification "": must be '
+ 'left, right, or center')
+
+ def test_orient(self):
+ widget = self.create()
+ self.assertEqual(str(widget['orient']), self.default_orient)
+ self.checkEnumParam(widget, 'orient', 'horizontal', 'vertical')
+
+ def test_padx(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'padx', 3, 4.4, 5.6, -2, '12m',
+ conv=self._conv_pad_pixels)
+
+ def test_pady(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'pady', 3, 4.4, 5.6, -2, '12m',
+ conv=self._conv_pad_pixels)
+
+ def test_relief(self):
+ widget = self.create()
+ self.checkReliefParam(widget, 'relief')
+
+ def test_repeatdelay(self):
+ widget = self.create()
+ self.checkIntegerParam(widget, 'repeatdelay', -500, 500)
+
+ def test_repeatinterval(self):
+ widget = self.create()
+ self.checkIntegerParam(widget, 'repeatinterval', -500, 500)
+
+ def test_selectbackground(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'selectbackground')
+
+ def test_selectborderwidth(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'selectborderwidth', 1.3, 2.6, -2, '10p')
+
+ def test_selectforeground(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'selectforeground')
+
+ def test_setgrid(self):
+ widget = self.create()
+ self.checkBooleanParam(widget, 'setgrid')
+
+ def test_state(self):
+ widget = self.create()
+ self.checkEnumParam(widget, 'state', 'active', 'disabled', 'normal')
+
+ def test_takefocus(self):
+ widget = self.create()
+ self.checkParams(widget, 'takefocus', '0', '1', '')
+
+ def test_text(self):
+ widget = self.create()
+ self.checkParams(widget, 'text', '', 'any string')
+
+ def test_textvariable(self):
+ widget = self.create()
+ var = Tkinter.StringVar()
+ self.checkVariableParam(widget, 'textvariable', var)
+
+ def test_troughcolor(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'troughcolor')
+
+ def test_underline(self):
+ widget = self.create()
+ self.checkIntegerParam(widget, 'underline', 0, 1, 10)
+
+ def test_wraplength(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'wraplength', 100)
+
+ def test_xscrollcommand(self):
+ widget = self.create()
+ self.checkCommandParam(widget, 'xscrollcommand')
+
+ def test_yscrollcommand(self):
+ widget = self.create()
+ self.checkCommandParam(widget, 'yscrollcommand')
+
+ # non-standard but common options
+
+ def test_command(self):
+ widget = self.create()
+ self.checkCommandParam(widget, 'command')
+
+ def test_indicatoron(self):
+ widget = self.create()
+ self.checkBooleanParam(widget, 'indicatoron')
+
+ def test_offrelief(self):
+ widget = self.create()
+ self.checkReliefParam(widget, 'offrelief')
+
+ def test_overrelief(self):
+ widget = self.create()
+ self.checkReliefParam(widget, 'overrelief')
+
+ def test_selectcolor(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'selectcolor')
+
+ def test_selectimage(self):
+ widget = self.create()
+ self.checkImageParam(widget, 'selectimage')
+
+ @requires_tcl(8, 5)
+ def test_tristateimage(self):
+ widget = self.create()
+ self.checkImageParam(widget, 'tristateimage')
+
+ @requires_tcl(8, 5)
+ def test_tristatevalue(self):
+ widget = self.create()
+ self.checkParam(widget, 'tristatevalue', 'unknowable')
+
+ def test_variable(self):
+ widget = self.create()
+ var = Tkinter.DoubleVar()
+ self.checkVariableParam(widget, 'variable', var)
+
+
+class IntegerSizeTests(object):
+ def test_height(self):
+ widget = self.create()
+ self.checkIntegerParam(widget, 'height', 100, -100, 0)
+
+ def test_width(self):
+ widget = self.create()
+ self.checkIntegerParam(widget, 'width', 402, -402, 0)
+
+
+class PixelSizeTests(object):
+ def test_height(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'height', 100, 101.2, 102.6, -100, 0, '3c')
+
+ def test_width(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'width', 402, 403.4, 404.6, -402, 0, '5i')
+
+
+def add_standard_options(*source_classes):
+ # This decorator adds test_xxx methods from source classes for every xxx
+ # option in the OPTIONS class attribute if they are not defined explicitly.
+ def decorator(cls):
+ for option in cls.OPTIONS:
+ methodname = 'test_' + option
+ if not hasattr(cls, methodname):
+ for source_class in source_classes:
+ if hasattr(source_class, methodname):
+ setattr(cls, methodname,
+ getattr(source_class, methodname).im_func)
+ break
+ else:
+ def test(self, option=option):
+ widget = self.create()
+ widget[option]
+ raise AssertionError('Option "%s" is not tested in %s' %
+ (option, cls.__name__))
+ test.__name__ = methodname
+ setattr(cls, methodname, test)
+ return cls
+ return decorator
+
+def setUpModule():
+ if test.test_support.verbose:
+ tcl = Tkinter.Tcl()
+ print 'patchlevel =', tcl.call('info', 'patchlevel')
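The new widget_tests.py module above revolves around the add_standard_options() decorator, which copies a test_<option> method from the given mix-in classes for every name in a test class's OPTIONS tuple that the class does not define itself. A simplified, Tk-free sketch of that mechanism; the class and function names below are invented for the illustration:

    def add_option_tests(*sources):
        # Borrow an existing test_<option> method from the first source
        # class that defines one, unless the target class already has it.
        def decorator(cls):
            for option in cls.OPTIONS:
                name = 'test_' + option
                if hasattr(cls, name):
                    continue
                for src in sources:
                    if hasattr(src, name):
                        setattr(cls, name, getattr(src, name).im_func)
                        break
            return cls
        return decorator

    class ColorTests(object):
        def test_background(self):
            return 'background checked'

    @add_option_tests(ColorTests)
    class DemoWidgetTest(object):
        OPTIONS = ('background',)

    print DemoWidgetTest().test_background()   # -> background checked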
diff --git a/Lib/lib-tk/tkFont.py b/Lib/lib-tk/tkFont.py
index 229f251..61c2f86 100644
--- a/Lib/lib-tk/tkFont.py
+++ b/Lib/lib-tk/tkFont.py
@@ -78,7 +78,8 @@ class Font:
if exists:
self.delete_font = False
# confirm font exists
- if self.name not in root.tk.call("font", "names"):
+ if self.name not in root.tk.splitlist(
+ root.tk.call("font", "names")):
raise Tkinter._tkinter.TclError, "named font %s does not already exist" % (self.name,)
# if font config info supplied, apply it
if font:
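The tkFont.py hunk above wraps the result of the Tcl call "font names" in tk.splitlist() before the membership test. Without that, the call can return a single Tcl list string, and Python's `in` then performs a substring match instead of an element match. A Tk-free sketch of the difference, with str.split() standing in for the real splitlist() (which additionally honours Tcl quoting):

    names_as_string = 'TkDefaultFont TkTextFont TkFixedFont'
    print 'Font' in names_as_string           # True  - accidental substring hit
    print 'Font' in names_as_string.split()   # False - proper element test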
diff --git a/Lib/lib-tk/tkSimpleDialog.py b/Lib/lib-tk/tkSimpleDialog.py
index 24388a5..023475d 100644
--- a/Lib/lib-tk/tkSimpleDialog.py
+++ b/Lib/lib-tk/tkSimpleDialog.py
@@ -200,7 +200,7 @@ class _QueryDialog(Dialog):
self.entry = Entry(master, name="entry")
self.entry.grid(row=1, padx=5, sticky=W+E)
- if self.initialvalue:
+ if self.initialvalue is not None:
self.entry.insert(0, self.initialvalue)
self.entry.select_range(0, END)
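The tkSimpleDialog.py change above replaces a truthiness check with `is not None`, so that falsy but legitimate defaults such as 0, 0.0 or '' are still inserted into the entry field. The distinction in two lines:

    initialvalue = 0
    print bool(initialvalue)          # False - the old check skipped the insert
    print initialvalue is not None    # True  - the new check performs it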
diff --git a/Lib/lib-tk/ttk.py b/Lib/lib-tk/ttk.py
index 2fd513a..77f1d3a 100644
--- a/Lib/lib-tk/ttk.py
+++ b/Lib/lib-tk/ttk.py
@@ -26,8 +26,7 @@ __all__ = ["Button", "Checkbutton", "Combobox", "Entry", "Frame", "Label",
"tclobjs_to_py", "setup_master"]
import Tkinter
-
-_flatten = Tkinter._flatten
+from Tkinter import _flatten, _join, _stringify
# Verify if Tk is new enough to not need the Tile package
_REQUIRE_TILE = True if Tkinter.TkVersion < 8.5 else False
@@ -47,40 +46,57 @@ def _load_tile(master):
master.tk.eval('package require tile') # TclError may be raised here
master._tile_loaded = True
+def _format_optvalue(value, script=False):
+ """Internal function."""
+ if script:
+ # if caller passes a Tcl script to tk.call, all the values need to
+ # be grouped into words (arguments to a command in Tcl dialect)
+ value = _stringify(value)
+ elif isinstance(value, (list, tuple)):
+ value = _join(value)
+ return value
+
def _format_optdict(optdict, script=False, ignore=None):
"""Formats optdict to a tuple to pass it to tk.call.
E.g. (script=False):
{'foreground': 'blue', 'padding': [1, 2, 3, 4]} returns:
('-foreground', 'blue', '-padding', '1 2 3 4')"""
- format = "%s" if not script else "{%s}"
opts = []
for opt, value in optdict.iteritems():
- if ignore and opt in ignore:
- continue
-
- if isinstance(value, (list, tuple)):
- v = []
- for val in value:
- if isinstance(val, basestring):
- v.append(unicode(val) if val else '{}')
- else:
- v.append(str(val))
-
- # format v according to the script option, but also check for
- # space in any value in v in order to group them correctly
- value = format % ' '.join(
- ('{%s}' if ' ' in val else '%s') % val for val in v)
-
- if script and value == '':
- value = '{}' # empty string in Python is equivalent to {} in Tcl
+ if not ignore or opt not in ignore:
+ opts.append("-%s" % opt)
+ if value is not None:
+ opts.append(_format_optvalue(value, script))
- opts.append(("-%s" % opt, value))
-
- # Remember: _flatten skips over None
return _flatten(opts)
+def _mapdict_values(items):
+ # each value in mapdict is expected to be a sequence, where each item
+ # is another sequence containing a state (or several) and a value
+ # E.g. (script=False):
+ # [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]
+ # returns:
+ # ['active selected', 'grey', 'focus', [1, 2, 3, 4]]
+ opt_val = []
+ for item in items:
+ state = item[:-1]
+ val = item[-1]
+ # hacks for backward compatibility
+ state[0] # raise IndexError if empty
+ if len(state) == 1:
+ # if it is empty (something that evaluates to False), then
+ # format it to Tcl code to denote the "normal" state
+ state = state[0] or ''
+ else:
+ # group multiple states
+ state = ' '.join(state) # raise TypeError if not str
+ opt_val.append(state)
+ if val is not None:
+ opt_val.append(val)
+ return opt_val
+
def _format_mapdict(mapdict, script=False):
"""Formats mapdict to pass it to tk.call.
@@ -90,32 +106,11 @@ def _format_mapdict(mapdict, script=False):
returns:
('-expand', '{active selected} grey focus {1, 2, 3, 4}')"""
- # if caller passes a Tcl script to tk.call, all the values need to
- # be grouped into words (arguments to a command in Tcl dialect)
- format = "%s" if not script else "{%s}"
opts = []
for opt, value in mapdict.iteritems():
-
- opt_val = []
- # each value in mapdict is expected to be a sequence, where each item
- # is another sequence containing a state (or several) and a value
- for statespec in value:
- state, val = statespec[:-1], statespec[-1]
-
- if len(state) > 1: # group multiple states
- state = "{%s}" % ' '.join(state)
- else: # single state
- # if it is empty (something that evaluates to False), then
- # format it to Tcl code to denote the "normal" state
- state = state[0] or '{}'
-
- if isinstance(val, (list, tuple)): # val needs to be grouped
- val = "{%s}" % ' '.join(map(str, val))
-
- opt_val.append("%s %s" % (state, val))
-
- opts.append(("-%s" % opt, format % ' '.join(opt_val)))
+ opts.extend(("-%s" % opt,
+ _format_optvalue(_mapdict_values(value), script)))
return _flatten(opts)
@@ -129,7 +124,7 @@ def _format_elemcreate(etype, script=False, *args, **kw):
iname = args[0]
# next args, if any, are statespec/value pairs which is almost
# a mapdict, but we just need the value
- imagespec = _format_mapdict({None: args[1:]})[1]
+ imagespec = _join(_mapdict_values(args[1:]))
spec = "%s %s" % (iname, imagespec)
else:
@@ -138,7 +133,7 @@ def _format_elemcreate(etype, script=False, *args, **kw):
# themed styles on Windows XP and Vista.
# Availability: Tk 8.6, Windows XP and Vista.
class_name, part_id = args[:2]
- statemap = _format_mapdict({None: args[2:]})[1]
+ statemap = _join(_mapdict_values(args[2:]))
spec = "%s %s %s" % (class_name, part_id, statemap)
opts = _format_optdict(kw, script)
@@ -148,11 +143,11 @@ def _format_elemcreate(etype, script=False, *args, **kw):
# otherwise it will clone {} (empty element)
spec = args[0] # theme name
if len(args) > 1: # elementfrom specified
- opts = (args[1], )
+ opts = (_format_optvalue(args[1], script),)
if script:
spec = '{%s}' % spec
- opts = ' '.join(map(str, opts))
+ opts = ' '.join(opts)
return spec, opts
@@ -189,7 +184,7 @@ def _format_layoutlist(layout, indent=0, indent_size=2):
for layout_elem in layout:
elem, opts = layout_elem
opts = opts or {}
- fopts = ' '.join(map(str, _format_optdict(opts, True, "children")))
+ fopts = ' '.join(_format_optdict(opts, True, ("children",)))
head = "%s%s%s" % (' ' * indent, elem, (" %s" % fopts) if fopts else '')
if "children" in opts:
@@ -215,11 +210,11 @@ def _script_from_settings(settings):
for name, opts in settings.iteritems():
# will format specific keys according to Tcl code
if opts.get('configure'): # format 'configure'
- s = ' '.join(map(unicode, _format_optdict(opts['configure'], True)))
+ s = ' '.join(_format_optdict(opts['configure'], True))
script.append("ttk::style configure %s %s;" % (name, s))
if opts.get('map'): # format 'map'
- s = ' '.join(map(unicode, _format_mapdict(opts['map'], True)))
+ s = ' '.join(_format_mapdict(opts['map'], True))
script.append("ttk::style map %s %s;" % (name, s))
if 'layout' in opts: # format 'layout' which may be empty
@@ -279,9 +274,10 @@ def _list_from_statespec(stuple):
it = iter(nval)
return [_flatten(spec) for spec in zip(it, it)]
-def _list_from_layouttuple(ltuple):
+def _list_from_layouttuple(tk, ltuple):
"""Construct a list from the tuple returned by ttk::layout, this is
somewhat the reverse of _format_layoutlist."""
+ ltuple = tk.splitlist(ltuple)
res = []
indx = 0
@@ -300,14 +296,14 @@ def _list_from_layouttuple(ltuple):
indx += 2
if opt == 'children':
- val = _list_from_layouttuple(val)
+ val = _list_from_layouttuple(tk, val)
opts[opt] = val
return res
-def _val_or_dict(options, func, *args):
- """Format options then call func with args and options and return
+def _val_or_dict(tk, options, *args):
+ """Format options then call Tk command with args and options and return
the appropriate result.
If no option is specified, a dict is returned. If an option is
@@ -315,12 +311,12 @@ def _val_or_dict(options, func, *args):
Otherwise, the function just sets the passed options and the caller
shouldn't be expecting a return value anyway."""
options = _format_optdict(options)
- res = func(*(args + options))
+ res = tk.call(*(args + options))
if len(options) % 2: # option specified without a value, return its value
return res
- return _dict_from_tcltuple(res)
+ return _dict_from_tcltuple(tk.splitlist(res))
def _convert_stringval(value):
"""Converts a value to, hopefully, a more appropriate Python object."""
@@ -332,6 +328,14 @@ def _convert_stringval(value):
return value
+def _to_number(x):
+ if isinstance(x, str):
+ if '.' in x:
+ x = float(x)
+ else:
+ x = int(x)
+ return x
+
def tclobjs_to_py(adict):
"""Returns adict with its values converted from Tcl objects to Python
objects."""
@@ -390,7 +394,7 @@ class Style(object):
a sequence identifying the value for that option."""
if query_opt is not None:
kw[query_opt] = None
- return _val_or_dict(kw, self.tk.call, self._name, "configure", style)
+ return _val_or_dict(self.tk, kw, self._name, "configure", style)
def map(self, style, query_opt=None, **kw):
@@ -402,11 +406,11 @@ class Style(object):
or something else of your preference. A statespec is compound of
one or more states and then a value."""
if query_opt is not None:
- return _list_from_statespec(
- self.tk.call(self._name, "map", style, '-%s' % query_opt))
+ return _list_from_statespec(self.tk.splitlist(
+ self.tk.call(self._name, "map", style, '-%s' % query_opt)))
- return _dict_from_tcltuple(
- self.tk.call(self._name, "map", style, *(_format_mapdict(kw))))
+ return _dict_from_tcltuple(self.tk.splitlist(
+ self.tk.call(self._name, "map", style, *(_format_mapdict(kw)))))
def lookup(self, style, option, state=None, default=None):
@@ -460,7 +464,7 @@ class Style(object):
lspec = "null" # could be any other word, but this may make sense
# when calling layout(style) later
- return _list_from_layouttuple(
+ return _list_from_layouttuple(self.tk,
self.tk.call(self._name, "layout", style, lspec))
@@ -473,12 +477,12 @@ class Style(object):
def element_names(self):
"""Returns the list of elements defined in the current theme."""
- return self.tk.call(self._name, "element", "names")
+ return self.tk.splitlist(self.tk.call(self._name, "element", "names"))
def element_options(self, elementname):
"""Return the list of elementname's options."""
- return self.tk.call(self._name, "element", "options", elementname)
+ return self.tk.splitlist(self.tk.call(self._name, "element", "options", elementname))
def theme_create(self, themename, parent=None, settings=None):
@@ -512,7 +516,7 @@ class Style(object):
def theme_names(self):
"""Returns a list of all known themes."""
- return self.tk.call(self._name, "theme", "names")
+ return self.tk.splitlist(self.tk.call(self._name, "theme", "names"))
def theme_use(self, themename=None):
@@ -575,7 +579,8 @@ class Widget(Tkinter.Widget):
matches statespec and False otherwise. If callback is specified,
then it will be invoked with *args, **kw if the widget state
matches statespec. statespec is expected to be a sequence."""
- ret = self.tk.call(self._w, "instate", ' '.join(statespec))
+ ret = self.tk.getboolean(
+ self.tk.call(self._w, "instate", ' '.join(statespec)))
if ret and callback:
return callback(*args, **kw)
@@ -674,7 +679,7 @@ class Entry(Widget, Tkinter.Entry):
def bbox(self, index):
"""Return a tuple of (x, y, width, height) which describes the
bounding box of the character given by index."""
- return self.tk.call(self._w, "bbox", index)
+ return self._getints(self.tk.call(self._w, "bbox", index))
def identify(self, x, y):
@@ -687,7 +692,7 @@ class Entry(Widget, Tkinter.Entry):
"""Force revalidation, independent of the conditions specified
by the validate option. Returns False if validation fails, True
if it succeeds. Sets or clears the invalid state accordingly."""
- return bool(self.tk.call(self._w, "validate"))
+ return bool(self.tk.getboolean(self.tk.call(self._w, "validate")))
class Combobox(Entry):
@@ -706,35 +711,16 @@ class Combobox(Entry):
exportselection, justify, height, postcommand, state,
textvariable, values, width
"""
- # The "values" option may need special formatting, so leave to
- # _format_optdict the responsibility to format it
- if "values" in kw:
- kw["values"] = _format_optdict({'v': kw["values"]})[1]
-
Entry.__init__(self, master, "ttk::combobox", **kw)
- def __setitem__(self, item, value):
- if item == "values":
- value = _format_optdict({item: value})[1]
-
- Entry.__setitem__(self, item, value)
-
-
- def configure(self, cnf=None, **kw):
- """Custom Combobox configure, created to properly format the values
- option."""
- if "values" in kw:
- kw["values"] = _format_optdict({'v': kw["values"]})[1]
-
- return Entry.configure(self, cnf, **kw)
-
-
def current(self, newindex=None):
"""If newindex is supplied, sets the combobox value to the
element at position newindex in the list of values. Otherwise,
returns the index of the current value in the list of values
or -1 if the current value does not appear in the list."""
+ if newindex is None:
+ return self.tk.getint(self.tk.call(self._w, "current"))
return self.tk.call(self._w, "current", newindex)
@@ -889,7 +875,7 @@ class Notebook(Widget):
def index(self, tab_id):
"""Returns the numeric index of the tab specified by tab_id, or
the total number of tabs if tab_id is the string "end"."""
- return self.tk.call(self._w, "index", tab_id)
+ return self.tk.getint(self.tk.call(self._w, "index", tab_id))
def insert(self, pos, child, **kw):
@@ -919,12 +905,12 @@ class Notebook(Widget):
options to the corresponding values."""
if option is not None:
kw[option] = None
- return _val_or_dict(kw, self.tk.call, self._w, "tab", tab_id)
+ return _val_or_dict(self.tk, kw, self._w, "tab", tab_id)
def tabs(self):
"""Returns a list of windows managed by the notebook."""
- return self.tk.call(self._w, "tabs") or ()
+ return self.tk.splitlist(self.tk.call(self._w, "tabs") or ())
def enable_traversal(self):
@@ -996,7 +982,7 @@ class Panedwindow(Widget, Tkinter.PanedWindow):
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
- return _val_or_dict(kw, self.tk.call, self._w, "pane", pane)
+ return _val_or_dict(self.tk, kw, self._w, "pane", pane)
def sashpos(self, index, newpos=None):
@@ -1007,7 +993,7 @@ class Panedwindow(Widget, Tkinter.PanedWindow):
constrained to be between 0 and the total size of the widget.
Returns the new position of sash number index."""
- return self.tk.call(self._w, "sashpos", index, newpos)
+ return self.tk.getint(self.tk.call(self._w, "sashpos", index, newpos))
PanedWindow = Panedwindow # Tkinter name compatibility
@@ -1207,14 +1193,15 @@ class Treeview(Widget, Tkinter.XView, Tkinter.YView):
If column is specified, returns the bounding box of that cell.
If the item is not visible (i.e., if it is a descendant of a
closed item or is scrolled offscreen), returns an empty string."""
- return self.tk.call(self._w, "bbox", item, column)
+ return self._getints(self.tk.call(self._w, "bbox", item, column)) or ''
def get_children(self, item=None):
"""Returns a tuple of children belonging to item.
If item is not specified, returns root children."""
- return self.tk.call(self._w, "children", item or '') or ()
+ return self.tk.splitlist(
+ self.tk.call(self._w, "children", item or '') or ())
def set_children(self, item, *newchildren):
@@ -1234,7 +1221,7 @@ class Treeview(Widget, Tkinter.XView, Tkinter.YView):
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
- return _val_or_dict(kw, self.tk.call, self._w, "column", column)
+ return _val_or_dict(self.tk, kw, self._w, "column", column)
def delete(self, *items):
@@ -1253,9 +1240,9 @@ class Treeview(Widget, Tkinter.XView, Tkinter.YView):
def exists(self, item):
- """Returns True if the specified item is present in the three,
+ """Returns True if the specified item is present in the tree,
False otherwise."""
- return bool(self.tk.call(self._w, "exists", item))
+ return bool(self.tk.getboolean(self.tk.call(self._w, "exists", item)))
def focus(self, item=None):
@@ -1293,7 +1280,7 @@ class Treeview(Widget, Tkinter.XView, Tkinter.YView):
if option is not None:
kw[option] = None
- return _val_or_dict(kw, self.tk.call, self._w, 'heading', column)
+ return _val_or_dict(self.tk, kw, self._w, 'heading', column)
def identify(self, component, x, y):
@@ -1337,7 +1324,7 @@ class Treeview(Widget, Tkinter.XView, Tkinter.YView):
def index(self, item):
"""Returns the integer index of item within its parent's list
of children."""
- return self.tk.call(self._w, "index", item)
+ return self.tk.getint(self.tk.call(self._w, "index", item))
def insert(self, parent, index, iid=None, **kw):
@@ -1372,7 +1359,7 @@ class Treeview(Widget, Tkinter.XView, Tkinter.YView):
values as given by kw."""
if option is not None:
kw[option] = None
- return _val_or_dict(kw, self.tk.call, self._w, "item", item)
+ return _val_or_dict(self.tk, kw, self._w, "item", item)
def move(self, item, parent, index):
@@ -1446,7 +1433,7 @@ class Treeview(Widget, Tkinter.XView, Tkinter.YView):
value of given column in given item to the specified value."""
res = self.tk.call(self._w, "set", item, column, value)
if column is None and value is None:
- return _dict_from_tcltuple(res, False)
+ return _dict_from_tcltuple(self.tk.splitlist(res), False)
else:
return res
@@ -1467,7 +1454,7 @@ class Treeview(Widget, Tkinter.XView, Tkinter.YView):
values for the given tagname."""
if option is not None:
kw[option] = None
- return _val_or_dict(kw, self.tk.call, self._w, "tag", "configure",
+ return _val_or_dict(self.tk, kw, self._w, "tag", "configure",
tagname)
@@ -1477,7 +1464,8 @@ class Treeview(Widget, Tkinter.XView, Tkinter.YView):
all items which have the specified tag.
* Availability: Tk 8.6"""
- return self.tk.call(self._w, "tag", "has", tagname, item)
+ return self.tk.getboolean(
+ self.tk.call(self._w, "tag", "has", tagname, item))
# Extensions
@@ -1549,7 +1537,8 @@ class LabeledScale(Frame, object):
self.label.place_configure(x=x, y=y)
- from_, to = self.scale['from'], self.scale['to']
+ from_ = _to_number(self.scale['from'])
+ to = _to_number(self.scale['to'])
if to < from_:
from_, to = to, from_
newval = self._variable.get()
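
A quick illustration of what these ttk.py conversions buy (a sketch that assumes a usable display; the widget path names in the comment are made up): methods such as Notebook.tabs() now return real Python tuples, and index()/sashpos() return ints instead of raw Tcl strings.

    import Tkinter
    import ttk

    root = Tkinter.Tk()
    nb = ttk.Notebook(root)
    nb.add(ttk.Frame(nb), text="first")
    nb.add(ttk.Frame(nb), text="second")
    print(nb.tabs())        # a tuple of window path names, e.g. ('.1.2', '.1.3')
    print(nb.index("end"))  # 2, as an int rather than the string '2'
    root.destroy()
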
diff --git a/Lib/lib-tk/turtle.py b/Lib/lib-tk/turtle.py
index aa4fe84..8d7cdc0 100644
--- a/Lib/lib-tk/turtle.py
+++ b/Lib/lib-tk/turtle.py
@@ -811,8 +811,8 @@ class TurtleScreenBase(object):
class Terminator (Exception):
"""Will be raised in TurtleScreen.update, if _RUNNING becomes False.
- Thus stops execution of turtle graphics script. Main purpose: use in
- in the Demo-Viewer turtle.Demo.py.
+ This stops execution of a turtle graphics script.
+ Main purpose: use in the Demo-Viewer turtle.Demo.py.
"""
pass
@@ -835,7 +835,7 @@ class Shape(object):
if isinstance(data, list):
data = tuple(data)
elif type_ == "image":
- if isinstance(data, str):
+ if isinstance(data, basestring):
if data.lower().endswith(".gif") and isfile(data):
data = TurtleScreen._image(data)
# else data assumed to be Photoimage
@@ -1098,7 +1098,7 @@ class TurtleScreen(TurtleScreenBase):
"""
if len(color) == 1:
color = color[0]
- if isinstance(color, str):
+ if isinstance(color, basestring):
if self._iscolorstring(color) or color == "":
return color
else:
@@ -1233,7 +1233,7 @@ class TurtleScreen(TurtleScreenBase):
self._delayvalue = int(delay)
def _incrementudc(self):
- """Increment upadate counter."""
+ """Increment update counter."""
if not TurtleScreen._RUNNING:
TurtleScreen._RUNNNING = True
raise Terminator
@@ -2439,7 +2439,7 @@ class RawTurtle(TPen, TNavigator):
self.screen = TurtleScreen(canvas)
RawTurtle.screens.append(self.screen)
else:
- raise TurtleGraphicsError("bad cavas argument %s" % canvas)
+ raise TurtleGraphicsError("bad canvas argument %s" % canvas)
screen = self.screen
TNavigator.__init__(self, screen.mode())
@@ -2602,7 +2602,7 @@ class RawTurtle(TPen, TNavigator):
def _cc(self, args):
"""Convert colortriples to hexstrings.
"""
- if isinstance(args, str):
+ if isinstance(args, basestring):
return args
try:
r, g, b = args
@@ -2684,7 +2684,7 @@ class RawTurtle(TPen, TNavigator):
def shapesize(self, stretch_wid=None, stretch_len=None, outline=None):
"""Set/return turtle's stretchfactors/outline. Set resizemode to "user".
- Optinonal arguments:
+ Optional arguments:
stretch_wid : positive number
stretch_len : positive number
outline : positive number
@@ -2975,7 +2975,7 @@ class RawTurtle(TPen, TNavigator):
def _goto(self, end):
"""Move the pen to the point end, thereby drawing a line
- if pen is down. All other methodes for turtle movement depend
+ if pen is down. All other methods for turtle movement depend
on this one.
"""
## Version mit undo-stuff
@@ -3228,7 +3228,7 @@ class RawTurtle(TPen, TNavigator):
"""
#print "dot-1:", size, color
if not color:
- if isinstance(size, (str, tuple)):
+ if isinstance(size, (basestring, tuple)):
color = self._colorstr(size)
size = self._pensize + max(self._pensize, 4)
else:
@@ -3913,7 +3913,7 @@ if __name__ == "__main__":
down()
# some text
write("startstart", 1)
- write("start", 1)
+ write(u"start", 1)
color("red")
# staircase
for i in range(5):
@@ -3988,7 +3988,7 @@ if __name__ == "__main__":
tri = getturtle()
tri.resizemode("auto")
turtle = Turtle()
- turtle.resizemode("auto")
+ turtle.resizemode(u"auto")
turtle.shape("turtle")
turtle.reset()
turtle.left(90)
@@ -3998,7 +3998,7 @@ if __name__ == "__main__":
turtle.lt(30)
turtle.down()
turtle.speed(6)
- turtle.color("blue","orange")
+ turtle.color("blue",u"orange")
turtle.pensize(2)
tri.speed(6)
setheading(towards(turtle))
@@ -4013,9 +4013,9 @@ if __name__ == "__main__":
tri.stamp()
switchpen()
count += 1
- tri.write("CAUGHT! ", font=("Arial", 16, "bold"), align="right")
+ tri.write("CAUGHT! ", font=("Arial", 16, "bold"), align=u"right")
tri.pencolor("black")
- tri.pencolor("red")
+ tri.pencolor(u"red")
def baba(xdummy, ydummy):
clearscreen()
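
The isinstance() checks above are widened from str to basestring so that the unicode arguments exercised by the updated demo code keep working; a minimal sketch (needs a display):

    import turtle

    t = turtle.Turtle()
    t.color(u"blue", u"orange")   # unicode color names no longer fall through
    t.pencolor(u"red")            # to the colortriple branch and raise
    turtle.bye()
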
diff --git a/Lib/lib2to3/Grammar.txt b/Lib/lib2to3/Grammar.txt
index 1e1f24c..e667bcd 100644
--- a/Lib/lib2to3/Grammar.txt
+++ b/Lib/lib2to3/Grammar.txt
@@ -56,7 +56,7 @@ small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) |
('=' (yield_expr|testlist_star_expr))*)
testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
-augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
'<<=' | '>>=' | '**=' | '//=')
# For normal assignments, additional restrictions enforced by the interpreter
print_stmt: 'print' ( [ test (',' test)* [','] ] |
@@ -119,7 +119,7 @@ xor_expr: and_expr ('^' and_expr)*
and_expr: shift_expr ('&' shift_expr)*
shift_expr: arith_expr (('<<'|'>>') arith_expr)*
arith_expr: term (('+'|'-') term)*
-term: factor (('*'|'/'|'%'|'//') factor)*
+term: factor (('*'|'@'|'/'|'%'|'//') factor)*
factor: ('+'|'-'|'~') factor | power
power: atom trailer* ['**' factor]
atom: ('(' [yield_expr|testlist_gexp] ')' |
@@ -155,4 +155,5 @@ testlist1: test (',' test)*
# not used in grammar, but may appear in "node" passed from Parser to Compiler
encoding_decl: NAME
-yield_expr: 'yield' [testlist]
+yield_expr: 'yield' [yield_arg]
+yield_arg: 'from' test | testlist
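
With these grammar additions, lib2to3 can parse the matrix-multiplication operators and "yield from"; a rough check using the public pgen2 driver:

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    d.parse_string("a @ b\n")                        # no ParseError
    d.parse_string("a @= b\n")
    d.parse_string("def g():\n    yield from x\n")
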
diff --git a/Lib/lib2to3/fixer_util.py b/Lib/lib2to3/fixer_util.py
index 30da893..78fdf26 100644
--- a/Lib/lib2to3/fixer_util.py
+++ b/Lib/lib2to3/fixer_util.py
@@ -165,7 +165,7 @@ def parenthesize(node):
consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum",
- "min", "max"])
+ "min", "max", "enumerate"])
def attr_chain(obj, attr):
"""Follow an attribute chain.
@@ -192,14 +192,14 @@ p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
p1 = """
power<
( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
- 'any' | 'all' | (any* trailer< '.' 'join' >) )
+ 'any' | 'all' | 'enumerate' | (any* trailer< '.' 'join' >) )
trailer< '(' node=any ')' >
any*
>
"""
p2 = """
power<
- 'sorted'
+ ( 'sorted' | 'enumerate' )
trailer< '(' arglist<node=any any*> ')' >
any*
>
@@ -207,14 +207,14 @@ power<
pats_built = False
def in_special_context(node):
""" Returns true if node is in an environment where all that is required
- of it is being itterable (ie, it doesn't matter if it returns a list
- or an itterator).
+ of it is being iterable (ie, it doesn't matter if it returns a list
+ or an iterator).
See test_map_nochange in test_fixers.py for some examples and tests.
"""
global p0, p1, p2, pats_built
if not pats_built:
- p1 = patcomp.compile_pattern(p1)
p0 = patcomp.compile_pattern(p0)
+ p1 = patcomp.compile_pattern(p1)
p2 = patcomp.compile_pattern(p2)
pats_built = True
patterns = [p0, p1, p2]
@@ -274,9 +274,9 @@ def find_root(node):
"""Find the top level namespace."""
# Scamper up to the top level namespace
while node.type != syms.file_input:
- assert node.parent, "Tree is insane! root found before "\
- "file_input node was found."
node = node.parent
+ if not node:
+ raise ValueError("root found before file_input node was found.")
return node
def does_tree_import(package, name, node):
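
Because enumerate() is now treated as a consuming call, the map/filter/zip fixers leave calls wrapped in it alone instead of adding list(); roughly:

    from lib2to3 import refactor

    tool = refactor.RefactoringTool(["lib2to3.fixes.fix_map"])
    print(tool.refactor_string(u"x = map(f, 'abc')\n", "<example>"))
    # -> x = list(map(f, 'abc'))
    print(tool.refactor_string(u"enumerate(map(f, 'abc'))\n", "<example>"))
    # -> unchanged
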
diff --git a/Lib/lib2to3/fixes/fix_import.py b/Lib/lib2to3/fixes/fix_import.py
index 201e811..88e9d10 100644
--- a/Lib/lib2to3/fixes/fix_import.py
+++ b/Lib/lib2to3/fixes/fix_import.py
@@ -32,7 +32,7 @@ def traverse_imports(names):
elif node.type == syms.dotted_as_names:
pending.extend(node.children[::-2])
else:
- raise AssertionError("unkown node type")
+ raise AssertionError("unknown node type")
class FixImport(fixer_base.BaseFix):
diff --git a/Lib/lib2to3/fixes/fix_itertools.py b/Lib/lib2to3/fixes/fix_itertools.py
index 27f8a49..067641b 100644
--- a/Lib/lib2to3/fixes/fix_itertools.py
+++ b/Lib/lib2to3/fixes/fix_itertools.py
@@ -34,8 +34,8 @@ class FixItertools(fixer_base.BaseFix):
# Remove the 'itertools'
prefix = it.prefix
it.remove()
- # Replace the node wich contains ('.', 'function') with the
- # function (to be consistant with the second part of the pattern)
+ # Replace the node which contains ('.', 'function') with the
+ # function (to be consistent with the second part of the pattern)
dot.remove()
func.parent.replace(func)
diff --git a/Lib/lib2to3/fixes/fix_metaclass.py b/Lib/lib2to3/fixes/fix_metaclass.py
index c86fbea..4f5593c 100644
--- a/Lib/lib2to3/fixes/fix_metaclass.py
+++ b/Lib/lib2to3/fixes/fix_metaclass.py
@@ -71,7 +71,7 @@ def fixup_parse_tree(cls_node):
def fixup_simple_stmt(parent, i, stmt_node):
""" if there is a semi-colon all the parts count as part of the same
simple_stmt. We just want the __metaclass__ part so we move
- everything efter the semi-colon into its own simple_stmt node
+ everything after the semi-colon into its own simple_stmt node
"""
for semi_ind, node in enumerate(stmt_node.children):
if node.type == token.SEMI: # *sigh*
diff --git a/Lib/lib2to3/fixes/fix_unicode.py b/Lib/lib2to3/fixes/fix_unicode.py
index 6c89576..2d776f6 100644
--- a/Lib/lib2to3/fixes/fix_unicode.py
+++ b/Lib/lib2to3/fixes/fix_unicode.py
@@ -1,25 +1,42 @@
-"""Fixer that changes unicode to str, unichr to chr, and u"..." into "...".
+r"""Fixer for unicode.
+
+* Changes unicode to str and unichr to chr.
+
+* If "...\u..." is not unicode literal change it into "...\\u...".
+
+* Change u"..." into "...".
"""
-import re
from ..pgen2 import token
from .. import fixer_base
_mapping = {u"unichr" : u"chr", u"unicode" : u"str"}
-_literal_re = re.compile(ur"[uU][rR]?[\'\"]")
class FixUnicode(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "STRING | 'unicode' | 'unichr'"
+ def start_tree(self, tree, filename):
+ super(FixUnicode, self).start_tree(tree, filename)
+ self.unicode_literals = 'unicode_literals' in tree.future_features
+
def transform(self, node, results):
if node.type == token.NAME:
new = node.clone()
new.value = _mapping[node.value]
return new
elif node.type == token.STRING:
- if _literal_re.match(node.value):
- new = node.clone()
- new.value = new.value[1:]
- return new
+ val = node.value
+ if not self.unicode_literals and val[0] in u'\'"' and u'\\' in val:
+ val = ur'\\'.join([
+ v.replace(u'\\u', ur'\\u').replace(u'\\U', ur'\\U')
+ for v in val.split(ur'\\')
+ ])
+ if val[0] in u'uU':
+ val = val[1:]
+ if val == node.value:
+ return node
+ new = node.clone()
+ new.value = val
+ return new
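
The rewritten fixer now distinguishes real unicode escapes from text that only looks like one once the u prefix is dropped; an illustrative run (the doubled backslashes are just Python string escaping):

    from lib2to3 import refactor

    tool = refactor.RefactoringTool(["lib2to3.fixes.fix_unicode"])
    print(tool.refactor_string(u"s = u'caf\\u00e9'\n", "<example>"))
    # -> s = 'caf\u00e9'    (prefix dropped; the escape stays, it was real)
    print(tool.refactor_string(u"s = '\\u00e9'\n", "<example>"))
    # -> s = '\\u00e9'      (native literal: backslash doubled to keep the text)
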
diff --git a/Lib/lib2to3/pgen2/driver.py b/Lib/lib2to3/pgen2/driver.py
index 16adec0..39dafb9 100644
--- a/Lib/lib2to3/pgen2/driver.py
+++ b/Lib/lib2to3/pgen2/driver.py
@@ -138,3 +138,20 @@ def _newer(a, b):
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
+
+
+def main(*args):
+ """Main program, when run as a script: produce grammar pickle files.
+
+ Calls load_grammar for each argument, a path to a grammar text file.
+ """
+ if not args:
+ args = sys.argv[1:]
+ logging.basicConfig(level=logging.INFO, stream=sys.stdout,
+ format='%(message)s')
+ for gt in args:
+ load_grammar(gt, save=True, force=True)
+ return True
+
+if __name__ == "__main__":
+ sys.exit(int(not main()))
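
The new entry point can be used to pre-generate the grammar pickle caches, for example at build time; a sketch (the paths are illustrative):

    from lib2to3.pgen2 import driver

    driver.main("lib2to3/Grammar.txt", "lib2to3/PatternGrammar.txt")
    # equivalent to: python -m lib2to3.pgen2.driver <grammar files...>
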
diff --git a/Lib/lib2to3/pgen2/grammar.py b/Lib/lib2to3/pgen2/grammar.py
index 0483424..8220b0a 100644
--- a/Lib/lib2to3/pgen2/grammar.py
+++ b/Lib/lib2to3/pgen2/grammar.py
@@ -20,7 +20,7 @@ from . import token, tokenize
class Grammar(object):
- """Pgen parsing tables tables conversion class.
+ """Pgen parsing tables conversion class.
Once initialized, this class supplies the grammar tables for the
parsing engine implemented by parse.py. The parsing engine
@@ -45,7 +45,7 @@ class Grammar(object):
these two are each other's inverse.
states -- a list of DFAs, where each DFA is a list of
- states, each state is is a list of arcs, and each
+ states, each state is a list of arcs, and each
arc is a (i, j) pair where i is a label and j is
a state number. The DFA number is the index into
this list. (This name is slightly confusing.)
@@ -151,6 +151,7 @@ opmap_raw = """
{ LBRACE
} RBRACE
@ AT
+@= ATEQUAL
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
diff --git a/Lib/lib2to3/pgen2/token.py b/Lib/lib2to3/pgen2/token.py
index 61468b3..5fac5ce 100755
--- a/Lib/lib2to3/pgen2/token.py
+++ b/Lib/lib2to3/pgen2/token.py
@@ -57,12 +57,13 @@ DOUBLESTAREQUAL = 47
DOUBLESLASH = 48
DOUBLESLASHEQUAL = 49
AT = 50
-OP = 51
-COMMENT = 52
-NL = 53
-RARROW = 54
-ERRORTOKEN = 55
-N_TOKENS = 56
+ATEQUAL = 51
+OP = 52
+COMMENT = 53
+NL = 54
+RARROW = 55
+ERRORTOKEN = 56
+N_TOKENS = 57
NT_OFFSET = 256
#--end constants--
diff --git a/Lib/lib2to3/pgen2/tokenize.py b/Lib/lib2to3/pgen2/tokenize.py
index e090aa9..4cb2a41 100644
--- a/Lib/lib2to3/pgen2/tokenize.py
+++ b/Lib/lib2to3/pgen2/tokenize.py
@@ -84,7 +84,7 @@ String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?", r"->",
- r"[+\-*/%&|^=<>]=?",
+ r"[+\-*/%&@|^=<>]=?",
r"~")
Bracket = '[][(){}]'
@@ -236,7 +236,7 @@ class Untokenizer:
startline = False
toks_append(tokval)
-cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
+cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
@@ -252,7 +252,7 @@ def _get_normal_name(orig_enc):
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
- be used to decode a Python source file. It requires one argment, readline,
+ be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
@@ -281,11 +281,10 @@ def detect_encoding(readline):
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
-
- matches = cookie_re.findall(line_string)
- if not matches:
+ match = cookie_re.match(line_string)
+ if not match:
return None
- encoding = _get_normal_name(matches[0])
+ encoding = _get_normal_name(match.group(1))
try:
codec = lookup(encoding)
except LookupError:
@@ -344,7 +343,7 @@ def untokenize(iterable):
def generate_tokens(readline):
"""
- The generate_tokens() generator requires one argment, readline, which
+ The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
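
The tightened cookie_re only accepts a coding cookie that lives in a comment, which is exactly what the false_encoding.py test case added below exercises; a quick self-contained check:

    import re

    cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')
    print(cookie_re.match("# -*- coding: latin-1 -*-").group(1))  # latin-1
    print(cookie_re.match("#!/usr/bin/env python"))               # None
    print(cookie_re.match("print '#coding=0'"))                   # None, not a comment
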
diff --git a/Lib/lib2to3/refactor.py b/Lib/lib2to3/refactor.py
index badcac2..a4c168d 100644
--- a/Lib/lib2to3/refactor.py
+++ b/Lib/lib2to3/refactor.py
@@ -445,7 +445,7 @@ class RefactoringTool(object):
try:
find_root(node)
- except AssertionError:
+ except ValueError:
# this node has been cut off from a
# previous transformation ; skip
continue
diff --git a/Lib/lib2to3/tests/data/different_encoding.py b/Lib/lib2to3/tests/data/different_encoding.py
index 9f32bd0..9f32bd0 100644..100755
--- a/Lib/lib2to3/tests/data/different_encoding.py
+++ b/Lib/lib2to3/tests/data/different_encoding.py
diff --git a/Lib/lib2to3/tests/data/false_encoding.py b/Lib/lib2to3/tests/data/false_encoding.py
new file mode 100755
index 0000000..f4e59e7
--- /dev/null
+++ b/Lib/lib2to3/tests/data/false_encoding.py
@@ -0,0 +1,2 @@
+#!/usr/bin/env python
+print '#coding=0'
diff --git a/Lib/lib2to3/tests/test_fixers.py b/Lib/lib2to3/tests/test_fixers.py
index 88679f0..dd34028 100644
--- a/Lib/lib2to3/tests/test_fixers.py
+++ b/Lib/lib2to3/tests/test_fixers.py
@@ -41,7 +41,7 @@ class FixerTestCase(support.TestCase):
def warns(self, before, after, message, unchanged=False):
tree = self._check(before, after)
- self.assertTrue(message in "".join(self.fixer_log))
+ self.assertIn(message, "".join(self.fixer_log))
if not unchanged:
self.assertTrue(tree.was_changed)
@@ -1405,27 +1405,27 @@ class Test_dict(FixerTestCase):
a = "d.values()"
self.check(b, a)
- def test_14(self):
+ def test_28(self):
b = "[i for i in d.viewkeys()]"
a = "[i for i in d.keys()]"
self.check(b, a)
- def test_15(self):
+ def test_29(self):
b = "(i for i in d.viewkeys())"
a = "(i for i in d.keys())"
self.check(b, a)
- def test_17(self):
+ def test_30(self):
b = "iter(d.viewkeys())"
a = "iter(d.keys())"
self.check(b, a)
- def test_18(self):
+ def test_31(self):
b = "list(d.viewkeys())"
a = "list(d.keys())"
self.check(b, a)
- def test_19(self):
+ def test_32(self):
b = "sorted(d.viewkeys())"
a = "sorted(d.keys())"
self.check(b, a)
@@ -2824,6 +2824,43 @@ class Test_unicode(FixerTestCase):
a = """R'''x''' """
self.check(b, a)
+ def test_native_literal_escape_u(self):
+ b = """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """'\\\\\\\\u20ac\\\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
+ b = """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
+ def test_bytes_literal_escape_u(self):
+ b = """b'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """b'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
+ b = """br'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """br'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
+ def test_unicode_literal_escape_u(self):
+ b = """u'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
+ b = """ur'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
+ def test_native_unicode_literal_escape_u(self):
+ f = 'from __future__ import unicode_literals\n'
+ b = f + """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = f + """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
+ b = f + """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = f + """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
class Test_callable(FixerTestCase):
fixer = "callable"
@@ -2981,6 +3018,10 @@ class Test_filter(FixerTestCase):
self.unchanged(a)
a = """sorted(filter(f, 'abc'), key=blah)[0]"""
self.unchanged(a)
+ a = """enumerate(filter(f, 'abc'))"""
+ self.unchanged(a)
+ a = """enumerate(filter(f, 'abc'), start=1)"""
+ self.unchanged(a)
a = """for i in filter(f, 'abc'): pass"""
self.unchanged(a)
a = """[x for x in filter(f, 'abc')]"""
@@ -3089,6 +3130,10 @@ class Test_map(FixerTestCase):
self.unchanged(a)
a = """sorted(map(f, 'abc'), key=blah)[0]"""
self.unchanged(a)
+ a = """enumerate(map(f, 'abc'))"""
+ self.unchanged(a)
+ a = """enumerate(map(f, 'abc'), start=1)"""
+ self.unchanged(a)
a = """for i in map(f, 'abc'): pass"""
self.unchanged(a)
a = """[x for x in map(f, 'abc')]"""
@@ -3152,6 +3197,10 @@ class Test_zip(FixerTestCase):
self.unchanged(a)
a = """sorted(zip(a, b), key=blah)[0]"""
self.unchanged(a)
+ a = """enumerate(zip(a, b))"""
+ self.unchanged(a)
+ a = """enumerate(zip(a, b), start=1)"""
+ self.unchanged(a)
a = """for i in zip(a, b): pass"""
self.unchanged(a)
a = """[x for x in zip(a, b)]"""
diff --git a/Lib/lib2to3/tests/test_main.py b/Lib/lib2to3/tests/test_main.py
index 7f8b25c..04131cf 100644
--- a/Lib/lib2to3/tests/test_main.py
+++ b/Lib/lib2to3/tests/test_main.py
@@ -59,9 +59,9 @@ class TestMain(unittest.TestCase):
ret = self.run_2to3_capture(["-"], input_stream, out_enc, err)
self.assertEqual(ret, 0)
output = out.getvalue()
- self.assertTrue("-print 'nothing'" in output)
- self.assertTrue("WARNING: couldn't encode <stdin>'s diff for "
- "your terminal" in err.getvalue())
+ self.assertIn("-print 'nothing'", output)
+ self.assertIn("WARNING: couldn't encode <stdin>'s diff for "
+ "your terminal", err.getvalue())
def setup_test_source_trees(self):
"""Setup a test source tree and output destination tree."""
diff --git a/Lib/lib2to3/tests/test_parser.py b/Lib/lib2to3/tests/test_parser.py
index 2602381..9c5463a 100644
--- a/Lib/lib2to3/tests/test_parser.py
+++ b/Lib/lib2to3/tests/test_parser.py
@@ -44,6 +44,19 @@ class GrammarTest(support.TestCase):
raise AssertionError("Syntax shouldn't have been valid")
+class TestMatrixMultiplication(GrammarTest):
+ def test_matrix_multiplication_operator(self):
+ self.validate("a @ b")
+ self.validate("a @= b")
+
+
+class TestYieldFrom(GrammarTest):
+ def test_yield_from(self):
+ self.validate("yield from x")
+ self.validate("(yield from x) + y")
+ self.invalid_syntax("yield from")
+
+
class TestRaiseChanges(GrammarTest):
def test_2x_style_1(self):
self.validate("raise")
@@ -73,7 +86,7 @@ class TestRaiseChanges(GrammarTest):
self.invalid_syntax("raise E from")
-# Adapated from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
+# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
class TestFunctionAnnotations(GrammarTest):
def test_1(self):
self.validate("""def f(x) -> list: pass""")
@@ -165,8 +178,8 @@ class TestParserIdempotency(support.TestCase):
for filepath in support.all_project_files():
with open(filepath, "rb") as fp:
encoding = tokenize.detect_encoding(fp.readline)[0]
- self.assertTrue(encoding is not None,
- "can't detect encoding for %s" % filepath)
+ self.assertIsNotNone(encoding,
+ "can't detect encoding for %s" % filepath)
with open(filepath, "r") as fp:
source = fp.read()
source = source.decode(encoding)
diff --git a/Lib/lib2to3/tests/test_pytree.py b/Lib/lib2to3/tests/test_pytree.py
index ac7d900..ccddce6 100644
--- a/Lib/lib2to3/tests/test_pytree.py
+++ b/Lib/lib2to3/tests/test_pytree.py
@@ -160,12 +160,12 @@ class TestNodes(support.TestCase):
l3 = pytree.Leaf(100, "bar")
n1 = pytree.Node(1000, [l1, l2, l3])
self.assertEqual(n1.children, [l1, l2, l3])
- self.assertTrue(isinstance(n1.children, list))
+ self.assertIsInstance(n1.children, list)
self.assertFalse(n1.was_changed)
l2new = pytree.Leaf(100, "-")
l2.replace(l2new)
self.assertEqual(n1.children, [l1, l2new, l3])
- self.assertTrue(isinstance(n1.children, list))
+ self.assertIsInstance(n1.children, list)
self.assertTrue(n1.was_changed)
def test_replace_with_list(self):
@@ -176,7 +176,7 @@ class TestNodes(support.TestCase):
l2.replace([pytree.Leaf(100, "*"), pytree.Leaf(100, "*")])
self.assertEqual(str(n1), "foo**bar")
- self.assertTrue(isinstance(n1.children, list))
+ self.assertIsInstance(n1.children, list)
def test_leaves(self):
l1 = pytree.Leaf(100, "foo")
@@ -347,7 +347,7 @@ class TestNodes(support.TestCase):
n2 = pytree.Node(1000, [])
p1 = pytree.Node(1000, [n1, n2])
- self.assertTrue(n1.next_sibling is n2)
+ self.assertIs(n1.next_sibling, n2)
self.assertEqual(n2.next_sibling, None)
self.assertEqual(p1.next_sibling, None)
@@ -356,7 +356,7 @@ class TestNodes(support.TestCase):
l2 = pytree.Leaf(100, "b")
p1 = pytree.Node(1000, [l1, l2])
- self.assertTrue(l1.next_sibling is l2)
+ self.assertIs(l1.next_sibling, l2)
self.assertEqual(l2.next_sibling, None)
self.assertEqual(p1.next_sibling, None)
@@ -365,7 +365,7 @@ class TestNodes(support.TestCase):
n2 = pytree.Node(1000, [])
p1 = pytree.Node(1000, [n1, n2])
- self.assertTrue(n2.prev_sibling is n1)
+ self.assertIs(n2.prev_sibling, n1)
self.assertEqual(n1.prev_sibling, None)
self.assertEqual(p1.prev_sibling, None)
@@ -374,7 +374,7 @@ class TestNodes(support.TestCase):
l2 = pytree.Leaf(100, "b")
p1 = pytree.Node(1000, [l1, l2])
- self.assertTrue(l2.prev_sibling is l1)
+ self.assertIs(l2.prev_sibling, l1)
self.assertEqual(l1.prev_sibling, None)
self.assertEqual(p1.prev_sibling, None)
@@ -447,7 +447,7 @@ class TestPatterns(support.TestCase):
r = {}
self.assertTrue(pw.match_seq([l1, l3], r))
self.assertEqual(r, {"pl": l3, "pw": [l1, l3]})
- self.assertTrue(r["pl"] is l3)
+ self.assertIs(r["pl"], l3)
r = {}
def test_generate_matches(self):
diff --git a/Lib/lib2to3/tests/test_refactor.py b/Lib/lib2to3/tests/test_refactor.py
index 6020d1f..7fc84e2 100644
--- a/Lib/lib2to3/tests/test_refactor.py
+++ b/Lib/lib2to3/tests/test_refactor.py
@@ -49,9 +49,9 @@ class TestRefactoringTool(unittest.TestCase):
def test_print_function_option(self):
rt = self.rt({"print_function" : True})
- self.assertTrue(rt.grammar is pygram.python_grammar_no_print_statement)
- self.assertTrue(rt.driver.grammar is
- pygram.python_grammar_no_print_statement)
+ self.assertIs(rt.grammar, pygram.python_grammar_no_print_statement)
+ self.assertIs(rt.driver.grammar,
+ pygram.python_grammar_no_print_statement)
def test_write_unchanged_files_option(self):
rt = self.rt()
@@ -271,6 +271,10 @@ from __future__ import print_function"""
fn = os.path.join(TEST_DATA_DIR, "different_encoding.py")
self.check_file_refactoring(fn)
+ def test_false_file_encoding(self):
+ fn = os.path.join(TEST_DATA_DIR, "false_encoding.py")
+ data = self.check_file_refactoring(fn)
+
def test_bom(self):
fn = os.path.join(TEST_DATA_DIR, "bom.py")
data = self.check_file_refactoring(fn)
diff --git a/Lib/locale.py b/Lib/locale.py
index 561e9e4..25dccaf 100644
--- a/Lib/locale.py
+++ b/Lib/locale.py
@@ -18,6 +18,14 @@ import re
import operator
import functools
+try:
+ _unicode = unicode
+except NameError:
+ # If Python is built without Unicode support, the unicode type
+ # will not exist. Fake one.
+ class _unicode(object):
+ pass
+
# Try importing the _locale module.
#
# If this fails, fall back on a basic 'C' locale emulation.
@@ -336,6 +344,22 @@ _ascii_lower_map = ''.join(
for x in range(256)
)
+def _replace_encoding(code, encoding):
+ if '.' in code:
+ langname = code[:code.index('.')]
+ else:
+ langname = code
+ # Convert the encoding to a C lib compatible encoding string
+ norm_encoding = encodings.normalize_encoding(encoding)
+ #print('norm encoding: %r' % norm_encoding)
+ norm_encoding = encodings.aliases.aliases.get(norm_encoding,
+ norm_encoding)
+ #print('aliased encoding: %r' % norm_encoding)
+ encoding = locale_encoding_alias.get(norm_encoding,
+ norm_encoding)
+ #print('found encoding %r' % encoding)
+ return langname + '.' + encoding
+
def normalize(localename):
""" Returns a normalized locale code for the given locale
@@ -352,57 +376,73 @@ def normalize(localename):
does.
"""
- # Normalize the locale name and extract the encoding
- if isinstance(localename, unicode):
+ # Normalize the locale name and extract the encoding and modifier
+ if isinstance(localename, _unicode):
localename = localename.encode('ascii')
- fullname = localename.translate(_ascii_lower_map)
- if ':' in fullname:
+ code = localename.translate(_ascii_lower_map)
+ if ':' in code:
# ':' is sometimes used as encoding delimiter.
- fullname = fullname.replace(':', '.')
- if '.' in fullname:
- langname, encoding = fullname.split('.')[:2]
- fullname = langname + '.' + encoding
+ code = code.replace(':', '.')
+ if '@' in code:
+ code, modifier = code.split('@', 1)
else:
- langname = fullname
+ modifier = ''
+ if '.' in code:
+ langname, encoding = code.split('.')[:2]
+ else:
+ langname = code
encoding = ''
- # First lookup: fullname (possibly with encoding)
- norm_encoding = encoding.replace('-', '')
- norm_encoding = norm_encoding.replace('_', '')
- lookup_name = langname + '.' + encoding
+ # First lookup: fullname (possibly with encoding and modifier)
+ lang_enc = langname
+ if encoding:
+ norm_encoding = encoding.replace('-', '')
+ norm_encoding = norm_encoding.replace('_', '')
+ lang_enc += '.' + norm_encoding
+ lookup_name = lang_enc
+ if modifier:
+ lookup_name += '@' + modifier
code = locale_alias.get(lookup_name, None)
if code is not None:
return code
- #print 'first lookup failed'
-
- # Second try: langname (without encoding)
- code = locale_alias.get(langname, None)
- if code is not None:
- #print 'langname lookup succeeded'
- if '.' in code:
- langname, defenc = code.split('.')
- else:
- langname = code
- defenc = ''
- if encoding:
- # Convert the encoding to a C lib compatible encoding string
- norm_encoding = encodings.normalize_encoding(encoding)
- #print 'norm encoding: %r' % norm_encoding
- norm_encoding = encodings.aliases.aliases.get(norm_encoding,
- norm_encoding)
- #print 'aliased encoding: %r' % norm_encoding
- encoding = locale_encoding_alias.get(norm_encoding,
- norm_encoding)
- else:
- encoding = defenc
- #print 'found encoding %r' % encoding
- if encoding:
- return langname + '.' + encoding
- else:
- return langname
-
- else:
- return localename
+ #print('first lookup failed')
+
+ if modifier:
+ # Second try: fullname without modifier (possibly with encoding)
+ code = locale_alias.get(lang_enc, None)
+ if code is not None:
+ #print('lookup without modifier succeeded')
+ if '@' not in code:
+ return code + '@' + modifier
+ if code.split('@', 1)[1].translate(_ascii_lower_map) == modifier:
+ return code
+ #print('second lookup failed')
+
+ if encoding:
+ # Third try: langname (without encoding, possibly with modifier)
+ lookup_name = langname
+ if modifier:
+ lookup_name += '@' + modifier
+ code = locale_alias.get(lookup_name, None)
+ if code is not None:
+ #print('lookup without encoding succeeded')
+ if '@' not in code:
+ return _replace_encoding(code, encoding)
+ code, modifier = code.split('@', 1)
+ return _replace_encoding(code, encoding) + '@' + modifier
+
+ if modifier:
+ # Fourth try: langname (without encoding and modifier)
+ code = locale_alias.get(langname, None)
+ if code is not None:
+ #print('lookup without modifier and encoding succeeded')
+ if '@' not in code:
+ return _replace_encoding(code, encoding) + '@' + modifier
+ code, defmod = code.split('@', 1)
+ if defmod.translate(_ascii_lower_map) == modifier:
+ return _replace_encoding(code, encoding) + '@' + defmod
+
+ return localename
def _parse_localename(localename):
@@ -421,7 +461,7 @@ def _parse_localename(localename):
code = normalize(localename)
if '@' in code:
# Deal with locale modifiers
- code, modifier = code.split('@')
+ code, modifier = code.split('@', 1)
if modifier == 'euro' and '.' not in code:
# Assume Latin-9 for @euro locales. This is bogus,
# since some systems may use other encodings for these
@@ -726,11 +766,30 @@ locale_encoding_alias = {
# updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8'
# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
#
+# SS 2013-12-20:
+# Updated alias mapping to most recent locale.alias file
+# from X.org distribution using makelocalealias.py.
+#
+# These are the differences compared to the old mapping (Python 2.7.6
+# and older):
+#
+# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
+# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
+# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
+# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
+# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
+# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
+# updated 'sd' -> 'sd_IN@devanagari.UTF-8' to 'sd_IN.UTF-8'
+# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
+# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8'
+# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
+# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
locale_alias = {
- 'a3': 'a3_AZ.KOI8-C',
- 'a3_az': 'a3_AZ.KOI8-C',
- 'a3_az.koi8c': 'a3_AZ.KOI8-C',
+ 'a3': 'az_AZ.KOI8-C',
+ 'a3_az': 'az_AZ.KOI8-C',
+ 'a3_az.koi8c': 'az_AZ.KOI8-C',
+ 'a3_az.koic': 'az_AZ.KOI8-C',
'af': 'af_ZA.ISO8859-1',
'af_za': 'af_ZA.ISO8859-1',
'af_za.iso88591': 'af_ZA.ISO8859-1',
@@ -749,6 +808,7 @@ locale_alias = {
'ar_dz.iso88596': 'ar_DZ.ISO8859-6',
'ar_eg': 'ar_EG.ISO8859-6',
'ar_eg.iso88596': 'ar_EG.ISO8859-6',
+ 'ar_in': 'ar_IN.UTF-8',
'ar_iq': 'ar_IQ.ISO8859-6',
'ar_iq.iso88596': 'ar_IQ.ISO8859-6',
'ar_jo': 'ar_JO.ISO8859-6',
@@ -778,6 +838,7 @@ locale_alias = {
'arabic': 'ar_AA.ISO8859-6',
'arabic.iso88596': 'ar_AA.ISO8859-6',
'as': 'as_IN.UTF-8',
+ 'as_in': 'as_IN.UTF-8',
'az': 'az_AZ.ISO8859-9E',
'az_az': 'az_AZ.ISO8859-9E',
'az_az.iso88599e': 'az_AZ.ISO8859-9E',
@@ -795,6 +856,7 @@ locale_alias = {
'bg_bg.koi8r': 'bg_BG.KOI8-R',
'bg_bg.microsoftcp1251': 'bg_BG.CP1251',
'bn_in': 'bn_IN.UTF-8',
+ 'bo_in': 'bo_IN.UTF-8',
'bokmal': 'nb_NO.ISO8859-1',
'bokm\xe5l': 'nb_NO.ISO8859-1',
'br': 'br_FR.ISO8859-1',
@@ -812,6 +874,7 @@ locale_alias = {
'c': 'C',
'c-french': 'fr_CA.ISO8859-1',
'c-french.iso88591': 'fr_CA.ISO8859-1',
+ 'c.ascii': 'C',
'c.en': 'C',
'c.iso88591': 'en_US.ISO8859-1',
'c_c': 'C',
@@ -849,7 +912,7 @@ locale_alias = {
'croatian': 'hr_HR.ISO8859-2',
'cs': 'cs_CZ.ISO8859-2',
'cs_cs': 'cs_CZ.ISO8859-2',
- 'cs_cs.iso88592': 'cs_CS.ISO8859-2',
+ 'cs_cs.iso88592': 'cs_CZ.ISO8859-2',
'cs_cz': 'cs_CZ.ISO8859-2',
'cs_cz.iso88592': 'cs_CZ.ISO8859-2',
'cy': 'cy_GB.ISO8859-1',
@@ -1153,12 +1216,13 @@ locale_alias = {
'he_il.cp1255': 'he_IL.CP1255',
'he_il.iso88598': 'he_IL.ISO8859-8',
'he_il.microsoftcp1255': 'he_IL.CP1255',
- 'hebrew': 'iw_IL.ISO8859-8',
- 'hebrew.iso88598': 'iw_IL.ISO8859-8',
+ 'hebrew': 'he_IL.ISO8859-8',
+ 'hebrew.iso88598': 'he_IL.ISO8859-8',
'hi': 'hi_IN.ISCII-DEV',
'hi_in': 'hi_IN.ISCII-DEV',
'hi_in.isciidev': 'hi_IN.ISCII-DEV',
'hne': 'hne_IN.UTF-8',
+ 'hne_in': 'hne_IN.UTF-8',
'hr': 'hr_HR.ISO8859-2',
'hr_hr': 'hr_HR.ISO8859-2',
'hr_hr.iso88592': 'hr_HR.ISO8859-2',
@@ -1246,7 +1310,8 @@ locale_alias = {
'korean': 'ko_KR.eucKR',
'korean.euc': 'ko_KR.eucKR',
'ks': 'ks_IN.UTF-8',
- 'ks_in@devanagari': 'ks_IN@devanagari.UTF-8',
+ 'ks_in': 'ks_IN.UTF-8',
+ 'ks_in@devanagari': 'ks_IN.UTF-8@devanagari',
'kw': 'kw_GB.ISO8859-1',
'kw_gb': 'kw_GB.ISO8859-1',
'kw_gb.iso88591': 'kw_GB.ISO8859-1',
@@ -1270,6 +1335,7 @@ locale_alias = {
'lv_lv.iso885913': 'lv_LV.ISO8859-13',
'lv_lv.iso88594': 'lv_LV.ISO8859-4',
'mai': 'mai_IN.UTF-8',
+ 'mai_in': 'mai_IN.UTF-8',
'mi': 'mi_NZ.ISO8859-1',
'mi_nz': 'mi_NZ.ISO8859-1',
'mi_nz.iso88591': 'mi_NZ.ISO8859-1',
@@ -1279,6 +1345,7 @@ locale_alias = {
'mk_mk.iso88595': 'mk_MK.ISO8859-5',
'mk_mk.microsoftcp1251': 'mk_MK.CP1251',
'ml': 'ml_IN.UTF-8',
+ 'ml_in': 'ml_IN.UTF-8',
'mr': 'mr_IN.UTF-8',
'mr_in': 'mr_IN.UTF-8',
'ms': 'ms_MY.ISO8859-1',
@@ -1293,6 +1360,7 @@ locale_alias = {
'nb_no.iso88591': 'nb_NO.ISO8859-1',
'nb_no.iso885915': 'nb_NO.ISO8859-15',
'nb_no@euro': 'nb_NO.ISO8859-15',
+ 'ne_np': 'ne_NP.UTF-8',
'nl': 'nl_NL.ISO8859-1',
'nl.iso885915': 'nl_NL.ISO8859-15',
'nl_be': 'nl_BE.ISO8859-1',
@@ -1345,6 +1413,7 @@ locale_alias = {
'oc_fr.iso885915': 'oc_FR.ISO8859-15',
'oc_fr@euro': 'oc_FR.ISO8859-15',
'or': 'or_IN.UTF-8',
+ 'or_in': 'or_IN.UTF-8',
'pa': 'pa_IN.UTF-8',
'pa_in': 'pa_IN.UTF-8',
'pd': 'pd_US.ISO8859-1',
@@ -1406,7 +1475,10 @@ locale_alias = {
'rw': 'rw_RW.ISO8859-1',
'rw_rw': 'rw_RW.ISO8859-1',
'rw_rw.iso88591': 'rw_RW.ISO8859-1',
- 'sd': 'sd_IN@devanagari.UTF-8',
+ 'sd': 'sd_IN.UTF-8',
+ 'sd@devanagari': 'sd_IN.UTF-8@devanagari',
+ 'sd_in': 'sd_IN.UTF-8',
+ 'sd_in@devanagari': 'sd_IN.UTF-8@devanagari',
'se_no': 'se_NO.UTF-8',
'serbocroatian': 'sr_RS.UTF-8@latin',
'sh': 'sr_RS.UTF-8@latin',
@@ -1440,13 +1512,13 @@ locale_alias = {
'sr': 'sr_RS.UTF-8',
'sr@cyrillic': 'sr_RS.UTF-8',
'sr@latin': 'sr_RS.UTF-8@latin',
- 'sr@latn': 'sr_RS.UTF-8@latin',
- 'sr_cs': 'sr_RS.UTF-8',
+ 'sr@latn': 'sr_CS.UTF-8@latin',
+ 'sr_cs': 'sr_CS.UTF-8',
'sr_cs.iso88592': 'sr_CS.ISO8859-2',
'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2',
'sr_cs.iso88595': 'sr_CS.ISO8859-5',
- 'sr_cs.utf8@latn': 'sr_RS.UTF-8@latin',
- 'sr_cs@latn': 'sr_RS.UTF-8@latin',
+ 'sr_cs.utf8@latn': 'sr_CS.UTF-8@latin',
+ 'sr_cs@latn': 'sr_CS.UTF-8@latin',
'sr_me': 'sr_ME.UTF-8',
'sr_rs': 'sr_RS.UTF-8',
'sr_rs.utf8@latn': 'sr_RS.UTF-8@latin',
@@ -1524,6 +1596,7 @@ locale_alias = {
'universal': 'en_US.utf',
'universal.utf8@ucs4': 'en_US.UTF-8',
'ur': 'ur_PK.CP1256',
+ 'ur_in': 'ur_IN.UTF-8',
'ur_pk': 'ur_PK.CP1256',
'ur_pk.cp1256': 'ur_PK.CP1256',
'ur_pk.microsoftcp1256': 'ur_PK.CP1256',
@@ -1581,8 +1654,7 @@ locale_alias = {
# to include every locale up to Windows Vista.
#
# NOTE: this mapping is incomplete. If your language is missing, please
-# submit a bug report to Python bug manager, which you can find via:
-# http://www.python.org/dev/
+# submit a bug report to the Python bug tracker at http://bugs.python.org/
# Make sure you include the missing language identifier and the suggested
# locale code.
#
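
A few calls showing the modifier-aware normalization (expected results taken from the alias entries above):

    import locale

    print(locale.normalize('ks_IN@devanagari'))  # 'ks_IN.UTF-8@devanagari'
    print(locale.normalize('sd'))                # 'sd_IN.UTF-8'
    print(locale.normalize('hebrew'))            # 'he_IL.ISO8859-8'
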
diff --git a/Lib/logging/__init__.py b/Lib/logging/__init__.py
index be775e8..b2a7711 100644
--- a/Lib/logging/__init__.py
+++ b/Lib/logging/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
+# Copyright 2001-2014 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
@@ -16,14 +16,14 @@
"""
Logging package for Python. Based on PEP 282 and comments thereto in
-comp.lang.python, and influenced by Apache's log4j system.
+comp.lang.python.
-Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
+Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
-import sys, os, time, cStringIO, traceback, warnings, weakref
+import sys, os, time, cStringIO, traceback, warnings, weakref, collections
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
@@ -46,6 +46,7 @@ except ImportError:
__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
+# Note: the attributes below are no longer maintained.
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
@@ -180,7 +181,7 @@ def addLevelName(level, levelName):
_releaseLock()
def _checkLevel(level):
- if isinstance(level, int):
+ if isinstance(level, (int, long)):
rv = level
elif str(level) == level:
if level not in _levelNames:
@@ -260,7 +261,13 @@ class LogRecord(object):
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
- if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
+ # Issue #21172: a request was made to relax the isinstance check
+ # to hasattr(args[0], '__getitem__'). However, the docs on string
+ # formatting still seem to suggest a mapping object is required.
+ # Thus, while not removing the isinstance check, it does now look
+ # for collections.Mapping rather than, as before, dict.
+ if (args and len(args) == 1 and isinstance(args[0], collections.Mapping)
+ and args[0]):
args = args[0]
self.args = args
self.levelname = getLevelName(level)
@@ -622,15 +629,17 @@ def _removeHandlerRef(wr):
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
- # set to None. If _acquireLock is None, assume this is the case and do
- # nothing.
- if _acquireLock is not None:
- _acquireLock()
+ # set to None. It can also be called from another thread. So we need to
+ # pre-emptively grab the necessary globals and check if they're None,
+ # to prevent race conditions and failures during interpreter shutdown.
+ acquire, release, handlers = _acquireLock, _releaseLock, _handlerList
+ if acquire and release and handlers:
+ acquire()
try:
- if wr in _handlerList:
- _handlerList.remove(wr)
+ if wr in handlers:
+ handlers.remove(wr)
finally:
- _releaseLock()
+ release()
def _addHandlerRef(handler):
"""
@@ -828,8 +837,12 @@ class StreamHandler(Handler):
"""
Flushes the stream.
"""
- if self.stream and hasattr(self.stream, "flush"):
- self.stream.flush()
+ self.acquire()
+ try:
+ if self.stream and hasattr(self.stream, "flush"):
+ self.stream.flush()
+ finally:
+ self.release()
def emit(self, record):
"""
@@ -852,7 +865,7 @@ class StreamHandler(Handler):
try:
if (isinstance(msg, unicode) and
getattr(stream, 'encoding', None)):
- ufs = fs.decode(stream.encoding)
+ ufs = u'%s\n'
try:
stream.write(ufs % msg)
except UnicodeEncodeError:
@@ -888,6 +901,7 @@ class FileHandler(StreamHandler):
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
+ self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
@@ -900,12 +914,18 @@ class FileHandler(StreamHandler):
"""
Closes the stream.
"""
- if self.stream:
- self.flush()
- if hasattr(self.stream, "close"):
- self.stream.close()
+ self.acquire()
+ try:
+ if self.stream:
+ self.flush()
+ if hasattr(self.stream, "close"):
+ self.stream.close()
+ self.stream = None
+ # Issue #19523: call unconditionally to
+ # prevent a handler leak when delay is set
StreamHandler.close(self)
- self.stream = None
+ finally:
+ self.release()
def _open(self):
"""
@@ -1165,11 +1185,12 @@ class Logger(Filterer):
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
- def exception(self, msg, *args):
+ def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
- self.error(msg, exc_info=1, *args)
+ kwargs['exc_info'] = 1
+ self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
@@ -1242,7 +1263,7 @@ class Logger(Filterer):
all the handlers of this logger to handle the record.
"""
if _srcfile:
- #IronPython doesn't track Python frames, so findCaller throws an
+ #IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
@@ -1574,12 +1595,13 @@ def error(msg, *args, **kwargs):
basicConfig()
root.error(msg, *args, **kwargs)
-def exception(msg, *args):
+def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger,
with exception information.
"""
- error(msg, exc_info=1, *args)
+ kwargs['exc_info'] = 1
+ error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
diff --git a/Lib/logging/config.py b/Lib/logging/config.py
index 8e01a56..8b37956 100644
--- a/Lib/logging/config.py
+++ b/Lib/logging/config.py
@@ -1,4 +1,4 @@
-# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
+# Copyright 2001-2014 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
@@ -19,13 +19,23 @@ Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
-Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
+Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
-import sys, logging, logging.handlers, socket, struct, os, traceback, re
-import types, cStringIO
+import cStringIO
+import errno
+import io
+import logging
+import logging.handlers
+import os
+import re
+import socket
+import struct
+import sys
+import traceback
+import types
try:
import thread
@@ -38,10 +48,7 @@ from SocketServer import ThreadingTCPServer, StreamRequestHandler
DEFAULT_LOGGING_CONFIG_PORT = 9030
-if sys.platform == "win32":
- RESET_ERROR = 10054 #WSAECONNRESET
-else:
- RESET_ERROR = 104 #ECONNRESET
+RESET_ERROR = errno.ECONNRESET
#
# The following code implements a socket listener for on-the-fly
@@ -260,8 +267,8 @@ def _install_loggers(cp, handlers, disable_existing_loggers):
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = 1
- elif disable_existing_loggers:
- logger.disabled = 1
+ else:
+ logger.disabled = disable_existing_loggers
@@ -275,6 +282,30 @@ def valid_ident(s):
return True
+class ConvertingMixin(object):
+ """For ConvertingXXX's, this mixin class provides common functions"""
+
+ def convert_with_key(self, key, value, replace=True):
+ result = self.configurator.convert(value)
+ #If the converted value is different, save for next time
+ if value is not result:
+ if replace:
+ self[key] = result
+ if type(result) in (ConvertingDict, ConvertingList,
+ ConvertingTuple):
+ result.parent = self
+ result.key = key
+ return result
+
+ def convert(self, value):
+ result = self.configurator.convert(value)
+ if value is not result:
+ if type(result) in (ConvertingDict, ConvertingList,
+ ConvertingTuple):
+ result.parent = self
+ return result
+
+
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
@@ -284,77 +315,37 @@ def valid_ident(s):
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
-class ConvertingDict(dict):
+class ConvertingDict(dict, ConvertingMixin):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
- result = self.configurator.convert(value)
- #If the converted value is different, save for next time
- if value is not result:
- self[key] = result
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
+ return self.convert_with_key(key, value)
def get(self, key, default=None):
value = dict.get(self, key, default)
- result = self.configurator.convert(value)
- #If the converted value is different, save for next time
- if value is not result:
- self[key] = result
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
+ return self.convert_with_key(key, value)
def pop(self, key, default=None):
value = dict.pop(self, key, default)
- result = self.configurator.convert(value)
- if value is not result:
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
+ return self.convert_with_key(key, value, replace=False)
-class ConvertingList(list):
+class ConvertingList(list, ConvertingMixin):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
- result = self.configurator.convert(value)
- #If the converted value is different, save for next time
- if value is not result:
- self[key] = result
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
+ return self.convert_with_key(key, value)
def pop(self, idx=-1):
value = list.pop(self, idx)
- result = self.configurator.convert(value)
- if value is not result:
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- return result
+ return self.convert(value)
-class ConvertingTuple(tuple):
+class ConvertingTuple(tuple, ConvertingMixin):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
- result = self.configurator.convert(value)
- if value is not result:
- if type(result) in (ConvertingDict, ConvertingList,
- ConvertingTuple):
- result.parent = self
- result.key = key
- return result
+ # Can't replace a tuple entry.
+ return self.convert_with_key(key, value, replace=False)
class BaseConfigurator(object):
"""
@@ -379,6 +370,12 @@ class BaseConfigurator(object):
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
+ # Issue 12718: winpdb replaces __import__ with a Python function, which
+ # ends up being treated as a bound method. To avoid problems, we
+ # set the importer on the instance, but leave it defined in the class
+ # so existing code doesn't break
+ if type(__import__) == types.FunctionType:
+ self.importer = __import__
def resolve(self, s):
"""
@@ -520,21 +517,21 @@ class DictConfigurator(BaseConfigurator):
level = handler_config.get('level', None)
if level:
handler.setLevel(logging._checkLevel(level))
- except StandardError, e:
+ except StandardError as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
- except StandardError, e:
+ except StandardError as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
- except StandardError, e:
+ except StandardError as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
@@ -549,7 +546,7 @@ class DictConfigurator(BaseConfigurator):
try:
formatters[name] = self.configure_formatter(
formatters[name])
- except StandardError, e:
+ except StandardError as e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
@@ -557,7 +554,7 @@ class DictConfigurator(BaseConfigurator):
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
- except StandardError, e:
+ except StandardError as e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
@@ -565,14 +562,29 @@ class DictConfigurator(BaseConfigurator):
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
+ deferred = []
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
- except StandardError, e:
+ except StandardError as e:
+ if 'target not configured yet' in str(e):
+ deferred.append(name)
+ else:
+ raise ValueError('Unable to configure handler '
+ '%r: %s' % (name, e))
+
+ # Now do any that were deferred
+ for name in deferred:
+ try:
+ handler = self.configure_handler(handlers[name])
+ handler.name = name
+ handlers[name] = handler
+ except StandardError as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
+
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
@@ -610,7 +622,7 @@ class DictConfigurator(BaseConfigurator):
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
- except StandardError, e:
+ except StandardError as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
@@ -633,7 +645,7 @@ class DictConfigurator(BaseConfigurator):
if root:
try:
self.configure_root(root)
- except StandardError, e:
+ except StandardError as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
@@ -645,7 +657,7 @@ class DictConfigurator(BaseConfigurator):
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
- except TypeError, te:
+ except TypeError as te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
@@ -675,7 +687,7 @@ class DictConfigurator(BaseConfigurator):
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
- except StandardError, e:
+ except StandardError as e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
@@ -684,7 +696,7 @@ class DictConfigurator(BaseConfigurator):
if formatter:
try:
formatter = self.config['formatters'][formatter]
- except StandardError, e:
+ except StandardError as e:
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
@@ -695,13 +707,18 @@ class DictConfigurator(BaseConfigurator):
c = self.resolve(c)
factory = c
else:
- klass = self.resolve(config.pop('class'))
+ cname = config.pop('class')
+ klass = self.resolve(cname)
#Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
- config['target'] = self.config['handlers'][config['target']]
- except StandardError, e:
+ th = self.config['handlers'][config['target']]
+ if not isinstance(th, logging.Handler):
+ config['class'] = cname # restore for deferred configuration
+ raise StandardError('target not configured yet')
+ config['target'] = th
+ except StandardError as e:
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
@@ -714,7 +731,7 @@ class DictConfigurator(BaseConfigurator):
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
- except TypeError, te:
+ except TypeError as te:
if "'stream'" not in str(te):
raise
#The argument name changed from strm to stream
@@ -736,7 +753,7 @@ class DictConfigurator(BaseConfigurator):
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
- except StandardError, e:
+ except StandardError as e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
@@ -831,13 +848,9 @@ def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
traceback.print_exc()
if self.server.ready:
self.server.ready.set()
- except socket.error, e:
- if not isinstance(e.args, tuple):
+ except socket.error as e:
+ if e.errno != RESET_ERROR:
raise
- else:
- errcode = e.args[0]
- if errcode != RESET_ERROR:
- raise
class ConfigSocketReceiver(ThreadingTCPServer):
"""
diff --git a/Lib/logging/handlers.py b/Lib/logging/handlers.py
index f8c7164..b7bf931 100644
--- a/Lib/logging/handlers.py
+++ b/Lib/logging/handlers.py
@@ -1,4 +1,4 @@
-# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
+# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
@@ -16,15 +16,14 @@
"""
Additional handlers for the logging package for Python. The core package is
-based on PEP 282 and comments thereto in comp.lang.python, and influenced by
-Apache's log4j system.
+based on PEP 282 and comments thereto in comp.lang.python.
-Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
+Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
-import logging, socket, os, cPickle, struct, time, re
+import errno, logging, socket, os, cPickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
try:
@@ -138,10 +137,11 @@ class RotatingFileHandler(BaseRotatingHandler):
dfn = self.baseFilename + ".1"
if os.path.exists(dfn):
os.remove(dfn)
- os.rename(self.baseFilename, dfn)
- #print "%s -> %s" % (self.baseFilename, dfn)
- self.mode = 'w'
- self.stream = self._open()
+ # Issue 18940: A file may not have been created if delay is True.
+ if os.path.exists(self.baseFilename):
+ os.rename(self.baseFilename, dfn)
+ if not self.delay:
+ self.stream = self._open()
def shouldRollover(self, record):
"""
@@ -273,9 +273,10 @@ class TimedRotatingFileHandler(BaseRotatingHandler):
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
- newRolloverAt = newRolloverAt - 3600
+ addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
- newRolloverAt = newRolloverAt + 3600
+ addend = 3600
+ newRolloverAt += addend
result = newRolloverAt
return result
@@ -327,39 +328,43 @@ class TimedRotatingFileHandler(BaseRotatingHandler):
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
+ currentTime = int(time.time())
+ dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
+ dstThen = timeTuple[-1]
+ if dstNow != dstThen:
+ if dstNow:
+ addend = 3600
+ else:
+ addend = -3600
+ timeTuple = time.localtime(t + addend)
dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
if os.path.exists(dfn):
os.remove(dfn)
- os.rename(self.baseFilename, dfn)
+ # Issue 18940: A file may not have been created if delay is True.
+ if os.path.exists(self.baseFilename):
+ os.rename(self.baseFilename, dfn)
if self.backupCount > 0:
- # find the oldest log file and delete it
- #s = glob.glob(self.baseFilename + ".20*")
- #if len(s) > self.backupCount:
- # s.sort()
- # os.remove(s[0])
for s in self.getFilesToDelete():
os.remove(s)
- #print "%s -> %s" % (self.baseFilename, dfn)
- self.mode = 'w'
- self.stream = self._open()
- currentTime = int(time.time())
+ if not self.delay:
+ self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
- dstNow = time.localtime(currentTime)[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
- newRolloverAt = newRolloverAt - 3600
+ addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
- newRolloverAt = newRolloverAt + 3600
+ addend = 3600
+ newRolloverAt += addend
self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
@@ -384,11 +389,13 @@ class WatchedFileHandler(logging.FileHandler):
"""
def __init__(self, filename, mode='a', encoding=None, delay=0):
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
- if not os.path.exists(self.baseFilename):
- self.dev, self.ino = -1, -1
- else:
- stat = os.stat(self.baseFilename)
- self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
+ self.dev, self.ino = -1, -1
+ self._statstream()
+
+ def _statstream(self):
+ if self.stream:
+ sres = os.fstat(self.stream.fileno())
+ self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
def emit(self, record):
"""
@@ -398,19 +405,28 @@ class WatchedFileHandler(logging.FileHandler):
has, close the old stream and reopen the file to get the
current stream.
"""
- if not os.path.exists(self.baseFilename):
- stat = None
- changed = 1
- else:
- stat = os.stat(self.baseFilename)
- changed = (stat[ST_DEV] != self.dev) or (stat[ST_INO] != self.ino)
- if changed and self.stream is not None:
- self.stream.flush()
- self.stream.close()
- self.stream = self._open()
- if stat is None:
- stat = os.stat(self.baseFilename)
- self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
+ # Reduce the chance of race conditions by stat'ing by path only
+ # once and then fstat'ing our new fd if we opened a new log stream.
+ # See issue #14632: Thanks to John Mulligan for the problem report
+ # and patch.
+ try:
+ # stat the file by path, checking for existence
+ sres = os.stat(self.baseFilename)
+ except OSError as err:
+ if err.errno == errno.ENOENT:
+ sres = None
+ else:
+ raise
+ # compare file system stat with that of our stream file handle
+ if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
+ if self.stream is not None:
+ # we have an open file handle, clean it up
+ self.stream.flush()
+ self.stream.close()
+ self.stream = None # See Issue #21742: _open () might fail.
+ # open a new file handle and get new stat info from that fd
+ self.stream = self._open()
+ self._statstream()
logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
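
The WatchedFileHandler change above replaces per-emit bookkeeping with a single stat by path plus an fstat of the freshly opened stream (issue #14632). A minimal standalone sketch of the device/inode comparison it relies on; this is an illustration, not the handler itself:

    # Detect that an external tool (e.g. logrotate) replaced the file behind
    # an open stream, using the same dev/inode comparison as the handler.
    import os, errno
    from stat import ST_DEV, ST_INO

    def stream_was_rotated(stream, path):
        """Return True if `path` no longer refers to the file behind `stream`."""
        try:
            sres = os.stat(path)            # stat by path: whatever file is at `path` now
        except OSError as err:
            if err.errno == errno.ENOENT:   # file removed -> a reopen is needed
                return True
            raise
        fres = os.fstat(stream.fileno())    # stat by descriptor: the file we hold open
        return (sres[ST_DEV], sres[ST_INO]) != (fres[ST_DEV], fres[ST_INO])

    # e.g.  with open('app.log') as f: stream_was_rotated(f, 'app.log')
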
@@ -520,9 +536,16 @@ class SocketHandler(logging.Handler):
"""
ei = record.exc_info
if ei:
- dummy = self.format(record) # just to get traceback text into record.exc_text
+ # just to get traceback text into record.exc_text ...
+ dummy = self.format(record)
record.exc_info = None # to avoid Unpickleable error
- s = cPickle.dumps(record.__dict__, 1)
+ # See issue #14436: If msg or args are objects, they may not be
+ # available on the receiving end. So we convert the msg % args
+ # to a string, save it as msg and zap the args.
+ d = dict(record.__dict__)
+ d['msg'] = record.getMessage()
+ d['args'] = None
+ s = cPickle.dumps(d, 1)
if ei:
record.exc_info = ei # for next handler
slen = struct.pack(">L", len(s))
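
The makePickle change renders msg % args on the sending side, so the receiver never has to unpickle or import the original message objects (issue #14436). A hedged sketch of the payload shape the patched handler puts on the wire; the record contents below are made up:

    import logging, cPickle, struct

    record = logging.LogRecord('demo', logging.INFO, __file__, 1,
                               'user %s logged in', ('alice',), None)
    d = dict(record.__dict__)
    d['msg'] = record.getMessage()   # 'user alice logged in' -- rendered here
    d['args'] = None                 # the raw args no longer travel over the wire
    payload = cPickle.dumps(d, 1)
    frame = struct.pack('>L', len(payload)) + payload   # length-prefixed frame
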
@@ -563,9 +586,13 @@ class SocketHandler(logging.Handler):
"""
Closes the socket.
"""
- if self.sock:
- self.sock.close()
- self.sock = None
+ self.acquire()
+ try:
+ if self.sock:
+ self.sock.close()
+ self.sock = None
+ finally:
+ self.release()
logging.Handler.close(self)
class DatagramHandler(SocketHandler):
@@ -710,13 +737,17 @@ class SysLogHandler(logging.Handler):
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
- facility=LOG_USER, socktype=socket.SOCK_DGRAM):
+ facility=LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
- If facility is not specified, LOG_USER is used.
+ If facility is not specified, LOG_USER is used. If socktype is
+ specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
+ socket type will be used. For Unix sockets, you can also specify a
+ socktype of None, in which case socket.SOCK_DGRAM will be used, falling
+ back to socket.SOCK_STREAM.
"""
logging.Handler.__init__(self)
@@ -729,20 +760,37 @@ class SysLogHandler(logging.Handler):
self._connect_unixsocket(address)
else:
self.unixsocket = 0
+ if socktype is None:
+ socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_INET, socktype)
if socktype == socket.SOCK_STREAM:
self.socket.connect(address)
+ self.socktype = socktype
self.formatter = None
def _connect_unixsocket(self, address):
- self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
- # syslog may require either DGRAM or STREAM sockets
+ use_socktype = self.socktype
+ if use_socktype is None:
+ use_socktype = socket.SOCK_DGRAM
+ self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
+ # it worked, so set self.socktype to the used type
+ self.socktype = use_socktype
except socket.error:
self.socket.close()
- self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- self.socket.connect(address)
+ if self.socktype is not None:
+ # user didn't specify falling back, so fail
+ raise
+ use_socktype = socket.SOCK_STREAM
+ self.socket = socket.socket(socket.AF_UNIX, use_socktype)
+ try:
+ self.socket.connect(address)
+ # it worked, so set self.socktype to the used type
+ self.socktype = use_socktype
+ except socket.error:
+ self.socket.close()
+ raise
# curious: when talking to the unix-domain '/dev/log' socket, a
# zero-terminator seems to be required. this string is placed
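
With socktype now defaulting to None, a Unix-socket SysLogHandler first tries SOCK_DGRAM and silently falls back to SOCK_STREAM, while an explicit socktype disables the fallback. A usage sketch, assuming a local syslog daemon listening on /dev/log (adjust the address for your system):

    import socket
    from logging.handlers import SysLogHandler

    # Default: try SOCK_DGRAM, fall back to SOCK_STREAM if the connect fails.
    h_auto = SysLogHandler(address='/dev/log')

    # Explicit type: no fallback -- a socket.error propagates on mismatch.
    h_stream = SysLogHandler(address='/dev/log', socktype=socket.SOCK_STREAM)
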
@@ -767,8 +815,12 @@ class SysLogHandler(logging.Handler):
"""
Closes the socket.
"""
- if self.unixsocket:
- self.socket.close()
+ self.acquire()
+ try:
+ if self.unixsocket:
+ self.socket.close()
+ finally:
+ self.release()
logging.Handler.close(self)
def mapPriority(self, levelName):
@@ -798,14 +850,13 @@ class SysLogHandler(logging.Handler):
# Message is a string. Convert to bytes as required by RFC 5424
if type(msg) is unicode:
msg = msg.encode('utf-8')
- if codecs:
- msg = codecs.BOM_UTF8 + msg
msg = prio + msg
try:
if self.unixsocket:
try:
self.socket.send(msg)
except socket.error:
+ self.socket.close() # See issue 17981
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
@@ -852,6 +903,7 @@ class SMTPHandler(logging.Handler):
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
+ self._timeout = 5.0
def getSubject(self, record):
"""
@@ -874,7 +926,7 @@ class SMTPHandler(logging.Handler):
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
- smtp = smtplib.SMTP(self.mailhost, port)
+ smtp = smtplib.SMTP(self.mailhost, port, timeout=self._timeout)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
@@ -1016,7 +1068,7 @@ class HTTPHandler(logging.Handler):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Overwrite in your class.
- Contributed by Franz Glasner.
+ Contributed by Franz Glasner.
"""
return record.__dict__
@@ -1096,7 +1148,11 @@ class BufferingHandler(logging.Handler):
This version just zaps the buffer to empty.
"""
- self.buffer = []
+ self.acquire()
+ try:
+ self.buffer = []
+ finally:
+ self.release()
def close(self):
"""
@@ -1144,15 +1200,23 @@ class MemoryHandler(BufferingHandler):
records to the target, if there is one. Override if you want
different behaviour.
"""
- if self.target:
- for record in self.buffer:
- self.target.handle(record)
- self.buffer = []
+ self.acquire()
+ try:
+ if self.target:
+ for record in self.buffer:
+ self.target.handle(record)
+ self.buffer = []
+ finally:
+ self.release()
def close(self):
"""
Flush, set the target to None and lose the buffer.
"""
self.flush()
- self.target = None
- BufferingHandler.close(self)
+ self.acquire()
+ try:
+ self.target = None
+ BufferingHandler.close(self)
+ finally:
+ self.release()
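
Several handlers above now mutate shared state only between acquire() and release(). The same pattern applies to any custom handler that exposes operations besides emit(); a small sketch (the ListHandler class is hypothetical):

    import logging

    class ListHandler(logging.Handler):
        def __init__(self):
            logging.Handler.__init__(self)
            self.buffer = []

        def emit(self, record):
            self.buffer.append(self.format(record))

        def reset(self):
            self.acquire()            # same per-handler lock emit()/handle() take
            try:
                self.buffer = []      # safe against concurrent emit() calls
            finally:
                self.release()
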
diff --git a/Lib/macurl2path.py b/Lib/macurl2path.py
index 4c5ae64..6f8260f 100644
--- a/Lib/macurl2path.py
+++ b/Lib/macurl2path.py
@@ -75,23 +75,3 @@ def pathname2url(pathname):
def _pncomp2url(component):
component = urllib.quote(component[:31], safe='') # We want to quote slashes
return component
-
-def test():
- for url in ["index.html",
- "bar/index.html",
- "/foo/bar/index.html",
- "/foo/bar/",
- "/"]:
- print '%r -> %r' % (url, url2pathname(url))
- for path in ["drive:",
- "drive:dir:",
- "drive:dir:file",
- "drive:file",
- "file",
- ":file",
- ":dir:",
- ":dir:file"]:
- print '%r -> %r' % (path, pathname2url(path))
-
-if __name__ == '__main__':
- test()
diff --git a/Lib/mailbox.py b/Lib/mailbox.py
index 0efd743..ba49753 100644
--- a/Lib/mailbox.py
+++ b/Lib/mailbox.py
@@ -1,5 +1,3 @@
-#! /usr/bin/env python
-
"""Read/write support for Maildir, mbox, MH, Babyl, and MMDF mailboxes."""
# Notes for authors of new mailbox subclasses:
@@ -197,6 +195,9 @@ class Mailbox:
"""Flush and close the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
+ # Whether each message must end in a newline
+ _append_newline = False
+
def _dump_message(self, message, target, mangle_from_=False):
# Most files are opened in binary mode to allow predictable seeking.
# To get native line endings on disk, the user-friendly \n line endings
@@ -207,13 +208,21 @@ class Mailbox:
gen = email.generator.Generator(buffer, mangle_from_, 0)
gen.flatten(message)
buffer.seek(0)
- target.write(buffer.read().replace('\n', os.linesep))
+ data = buffer.read().replace('\n', os.linesep)
+ target.write(data)
+ if self._append_newline and not data.endswith(os.linesep):
+ # Make sure the message ends with a newline
+ target.write(os.linesep)
elif isinstance(message, str):
if mangle_from_:
message = message.replace('\nFrom ', '\n>From ')
message = message.replace('\n', os.linesep)
target.write(message)
+ if self._append_newline and not message.endswith(os.linesep):
+ # Make sure the message ends with a newline
+ target.write(os.linesep)
elif hasattr(message, 'read'):
+ lastline = None
while True:
line = message.readline()
if line == '':
@@ -222,6 +231,10 @@ class Mailbox:
line = '>From ' + line[5:]
line = line.replace('\n', os.linesep)
target.write(line)
+ lastline = line
+ if self._append_newline and lastline and not lastline.endswith(os.linesep):
+ # Make sure the message ends with a newline
+ target.write(os.linesep)
else:
raise TypeError('Invalid message type: %s' % type(message))
@@ -271,6 +284,12 @@ class Maildir(Mailbox):
suffix = ''
uniq = os.path.basename(tmp_file.name).split(self.colon)[0]
dest = os.path.join(self._path, subdir, uniq + suffix)
+ if isinstance(message, MaildirMessage):
+ os.utime(tmp_file.name,
+ (os.path.getatime(tmp_file.name), message.get_date()))
+ # No file modification should be done after the file is moved to its
+ # final position in order to prevent race conditions with changes
+ # from other programs
try:
if hasattr(os, 'link'):
os.link(tmp_file.name, dest)
@@ -284,8 +303,6 @@ class Maildir(Mailbox):
% dest)
else:
raise
- if isinstance(message, MaildirMessage):
- os.utime(dest, (os.path.getatime(dest), message.get_date()))
return uniq
def remove(self, key):
@@ -320,11 +337,15 @@ class Maildir(Mailbox):
else:
suffix = ''
self.discard(key)
+ tmp_path = os.path.join(self._path, temp_subpath)
new_path = os.path.join(self._path, subdir, key + suffix)
- os.rename(os.path.join(self._path, temp_subpath), new_path)
if isinstance(message, MaildirMessage):
- os.utime(new_path, (os.path.getatime(new_path),
- message.get_date()))
+ os.utime(tmp_path,
+ (os.path.getatime(tmp_path), message.get_date()))
+ # No file modification should be done after the file is moved to its
+ # final position in order to prevent race conditions with changes
+ # from other programs
+ os.rename(tmp_path, new_path)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
@@ -561,16 +582,19 @@ class _singlefileMailbox(Mailbox):
self._file = f
self._toc = None
self._next_key = 0
- self._pending = False # No changes require rewriting the file.
+ self._pending = False # No changes require rewriting the file.
+ self._pending_sync = False # No need to sync the file
self._locked = False
- self._file_length = None # Used to record mailbox size
+ self._file_length = None # Used to record mailbox size
def add(self, message):
"""Add message and return assigned key."""
self._lookup()
self._toc[self._next_key] = self._append_message(message)
self._next_key += 1
- self._pending = True
+ # _append_message appends the message to the mailbox file. We
+ # don't need a full rewrite + rename, sync is enough.
+ self._pending_sync = True
return self._next_key - 1
def remove(self, key):
@@ -616,6 +640,11 @@ class _singlefileMailbox(Mailbox):
def flush(self):
"""Write any pending changes to disk."""
if not self._pending:
+ if self._pending_sync:
+ # Messages have only been added, so syncing the file
+ # is enough.
+ _sync_flush(self._file)
+ self._pending_sync = False
return
# In order to be writing anything out at all, self._toc must
@@ -649,6 +678,7 @@ class _singlefileMailbox(Mailbox):
new_file.write(buffer)
new_toc[key] = (new_start, new_file.tell())
self._post_message_hook(new_file)
+ self._file_length = new_file.tell()
except:
new_file.close()
os.remove(new_file.name)
@@ -656,6 +686,9 @@ class _singlefileMailbox(Mailbox):
_sync_close(new_file)
# self._file is about to get replaced, so no need to sync.
self._file.close()
+ # Make sure the new file's mode is the same as the old file's
+ mode = os.stat(self._path).st_mode
+ os.chmod(new_file.name, mode)
try:
os.rename(new_file.name, self._path)
except OSError, e:
@@ -668,6 +701,7 @@ class _singlefileMailbox(Mailbox):
self._file = open(self._path, 'rb+')
self._toc = new_toc
self._pending = False
+ self._pending_sync = False
if self._locked:
_lock_file(self._file, dotlock=False)
@@ -704,6 +738,12 @@ class _singlefileMailbox(Mailbox):
"""Append message to mailbox and return (start, stop) offsets."""
self._file.seek(0, 2)
before = self._file.tell()
+ if len(self._toc) == 0 and not self._pending:
+ # This is the first message, and the _pre_mailbox_hook
+ # hasn't yet been called. If self._pending is True,
+ # messages have been removed, so _pre_mailbox_hook must
+ # have been called already.
+ self._pre_mailbox_hook(self._file)
try:
self._pre_message_hook(self._file)
offsets = self._install_message(message)
@@ -778,30 +818,48 @@ class mbox(_mboxMMDF):
_mangle_from_ = True
+ # All messages must end in a newline character, and
+    # _post_message_hook outputs an empty line between messages.
+ _append_newline = True
+
def __init__(self, path, factory=None, create=True):
"""Initialize an mbox mailbox."""
self._message_factory = mboxMessage
_mboxMMDF.__init__(self, path, factory, create)
- def _pre_message_hook(self, f):
- """Called before writing each message to file f."""
- if f.tell() != 0:
- f.write(os.linesep)
+ def _post_message_hook(self, f):
+ """Called after writing each message to file f."""
+ f.write(os.linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
+ last_was_empty = False
self._file.seek(0)
while True:
line_pos = self._file.tell()
line = self._file.readline()
if line.startswith('From '):
if len(stops) < len(starts):
- stops.append(line_pos - len(os.linesep))
+ if last_was_empty:
+ stops.append(line_pos - len(os.linesep))
+ else:
+ # The last line before the "From " line wasn't
+ # blank, but we consider it a start of a
+ # message anyway.
+ stops.append(line_pos)
starts.append(line_pos)
- elif line == '':
- stops.append(line_pos)
+ last_was_empty = False
+ elif not line:
+ if last_was_empty:
+ stops.append(line_pos - len(os.linesep))
+ else:
+ stops.append(line_pos)
break
+ elif line == os.linesep:
+ last_was_empty = True
+ else:
+ last_was_empty = False
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
self._file_length = self._file.tell()
@@ -1367,9 +1425,9 @@ class Babyl(_singlefileMailbox):
line = message.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
- self._file.write('*** EOOH ***' + os.linesep)
if first_pass:
first_pass = False
+ self._file.write('*** EOOH ***' + os.linesep)
message.seek(original_pos)
else:
break
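
The mbox changes guarantee that every stored message ends with a newline and that _post_message_hook writes the blank separator line, so a payload without a trailing newline no longer runs into the following "From " line. A small round-trip sketch, assuming the patched module and a throwaway temporary path:

    import mailbox, tempfile, os

    path = os.path.join(tempfile.mkdtemp(), 'test.mbox')
    mb = mailbox.mbox(path)
    try:
        mb.add('From: a@example.com\n\nbody without trailing newline')
        mb.add('From: b@example.com\n\nsecond message\n')
        mb.flush()
    finally:
        mb.close()
    # With the patch both messages round-trip: len(mailbox.mbox(path)) == 2.
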
diff --git a/Lib/mailcap.py b/Lib/mailcap.py
index b2ddacd..04077ba 100644
--- a/Lib/mailcap.py
+++ b/Lib/mailcap.py
@@ -22,8 +22,8 @@ def getcaps():
fp = open(mailcap, 'r')
except IOError:
continue
- morecaps = readmailcapfile(fp)
- fp.close()
+ with fp:
+ morecaps = readmailcapfile(fp)
for key, value in morecaps.iteritems():
if not key in caps:
caps[key] = value
diff --git a/Lib/mimetypes.py b/Lib/mimetypes.py
index 4c054c9..ec8fd99 100644
--- a/Lib/mimetypes.py
+++ b/Lib/mimetypes.py
@@ -242,35 +242,31 @@ class MimeTypes:
i = 0
while True:
try:
- ctype = _winreg.EnumKey(mimedb, i)
+ yield _winreg.EnumKey(mimedb, i)
except EnvironmentError:
break
- try:
- ctype = ctype.encode(default_encoding) # omit in 3.x!
- except UnicodeEncodeError:
- pass
- else:
- yield ctype
i += 1
default_encoding = sys.getdefaultencoding()
- with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT,
- r'MIME\Database\Content Type') as mimedb:
- for ctype in enum_types(mimedb):
+ with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, '') as hkcr:
+ for subkeyname in enum_types(hkcr):
try:
- with _winreg.OpenKey(mimedb, ctype) as key:
- suffix, datatype = _winreg.QueryValueEx(key,
- 'Extension')
+ with _winreg.OpenKey(hkcr, subkeyname) as subkey:
+ # Only check file extensions
+ if not subkeyname.startswith("."):
+ continue
+ # raises EnvironmentError if no 'Content Type' value
+ mimetype, datatype = _winreg.QueryValueEx(
+ subkey, 'Content Type')
+ if datatype != _winreg.REG_SZ:
+ continue
+ try:
+ mimetype = mimetype.encode(default_encoding)
+ except UnicodeEncodeError:
+ continue
+ self.add_type(mimetype, subkeyname, strict)
except EnvironmentError:
continue
- if datatype != _winreg.REG_SZ:
- continue
- try:
- suffix = suffix.encode(default_encoding) # omit in 3.x!
- except UnicodeEncodeError:
- continue
- self.add_type(ctype, suffix, strict)
-
def guess_type(url, strict=True):
"""Guess the type of a file based on its URL.
@@ -370,9 +366,10 @@ def read_mime_types(file):
f = open(file)
except IOError:
return None
- db = MimeTypes()
- db.readfp(f, True)
- return db.types_map[True]
+ with f:
+ db = MimeTypes()
+ db.readfp(f, True)
+ return db.types_map[True]
def _default_mime_types():
@@ -386,12 +383,14 @@ def _default_mime_types():
'.taz': '.tar.gz',
'.tz': '.tar.gz',
'.tbz2': '.tar.bz2',
+ '.txz': '.tar.xz',
}
encodings_map = {
'.gz': 'gzip',
'.Z': 'compress',
'.bz2': 'bzip2',
+ '.xz': 'xz',
}
# Before adding new types, make sure they are either registered with IANA,
@@ -432,11 +431,12 @@ def _default_mime_types():
'.hdf' : 'application/x-hdf',
'.htm' : 'text/html',
'.html' : 'text/html',
+ '.ico' : 'image/vnd.microsoft.icon',
'.ief' : 'image/ief',
'.jpe' : 'image/jpeg',
'.jpeg' : 'image/jpeg',
'.jpg' : 'image/jpeg',
- '.js' : 'application/x-javascript',
+ '.js' : 'application/javascript',
'.ksh' : 'text/plain',
'.latex' : 'application/x-latex',
'.m1v' : 'video/mpeg',
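
A quick illustration of the updated tables; the expected values assume the patched module and that no system mime.types files are consulted:

    import mimetypes
    mimetypes.init([])                        # skip system files for a predictable demo
    print(mimetypes.guess_type('app.js'))     # ('application/javascript', None)
    print(mimetypes.guess_type('logo.ico'))   # ('image/vnd.microsoft.icon', None)
    print(mimetypes.guess_type('dump.txz'))   # ('application/x-tar', 'xz')
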
diff --git a/Lib/modulefinder.py b/Lib/modulefinder.py
index 7f2bf8b..f6f84f3 100644
--- a/Lib/modulefinder.py
+++ b/Lib/modulefinder.py
@@ -516,7 +516,7 @@ class ModuleFinder:
# Print modules that may be missing, but then again, maybe not...
if maybe:
print
- print "Submodules thay appear to be missing, but could also be",
+ print "Submodules that appear to be missing, but could also be",
print "global names in the parent package:"
for name in maybe:
mods = self.badmodules[name].keys()
diff --git a/Lib/msilib/__init__.py b/Lib/msilib/__init__.py
index 63f3923..907a739 100644
--- a/Lib/msilib/__init__.py
+++ b/Lib/msilib/__init__.py
@@ -326,7 +326,7 @@ class Directory:
def add_file(self, file, src=None, version=None, language=None):
"""Add a file to the current component of the directory, starting a new one
- one if there is no current component. By default, the file name in the source
+ if there is no current component. By default, the file name in the source
and the file table will be identical. If the src file is specified, it is
interpreted relative to the current directory. Optionally, a version and a
language can be specified for the entry in the File table."""
diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py
index 530a8df..e4d520f 100644
--- a/Lib/multiprocessing/connection.py
+++ b/Lib/multiprocessing/connection.py
@@ -90,7 +90,7 @@ def arbitrary_address(family):
return tempfile.mktemp(prefix='listener-', dir=get_temp_dir())
elif family == 'AF_PIPE':
return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
- (os.getpid(), _mmap_counter.next()))
+ (os.getpid(), _mmap_counter.next()), dir="")
else:
raise ValueError('unrecognized family')
@@ -186,6 +186,8 @@ if sys.platform != 'win32':
'''
if duplex:
s1, s2 = socket.socketpair()
+ s1.setblocking(True)
+ s2.setblocking(True)
c1 = _multiprocessing.Connection(os.dup(s1.fileno()))
c2 = _multiprocessing.Connection(os.dup(s2.fileno()))
s1.close()
@@ -198,7 +200,6 @@ if sys.platform != 'win32':
return c1, c2
else:
-
from _multiprocessing import win32
def Pipe(duplex=True):
@@ -251,6 +252,7 @@ class SocketListener(object):
self._socket = socket.socket(getattr(socket, family))
try:
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ self._socket.setblocking(True)
self._socket.bind(address)
self._socket.listen(backlog)
self._address = self._socket.getsockname()
@@ -268,7 +270,15 @@ class SocketListener(object):
self._unlink = None
def accept(self):
- s, self._last_accepted = self._socket.accept()
+ while True:
+ try:
+ s, self._last_accepted = self._socket.accept()
+ except socket.error as e:
+ if e.args[0] != errno.EINTR:
+ raise
+ else:
+ break
+ s.setblocking(True)
fd = duplicate(s.fileno())
conn = _multiprocessing.Connection(fd)
s.close()
@@ -284,14 +294,16 @@ def SocketClient(address):
'''
Return a connection object connected to the socket given by `address`
'''
- family = address_type(address)
- s = socket.socket( getattr(socket, family) )
+ family = getattr(socket, address_type(address))
t = _init_timeout()
while 1:
+ s = socket.socket(family)
+ s.setblocking(True)
try:
s.connect(address)
except socket.error, e:
+ s.close()
if e.args[0] != errno.ECONNREFUSED or _check_timeout(t):
debug('failed to connect to address %s', address)
raise
@@ -348,7 +360,10 @@ if sys.platform == 'win32':
try:
win32.ConnectNamedPipe(handle, win32.NULL)
except WindowsError, e:
- if e.args[0] != win32.ERROR_PIPE_CONNECTED:
+ # ERROR_NO_DATA can occur if a client has already connected,
+ # written data and then disconnected -- see Issue 14725.
+ if e.args[0] not in (win32.ERROR_PIPE_CONNECTED,
+ win32.ERROR_NO_DATA):
raise
return _multiprocessing.PipeConnection(handle)
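
The accept() change above (and the waitpid() loop added in forking.py below) retries system calls interrupted by a signal instead of letting EINTR escape to the caller. The generic shape of that retry loop, as a hypothetical helper:

    import errno, socket

    def retry_on_eintr(func, *args):
        """Call func(*args), retrying while a signal interrupts it."""
        while True:
            try:
                return func(*args)
            except (OSError, socket.error) as e:
                if e.args[0] != errno.EINTR:
                    raise

    # e.g.  conn, addr = retry_on_eintr(listener_socket.accept)
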
diff --git a/Lib/multiprocessing/dummy/__init__.py b/Lib/multiprocessing/dummy/__init__.py
index 19a3b69..e3b126e 100644
--- a/Lib/multiprocessing/dummy/__init__.py
+++ b/Lib/multiprocessing/dummy/__init__.py
@@ -70,7 +70,8 @@ class DummyProcess(threading.Thread):
def start(self):
assert self._parent is current_process()
self._start_called = True
- self._parent._children[self] = None
+ if hasattr(self._parent, '_children'):
+ self._parent._children[self] = None
threading.Thread.start(self)
@property
diff --git a/Lib/multiprocessing/forking.py b/Lib/multiprocessing/forking.py
index 3fca8b1..6bddfb7 100644
--- a/Lib/multiprocessing/forking.py
+++ b/Lib/multiprocessing/forking.py
@@ -35,6 +35,7 @@
import os
import sys
import signal
+import errno
from multiprocessing import util, process
@@ -129,12 +130,17 @@ if sys.platform != 'win32':
def poll(self, flag=os.WNOHANG):
if self.returncode is None:
- try:
- pid, sts = os.waitpid(self.pid, flag)
- except os.error:
- # Child process not yet created. See #1731717
- # e.errno == errno.ECHILD == 10
- return None
+ while True:
+ try:
+ pid, sts = os.waitpid(self.pid, flag)
+ except os.error as e:
+ if e.errno == errno.EINTR:
+ continue
+ # Child process not yet created. See #1731717
+ # e.errno == errno.ECHILD == 10
+ return None
+ else:
+ break
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
@@ -336,7 +342,7 @@ else:
'''
Returns prefix of command line used for spawning a child process
'''
- if process.current_process()._identity==() and is_forking(sys.argv):
+ if getattr(process.current_process(), '_inheriting', False):
raise RuntimeError('''
Attempt to start a new process before the current process
has finished its bootstrapping phase.
@@ -355,12 +361,13 @@ else:
return [sys.executable, '--multiprocessing-fork']
else:
prog = 'from multiprocessing.forking import main; main()'
- return [_python_exe, '-c', prog, '--multiprocessing-fork']
+ opts = util._args_from_interpreter_flags()
+ return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork']
def main():
'''
- Run code specifed by data received over pipe
+ Run code specified by data received over pipe
'''
assert is_forking(sys.argv)
diff --git a/Lib/multiprocessing/managers.py b/Lib/multiprocessing/managers.py
index ffe5812..08d35d8 100644
--- a/Lib/multiprocessing/managers.py
+++ b/Lib/multiprocessing/managers.py
@@ -763,6 +763,7 @@ class BaseProxy(object):
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
+ token.address = self._token.address
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
diff --git a/Lib/multiprocessing/pool.py b/Lib/multiprocessing/pool.py
index bcbf7e3..04531b9 100644
--- a/Lib/multiprocessing/pool.py
+++ b/Lib/multiprocessing/pool.py
@@ -68,6 +68,23 @@ def mapstar(args):
# Code run by worker processes
#
+class MaybeEncodingError(Exception):
+ """Wraps possible unpickleable errors, so they can be
+ safely sent through the socket."""
+
+ def __init__(self, exc, value):
+ self.exc = repr(exc)
+ self.value = repr(value)
+ super(MaybeEncodingError, self).__init__(self.exc, self.value)
+
+ def __str__(self):
+ return "Error sending result: '%s'. Reason: '%s'" % (self.value,
+ self.exc)
+
+ def __repr__(self):
+ return "<MaybeEncodingError: %s>" % str(self)
+
+
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
put = outqueue.put
@@ -96,7 +113,13 @@ def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
result = (True, func(*args, **kwds))
except Exception, e:
result = (False, e)
- put((job, i, result))
+ try:
+ put((job, i, result))
+ except Exception as e:
+ wrapped = MaybeEncodingError(e, result[1])
+ debug("Possible encoding error while sending result: %s" % (
+ wrapped))
+ put((job, i, (False, wrapped)))
completed += 1
debug('worker exiting after %d tasks' % completed)
@@ -146,7 +169,8 @@ class Pool(object):
self._task_handler = threading.Thread(
target=Pool._handle_tasks,
- args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
+ args=(self._taskqueue, self._quick_put, self._outqueue,
+ self._pool, self._cache)
)
self._task_handler.daemon = True
self._task_handler._state = RUN
@@ -306,7 +330,7 @@ class Pool(object):
debug('worker handler exiting')
@staticmethod
- def _handle_tasks(taskqueue, put, outqueue, pool):
+ def _handle_tasks(taskqueue, put, outqueue, pool, cache):
thread = threading.current_thread()
for taskseq, set_length in iter(taskqueue.get, None):
@@ -317,9 +341,12 @@ class Pool(object):
break
try:
put(task)
- except IOError:
- debug('could not put task on queue')
- break
+ except Exception as e:
+ job, ind = task[:2]
+ try:
+ cache[job]._set(ind, (False, e))
+ except KeyError:
+ pass
else:
if set_length:
debug('doing set_length()')
@@ -466,7 +493,8 @@ class Pool(object):
# We must wait for the worker handler to exit before terminating
# workers because we don't want workers to be restarted behind our back.
debug('joining worker handler')
- worker_handler.join()
+ if threading.current_thread() is not worker_handler:
+ worker_handler.join(1e100)
# Terminate workers which haven't already finished.
if pool and hasattr(pool[0], 'terminate'):
@@ -476,10 +504,12 @@ class Pool(object):
p.terminate()
debug('joining task handler')
- task_handler.join(1e100)
+ if threading.current_thread() is not task_handler:
+ task_handler.join(1e100)
debug('joining result handler')
- result_handler.join(1e100)
+ if threading.current_thread() is not result_handler:
+ result_handler.join(1e100)
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
@@ -539,6 +569,8 @@ class ApplyResult(object):
self._cond.release()
del self._cache[self._job]
+AsyncResult = ApplyResult # create alias -- see #17805
+
#
# Class whose instances are returned by `Pool.map_async()`
#
@@ -553,6 +585,7 @@ class MapResult(ApplyResult):
if chunksize <= 0:
self._number_left = 0
self._ready = True
+ del cache[self._job]
else:
self._number_left = length//chunksize + bool(length % chunksize)
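
MaybeEncodingError means a worker result that cannot be pickled is reported back to the caller instead of being lost, which previously left .get() blocked forever. A sketch of that failure mode, assuming the patched pool (the Unpicklable class is contrived purely to force the error):

    import multiprocessing

    class Unpicklable(object):
        def __reduce__(self):                 # force a pickling failure
            raise TypeError('cannot pickle me')

    def make_bad(_):
        return Unpicklable()

    if __name__ == '__main__':
        pool = multiprocessing.Pool(1)
        try:
            pool.apply_async(make_bad, (0,)).get(timeout=10)
        except Exception as exc:              # MaybeEncodingError with the patch
            print('got: %r' % (exc,))
        pool.terminate()
        pool.join()
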
diff --git a/Lib/multiprocessing/process.py b/Lib/multiprocessing/process.py
index 0697e74..44c1e44 100644
--- a/Lib/multiprocessing/process.py
+++ b/Lib/multiprocessing/process.py
@@ -262,10 +262,10 @@ class Process(object):
except SystemExit, e:
if not e.args:
exitcode = 1
- elif type(e.args[0]) is int:
+ elif isinstance(e.args[0], int):
exitcode = e.args[0]
else:
- sys.stderr.write(e.args[0] + '\n')
+ sys.stderr.write(str(e.args[0]) + '\n')
sys.stderr.flush()
exitcode = 1
except:
diff --git a/Lib/multiprocessing/synchronize.py b/Lib/multiprocessing/synchronize.py
index 4b077e5..d845f72 100644
--- a/Lib/multiprocessing/synchronize.py
+++ b/Lib/multiprocessing/synchronize.py
@@ -226,7 +226,7 @@ class Condition(object):
num_waiters = (self._sleeping_count._semlock._get_value() -
self._woken_count._semlock._get_value())
except Exception:
- num_waiters = 'unkown'
+ num_waiters = 'unknown'
return '<Condition(%s, %s)>' % (self._lock, num_waiters)
def wait(self, timeout=None):
diff --git a/Lib/multiprocessing/util.py b/Lib/multiprocessing/util.py
index c65dd99..092b61c 100644
--- a/Lib/multiprocessing/util.py
+++ b/Lib/multiprocessing/util.py
@@ -32,11 +32,13 @@
# SUCH DAMAGE.
#
+import os
import itertools
import weakref
import atexit
import threading # we want threading to install it's
# cleanup function before multiprocessing does
+from subprocess import _args_from_interpreter_flags
from multiprocessing.process import current_process, active_children
@@ -183,6 +185,7 @@ class Finalize(object):
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, _finalizer_counter.next())
+ self._pid = os.getpid()
_finalizer_registry[self._key] = self
@@ -195,9 +198,13 @@ class Finalize(object):
except KeyError:
sub_debug('finalizer no longer registered')
else:
- sub_debug('finalizer calling %s with args %s and kwargs %s',
- self._callback, self._args, self._kwargs)
- res = self._callback(*self._args, **self._kwargs)
+ if self._pid != os.getpid():
+ sub_debug('finalizer ignored because different process')
+ res = None
+ else:
+ sub_debug('finalizer calling %s with args %s and kwargs %s',
+ self._callback, self._args, self._kwargs)
+ res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
@@ -247,6 +254,12 @@ def _run_finalizers(minpriority=None):
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
+ if _finalizer_registry is None:
+ # This function may be called after this module's globals are
+ # destroyed. See the _exit_function function in this module for more
+ # notes.
+ return
+
if minpriority is None:
f = lambda p : p[0][0] is not None
else:
@@ -278,21 +291,38 @@ def is_exiting():
_exiting = False
-def _exit_function():
+def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
+ active_children=active_children,
+ current_process=current_process):
+ # NB: we hold on to references to functions in the arglist due to the
+ # situation described below, where this function is called after this
+ # module's globals are destroyed.
+
global _exiting
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
- for p in active_children():
- if p._daemonic:
- info('calling terminate() for daemon %s', p.name)
- p._popen.terminate()
-
- for p in active_children():
- info('calling join() for process %s', p.name)
- p.join()
+ if current_process() is not None:
+ # NB: we check if the current process is None here because if
+ # it's None, any call to ``active_children()`` will throw an
+ # AttributeError (active_children winds up trying to get
+ # attributes from util._current_process). This happens in a
+ # variety of shutdown circumstances that are not well-understood
+ # because module-scope variables are not apparently supposed to
+ # be destroyed until after this function is called. However,
+ # they are indeed destroyed before this function is called. See
+ # issues 9775 and 15881. Also related: 4106, 9205, and 9207.
+
+ for p in active_children():
+ if p._daemonic:
+ info('calling terminate() for daemon %s', p.name)
+ p._popen.terminate()
+
+ for p in active_children():
+ info('calling join() for process %s', p.name)
+ p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
@@ -305,10 +335,13 @@ atexit.register(_exit_function)
class ForkAwareThreadLock(object):
def __init__(self):
+ self._reset()
+ register_after_fork(self, ForkAwareThreadLock._reset)
+
+ def _reset(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
- register_after_fork(self, ForkAwareThreadLock.__init__)
class ForkAwareLocal(threading.local):
def __init__(self):
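
Finalize now records the pid of the registering process, so parent-only cleanup does not rerun in every forked child, and ForkAwareThreadLock recreates its lock after fork. A sketch of the finalizer side; the resource, path and callback here are made up:

    import os
    from multiprocessing.util import Finalize

    class Resource(object):
        pass

    def cleanup(path):
        print('pid %d cleaning up %s' % (os.getpid(), path))

    res = Resource()
    Finalize(res, cleanup, args=('/tmp/example-dir',), exitpriority=0)
    # With the patch, a forked child skips this finalizer at exit because its
    # os.getpid() no longer matches the pid recorded at registration time.
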
diff --git a/Lib/netrc.py b/Lib/netrc.py
index 0fd37e3..4b18973 100644
--- a/Lib/netrc.py
+++ b/Lib/netrc.py
@@ -2,7 +2,9 @@
# Module and documentation by Eric S. Raymond, 21 Dec 1998
-import os, shlex
+import os, stat, shlex
+if os.name == 'posix':
+ import pwd
__all__ = ["netrc", "NetrcParseError"]
@@ -21,6 +23,7 @@ class NetrcParseError(Exception):
class netrc:
def __init__(self, file=None):
+ default_netrc = file is None
if file is None:
try:
file = os.path.join(os.environ['HOME'], ".netrc")
@@ -29,9 +32,9 @@ class netrc:
self.hosts = {}
self.macros = {}
with open(file) as fp:
- self._parse(file, fp)
+ self._parse(file, fp, default_netrc)
- def _parse(self, file, fp):
+ def _parse(self, file, fp, default_netrc):
lexer = shlex.shlex(fp)
lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
lexer.commenters = lexer.commenters.replace('#', '')
@@ -88,6 +91,26 @@ class netrc:
elif tt == 'account':
account = lexer.get_token()
elif tt == 'password':
+ if os.name == 'posix' and default_netrc:
+ prop = os.fstat(fp.fileno())
+ if prop.st_uid != os.getuid():
+ try:
+ fowner = pwd.getpwuid(prop.st_uid)[0]
+ except KeyError:
+ fowner = 'uid %s' % prop.st_uid
+ try:
+ user = pwd.getpwuid(os.getuid())[0]
+ except KeyError:
+ user = 'uid %s' % os.getuid()
+ raise NetrcParseError(
+ ("~/.netrc file owner (%s) does not match"
+ " current user (%s)") % (fowner, user),
+ file, lexer.lineno)
+ if (prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO)):
+ raise NetrcParseError(
+ "~/.netrc access too permissive: access"
+ " permissions must restrict access to only"
+ " the owner", file, lexer.lineno)
password = lexer.get_token()
else:
raise NetrcParseError("bad follower token %r" % tt,
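
The new netrc check applies only to the default ~/.netrc (not a file passed explicitly) and only when a password token is parsed: the file must belong to the current user and carry no group/other permission bits. A sketch of bringing an existing file in line with the permission part of the check:

    import os, stat

    path = os.path.expanduser('~/.netrc')
    if os.path.exists(path):
        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)   # 0600 clears group/other bits
        mode = os.stat(path).st_mode
        assert not mode & (stat.S_IRWXG | stat.S_IRWXO)
    # Ownership must already be correct; chmod cannot fix a file owned by
    # another user.
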
diff --git a/Lib/nntplib.py b/Lib/nntplib.py
index 2dc82a9..81ebe4b 100644
--- a/Lib/nntplib.py
+++ b/Lib/nntplib.py
@@ -37,6 +37,13 @@ __all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError",
"error_reply","error_temp","error_perm","error_proto",
"error_data",]
+# maximal line length when calling readline(). This is to prevent
+# reading arbitrary length lines. RFC 3977 limits NNTP line length to
+# 512 characters, including CRLF. We have selected 2048 just to be on
+# the safe side.
+_MAXLINE = 2048
+
+
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
"""Base class for all nntplib exceptions"""
@@ -200,7 +207,9 @@ class NNTP:
def getline(self):
"""Internal: return one line from the server, stripping CRLF.
Raise EOFError if the connection is closed."""
- line = self.file.readline()
+ line = self.file.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise NNTPDataError('line too long')
if self.debugging > 1:
print '*get*', repr(line)
if not line: raise EOFError
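
The _MAXLINE guard bounds every readline() against untrusted network data so a hostile server cannot make the client accumulate an arbitrarily long string. The same pattern in isolation, substituting a plain ValueError for nntplib's NNTPDataError:

    _MAXLINE = 2048   # RFC 3977 caps NNTP lines at 512 bytes; 2048 leaves headroom

    def bounded_readline(fileobj, maxline=_MAXLINE):
        line = fileobj.readline(maxline + 1)
        if len(line) > maxline:
            raise ValueError('line too long')
        return line
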
diff --git a/Lib/ntpath.py b/Lib/ntpath.py
index 4f8f423..228bbb3 100644
--- a/Lib/ntpath.py
+++ b/Lib/ntpath.py
@@ -59,73 +59,79 @@ def isabs(s):
# Join two (or more) paths.
-
-def join(a, *p):
- """Join two or more pathname components, inserting "\\" as needed.
- If any component is an absolute path, all previous path components
- will be discarded."""
- path = a
- for b in p:
- b_wins = 0 # set to 1 iff b makes path irrelevant
- if path == "":
- b_wins = 1
-
- elif isabs(b):
- # This probably wipes out path so far. However, it's more
- # complicated if path begins with a drive letter:
- # 1. join('c:', '/a') == 'c:/a'
- # 2. join('c:/', '/a') == 'c:/a'
- # But
- # 3. join('c:/a', '/b') == '/b'
- # 4. join('c:', 'd:/') = 'd:/'
- # 5. join('c:/', 'd:/') = 'd:/'
- if path[1:2] != ":" or b[1:2] == ":":
- # Path doesn't start with a drive letter, or cases 4 and 5.
- b_wins = 1
-
- # Else path has a drive letter, and b doesn't but is absolute.
- elif len(path) > 3 or (len(path) == 3 and
- path[-1] not in "/\\"):
- # case 3
- b_wins = 1
-
- if b_wins:
- path = b
- else:
- # Join, and ensure there's a separator.
- assert len(path) > 0
- if path[-1] in "/\\":
- if b and b[0] in "/\\":
- path += b[1:]
- else:
- path += b
- elif path[-1] == ":":
- path += b
- elif b:
- if b[0] in "/\\":
- path += b
- else:
- path += "\\" + b
- else:
- # path is not empty and does not end with a backslash,
- # but b is empty; since, e.g., split('a/') produces
- # ('a', ''), it's best if join() adds a backslash in
- # this case.
- path += '\\'
-
- return path
+def join(path, *paths):
+ """Join two or more pathname components, inserting "\\" as needed."""
+ result_drive, result_path = splitdrive(path)
+ for p in paths:
+ p_drive, p_path = splitdrive(p)
+ if p_path and p_path[0] in '\\/':
+ # Second path is absolute
+ if p_drive or not result_drive:
+ result_drive = p_drive
+ result_path = p_path
+ continue
+ elif p_drive and p_drive != result_drive:
+ if p_drive.lower() != result_drive.lower():
+ # Different drives => ignore the first path entirely
+ result_drive = p_drive
+ result_path = p_path
+ continue
+ # Same drive in different case
+ result_drive = p_drive
+ # Second path is relative to the first
+ if result_path and result_path[-1] not in '\\/':
+ result_path = result_path + '\\'
+ result_path = result_path + p_path
+ ## add separator between UNC and non-absolute path
+ if (result_path and result_path[0] not in '\\/' and
+ result_drive and result_drive[-1:] != ':'):
+ return result_drive + sep + result_path
+ return result_drive + result_path
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
- """Split a pathname into drive and path specifiers. Returns a 2-tuple
-"(drive,path)"; either part may be empty"""
- if p[1:2] == ':':
- return p[0:2], p[2:]
- return '', p
+ """Split a pathname into drive/UNC sharepoint and relative path specifiers.
+ Returns a 2-tuple (drive_or_unc, path); either part may be empty.
+
+ If you assign
+ result = splitdrive(p)
+ It is always true that:
+ result[0] + result[1] == p
+ If the path contained a drive letter, drive_or_unc will contain everything
+ up to and including the colon. e.g. splitdrive("c:/dir") returns ("c:", "/dir")
+
+ If the path contained a UNC path, the drive_or_unc will contain the host name
+ and share up to but not including the fourth directory separator character.
+ e.g. splitdrive("//host/computer/dir") returns ("//host/computer", "/dir")
+
+ Paths cannot contain both a drive letter and a UNC path.
+
+ """
+ if len(p) > 1:
+ normp = p.replace(altsep, sep)
+ if (normp[0:2] == sep*2) and (normp[2] != sep):
+ # is a UNC path:
+ # vvvvvvvvvvvvvvvvvvvv drive letter or UNC path
+ # \\machine\mountpoint\directory\etc\...
+ # directory ^^^^^^^^^^^^^^^
+ index = normp.find(sep, 2)
+ if index == -1:
+ return '', p
+ index2 = normp.find(sep, index + 1)
+ # a UNC path can't have two slashes in a row
+ # (after the initial two)
+ if index2 == index + 1:
+ return '', p
+ if index2 == -1:
+ index2 = len(p)
+ return p[:index2], p[index2:]
+ if normp[1] == ':':
+ return p[:2], p[2:]
+ return '', p
# Parse UNC paths
def splitunc(p):
@@ -144,15 +150,18 @@ def splitunc(p):
# vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
# \\machine\mountpoint\directories...
# directory ^^^^^^^^^^^^^^^
- normp = normcase(p)
- index = normp.find('\\', 2)
- if index == -1:
- ##raise RuntimeError, 'illegal UNC path: "' + p + '"'
- return ("", p)
- index = normp.find('\\', index + 1)
- if index == -1:
- index = len(p)
- return p[:index], p[index:]
+ normp = p.replace('\\', '/')
+ index = normp.find('/', 2)
+ if index <= 2:
+ return '', p
+ index2 = normp.find('/', index + 1)
+ # a UNC path can't have two slashes in a row
+ # (after the initial two)
+ if index2 == index + 1:
+ return '', p
+ if index2 == -1:
+ index2 = len(p)
+ return p[:index2], p[index2:]
return '', p
@@ -322,6 +331,13 @@ def expandvars(path):
return path
import string
varchars = string.ascii_letters + string.digits + '_-'
+ if isinstance(path, unicode):
+ encoding = sys.getfilesystemencoding()
+ def getenv(var):
+ return os.environ[var.encode(encoding)].decode(encoding)
+ else:
+ def getenv(var):
+ return os.environ[var]
res = ''
index = 0
pathlen = len(path)
@@ -350,9 +366,9 @@ def expandvars(path):
index = pathlen - 1
else:
var = path[:index]
- if var in os.environ:
- res = res + os.environ[var]
- else:
+ try:
+ res = res + getenv(var)
+ except KeyError:
res = res + '%' + var + '%'
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
@@ -364,9 +380,9 @@ def expandvars(path):
try:
index = path.index('}')
var = path[:index]
- if var in os.environ:
- res = res + os.environ[var]
- else:
+ try:
+ res = res + getenv(var)
+ except KeyError:
res = res + '${' + var + '}'
except ValueError:
res = res + '${' + path
@@ -379,9 +395,9 @@ def expandvars(path):
var = var + c
index = index + 1
c = path[index:index + 1]
- if var in os.environ:
- res = res + os.environ[var]
- else:
+ try:
+ res = res + getenv(var)
+ except KeyError:
res = res + '$' + var
if c != '':
index = index - 1
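
The rewritten join() and splitdrive() treat a UNC share point as a drive-like prefix. Expected behaviour with the patched module (ntpath is importable on any platform, so this is easy to try):

    import ntpath

    print(ntpath.splitdrive(r'c:\dir\file'))             # ('c:', '\\dir\\file')
    print(ntpath.splitdrive(r'\\host\share\dir'))        # ('\\\\host\\share', '\\dir')
    print(ntpath.join('c:/', 'd:/'))                     # 'd:/'
    print(ntpath.join(r'\\host\share', 'dir', 'x.txt'))  # '\\\\host\\share\\dir\\x.txt'
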
diff --git a/Lib/numbers.py b/Lib/numbers.py
index 2592643..bdc6dd6 100644
--- a/Lib/numbers.py
+++ b/Lib/numbers.py
@@ -303,7 +303,7 @@ class Integral(Rational):
raise NotImplementedError
def __index__(self):
- """index(self)"""
+ """Called whenever an index is needed, such as in slicing"""
return long(self)
@abstractmethod
diff --git a/Lib/optparse.py b/Lib/optparse.py
index 731a2bb..433276d 100644
--- a/Lib/optparse.py
+++ b/Lib/optparse.py
@@ -204,7 +204,6 @@ class HelpFormatter:
short_first):
self.parser = None
self.indent_increment = indent_increment
- self.help_position = self.max_help_position = max_help_position
if width is None:
try:
width = int(os.environ['COLUMNS'])
@@ -212,6 +211,8 @@ class HelpFormatter:
width = 80
width -= 2
self.width = width
+ self.help_position = self.max_help_position = \
+ min(max_help_position, max(width - 20, indent_increment * 2))
self.current_indent = 0
self.level = 0
self.help_width = None # computed later
@@ -256,7 +257,7 @@ class HelpFormatter:
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
"""
- text_width = self.width - self.current_indent
+ text_width = max(self.width - self.current_indent, 11)
indent = " "*self.current_indent
return textwrap.fill(text,
text_width,
@@ -337,7 +338,7 @@ class HelpFormatter:
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
- self.help_width = self.width - self.help_position
+ self.help_width = max(self.width - self.help_position, 11)
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
@@ -1471,7 +1472,7 @@ class OptionParser (OptionContainer):
"""_match_long_opt(opt : string) -> string
Determine which long option string 'opt' matches, ie. which one
- it is an unambiguous abbrevation for. Raises BadOptionError if
+ it is an unambiguous abbreviation for. Raises BadOptionError if
'opt' doesn't unambiguously match any long option string.
"""
return _match_abbrev(opt, self._long_opt)
diff --git a/Lib/os.py b/Lib/os.py
index 6d598f3..53fad6b 100644
--- a/Lib/os.py
+++ b/Lib/os.py
@@ -1,4 +1,4 @@
-r"""OS routines for Mac, NT, or Posix depending on what system we're on.
+r"""OS routines for NT or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
@@ -229,11 +229,12 @@ def walk(top, topdown=True, onerror=None, followlinks=False):
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
- subdirectories whose names remain in dirnames; this can be used to prune
- the search, or to impose a specific order of visiting. Modifying
- dirnames when topdown is false is ineffective, since the directories in
- dirnames have already been generated by the time dirnames itself is
- generated.
+ subdirectories whose names remain in dirnames; this can be used to prune the
+ search, or to impose a specific order of visiting. Modifying dirnames when
+ topdown is false is ineffective, since the directories in dirnames have
+ already been generated by the time dirnames itself is generated. No matter
+ the value of topdown, the list of subdirectories is retrieved before the
+ tuples for the directory and its subdirectories are generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
@@ -261,6 +262,7 @@ def walk(top, topdown=True, onerror=None, followlinks=False):
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
+
"""
islink, join, isdir = path.islink, path.join, path.isdir
diff --git a/Lib/pdb.py b/Lib/pdb.py
index 5468d3f..113b4e0 100755
--- a/Lib/pdb.py
+++ b/Lib/pdb.py
@@ -1095,7 +1095,7 @@ command with a 'global' command, e.g.:
def help_run(self):
print """run [args...]
Restart the debugged python program. If a string is supplied, it is
-splitted with "shlex" and the result is used as the new sys.argv.
+split with "shlex" and the result is used as the new sys.argv.
History, breakpoints, actions and debugger options are preserved.
"restart" is an alias for "run"."""
diff --git a/Lib/pickle.py b/Lib/pickle.py
index 5b95cba..299de16 100644
--- a/Lib/pickle.py
+++ b/Lib/pickle.py
@@ -269,7 +269,7 @@ class Pickler:
def save(self, obj):
# Check for persistent id (defined by a subclass)
pid = self.persistent_id(obj)
- if pid:
+ if pid is not None:
self.save_pers(pid)
return
@@ -962,7 +962,7 @@ class Unpickler:
rep = self.readline()[:-1]
for q in "\"'": # double or single quote
if rep.startswith(q):
- if not rep.endswith(q):
+ if len(rep) < 2 or not rep.endswith(q):
raise ValueError, "insecure string pickle"
rep = rep[len(q):-len(q)]
break
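
Comparing the persistent id with `is not None` means falsy but valid ids (0, '', False) are now honoured. A sketch using a contrived id scheme; the DB mapping and both classes are hypothetical:

    import pickle
    from StringIO import StringIO

    DB = {0: 'object zero'}

    class DBPickler(pickle.Pickler):
        def persistent_id(self, obj):
            if obj is DB[0]:
                return 0          # falsy, but a perfectly good persistent id
            return None

    class DBUnpickler(pickle.Unpickler):
        def persistent_load(self, pid):
            return DB[pid]

    buf = StringIO()
    DBPickler(buf, 2).dump(DB[0])   # protocol 2 keeps the id as an int
    buf.seek(0)
    assert DBUnpickler(buf).load() is DB[0]
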
diff --git a/Lib/pickletools.py b/Lib/pickletools.py
index d717728..8de53dd 100644
--- a/Lib/pickletools.py
+++ b/Lib/pickletools.py
@@ -804,7 +804,7 @@ stackslice = StackObject(
obtype=StackObject,
doc="""An object representing a contiguous slice of the stack.
- This is used in conjuction with markobject, to represent all
+ This is used in conjunction with markobject, to represent all
of the stack following the topmost markobject. For example,
the POP_MARK opcode changes the stack from
@@ -1929,7 +1929,7 @@ def dis(pickle, out=None, memo=None, indentlevel=4):
stack = [] # crude emulation of unpickler stack
if memo is None:
- memo = {} # crude emulation of unpicker memo
+ memo = {} # crude emulation of unpickler memo
maxproto = -1 # max protocol number seen
markstack = [] # bytecode positions of MARK opcodes
indentchunk = ' ' * indentlevel
diff --git a/Lib/plat-generic/regen b/Lib/plat-generic/regen
index a20cdc1..c96167d 100755
--- a/Lib/plat-generic/regen
+++ b/Lib/plat-generic/regen
@@ -1,3 +1,3 @@
#! /bin/sh
set -v
-python$EXE ../../Tools/scripts/h2py.py -i '(u_long)' /usr/include/netinet/in.h
+eval $PYTHON_FOR_BUILD ../../Tools/scripts/h2py.py -i "'(u_long)'" /usr/include/netinet/in.h
diff --git a/Lib/plat-mac/EasyDialogs.py b/Lib/plat-mac/EasyDialogs.py
index 129cf2c..1d3edb3 100644
--- a/Lib/plat-mac/EasyDialogs.py
+++ b/Lib/plat-mac/EasyDialogs.py
@@ -243,8 +243,15 @@ def AskYesNoCancel(question, default = 0, yes=None, no=None, cancel=None, id=262
+# The deprecated Carbon QuickDraw APIs are no longer available as of
+# OS X 10.8. Raise an ImportError here in that case so that callers
+# of EasyDialogs, like BuildApplet, will do the right thing.
+
+try:
+ screenbounds = Qd.GetQDGlobalsScreenBits().bounds
+except AttributeError:
+ raise ImportError("QuickDraw APIs not available")
-screenbounds = Qd.GetQDGlobalsScreenBits().bounds
screenbounds = screenbounds[0]+4, screenbounds[1]+4, \
screenbounds[2]-4, screenbounds[3]-4
diff --git a/Lib/platform.py b/Lib/platform.py
index d20c39d..df2af83 100755
--- a/Lib/platform.py
+++ b/Lib/platform.py
@@ -228,7 +228,7 @@ def _dist_try_harder(distname,version,id):
return 'OpenLinux',pkg[1],id
if os.path.isdir('/usr/lib/setup'):
- # Check for slackware verson tag file (thanks to Greg Andruk)
+ # Check for slackware version tag file (thanks to Greg Andruk)
verfiles = os.listdir('/usr/lib/setup')
for n in range(len(verfiles)-1, -1, -1):
if verfiles[n][:14] != 'slack-version-':
@@ -280,7 +280,7 @@ def _parse_release_file(firstline):
if m is not None:
return tuple(m.groups())
- # Unkown format... take the first two words
+ # Unknown format... take the first two words
l = string.split(string.strip(firstline))
if l:
version = l[0]
@@ -673,8 +673,13 @@ def win32_ver(release='',version='',csd='',ptype=''):
release = '7'
else:
release = '2008ServerR2'
+ elif min == 2:
+ if product_type == VER_NT_WORKSTATION:
+ release = '8'
+ else:
+ release = '2012Server'
else:
- release = 'post2008Server'
+ release = 'post2012Server'
else:
if not release:
@@ -795,7 +800,7 @@ def mac_ver(release='',versioninfo=('','',''),machine=''):
versioninfo, machine) with versioninfo being a tuple (version,
dev_stage, non_release_version).
- Entries which cannot be determined are set to the paramter values
+ Entries which cannot be determined are set to the parameter values
which default to ''. All tuple entries are strings.
"""
@@ -1020,16 +1025,38 @@ def _syscmd_file(target,default=''):
case the command should fail.
"""
+
+ # We do the import here to avoid a bootstrap issue.
+ # See c73b90b6dadd changeset.
+ #
+ # [..]
+ # ranlib libpython2.7.a
+ # gcc -o python \
+ # Modules/python.o \
+ # libpython2.7.a -lsocket -lnsl -ldl -lm
+ # Traceback (most recent call last):
+ # File "./setup.py", line 8, in <module>
+ # from platform import machine as platform_machine
+ # File "[..]/build/Lib/platform.py", line 116, in <module>
+ # import sys,string,os,re,subprocess
+ # File "[..]/build/Lib/subprocess.py", line 429, in <module>
+ # import select
+ # ImportError: No module named select
+
+ import subprocess
+
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
- target = _follow_symlinks(target).replace('"', '\\"')
+ target = _follow_symlinks(target)
try:
- f = os.popen('file "%s" 2> %s' % (target, DEV_NULL))
+ proc = subprocess.Popen(['file', target],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
except (AttributeError,os.error):
return default
- output = string.strip(f.read())
- rc = f.close()
+ output = proc.communicate()[0]
+ rc = proc.wait()
if not output or rc:
return default
else:
@@ -1344,6 +1371,14 @@ _ironpython_sys_version_parser = re.compile(
'(?: \(([\d\.]+)\))?'
' on (.NET [\d\.]+)')
+# IronPython covering 2.6 and 2.7
+_ironpython26_sys_version_parser = re.compile(
+ r'([\d.]+)\s*'
+ '\(IronPython\s*'
+ '[\d.]+\s*'
+ '\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'
+)
+
_pypy_sys_version_parser = re.compile(
r'([\w.+]+)\s*'
'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
@@ -1381,19 +1416,24 @@ def _sys_version(sys_version=None):
return result
# Parse it
- if sys_version[:10] == 'IronPython':
+ if 'IronPython' in sys_version:
# IronPython
name = 'IronPython'
- match = _ironpython_sys_version_parser.match(sys_version)
+ if sys_version.startswith('IronPython'):
+ match = _ironpython_sys_version_parser.match(sys_version)
+ else:
+ match = _ironpython26_sys_version_parser.match(sys_version)
+
if match is None:
raise ValueError(
'failed to parse IronPython sys.version: %s' %
repr(sys_version))
+
version, alt_version, compiler = match.groups()
buildno = ''
builddate = ''
- elif sys.platform[:4] == 'java':
+ elif sys.platform.startswith('java'):
# Jython
name = 'Jython'
match = _sys_version_parser.match(sys_version)
diff --git a/Lib/plistlib.py b/Lib/plistlib.py
index 51944ee..42897b8 100644
--- a/Lib/plistlib.py
+++ b/Lib/plistlib.py
@@ -262,8 +262,8 @@ class PlistWriter(DumbXMLWriter):
def writeData(self, data):
self.beginElement("data")
self.indentLevel -= 1
- maxlinelength = 76 - len(self.indent.replace("\t", " " * 8) *
- self.indentLevel)
+ maxlinelength = max(16, 76 - len(self.indent.replace("\t", " " * 8) *
+ self.indentLevel))
for line in data.asBase64(maxlinelength).split("\n"):
if line:
self.writeln(line)
diff --git a/Lib/poplib.py b/Lib/poplib.py
index e2b33ef..dc7cbdf 100644
--- a/Lib/poplib.py
+++ b/Lib/poplib.py
@@ -321,7 +321,7 @@ else:
hostname - the hostname of the pop3 over ssl server
port - port number
- keyfile - PEM formatted file that countains your private key
+ keyfile - PEM formatted file that contains your private key
certfile - PEM formatted certificate chain file
See the methods of the parent class POP3 for more documentation.
diff --git a/Lib/posixpath.py b/Lib/posixpath.py
index aae38d5..0378004 100644
--- a/Lib/posixpath.py
+++ b/Lib/posixpath.py
@@ -17,6 +17,14 @@ import genericpath
import warnings
from genericpath import *
+try:
+ _unicode = unicode
+except NameError:
+ # If Python is built without Unicode support, the unicode type
+ # will not exist. Fake one.
+ class _unicode(object):
+ pass
+
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime","islink","exists","lexists","isdir","isfile",
@@ -60,7 +68,8 @@ def isabs(s):
def join(a, *p):
"""Join two or more pathname components, inserting '/' as needed.
If any component is an absolute path, all previous path components
- will be discarded."""
+ will be discarded. An empty last part will result in a path that
+ ends with a separator."""
path = a
for b in p:
if b.startswith('/'):
@@ -267,8 +276,8 @@ def expanduser(path):
except KeyError:
return path
userhome = pwent.pw_dir
- userhome = userhome.rstrip('/') or userhome
- return userhome + path[i:]
+ userhome = userhome.rstrip('/')
+ return (userhome + path[i:]) or '/'
# Expand paths containing shell variable substitutions.
@@ -276,28 +285,43 @@ def expanduser(path):
# Non-existent variables are left unchanged.
_varprog = None
+_uvarprog = None
def expandvars(path):
"""Expand shell variables of form $var and ${var}. Unknown variables
are left unchanged."""
- global _varprog
+ global _varprog, _uvarprog
if '$' not in path:
return path
- if not _varprog:
- import re
- _varprog = re.compile(r'\$(\w+|\{[^}]*\})')
+ if isinstance(path, _unicode):
+ if not _varprog:
+ import re
+ _varprog = re.compile(r'\$(\w+|\{[^}]*\})')
+ varprog = _varprog
+ encoding = sys.getfilesystemencoding()
+ else:
+ if not _uvarprog:
+ import re
+ _uvarprog = re.compile(_unicode(r'\$(\w+|\{[^}]*\})'), re.UNICODE)
+ varprog = _uvarprog
+ encoding = None
i = 0
while True:
- m = _varprog.search(path, i)
+ m = varprog.search(path, i)
if not m:
break
i, j = m.span(0)
name = m.group(1)
if name.startswith('{') and name.endswith('}'):
name = name[1:-1]
+ if encoding:
+ name = name.encode(encoding)
if name in os.environ:
tail = path[j:]
- path = path[:i] + os.environ[name]
+ value = os.environ[name]
+ if encoding:
+ value = value.decode(encoding)
+ path = path[:i] + value
i = len(path)
path += tail
else:
@@ -312,7 +336,7 @@ def expandvars(path):
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
# Preserve unicode (if path is unicode)
- slash, dot = (u'/', u'.') if isinstance(path, unicode) else ('/', '.')
+ slash, dot = (u'/', u'.') if isinstance(path, _unicode) else ('/', '.')
if path == '':
return dot
initial_slashes = path.startswith('/')
@@ -341,7 +365,7 @@ def normpath(path):
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
- if isinstance(path, unicode):
+ if isinstance(path, _unicode):
cwd = os.getcwdu()
else:
cwd = os.getcwd()
@@ -355,45 +379,52 @@ def abspath(path):
def realpath(filename):
"""Return the canonical path of the specified filename, eliminating any
symbolic links encountered in the path."""
- if isabs(filename):
- bits = ['/'] + filename.split('/')[1:]
- else:
- bits = [''] + filename.split('/')
-
- for i in range(2, len(bits)+1):
- component = join(*bits[0:i])
- # Resolve symbolic links.
- if islink(component):
- resolved = _resolve_link(component)
- if resolved is None:
- # Infinite loop -- return original component + rest of the path
- return abspath(join(*([component] + bits[i:])))
+ path, ok = _joinrealpath('', filename, {})
+ return abspath(path)
+
+# Join two paths, normalizing and eliminating any symbolic links
+# encountered in the second path.
+def _joinrealpath(path, rest, seen):
+ if isabs(rest):
+ rest = rest[1:]
+ path = sep
+
+ while rest:
+ name, _, rest = rest.partition(sep)
+ if not name or name == curdir:
+ # current dir
+ continue
+ if name == pardir:
+ # parent dir
+ if path:
+ path, name = split(path)
+ if name == pardir:
+ path = join(path, pardir, pardir)
else:
- newpath = join(*([resolved] + bits[i:]))
- return realpath(newpath)
-
- return abspath(filename)
-
-
-def _resolve_link(path):
- """Internal helper function. Takes a path and follows symlinks
- until we either arrive at something that isn't a symlink, or
- encounter a path we've seen before (meaning that there's a loop).
- """
- paths_seen = set()
- while islink(path):
- if path in paths_seen:
- # Already seen this path, so we must have a symlink loop
- return None
- paths_seen.add(path)
- # Resolve where the link points to
- resolved = os.readlink(path)
- if not isabs(resolved):
- dir = dirname(path)
- path = normpath(join(dir, resolved))
- else:
- path = normpath(resolved)
- return path
+ path = pardir
+ continue
+ newpath = join(path, name)
+ if not islink(newpath):
+ path = newpath
+ continue
+ # Resolve the symbolic link
+ if newpath in seen:
+ # Already seen this path
+ path = seen[newpath]
+ if path is not None:
+ # use cached value
+ continue
+ # The symlink is not resolved, so we must have a symlink loop.
+ # Return already resolved part + rest of the path unchanged.
+ return join(newpath, rest), False
+ seen[newpath] = None # not resolved symlink
+ path, ok = _joinrealpath(path, os.readlink(newpath), seen)
+ if not ok:
+ return join(path, rest), False
+ seen[newpath] = path # resolved symlink
+
+ return path, True
+
supports_unicode_filenames = (sys.platform == 'darwin')
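A small, self-contained way to exercise the new _joinrealpath()-based realpath() on a symlink loop (paths here are illustrative); the unresolved link plus the remaining components is returned instead of recursing indefinitely:

    import os, tempfile
    d = tempfile.mkdtemp()
    os.symlink('loop', os.path.join(d, 'loop'))   # self-referencing link
    print(os.path.realpath(os.path.join(d, 'loop', 'child')))
    # -> <d>/loop/child, with the loop left in place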
diff --git a/Lib/pprint.py b/Lib/pprint.py
index 910283e..77f2a56 100644
--- a/Lib/pprint.py
+++ b/Lib/pprint.py
@@ -37,7 +37,10 @@ saferepr()
import sys as _sys
import warnings
-from cStringIO import StringIO as _StringIO
+try:
+ from cStringIO import StringIO as _StringIO
+except ImportError:
+ from StringIO import StringIO as _StringIO
__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
"PrettyPrinter"]
@@ -182,25 +185,18 @@ class PrettyPrinter:
if issubclass(typ, list):
write('[')
endchar = ']'
- elif issubclass(typ, set):
- if not length:
- write('set()')
- return
- write('set([')
- endchar = '])'
- object = _sorted(object)
- indent += 4
- elif issubclass(typ, frozenset):
+ elif issubclass(typ, tuple):
+ write('(')
+ endchar = ')'
+ else:
if not length:
- write('frozenset()')
+ write(rep)
return
- write('frozenset([')
+ write(typ.__name__)
+ write('([')
endchar = '])'
+ indent += len(typ.__name__) + 1
object = _sorted(object)
- indent += 10
- else:
- write('(')
- endchar = ')'
if self._indent_per_level > 1 and sepLines:
write((self._indent_per_level - 1) * ' ')
if length:
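The rewritten branch prints any set or frozenset subclass through its type name instead of special-casing the two built-ins. A hedged sketch of the effect, using a made-up subclass:

    import pprint
    class MySet(set):
        pass
    # Wide enough to need wrapping: the else-branch above emits the
    # "MySet([" prefix and sorts the elements before writing them.
    print(pprint.pformat(MySet(range(20)), width=30))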
diff --git a/Lib/pstats.py b/Lib/pstats.py
index 3dc61d6..4338994 100644
--- a/Lib/pstats.py
+++ b/Lib/pstats.py
@@ -120,8 +120,8 @@ class Stats:
self.stats = arg.stats
arg.stats = {}
if not self.stats:
- raise TypeError, "Cannot create or construct a %r object from '%r''" % (
- self.__class__, arg)
+ raise TypeError("Cannot create or construct a %r object from %r"
+ % (self.__class__, arg))
return
def get_top_level_stats(self):
@@ -172,15 +172,19 @@ class Stats:
# along with some printable description
sort_arg_dict_default = {
"calls" : (((1,-1), ), "call count"),
+ "ncalls" : (((1,-1), ), "call count"),
+ "cumtime" : (((3,-1), ), "cumulative time"),
"cumulative": (((3,-1), ), "cumulative time"),
"file" : (((4, 1), ), "file name"),
+ "filename" : (((4, 1), ), "file name"),
"line" : (((5, 1), ), "line number"),
"module" : (((4, 1), ), "file name"),
"name" : (((6, 1), ), "function name"),
"nfl" : (((6, 1),(4, 1),(5, 1),), "name/file/line"),
- "pcalls" : (((0,-1), ), "call count"),
+ "pcalls" : (((0,-1), ), "primitive call count"),
"stdname" : (((7, 1), ), "standard name"),
"time" : (((2,-1), ), "internal time"),
+ "tottime" : (((2,-1), ), "internal time"),
}
def get_sort_arg_defs(self):
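The new keys are aliases for the existing ones and match the column headers that print_stats() shows. Hypothetical usage, assuming a previously dumped profile file named profile.out:

    import pstats
    p = pstats.Stats('profile.out')
    p.sort_stats('cumtime').print_stats(10)   # same ordering as 'cumulative'
    p.sort_stats('tottime').print_stats(10)   # same ordering as 'time'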
diff --git a/Lib/py_compile.py b/Lib/py_compile.py
index dc1cae9..c0bc1e4 100644
--- a/Lib/py_compile.py
+++ b/Lib/py_compile.py
@@ -112,7 +112,7 @@ def compile(file, cfile=None, dfile=None, doraise=False):
try:
codeobject = __builtin__.compile(codestring, dfile or file,'exec')
except Exception,err:
- py_exc = PyCompileError(err.__class__,err.args,dfile or file)
+ py_exc = PyCompileError(err.__class__, err, dfile or file)
if doraise:
raise py_exc
else:
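With the exception instance (rather than err.args) reaching PyCompileError, a doraise=True failure carries the usual SyntaxError-style formatting. Hedged sketch, assuming a file broken.py with a syntax error exists:

    import py_compile
    try:
        py_compile.compile('broken.py', doraise=True)
    except py_compile.PyCompileError as exc:
        print(exc.msg)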
diff --git a/Lib/pyclbr.py b/Lib/pyclbr.py
index 97f74f1..b8f71ae 100644
--- a/Lib/pyclbr.py
+++ b/Lib/pyclbr.py
@@ -128,6 +128,8 @@ def _readmodule(module, path, inpackage=None):
parent = _readmodule(package, path, inpackage)
if inpackage is not None:
package = "%s.%s" % (inpackage, package)
+ if not '__path__' in parent:
+ raise ImportError('No package named {}'.format(package))
return _readmodule(submodule, parent['__path__'], package)
# Search the path for the module
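The added guard turns a confusing KeyError into an ImportError when the dotted prefix names a plain module rather than a package. Illustrative only (the submodule name is made up):

    import pyclbr
    try:
        pyclbr.readmodule_ex('string.sub')   # 'string' is a module, not a package
    except ImportError as exc:
        print(exc)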
diff --git a/Lib/pydoc.py b/Lib/pydoc.py
index 19a71d8..218fd30 100755
--- a/Lib/pydoc.py
+++ b/Lib/pydoc.py
@@ -81,6 +81,7 @@ def pathdirs():
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
+ result = _encode(result)
return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
@@ -182,6 +183,36 @@ def classify_class_attrs(object):
return name, kind, cls, value
return map(fixup, inspect.classify_class_attrs(object))
+# ----------------------------------------------------- Unicode support helpers
+
+try:
+ _unicode = unicode
+except NameError:
+ # If Python is built without Unicode support, the unicode type
+ # will not exist. Fake one that nothing will match, and make
+ # the _encode function do nothing.
+ class _unicode(object):
+ pass
+ _encoding = 'ascii'
+ def _encode(text, encoding='ascii'):
+ return text
+else:
+ import locale
+ _encoding = locale.getpreferredencoding()
+
+ def _encode(text, encoding=None):
+ if isinstance(text, unicode):
+ return text.encode(encoding or _encoding, 'xmlcharrefreplace')
+ else:
+ return text
+
+def _binstr(obj):
+ # Ensure that we have an encoded (binary) string representation of obj,
+ # even if it is a unicode string.
+ if isinstance(obj, _unicode):
+ return obj.encode(_encoding, 'xmlcharrefreplace')
+ return str(obj)
+
# ----------------------------------------------------- module manipulation
def ispackage(path):
@@ -424,12 +455,13 @@ class HTMLDoc(Doc):
def page(self, title, contents):
"""Format an HTML page."""
- return '''
+ return _encode('''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
+<meta charset="utf-8">
</head><body bgcolor="#f0f0f8">
%s
-</body></html>''' % (title, contents)
+</body></html>''' % (title, contents), 'ascii')
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
@@ -549,10 +581,15 @@ class HTMLDoc(Doc):
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
+ elif selfdot:
+ # Create a link for methods like 'self.method(...)'
+ # and use <strong> for attributes like 'self.attr'
+ if text[end:end+1] == '(':
+ results.append('self.' + self.namelink(name, methods))
+ else:
+ results.append('self.<strong>%s</strong>' % name)
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
- elif selfdot:
- results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
@@ -606,12 +643,12 @@ class HTMLDoc(Doc):
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
- version = str(object.__version__)
+ version = _binstr(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
- info.append(self.escape(str(object.__date__)))
+ info.append(self.escape(_binstr(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
@@ -694,11 +731,11 @@ class HTMLDoc(Doc):
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
- contents = self.markup(str(object.__author__), self.preformat)
+ contents = self.markup(_binstr(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
- contents = self.markup(str(object.__credits__), self.preformat)
+ contents = self.markup(_binstr(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
@@ -1116,16 +1153,16 @@ class TextDoc(Doc):
result = result + self.section('DATA', join(contents, '\n'))
if hasattr(object, '__version__'):
- version = str(object.__version__)
+ version = _binstr(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
- result = result + self.section('DATE', str(object.__date__))
+ result = result + self.section('DATE', _binstr(object.__date__))
if hasattr(object, '__author__'):
- result = result + self.section('AUTHOR', str(object.__author__))
+ result = result + self.section('AUTHOR', _binstr(object.__author__))
if hasattr(object, '__credits__'):
- result = result + self.section('CREDITS', str(object.__credits__))
+ result = result + self.section('CREDITS', _binstr(object.__credits__))
return result
def docclass(self, object, name=None, mod=None, *ignored):
@@ -1340,6 +1377,8 @@ def getpager():
"""Decide what method to use for paging through text."""
if type(sys.stdout) is not types.FileType:
return plainpager
+ if not hasattr(sys.stdin, "isatty"):
+ return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
@@ -1375,7 +1414,7 @@ def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
- pipe.write(text)
+ pipe.write(_encode(text))
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
@@ -1385,7 +1424,7 @@ def tempfilepager(text, cmd):
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
- file.write(text)
+ file.write(_encode(text))
file.close()
try:
os.system(cmd + ' "' + filename + '"')
@@ -1394,7 +1433,7 @@ def tempfilepager(text, cmd):
def ttypager(text):
"""Page through text on a text terminal."""
- lines = split(plain(text), '\n')
+ lines = plain(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding))).split('\n')
try:
import tty
fd = sys.stdin.fileno()
@@ -1432,7 +1471,7 @@ def ttypager(text):
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
- sys.stdout.write(plain(text))
+ sys.stdout.write(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding)))
def describe(thing):
"""Produce a short description of the given thing."""
@@ -1498,7 +1537,8 @@ def resolve(thing, forceload=0):
raise ImportError, 'no Python documentation found for %r' % thing
return object, thing
else:
- return thing, getattr(thing, '__name__', None)
+ name = getattr(thing, '__name__', None)
+ return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Render text documentation, given an object or a path to an object."""
@@ -1799,7 +1839,7 @@ has the same effect as typing a particular string at the help> prompt.
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
-the tutorial on the Internet at http://docs.python.org/tutorial/.
+the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
@@ -1809,7 +1849,7 @@ To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
-''' % sys.version[:3])
+''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
items = items[:]
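The _encode()/_binstr() helpers added near the top of this file funnel unicode docstrings and module metadata through one place, so non-ASCII characters become character references instead of raising UnicodeEncodeError. A rough sketch of the effect under an ASCII preferred encoding (the author string is hypothetical):

    u'Fran\xe7ois'.encode('ascii', 'xmlcharrefreplace')   # -> 'Fran&#231;ois'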
diff --git a/Lib/pydoc_data/topics.py b/Lib/pydoc_data/topics.py
index 13ee272..bf84731 100644
--- a/Lib/pydoc_data/topics.py
+++ b/Lib/pydoc_data/topics.py
@@ -1,7 +1,7 @@
-# Autogenerated by Sphinx on Thu Feb 23 15:17:35 2012
+# Autogenerated by Sphinx on Sun Jun 29 18:55:25 2014
topics = {'assert': '\nThe ``assert`` statement\n************************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, ``assert expression``, is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, ``assert expression1, expression2``, is equivalent\nto\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that ``__debug__`` and ``AssertionError``\nrefer to the built-in variables with those names. In the current\nimplementation, the built-in variable ``__debug__`` is ``True`` under\nnormal circumstances, ``False`` when optimization is requested\n(command line option -O). The current code generator emits no code\nfor an assert statement when optimization is requested at compile\ntime. Note that it is unnecessary to include the source code for the\nexpression that failed in the error message; it will be displayed as\npart of the stack trace.\n\nAssignments to ``__debug__`` are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n',
'assignment': '\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list is recursively defined as\nfollows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The object\n must be an iterable with the same number of items as there are\n targets in the target list, and the items are assigned, from left to\n right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a ``global`` statement in the\n current code block: the name is bound to the object in the current\n local namespace.\n\n * Otherwise: the name is bound to the object in the current global\n namespace.\n\n The name is rebound if it was already bound. This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in square\n brackets: The object must be an iterable with the same number of\n items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, ``TypeError`` is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily ``AttributeError``).\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n ``a.x`` can access either an instance attribute or (if no instance\n attribute exists) a class attribute. 
The LHS target ``a.x`` is\n always set as an instance attribute, creating it if necessary.\n Thus, the two occurrences of ``a.x`` do not necessarily refer to the\n same attribute: if the RHS expression refers to a class attribute,\n the LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with ``property()``.\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield a plain integer. If it is negative, the\n sequence\'s length is added to it. The resulting value must be a\n nonnegative integer less than the sequence\'s length, and the\n sequence is asked to assign the assigned object to its item with\n that index. If the index is out of range, ``IndexError`` is raised\n (assignment to a subscripted sequence cannot add new items to a\n list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n* If the target is a slicing: The primary expression in the reference\n is evaluated. It should yield a mutable sequence object (such as a\n list). The assigned object should be a sequence object of the same\n type. Next, the lower and upper bound expressions are evaluated,\n insofar they are present; defaults are zero and the sequence\'s\n length. The bounds should evaluate to (small) integers. If either\n bound is negative, the sequence\'s length is added to it. The\n resulting bounds are clipped to lie between zero and the sequence\'s\n length, inclusive. Finally, the sequence object is asked to replace\n the slice with the items of the assigned sequence. The length of\n the slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the object\n allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample ``a, b = b, a`` swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe! 
For instance, the\nfollowing program prints ``[0, 2]``:\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2\n print x\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n',
- 'atom-identifiers': '\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a ``NameError`` exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name in front of the name, with leading underscores removed, and\na single underscore inserted in front of the class name. For example,\nthe identifier ``__spam`` occurring in a class named ``Ham`` will be\ntransformed to ``_Ham__spam``. This transformation is independent of\nthe syntactical context in which the identifier is used. If the\ntransformed name is extremely long (longer than 255 characters),\nimplementation defined truncation may happen. If the class name\nconsists only of underscores, no transformation is done.\n',
+ 'atom-identifiers': '\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a ``NameError`` exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name. For example, the identifier\n``__spam`` occurring in a class named ``Ham`` will be transformed to\n``_Ham__spam``. This transformation is independent of the syntactical\ncontext in which the identifier is used. If the transformed name is\nextremely long (longer than 255 characters), implementation defined\ntruncation may happen. If the class name consists only of underscores,\nno transformation is done.\n',
'atom-literals': "\nLiterals\n********\n\nPython supports string literals and various numeric literals:\n\n literal ::= stringliteral | integer | longinteger\n | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\ninteger, long integer, floating point number, complex number) with the\ngiven value. The value may be approximated in the case of floating\npoint and imaginary (complex) literals. See section *Literals* for\ndetails.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n",
'attribute-access': '\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should not simply execute ``self.name = value`` --- this would\n cause a recursive call to itself. Instead, it should insert the\n value in the dictionary of instance attributes, e.g.,\n ``self.__dict__[name] = value``. For new-style classes, rather\n than accessing the instance dictionary, it should call the base\n class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n===========================================\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. See *Special method lookup for new-style\n classes*.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). 
In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' ``__dict__``.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass ``object()`` or\n``type()``).\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to a new-style object instance, ``a.x`` is transformed\n into the call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a new-style class, ``A.x`` is transformed into the\n call: ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, obj.__class__)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. 
Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises ``AttributeError``. If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding ``\'__dict__\'`` to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n Changed in version 2.3: Previously, adding ``\'__weakref__\'`` to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. 
In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``long``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n',
'attribute-references': '\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, e.g., a module, list, or an instance. This\nobject is then asked to produce the attribute whose name is the\nidentifier. If this attribute is not available, the exception\n``AttributeError`` is raised. Otherwise, the type and value of the\nobject produced is determined by the object. Multiple evaluations of\nthe same attribute reference may yield different objects.\n',
@@ -15,63 +15,65 @@ topics = {'assert': '\nThe ``assert`` statement\n************************\n\nAss
'booleans': '\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: ``False``, ``None``, numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. (See the ``__nonzero__()`` special method for a way to\nchange this.)\n\nThe operator ``not`` yields ``True`` if its argument is false,\n``False`` otherwise.\n\nThe expression ``x and y`` first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression ``x or y`` first evaluates *x*; if *x* is true, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\n(Note that neither ``and`` nor ``or`` restrict the value and type they\nreturn to ``False`` and ``True``, but rather return the last evaluated\nargument. This is sometimes useful, e.g., if ``s`` is a string that\nshould be replaced by a default value if it is empty, the expression\n``s or \'foo\'`` yields the desired value. Because ``not`` has to\ninvent a value anyway, it does not bother to return a value of the\nsame type as its argument, so e.g., ``not \'foo\'`` yields ``False``,\nnot ``\'\'``.)\n',
'break': '\nThe ``break`` statement\n***********************\n\n break_stmt ::= "break"\n\n``break`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition\nwithin that loop.\n\nIt terminates the nearest enclosing loop, skipping the optional\n``else`` clause if the loop has one.\n\nIf a ``for`` loop is terminated by ``break``, the loop control target\nkeeps its current value.\n\nWhen ``break`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nleaving the loop.\n',
'callable-types': '\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n',
- 'calls': '\nCalls\n*****\n\nA call calls a callable object (e.g., a function) with a possibly\nempty series of arguments:\n\n call ::= primary "(" [argument_list [","]\n | expression genexpr_for] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," "**" expression]\n | "*" expression ["," "*" expression] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and certain class instances\nthemselves are callable; extensions may define additional callable\nobject types). All argument expressions are evaluated before the call\nis attempted. Please refer to section *Function definitions* for the\nsyntax of formal parameter lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a ``TypeError`` exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is ``None``, it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a\n``TypeError`` exception is raised. Otherwise, the list of filled\nslots is used as the argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. 
In CPython, this is the case\nfor functions implemented in C that use ``PyArg_ParseTuple()`` to\nparse their arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``*identifier`` is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``**identifier`` is present; in this case, that\nformal parameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax ``*expression`` appears in the function call,\n``expression`` must evaluate to an iterable. Elements from this\niterable are treated as if they were additional positional arguments;\nif there are positional arguments *x1*, ..., *xN*, and ``expression``\nevaluates to a sequence *y1*, ..., *yM*, this is equivalent to a call\nwith M+N positional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the ``*expression`` syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the ``**expression`` argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print a, b\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the ``*expression``\nsyntax to be used in the same call, so in practice this confusion does\nnot arise.\n\nIf the syntax ``**expression`` appears in the function call,\n``expression`` must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both ``expression`` and as an explicit keyword argument,\na ``TypeError`` exception is raised.\n\nFormal parameters using the syntax ``*identifier`` or ``**identifier``\ncannot be used as positional argument slots or as keyword argument\nnames. Formal parameters using the syntax ``(sublist)`` cannot be\nused as keyword argument names; the outermost sublist corresponds to a\nsingle unnamed argument slot, and the argument value is assigned to\nthe sublist using the usual tuple assignment rules after all other\nparameter processing is done.\n\nA call always returns some value, possibly ``None``, unless it raises\nan exception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. 
When the code block executes a ``return``\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a ``__call__()`` method; the effect is then\n the same as if that method was called.\n',
+ 'calls': '\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n call ::= primary "(" [argument_list [","]\n | expression genexpr_for] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," "**" expression]\n | "*" expression ["," "*" expression] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and certain class instances\nthemselves are callable; extensions may define additional callable\nobject types). All argument expressions are evaluated before the call\nis attempted. Please refer to section *Function definitions* for the\nsyntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a ``TypeError`` exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is ``None``, it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a\n``TypeError`` exception is raised. Otherwise, the list of filled\nslots is used as the argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. 
In CPython, this is the case\nfor functions implemented in C that use ``PyArg_ParseTuple()`` to\nparse their arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``*identifier`` is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``**identifier`` is present; in this case, that\nformal parameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax ``*expression`` appears in the function call,\n``expression`` must evaluate to an iterable. Elements from this\niterable are treated as if they were additional positional arguments;\nif there are positional arguments *x1*, ..., *xN*, and ``expression``\nevaluates to a sequence *y1*, ..., *yM*, this is equivalent to a call\nwith M+N positional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the ``*expression`` syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the ``**expression`` argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print a, b\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the ``*expression``\nsyntax to be used in the same call, so in practice this confusion does\nnot arise.\n\nIf the syntax ``**expression`` appears in the function call,\n``expression`` must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both ``expression`` and as an explicit keyword argument,\na ``TypeError`` exception is raised.\n\nFormal parameters using the syntax ``*identifier`` or ``**identifier``\ncannot be used as positional argument slots or as keyword argument\nnames. Formal parameters using the syntax ``(sublist)`` cannot be\nused as keyword argument names; the outermost sublist corresponds to a\nsingle unnamed argument slot, and the argument value is assigned to\nthe sublist using the usual tuple assignment rules after all other\nparameter processing is done.\n\nA call always returns some value, possibly ``None``, unless it raises\nan exception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. 
When the code block executes a ``return``\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a ``__call__()`` method; the effect is then\n the same as if that method was called.\n',
'class': '\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section *Naming and binding*), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with ``self.name = value``.\nBoth class and instance variables are accessible through the notation\n"``self.name``", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n is a ``finally`` clause which happens to raise another exception.\n That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n',
'comparisons': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe forms ``<>`` and ``!=`` are equivalent; for consistency with C,\n``!=`` is preferred; where ``!=`` is mentioned below ``<>`` is also\naccepted. The ``<>`` spelling is considered obsolescent.\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. Otherwise,\nobjects of different types *always* compare unequal, and are ordered\nconsistently but arbitrarily. You can control comparison behavior of\nobjects of non-built-in types by defining a ``__cmp__`` method or rich\ncomparison methods like ``__gt__``, described in section *Special\nmethod names*.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the ``in`` and ``not in``\noperators. In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n Unicode and 8-bit strings are fully interoperable in this behavior.\n [4]\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``cmp([1,2,x], [1,2,y])`` returns\n the same as ``cmp(x,y)``. If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, ``[1,2] <\n [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. 
[6]\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nThe operators ``in`` and ``not in`` test for collection membership.\n``x in s`` evaluates to true if *x* is a member of the collection *s*,\nand false otherwise. ``x not in s`` returns the negation of ``x in\ns``. The collection membership test has traditionally been bound to\nsequences; an object is a member of a collection if the collection is\na sequence and contains an element equal to that object. However, it\nmakes sense for many other object types to support membership tests\nwithout being a sequence. In particular, dictionaries (for keys) and\nsets support membership testing.\n\nFor the list and tuple types, ``x in y`` is true if and only if there\nexists an index *i* such that ``x == y[i]`` is true.\n\nFor the Unicode and string types, ``x in y`` is true if and only if\n*x* is a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nNote, *x* and *y* need not be the same type; consequently, ``u\'ab\' in\n\'abc\'`` will return ``True``. Empty strings are always considered to\nbe a substring of any other string, so ``"" in "abc"`` will return\n``True``.\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength ``1``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` but do\ndefine ``__iter__()``, ``x in y`` is true if some value ``z`` with ``x\n== z`` is produced while iterating over ``y``. If an exception is\nraised during the iteration, it is as if ``in`` raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n``__getitem__()``, ``x in y`` is true if and only if there is a non-\nnegative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise an ``IndexError`` exception. (If any other\nexception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse true value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. [7]\n',
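Editorial note: a brief sketch of the chaining and ``__contains__()`` rules documented in the entry above. The ``Interval`` class is invented for this illustration and is not part of the upstream text:

    >>> x = 2
    >>> 1 < x < 3                    # evaluated as (1 < x) and (x < 3); x is evaluated once
    True
    >>> class Interval(object):
    ...     def __init__(self, low, high):
    ...         self.low, self.high = low, high
    ...     def __contains__(self, value):
    ...         return self.low <= value <= self.high
    ...
    >>> 5 in Interval(0, 10)         # ``in`` delegates to __contains__()
    True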
- 'compound': '\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe ``if``, ``while`` and ``for`` statements implement traditional\ncontrol flow constructs. ``try`` specifies exception handlers and/or\ncleanup code for a group of statements. Function and class\ndefinitions are also syntactically compound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which ``if`` clause a following ``else`` clause would belong:\n\n if test1: if test2: print x\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n``print`` statements are executed:\n\n if x < y < z: print x; print y; print z\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n | decorated\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a ``NEWLINE`` possibly followed by\na ``DEDENT``. Also note that optional continuation clauses always\nbegin with a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling ``else``\' problem is solved in Python by\nrequiring nested ``if`` statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe ``if`` statement\n====================\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n\n\nThe ``while`` statement\n=======================\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. 
A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n\n\nThe ``for`` statement\n=====================\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the ``else`` clause, if present, is executed, and the loop\nterminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function ``range()`` returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s ``for i := a to b\ndo``; e.g., ``range(3)`` returns the list ``[0, 1, 2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An internal\n counter is used to keep track of which item is used next, and this\n is incremented on each iteration. When this counter has reached the\n length of the sequence the loop terminates. This means that if the\n suite deletes the current (or a previous) item from the sequence,\n the next item will be skipped (since it gets the index of the\n current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe ``try`` statement\n=====================\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n``try``...``except``...``finally`` did not work. ``try``...``except``\nhad to be nested in ``try``...``finally``.\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. 
For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object, a tuple containing an item compatible with the\nexception, or, in the (deprecated) case of string exceptions, is the\nraised string itself (note that the object identities must match, i.e.\nit must be the same string object, not just a string with the same\nvalue).\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the ``sys`` module:\n``sys.exc_type`` receives the object identifying the exception;\n``sys.exc_value`` receives the exception\'s parameter;\n``sys.exc_traceback`` receives a traceback object (see section *The\nstandard type hierarchy*) identifying the point in the program where\nthe exception occurred. These details are also available through the\n``sys.exc_info()`` function, which returns a tuple ``(exc_type,\nexc_value, exc_traceback)``. Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is lost. The exception information is not available to the\nprogram during execution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. 
(The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe ``with`` statement\n======================\n\nNew in version 2.5.\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the ``with_item``)\n is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the ``with`` statement is only allowed when the\n ``with_statement`` feature has been enabled. It is always enabled\n in Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." 
identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier [, "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level parameters have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding argument may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. 
The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section *Naming and binding*), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with ``self.name = value``.\nBoth class and instance variables are accessible through the notation\n"``self.name``", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n is a ``finally`` clause which happens to raise another exception.\n That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n',
+ 'compound': '\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe ``if``, ``while`` and ``for`` statements implement traditional\ncontrol flow constructs. ``try`` specifies exception handlers and/or\ncleanup code for a group of statements. Function and class\ndefinitions are also syntactically compound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which ``if`` clause a following ``else`` clause would belong:\n\n if test1: if test2: print x\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n``print`` statements are executed:\n\n if x < y < z: print x; print y; print z\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n | decorated\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a ``NEWLINE`` possibly followed by\na ``DEDENT``. Also note that optional continuation clauses always\nbegin with a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling ``else``\' problem is solved in Python by\nrequiring nested ``if`` statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe ``if`` statement\n====================\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n\n\nThe ``while`` statement\n=======================\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. 
A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n\n\nThe ``for`` statement\n=====================\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the ``else`` clause, if present, is executed, and the loop\nterminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function ``range()`` returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s ``for i := a to b\ndo``; e.g., ``range(3)`` returns the list ``[0, 1, 2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An internal\n counter is used to keep track of which item is used next, and this\n is incremented on each iteration. When this counter has reached the\n length of the sequence the loop terminates. This means that if the\n suite deletes the current (or a previous) item from the sequence,\n the next item will be skipped (since it gets the index of the\n current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe ``try`` statement\n=====================\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n``try``...``except``...``finally`` did not work. ``try``...``except``\nhad to be nested in ``try``...``finally``.\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. 
For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object, or a tuple containing an item compatible with the\nexception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the ``sys`` module:\n``sys.exc_type`` receives the object identifying the exception;\n``sys.exc_value`` receives the exception\'s parameter;\n``sys.exc_traceback`` receives a traceback object (see section *The\nstandard type hierarchy*) identifying the point in the program where\nthe exception occurred. These details are also available through the\n``sys.exc_info()`` function, which returns a tuple ``(exc_type,\nexc_value, exc_traceback)``. Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is discarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nThe return value of a function is determined by the last ``return``\nstatement executed. 
Since the ``finally`` clause always executes, a\n``return`` statement executed in the ``finally`` clause will always be\nthe last one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe ``with`` statement\n======================\n\nNew in version 2.5.\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the ``with_item``)\n is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the ``with`` statement is only allowed when the\n ``with_statement`` feature has been enabled. It is always enabled\n in Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." 
identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier ["," "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level *parameters* have the form *parameter*\n``=`` *expression*, the function is said to have "default parameter\nvalues." For a parameter with a default value, the corresponding\n*argument* may be omitted from a call, in which case the parameter\'s\ndefault value is substituted. If a parameter has a default value, all\nfollowing parameters must also have a default value --- this is a\nsyntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section *Lambdas*. 
Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a "``def``" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The "``def``" form is actually more powerful since it\nallows the execution of multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section *Naming and binding*), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with ``self.name = value``.\nBoth class and instance variables are accessible through the notation\n"``self.name``", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n is a ``finally`` clause which happens to raise another exception.\n That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n',
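Editorial note: to make the clause ordering in the updated ``try`` description concrete, here is a small invented example (the ``parse`` function is hypothetical) showing that the ``else`` suite runs only when no exception was raised and that ``finally`` runs in every case:

    >>> def parse(text):
    ...     try:
    ...         value = int(text)
    ...     except ValueError:
    ...         print 'not a number'
    ...     else:                    # runs only when the try suite raised nothing
    ...         print 'parsed', value
    ...     finally:                 # always runs, on the way out
    ...         print 'done'
    ...
    >>> parse('42')
    parsed 42
    done
    >>> parse('x')
    not a number
    done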
'context-managers': '\nWith Statement Context Managers\n*******************************\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n',
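Editorial note: a minimal, hypothetical class-based context manager illustrating the ``__enter__()``/``__exit__()`` protocol documented in the entry above; the ``Tag`` class is invented for this sketch:

    >>> class Tag(object):
    ...     def __init__(self, name):
    ...         self.name = name
    ...     def __enter__(self):
    ...         print '<%s>' % self.name
    ...         return self.name     # bound to the ``as`` target, if any
    ...     def __exit__(self, exc_type, exc_value, traceback):
    ...         print '</%s>' % self.name
    ...         return False         # do not suppress exceptions
    ...
    >>> with Tag('b') as name:
    ...     print name
    ...
    <b>
    b
    </b>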
'continue': '\nThe ``continue`` statement\n**************************\n\n continue_stmt ::= "continue"\n\n``continue`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition or\n``finally`` clause within that loop. It continues with the next cycle\nof the nearest enclosing loop.\n\nWhen ``continue`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nstarting the next loop cycle.\n',
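Editorial note: a short assumed example of the last rule above; when ``continue`` leaves a ``try`` suite that has a ``finally`` clause, the ``finally`` suite runs before the next loop cycle starts:

    >>> for i in range(3):
    ...     try:
    ...         if i == 1:
    ...             continue
    ...         print 'body', i
    ...     finally:
    ...         print 'cleanup', i
    ...
    body 0
    cleanup 0
    cleanup 1
    body 2
    cleanup 2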
'conversions': '\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," the arguments\nare coerced using the coercion rules listed at *Coercion rules*. If\nboth arguments are standard numeric types, the following coercions are\napplied:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the other\n is converted to floating point;\n\n* otherwise, if either argument is a long integer, the other is\n converted to long integer;\n\n* otherwise, both must be plain integers and no conversion is\n necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator). Extensions can define their own\ncoercions.\n',
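Editorial note: the coercion rules summarised above can be checked interactively; these are ordinary Python 2 interpreter results, shown here as an editorial illustration rather than part of the upstream entry:

    >>> 1 + 2.0                      # plain int widened to float
    3.0
    >>> 2 * (3+4j)                   # int widened to complex
    (6+8j)
    >>> type(1 + 2L)                 # plain int widened to long
    <type 'long'>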
- 'customization': '\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_traceback`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). 
The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.exc_traceback`` or ``sys.last_traceback``. Circular\n references which are garbage are detected when the option cycle\n detector is enabled (it\'s on by default), but can only be cleaned\n up if there are no Python-level ``__del__()`` methods involved.\n Refer to the documentation for the ``gc`` module for more\n information about how ``__del__()`` methods are handled by the\n cycle detector, particularly the description of the ``garbage``\n value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\n See also the *-R* command-line option.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function and by string\n conversions (reverse quotes) to compute the "official" string\n representation of an object. If at all possible, this should look\n like a valid Python expression that could be used to recreate an\n object with the same value (given an appropriate environment). If\n this is not possible, a string of the form ``<...some useful\n description...>`` should be returned. The return value must be a\n string object. If a class defines ``__repr__()`` but not\n ``__str__()``, then ``__repr__()`` is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the ``str()`` built-in function and by the ``print``\n statement to compute the "informal" string representation of an\n object. This differs from ``__repr__()`` in that it does not have\n to be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to ``__cmp__()`` below. The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` and\n ``x<>y`` call ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and\n ``x>=y`` calls ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. 
By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if ``self < other``,\n zero if ``self == other``, a positive integer if ``self > other``.\n If no ``__cmp__()``, ``__eq__()`` or ``__ne__()`` operation is\n defined, class instances are compared by object identity\n ("address"). See also the description of ``__hash__()`` for some\n important notes on creating *hashable* objects which support custom\n comparison operations and are usable as dictionary keys. (Note: the\n restriction that exceptions are not propagated by ``__cmp__()`` has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define a ``__cmp__()`` or ``__eq__()`` method\n it should not define a ``__hash__()`` operation either; if it\n defines ``__cmp__()`` or ``__eq__()`` but not ``__hash__()``, its\n instances will not be usable in hashed collections. If a class\n defines mutable objects and implements a ``__cmp__()`` or\n ``__eq__()`` method, it should not implement ``__hash__()``, since\n hashable collection implementations require that a object\'s hash\n value is immutable (if the object\'s hash value changes, it will be\n in the wrong hash bucket).\n\n User-defined classes have ``__cmp__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns ``id(x)``.\n\n Classes which inherit a ``__hash__()`` method from a parent class\n but change the meaning of ``__cmp__()`` or ``__eq__()`` such that\n the hash value returned is no longer appropriate (e.g. 
by switching\n to a value-based concept of equality instead of the default\n identity based equality) can explicitly flag themselves as being\n unhashable by setting ``__hash__ = None`` in the class definition.\n Doing so means that not only will instances of the class raise an\n appropriate ``TypeError`` when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking ``isinstance(obj, collections.Hashable)``\n (unlike classes which define their own ``__hash__()`` to explicitly\n raise ``TypeError``).\n\n Changed in version 2.5: ``__hash__()`` may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: ``__hash__`` may now be set to ``None`` to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``, or their integer\n equivalents ``0`` or ``1``. When this method is not defined,\n ``__len__()`` is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither ``__len__()`` nor ``__nonzero__()``, all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement ``unicode()`` built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n',
+ 'customization': '\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_traceback`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). 
The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.exc_traceback`` or ``sys.last_traceback``. Circular\n references which are garbage are detected when the option cycle\n detector is enabled (it\'s on by default), but can only be cleaned\n up if there are no Python-level ``__del__()`` methods involved.\n Refer to the documentation for the ``gc`` module for more\n information about how ``__del__()`` methods are handled by the\n cycle detector, particularly the description of the ``garbage``\n value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\n See also the *-R* command-line option.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function and by string\n conversions (reverse quotes) to compute the "official" string\n representation of an object. If at all possible, this should look\n like a valid Python expression that could be used to recreate an\n object with the same value (given an appropriate environment). If\n this is not possible, a string of the form ``<...some useful\n description...>`` should be returned. The return value must be a\n string object. If a class defines ``__repr__()`` but not\n ``__str__()``, then ``__repr__()`` is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the ``str()`` built-in function and by the ``print``\n statement to compute the "informal" string representation of an\n object. This differs from ``__repr__()`` in that it does not have\n to be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to ``__cmp__()`` below. The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` and\n ``x<>y`` call ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and\n ``x>=y`` calls ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. 
By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if ``self < other``,\n zero if ``self == other``, a positive integer if ``self > other``.\n If no ``__cmp__()``, ``__eq__()`` or ``__ne__()`` operation is\n defined, class instances are compared by object identity\n ("address"). See also the description of ``__hash__()`` for some\n important notes on creating *hashable* objects which support custom\n comparison operations and are usable as dictionary keys. (Note: the\n restriction that exceptions are not propagated by ``__cmp__()`` has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define a ``__cmp__()`` or ``__eq__()`` method\n it should not define a ``__hash__()`` operation either; if it\n defines ``__cmp__()`` or ``__eq__()`` but not ``__hash__()``, its\n instances will not be usable in hashed collections. If a class\n defines mutable objects and implements a ``__cmp__()`` or\n ``__eq__()`` method, it should not implement ``__hash__()``, since\n hashable collection implementations require that a object\'s hash\n value is immutable (if the object\'s hash value changes, it will be\n in the wrong hash bucket).\n\n User-defined classes have ``__cmp__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns a result derived from\n ``id(x)``.\n\n Classes which inherit a ``__hash__()`` method from a parent class\n but change the meaning of ``__cmp__()`` or ``__eq__()`` such that\n the hash value returned is no longer appropriate (e.g. 
by switching\n to a value-based concept of equality instead of the default\n identity based equality) can explicitly flag themselves as being\n unhashable by setting ``__hash__ = None`` in the class definition.\n Doing so means that not only will instances of the class raise an\n appropriate ``TypeError`` when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking ``isinstance(obj, collections.Hashable)``\n (unlike classes which define their own ``__hash__()`` to explicitly\n raise ``TypeError``).\n\n Changed in version 2.5: ``__hash__()`` may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: ``__hash__`` may now be set to ``None`` to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``, or their integer\n equivalents ``0`` or ``1``. When this method is not defined,\n ``__len__()`` is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither ``__len__()`` nor ``__nonzero__()``, all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement ``unicode()`` built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n',
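Taken together, the customization hooks above are usually implemented as a small, consistent set. The following is a minimal sketch (the class ``Vec2`` and its fields are hypothetical, not part of the topic text) showing ``__repr__()``, ``__eq__()``/``__ne__()`` and a matching ``__hash__()``:

   class Vec2(object):
       """A small value type exercising several of the hooks above."""

       def __init__(self, x, y):
           self.x, self.y = x, y

       def __repr__(self):
           # "Official" representation: looks like the expression that recreates it.
           return 'Vec2(%r, %r)' % (self.x, self.y)

       def __eq__(self, other):
           if not isinstance(other, Vec2):
               return NotImplemented
           return (self.x, self.y) == (other.x, other.y)

       def __ne__(self, other):
           # Defined alongside __eq__() so the two operators stay consistent.
           result = self.__eq__(other)
           return result if result is NotImplemented else not result

       def __hash__(self):
           # Objects that compare equal must hash equal: mix the compared fields.
           return hash((self.x, self.y))

With these definitions ``Vec2(1, 2) == Vec2(1, 2)`` is true and instances can be used as dictionary keys.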
'debugger': '\n``pdb`` --- The Python Debugger\n*******************************\n\nThe module ``pdb`` defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible --- it is actually defined as the class\n``Pdb``. This is currently undocumented but easily understood by\nreading the source. The extension interface uses the modules ``bdb``\nand ``cmd``.\n\nThe debugger\'s prompt is ``(Pdb)``. Typical usage to run a program\nunder control of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\n``pdb.py`` can also be invoked as a script to debug other scripts.\nFor example:\n\n python -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 2.4: Restarting post-mortem behavior added.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the ``c`` command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print spam\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print spam\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement[, globals[, locals]])\n\n Execute the *statement* (given as a string) under debugger control.\n The debugger prompt appears before any code is executed; you can\n set breakpoints and type ``continue``, or you can step through the\n statement using ``step`` or ``next`` (all these commands are\n explained below). The optional *globals* and *locals* arguments\n specify the environment in which the code is executed; by default\n the dictionary of the module ``__main__`` is used. (See the\n explanation of the ``exec`` statement or the ``eval()`` built-in\n function.)\n\npdb.runeval(expression[, globals[, locals]])\n\n Evaluate the *expression* (given as a string) under debugger\n control. When ``runeval()`` returns, it returns the value of the\n expression. Otherwise this function is similar to ``run()``.\n\npdb.runcall(function[, argument, ...])\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When ``runcall()`` returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. 
This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem([traceback])\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n ``sys.last_traceback``.\n\nThe ``run*`` functions and ``set_trace()`` are aliases for\ninstantiating the ``Pdb`` class and calling the method of the same\nname. If you want to access further features, you have to do this\nyourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None)\n\n ``Pdb`` is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying ``cmd.Cmd`` class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 2.7: The *skip* argument.\n\n run(statement[, globals[, locals]])\n runeval(expression[, globals[, locals]])\n runcall(function[, argument, ...])\n set_trace()\n\n See the documentation for the functions explained above.\n',
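As a short sketch of the functional interface just listed (the function ``buggy()`` is made up for illustration), ``runcall()`` stops in the debugger on entry, while ``post_mortem()`` inspects a traceback after the fact:

   import pdb
   import sys

   def buggy(seq):
       total = 0
       for item in seq:
           total += item            # fails when item is not a number
       return total

   # Break as soon as buggy() is entered; returns whatever buggy() returns.
   pdb.runcall(buggy, [1, 2, 3])

   # Debug only after something has already gone wrong.
   try:
       buggy([1, 2, 'three'])
   except TypeError:
       pdb.post_mortem(sys.exc_info()[2])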
'del': '\nThe ``del`` statement\n*********************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a ``global``\nstatement in the same code block. If the name is unbound, a\n``NameError`` exception will be raised.\n\nIt is illegal to delete a name from the local namespace if it occurs\nas a free variable in a nested block.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n',
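A few illustrative uses of ``del`` (the names are chosen for the example):

   >>> a = [1, 2, 3, 4]
   >>> b = {'x': 1, 'y': 2}
   >>> del a[0]           # deletion of a subscription
   >>> del a[1:]          # deletion of a slicing: like a[1:] = []
   >>> del b['y']         # passed on to the mapping object
   >>> del a, b           # a target list is deleted from left to right
   >>> a
   Traceback (most recent call last):
     ...
   NameError: name 'a' is not defined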
'dict': '\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n',
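Two small illustrative displays (the names are arbitrary):

   >>> d = {'a': 1, 'b': 2, 'a': 3}              # duplicate key: the last datum wins
   >>> d['a']
   3
   >>> squares = {n: n ** 2 for n in range(4)}   # dict comprehension
   >>> squares[3]
   9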
'dynamic-features': '\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. (In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. [1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n',
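A minimal sketch of the first restriction (the function names are hypothetical): deleting a name that a nested block uses as a free variable is rejected by the compiler:

   def outer():
       x = 1
       def inner():
           return x       # 'x' is a free variable of this nested block ...
       del x              # ... so this 'del' is reported as a SyntaxError
       return inner

Similarly, a bare ``exec code`` would be rejected inside such a function, whereas ``exec code in ns`` names its namespace explicitly and is accepted.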
'else': '\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n',
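For instance (a hypothetical helper):

   def sign(x):
       # Exactly one suite is selected; the others are neither executed nor evaluated.
       if x < 0:
           return -1
       elif x == 0:
           return 0
       else:
           return 1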
'exceptions': '\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nExceptions can also be identified by strings, in which case the\n``except`` clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n',
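A small sketch of the machinery described above, using the Python 2 ``except ..., name`` spelling (the function and messages are illustrative only):

   def parse_port(text):
       try:
           port = int(text)
       except ValueError, exc:              # the handler receives the instance
           print 'bad port number:', exc
           port = None
       finally:
           print 'finished parsing', text   # runs whether or not an exception occurred
       return port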
+ 'exec': '\nThe ``exec`` statement\n**********************\n\n exec_stmt ::= "exec" or_expr ["in" expression ["," expression]]\n\nThis statement supports dynamic execution of Python code. The first\nexpression should evaluate to either a Unicode string, a *Latin-1*\nencoded string, an open file object, a code object, or a tuple. If it\nis a string, the string is parsed as a suite of Python statements\nwhich is then executed (unless a syntax error occurs). [1] If it is an\nopen file, the file is parsed until EOF and executed. If it is a code\nobject, it is simply executed. For the interpretation of a tuple, see\nbelow. In all cases, the code that\'s executed is expected to be valid\nas file input (see section *File input*). Be aware that the\n``return`` and ``yield`` statements may not be used outside of\nfunction definitions even within the context of code passed to the\n``exec`` statement.\n\nIn all cases, if the optional parts are omitted, the code is executed\nin the current scope. If only the first expression after ``in`` is\nspecified, it should be a dictionary, which will be used for both the\nglobal and the local variables. If two expressions are given, they\nare used for the global and local variables, respectively. If\nprovided, *locals* can be any mapping object. Remember that at module\nlevel, globals and locals are the same dictionary. If two separate\nobjects are given as *globals* and *locals*, the code will be executed\nas if it were embedded in a class definition.\n\nThe first expression may also be a tuple of length 2 or 3. In this\ncase, the optional parts must be omitted. The form ``exec(expr,\nglobals)`` is equivalent to ``exec expr in globals``, while the form\n``exec(expr, globals, locals)`` is equivalent to ``exec expr in\nglobals, locals``. The tuple form of ``exec`` provides compatibility\nwith Python 3, where ``exec`` is a function rather than a statement.\n\nChanged in version 2.4: Formerly, *locals* was required to be a\ndictionary.\n\nAs a side effect, an implementation may insert additional keys into\nthe dictionaries given besides those corresponding to variable names\nset by the executed code. For example, the current implementation may\nadd a reference to the dictionary of the built-in module\n``__builtin__`` under the key ``__builtins__`` (!).\n\n**Programmer\'s hints:** dynamic evaluation of expressions is supported\nby the built-in function ``eval()``. The built-in functions\n``globals()`` and ``locals()`` return the current global and local\ndictionary, respectively, which may be useful to pass around for use\nby ``exec``.\n\n-[ Footnotes ]-\n\n[1] Note that the parser only accepts the Unix-style end of line\n convention. If you are reading the code from a file, make sure to\n use *universal newlines* mode to convert Windows or Mac-style\n newlines.\n',
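An illustrative use of the one-namespace form and of the Python 3 compatible tuple form (the dictionary ``ns`` is made up for the example):

   >>> code = "result = x * 2"
   >>> ns = {'x': 21}
   >>> exec code in ns       # 'ns' serves as both the global and the local namespace
   >>> ns['result']
   42
   >>> exec(code, ns)        # equivalent tuple form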
'execmodel': '\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The file read by the\nbuilt-in function ``execfile()`` is a code block. The string argument\npassed to the built-in function ``eval()`` and to the ``exec``\nstatement is a code block. The expression read and evaluated by the\nbuilt-in function ``input()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, in the\nsecond position of an ``except`` clause header or after ``as`` in a\n``with`` statement. The ``import`` statement of the form ``from ...\nimport *`` binds all names defined in the imported module, except\nthose beginning with an underscore. This form may only be used at the\nmodule level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). 
It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a ``SyntaxError``.\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module ``__builtin__``. The global namespace is searched\nfirst. If the name is not found there, the builtins namespace is\nsearched. The global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module\'s dictionary is used). By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``__builtin__`` (note: no \'s\'); when in any other module,\n``__builtins__`` is an alias for the dictionary of the ``__builtin__``\nmodule itself. ``__builtins__`` can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``__builtin__`` (no \'s\') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe ``global`` statement has the same scope as a name binding\noperation in the same block. If the nearest enclosing scope for a\nfree variable contains a global statement, the free variable is\ntreated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. 
An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. (In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. [1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the ``raise`` statement. Exception\nhandlers are specified with the ``try`` ... ``except`` statement. The\n``finally`` clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n``SystemExit``.\n\nExceptions are identified by class instances. The ``except`` clause\nis selected depending on the class of the instance: it must reference\nthe class of the instance or a base class thereof. The instance can\nbe received by the handler and can carry additional information about\nthe exceptional condition.\n\nExceptions can also be identified by strings, in which case the\n``except`` clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the ``try`` statement in section *The try\nstatement* and ``raise`` statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by these\n operations is not available at the time the module is compiled.\n',
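The local/global distinction described above is the usual source of ``UnboundLocalError``; a minimal sketch (the variable names are arbitrary):

   >>> x = 10
   >>> def bump():
   ...     x = x + 1        # the assignment makes 'x' local to the whole block ...
   ...     return x
   ...
   >>> bump()               # ... so it is used here before it is bound
   Traceback (most recent call last):
     ...
   UnboundLocalError: local variable 'x' referenced before assignment
   >>> def bump_global():
   ...     global x         # refer to the module-level binding instead
   ...     x = x + 1
   ...     return x
   ...
   >>> bump_global()
   11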
'exprlists': '\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: ``()``.)\n',
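For example:

   >>> t = 1, 2, 3          # an expression list with commas yields a tuple
   >>> len(t)
   3
   >>> singleton = 'spam',  # trailing comma needed for a one-element tuple
   >>> singleton
   ('spam',)
   >>> ()                   # the empty tuple uses an empty pair of parentheses
   ()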
'floating': '\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts of floating point numbers can\nlook like octal integers, but are interpreted using radix 10. For\nexample, ``077e010`` is legal, and denotes the same number as\n``77e10``. The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n 3.14 10. .001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator ``-`` and the\nliteral ``1``.\n',
'for': '\nThe ``for`` statement\n*********************\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the ``else`` clause, if present, is executed, and the loop\nterminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function ``range()`` returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s ``for i := a to b\ndo``; e.g., ``range(3)`` returns the list ``[0, 1, 2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An internal\n counter is used to keep track of which item is used next, and this\n is incremented on each iteration. When this counter has reached the\n length of the sequence the loop terminates. This means that if the\n suite deletes the current (or a previous) item from the sequence,\n the next item will be skipped (since it gets the index of the\n current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n',
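A short sketch of the ``break``/``else`` interaction described above (the function ``find()`` is hypothetical):

   >>> def find(seq, target):
   ...     for i, value in enumerate(seq):
   ...         if value == target:
   ...             print 'found at index', i
   ...             break
   ...     else:                      # reached only when the loop was not broken
   ...         print 'not found'
   ...
   >>> find(['a', 'b', 'c'], 'b')
   found at index 1
   >>> find(['a', 'b', 'c'], 'z')
   not found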
- 'formatstrings': '\nFormat String Syntax\n********************\n\nThe ``str.format()`` method and the ``Formatter`` class share the same\nsyntax for format strings (although in the case of ``Formatter``,\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n``{}``. Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n``{{`` and ``}}``.\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= <any source character except "]"> +\n conversion ::= "r" | "s"\n format_spec ::= <described in the next section>\n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point ``\'!\'``, and a *format_spec*, which\nis preceded by a colon ``\':\'``. These specify a non-default format\nfor the replacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword. If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument. If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings ``\'10\'`` or\n``\':-]\'``) within a format string. The *arg_name* can be followed by\nany number of index or attribute expressions. An expression of the\nform ``\'.name\'`` selects the named attribute using ``getattr()``,\nwhile an expression of the form ``\'[index]\'`` does an index lookup\nusing ``__getitem__()``.\n\nChanged in version 2.7: The positional argument specifiers can be\nomitted, so ``\'{} {}\'`` is equivalent to ``\'{0} {1}\'``.\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the\n``__format__()`` method of the value itself. However, in some cases\nit is desirable to force a type to be formatted as a string,\noverriding its own definition of formatting. 
By converting the value\nto a string before calling ``__format__()``, the normal formatting\nlogic is bypassed.\n\nTwo conversion flags are currently supported: ``\'!s\'`` which calls\n``str()`` on the value, and ``\'!r\'`` which calls ``repr()``.\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in ``format()`` function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string (``""``) produces\nthe same result as if you had called ``str()`` on the value. A non-\nempty format string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= <a character other than \'}\'>\n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nThe *fill* character can be any character other than \'{\' or \'}\'. The\npresence of a fill character is signaled by the character following\nit, which must be one of the alignment options. If the second\ncharacter of *format_spec* is not a valid alignment option, then it is\nassumed that both the fill character and the alignment option are\nabsent.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'<\'`` | Forces the field to be left-aligned within the available |\n | | space (this is the default for most objects). |\n +-----------+------------------------------------------------------------+\n | ``\'>\'`` | Forces the field to be right-aligned within the available |\n | | space (this is the default for numbers). |\n +-----------+------------------------------------------------------------+\n | ``\'=\'`` | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. 
This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | ``\'^\'`` | Forces the field to be centered within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'+\'`` | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | ``\'-\'`` | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). |\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe ``\'#\'`` option is only valid for integers, and only for binary,\noctal, or hexadecimal output. If present, it specifies that the\noutput will be prefixed by ``\'0b\'``, ``\'0o\'``, or ``\'0x\'``,\nrespectively.\n\nThe ``\',\'`` option signals the use of a comma for a thousands\nseparator. For a locale aware separator, use the ``\'n\'`` integer\npresentation type instead.\n\nChanged in version 2.7: Added the ``\',\'`` option (see also **PEP\n378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nIf the *width* field is preceded by a zero (``\'0\'``) character, this\nenables zero-padding. This is equivalent to an *alignment* type of\n``\'=\'`` and a *fill* character of ``\'0\'``.\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with ``\'f\'`` and ``\'F\'``, or before and after the decimal\npoint for a floating point value formatted with ``\'g\'`` or ``\'G\'``.\nFor non-number types the field indicates the maximum field size - in\nother words, how many characters will be used from the field content.\nThe *precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'s\'`` | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'s\'``. |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'b\'`` | Binary format. Outputs the number in base 2. 
|\n +-----------+------------------------------------------------------------+\n | ``\'c\'`` | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | ``\'d\'`` | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | ``\'o\'`` | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | ``\'x\'`` | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'X\'`` | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'d\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'d\'``. |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except\n``\'n\'`` and None). When doing so, ``float()`` is used to convert the\ninteger to a floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'e\'`` | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n +-----------+------------------------------------------------------------+\n | ``\'E\'`` | Exponent notation. Same as ``\'e\'`` except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | ``\'f\'`` | Fixed point. Displays the number as a fixed-point number. |\n +-----------+------------------------------------------------------------+\n | ``\'F\'`` | Fixed point. Same as ``\'f\'``. |\n +-----------+------------------------------------------------------------+\n | ``\'g\'`` | General format. For a given precision ``p >= 1``, this |\n | | rounds the number to ``p`` significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1`` would have exponent ``exp``. Then if ``-4 <= exp |\n | | < p``, the number is formatted with presentation type |\n | | ``\'f\'`` and precision ``p-1-exp``. Otherwise, the number |\n | | is formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1``. In both cases insignificant trailing zeros are |\n | | removed from the significand, and the decimal point is |\n | | also removed if there are no remaining digits following |\n | | it. 
Positive and negative infinity, positive and negative |\n | | zero, and nans, are formatted as ``inf``, ``-inf``, ``0``, |\n | | ``-0`` and ``nan`` respectively, regardless of the |\n | | precision. A precision of ``0`` is treated as equivalent |\n | | to a precision of ``1``. |\n +-----------+------------------------------------------------------------+\n | ``\'G\'`` | General format. Same as ``\'g\'`` except switches to ``\'E\'`` |\n | | if the number gets too large. The representations of |\n | | infinity and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'g\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | ``\'%\'`` | Percentage. Multiplies the number by 100 and displays in |\n | | fixed (``\'f\'``) format, followed by a percent sign. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'g\'``. |\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old ``%``-formatting.\n\nIn most of the cases the syntax is similar to the old\n``%``-formatting, with the addition of the ``{}`` and with ``:`` used\ninstead of ``%``. For example, ``\'%03.2f\'`` can be translated to\n``\'{:03.2f}\'``.\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 2.7+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point(object):\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... 
return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing ``%s`` and ``%r``:\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing ``%+f``, ``%-f``, and ``% f`` and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing ``%x`` and ``%o`` and converting the value to different\nbases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19.5\n >>> total = 22\n >>> \'Correct answers: {:.2%}\'.format(points/total)\n \'Correct answers: 88.64%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> \'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12):\n ... for base in \'dXob\':\n ... print \'{0:{width}{base}}\'.format(num, base=base, width=width),\n ... print\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n',
- 'function': '\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier [, "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level parameters have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding argument may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. 
If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n',
+ 'formatstrings': '\nFormat String Syntax\n********************\n\nThe ``str.format()`` method and the ``Formatter`` class share the same\nsyntax for format strings (although in the case of ``Formatter``,\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n``{}``. Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n``{{`` and ``}}``.\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= <any source character except "]"> +\n conversion ::= "r" | "s"\n format_spec ::= <described in the next section>\n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point ``\'!\'``, and a *format_spec*, which\nis preceded by a colon ``\':\'``. These specify a non-default format\nfor the replacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword. If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument. If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings ``\'10\'`` or\n``\':-]\'``) within a format string. The *arg_name* can be followed by\nany number of index or attribute expressions. An expression of the\nform ``\'.name\'`` selects the named attribute using ``getattr()``,\nwhile an expression of the form ``\'[index]\'`` does an index lookup\nusing ``__getitem__()``.\n\nChanged in version 2.7: The positional argument specifiers can be\nomitted, so ``\'{} {}\'`` is equivalent to ``\'{0} {1}\'``.\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the\n``__format__()`` method of the value itself. However, in some cases\nit is desirable to force a type to be formatted as a string,\noverriding its own definition of formatting. 
By converting the value\nto a string before calling ``__format__()``, the normal formatting\nlogic is bypassed.\n\nTwo conversion flags are currently supported: ``\'!s\'`` which calls\n``str()`` on the value, and ``\'!r\'`` which calls ``repr()``.\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in ``format()`` function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string (``""``) produces\nthe same result as if you had called ``str()`` on the value. A non-\nempty format string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= <any character>\n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nIf a valid *align* value is specified, it can be preceded by a *fill*\ncharacter that can be any character and defaults to a space if\nomitted. Note that it is not possible to use ``{`` and ``}`` as *fill*\nchar while using the ``str.format()`` method; this limitation however\ndoesn\'t affect the ``format()`` function.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'<\'`` | Forces the field to be left-aligned within the available |\n | | space (this is the default for most objects). |\n +-----------+------------------------------------------------------------+\n | ``\'>\'`` | Forces the field to be right-aligned within the available |\n | | space (this is the default for numbers). |\n +-----------+------------------------------------------------------------+\n | ``\'=\'`` | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. 
This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | ``\'^\'`` | Forces the field to be centered within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'+\'`` | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | ``\'-\'`` | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). |\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe ``\'#\'`` option is only valid for integers, and only for binary,\noctal, or hexadecimal output. If present, it specifies that the\noutput will be prefixed by ``\'0b\'``, ``\'0o\'``, or ``\'0x\'``,\nrespectively.\n\nThe ``\',\'`` option signals the use of a comma for a thousands\nseparator. For a locale aware separator, use the ``\'n\'`` integer\npresentation type instead.\n\nChanged in version 2.7: Added the ``\',\'`` option (see also **PEP\n378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nPreceding the *width* field by a zero (``\'0\'``) character enables\nsign-aware zero-padding for numeric types. This is equivalent to a\n*fill* character of ``\'0\'`` with an *alignment* type of ``\'=\'``.\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with ``\'f\'`` and ``\'F\'``, or before and after the decimal\npoint for a floating point value formatted with ``\'g\'`` or ``\'G\'``.\nFor non-number types the field indicates the maximum field size - in\nother words, how many characters will be used from the field content.\nThe *precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'s\'`` | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'s\'``. |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'b\'`` | Binary format. Outputs the number in base 2. 
|\n +-----------+------------------------------------------------------------+\n | ``\'c\'`` | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | ``\'d\'`` | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | ``\'o\'`` | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | ``\'x\'`` | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'X\'`` | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'d\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'d\'``. |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except\n``\'n\'`` and None). When doing so, ``float()`` is used to convert the\ninteger to a floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'e\'`` | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n | | The default precision is ``6``. |\n +-----------+------------------------------------------------------------+\n | ``\'E\'`` | Exponent notation. Same as ``\'e\'`` except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | ``\'f\'`` | Fixed point. Displays the number as a fixed-point number. |\n | | The default precision is ``6``. |\n +-----------+------------------------------------------------------------+\n | ``\'F\'`` | Fixed point. Same as ``\'f\'``. |\n +-----------+------------------------------------------------------------+\n | ``\'g\'`` | General format. For a given precision ``p >= 1``, this |\n | | rounds the number to ``p`` significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1`` would have exponent ``exp``. Then if ``-4 <= exp |\n | | < p``, the number is formatted with presentation type |\n | | ``\'f\'`` and precision ``p-1-exp``. Otherwise, the number |\n | | is formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1``. In both cases insignificant trailing zeros are |\n | | removed from the significand, and the decimal point is |\n | | also removed if there are no remaining digits following |\n | | it. 
Positive and negative infinity, positive and negative |\n | | zero, and nans, are formatted as ``inf``, ``-inf``, ``0``, |\n | | ``-0`` and ``nan`` respectively, regardless of the |\n | | precision. A precision of ``0`` is treated as equivalent |\n | | to a precision of ``1``. The default precision is ``6``. |\n +-----------+------------------------------------------------------------+\n | ``\'G\'`` | General format. Same as ``\'g\'`` except switches to ``\'E\'`` |\n | | if the number gets too large. The representations of |\n | | infinity and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'g\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | ``\'%\'`` | Percentage. Multiplies the number by 100 and displays in |\n | | fixed (``\'f\'``) format, followed by a percent sign. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'g\'``. |\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old ``%``-formatting.\n\nIn most of the cases the syntax is similar to the old\n``%``-formatting, with the addition of the ``{}`` and with ``:`` used\ninstead of ``%``. For example, ``\'%03.2f\'`` can be translated to\n``\'{:03.2f}\'``.\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 2.7+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point(object):\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... 
return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing ``%s`` and ``%r``:\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing ``%+f``, ``%-f``, and ``% f`` and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing ``%x`` and ``%o`` and converting the value to different\nbases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19.5\n >>> total = 22\n >>> \'Correct answers: {:.2%}\'.format(points/total)\n \'Correct answers: 88.64%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> \'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12):\n ... for base in \'dXob\':\n ... print \'{0:{width}{base}}\'.format(num, base=base, width=width),\n ... print\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n',
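Editorial note: the entry above explains that each value type may interpret the *format_spec* through its own ``__format__()`` method. The sketch below illustrates that hook with a hypothetical class (``Temperature`` and its one-letter spec codes are illustrative assumptions, not part of the documentation above); it is a minimal example, not a recommended design.

   >>> class Temperature(object):
   ...     def __init__(self, celsius):
   ...         self.celsius = celsius
   ...     def __format__(self, spec):
   ...         # interpret a tiny custom mini-language: 'f' = Fahrenheit, anything else = Celsius
   ...         if spec == 'f':
   ...             return '{0:.1f}F'.format(self.celsius * 9.0 / 5.0 + 32)
   ...         return '{0:.1f}C'.format(self.celsius)
   ...
   >>> 'Water boils at {0:c} ({0:f})'.format(Temperature(100))
   'Water boils at 100.0C (212.0F)'

The spec text after the colon in each replacement field is passed unchanged to ``__format__()``, which is why the same argument can be rendered two ways in one call.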
+ 'function': '\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier ["," "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level *parameters* have the form *parameter*\n``=`` *expression*, the function is said to have "default parameter\nvalues." For a parameter with a default value, the corresponding\n*argument* may be omitted from a call, in which case the parameter\'s\ndefault value is substituted. If a parameter has a default value, all\nfollowing parameters must also have a default value --- this is a\nsyntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. 
If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section *Lambdas*. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a "``def``" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The "``def``" form is actually more powerful since it\nallows the execution of multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n',
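Editorial note: two behaviours described in the entry above are easy to verify interactively -- the ``*identifier``/``**identifier`` forms collecting excess arguments, and default parameter values being evaluated once at definition time. The function names below (``report``, ``append_to``) are illustrative, not from the documentation.

   >>> def report(first, *rest, **options):
   ...     return (first, rest, options)       # excess positionals -> tuple, excess keywords -> dict
   ...
   >>> report(1, 2, 3, mode='fast')
   (1, (2, 3), {'mode': 'fast'})
   >>> def append_to(item, seq=[]):            # mutable default: evaluated once, then shared
   ...     seq.append(item)
   ...     return seq
   ...
   >>> append_to(1)
   [1]
   >>> append_to(2)                            # same list object as the previous call
   [1, 2]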
'global': '\nThe ``global`` statement\n************************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe ``global`` statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. It would be impossible to assign to a global\nvariable without ``global``, although free variables may refer to\nglobals without being declared global.\n\nNames listed in a ``global`` statement must not be used in the same\ncode block textually preceding that ``global`` statement.\n\nNames listed in a ``global`` statement must not be defined as formal\nparameters or in a ``for`` loop control target, ``class`` definition,\nfunction definition, or ``import`` statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the latter two restrictions, but programs should not abuse\nthis freedom, as future implementations may enforce them or silently\nchange the meaning of the program.\n\n**Programmer\'s note:** the ``global`` is a directive to the parser.\nIt applies only to code parsed at the same time as the ``global``\nstatement. In particular, a ``global`` statement contained in an\n``exec`` statement does not affect the code block *containing* the\n``exec`` statement, and code contained in an ``exec`` statement is\nunaffected by ``global`` statements in the code containing the\n``exec`` statement. The same applies to the ``eval()``,\n``execfile()`` and ``compile()`` functions.\n',
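Editorial note: a minimal interactive sketch of the rule stated above -- assignment inside a function binds a local name unless the name is declared ``global`` (the name ``counter`` and function ``bump`` are illustrative assumptions).

   >>> counter = 0
   >>> def bump():
   ...     global counter          # without this, 'counter = ...' would create a new local name
   ...     counter = counter + 1
   ...
   >>> bump(); bump()
   >>> counter
   2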
'id-classes': '\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``__builtin__`` module.\n When not in interactive mode, ``_`` has no special meaning and is\n not defined. See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of ``__*__`` names, in any context, that does\n not follow explicitly documented use, is subject to breakage\n without warning.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n',
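Editorial note: the ``__*`` (class-private) rule above refers to name mangling. The sketch below shows the mangled attribute name that results; the class ``Widget`` and attribute ``__secret`` are hypothetical examples, not part of the documentation.

   >>> class Widget(object):
   ...     def __init__(self):
   ...         self.__secret = 42              # stored under the mangled name _Widget__secret
   ...
   >>> w = Widget()
   >>> vars(w)
   {'_Widget__secret': 42}
   >>> w._Widget__secret
   42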
'identifiers': '\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions:\n\n identifier ::= (letter|"_") (letter | digit | "_")*\n letter ::= lowercase | uppercase\n lowercase ::= "a"..."z"\n uppercase ::= "A"..."Z"\n digit ::= "0"..."9"\n\nIdentifiers are unlimited in length. Case is significant.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n and del from not while\n as elif global or with\n assert else if pass yield\n break except import print\n class exec in raise\n continue finally is return\n def for lambda try\n\nChanged in version 2.4: ``None`` became a constant and is now\nrecognized by the compiler as a name for the built-in object ``None``.\nAlthough it is not a keyword, you cannot assign a different object to\nit.\n\nChanged in version 2.5: Using ``as`` and ``with`` as identifiers\ntriggers a warning. To use them as keywords, enable the\n``with_statement`` future feature .\n\nChanged in version 2.6: ``as`` and ``with`` are full keywords.\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``__builtin__`` module.\n When not in interactive mode, ``_`` has no special meaning and is\n not defined. See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of ``__*__`` names, in any context, that does\n not follow explicitly documented use, is subject to breakage\n without warning.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n',
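Editorial note: the reserved words listed above can be checked programmatically with the standard-library ``keyword`` module (not mentioned in the entry itself, so this is an aside rather than part of the topic text). The counts and results below are as observed on CPython 2.7.

   >>> import keyword
   >>> keyword.iskeyword('with')      # a full keyword since 2.6
   True
   >>> keyword.iskeyword('None')      # a constant in Python 2, but not a keyword
   False
   >>> len(keyword.kwlist)
   31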
'if': '\nThe ``if`` statement\n********************\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n',
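Editorial note: a small sketch of the "exactly one suite is selected" behaviour described above; the function name ``classify`` is illustrative.

   >>> def classify(x):
   ...     if x > 0:
   ...         return 'positive'
   ...     elif x < 0:
   ...         return 'negative'
   ...     else:
   ...         return 'zero'
   ...
   >>> [classify(n) for n in (-2, 0, 5)]
   ['negative', 'zero', 'positive']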
'imaginary': '\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., ``(3+4j)``. Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n',
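Editorial note: a few interactive lines illustrating the entry above -- an imaginary literal is a ``complex`` with real part 0.0, and a nonzero real part is obtained by adding a float.

   >>> 10j
   10j
   >>> type(10j)
   <type 'complex'>
   >>> (3+4j).real, (3+4j).imag
   (3.0, 4.0)
   >>> abs(3+4j)                  # magnitude of the complex value
   5.0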
- 'import': '\nThe ``import`` statement\n************************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nImport statements are executed in two steps: (1) find a module, and\ninitialize it if necessary; (2) define a name or names in the local\nnamespace (of the scope where the ``import`` statement occurs). The\nstatement comes in two forms differing on whether it uses the ``from``\nkeyword. The first form (without ``from``) repeats these steps for\neach identifier in the list. The form with ``from`` performs step (1)\nonce, and then performs step (2) repeatedly.\n\nTo understand how step (1) occurs, one must first understand how\nPython handles hierarchical naming of modules. To help organize\nmodules and provide a hierarchy in naming, Python has a concept of\npackages. A package can contain other packages and modules while\nmodules cannot contain other modules or packages. From a file system\nperspective, packages are directories and modules are files. The\noriginal specification for packages is still available to read,\nalthough minor details have changed since the writing of that\ndocument.\n\nOnce the name of the module is known (unless otherwise specified, the\nterm "module" will refer to both packages and modules), searching for\nthe module or package can begin. The first place checked is\n``sys.modules``, the cache of all modules that have been imported\npreviously. If the module is found there then it is used in step (2)\nof import.\n\nIf the module is not found in the cache, then ``sys.meta_path`` is\nsearched (the specification for ``sys.meta_path`` can be found in\n**PEP 302**). The object is a list of *finder* objects which are\nqueried in order as to whether they know how to load the module by\ncalling their ``find_module()`` method with the name of the module. If\nthe module happens to be contained within a package (as denoted by the\nexistence of a dot in the name), then a second argument to\n``find_module()`` is given as the value of the ``__path__`` attribute\nfrom the parent package (everything up to the last dot in the name of\nthe module being imported). If a finder can find the module it returns\na *loader* (discussed later) or returns ``None``.\n\nIf none of the finders on ``sys.meta_path`` are able to find the\nmodule then some implicitly defined finders are queried.\nImplementations of Python vary in what implicit meta path finders are\ndefined. The one they all do define, though, is one that handles\n``sys.path_hooks``, ``sys.path_importer_cache``, and ``sys.path``.\n\nThe implicit finder searches for the requested module in the "paths"\nspecified in one of two places ("paths" do not have to be file system\npaths). If the module being imported is supposed to be contained\nwithin a package then the second argument passed to ``find_module()``,\n``__path__`` on the parent package, is used as the source of paths. If\nthe module is not contained in a package then ``sys.path`` is used as\nthe source of paths.\n\nOnce the source of paths is chosen it is iterated over to find a\nfinder that can handle that path. 
The dict at\n``sys.path_importer_cache`` caches finders for paths and is checked\nfor a finder. If the path does not have a finder cached then\n``sys.path_hooks`` is searched by calling each object in the list with\na single argument of the path, returning a finder or raises\n``ImportError``. If a finder is returned then it is cached in\n``sys.path_importer_cache`` and then used for that path entry. If no\nfinder can be found but the path exists then a value of ``None`` is\nstored in ``sys.path_importer_cache`` to signify that an implicit,\nfile-based finder that handles modules stored as individual files\nshould be used for that path. If the path does not exist then a finder\nwhich always returns *None`* is placed in the cache for the path.\n\nIf no finder can find the module then ``ImportError`` is raised.\nOtherwise some finder returned a loader whose ``load_module()`` method\nis called with the name of the module to load (see **PEP 302** for the\noriginal definition of loaders). A loader has several responsibilities\nto perform on a module it loads. First, if the module already exists\nin ``sys.modules`` (a possibility if the loader is called outside of\nthe import machinery) then it is to use that module for initialization\nand not a new module. But if the module does not exist in\n``sys.modules`` then it is to be added to that dict before\ninitialization begins. If an error occurs during loading of the module\nand it was added to ``sys.modules`` it is to be removed from the dict.\nIf an error occurs but the module was already in ``sys.modules`` it is\nleft in the dict.\n\nThe loader must set several attributes on the module. ``__name__`` is\nto be set to the name of the module. ``__file__`` is to be the "path"\nto the file unless the module is built-in (and thus listed in\n``sys.builtin_module_names``) in which case the attribute is not set.\nIf what is being imported is a package then ``__path__`` is to be set\nto a list of paths to be searched when looking for modules and\npackages contained within the package being imported. ``__package__``\nis optional but should be set to the name of package that contains the\nmodule or package (the empty string is used for module not contained\nin a package). ``__loader__`` is also optional but should be set to\nthe loader object that is loading the module.\n\nIf an error occurs during loading then the loader raises\n``ImportError`` if some other exception is not already being\npropagated. Otherwise the loader returns the module that was loaded\nand initialized.\n\nWhen step (1) finishes without raising an exception, step (2) can\nbegin.\n\nThe first form of ``import`` statement binds the module name in the\nlocal namespace to the module object, and then goes on to import the\nnext identifier, if any. If the module name is followed by ``as``,\nthe name following ``as`` is used as the local name for the module.\n\nThe ``from`` form does not bind the module name: it goes through the\nlist of identifiers, looks each one of them up in the module found in\nstep (1), and binds the name in the local namespace to the object thus\nfound. As with the first form of ``import``, an alternate local name\ncan be supplied by specifying "``as`` localname". If a name is not\nfound, ``ImportError`` is raised. 
If the list of identifiers is\nreplaced by a star (``\'*\'``), all public names defined in the module\nare bound in the local namespace of the ``import`` statement..\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named ``__all__``; if defined, it\nmust be a sequence of strings which are names defined or imported by\nthat module. The names given in ``__all__`` are all considered public\nand are required to exist. If ``__all__`` is not defined, the set of\npublic names includes all names found in the module\'s namespace which\ndo not begin with an underscore character (``\'_\'``). ``__all__``\nshould contain the entire public API. It is intended to avoid\naccidentally exporting items that are not part of the API (such as\nlibrary modules which were imported and used within the module).\n\nThe ``from`` form with ``*`` may only occur in a module scope. If the\nwild card form of import --- ``import *`` --- is used in a function\nand the function contains or is a nested block with free variables,\nthe compiler will raise a ``SyntaxError``.\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after ``from``\nyou can specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n``from . import mod`` from a module in the ``pkg`` package then you\nwill end up importing ``pkg.mod``. If you execute ``from ..subpkg2\nimport mod`` from within ``pkg.subpkg1`` you will import\n``pkg.subpkg2.mod``. The specification for relative imports is\ncontained within **PEP 328**.\n\n``importlib.import_module()`` is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python. The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language. It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 2.6 are ``unicode_literals``,\n``print_function``, ``absolute_import``, ``division``, ``generators``,\n``nested_scopes`` and ``with_statement``. 
``generators``,\n``with_statement``, ``nested_scopes`` are redundant in Python version\n2.6 and above because they are always enabled.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module ``__future__``, described later, and it\nwill be imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by an ``exec`` statement or calls to the built-in\nfunctions ``compile()`` and ``execfile()`` that occur in a module\n``M`` containing a future statement will, by default, use the new\nsyntax or semantics associated with the future statement. This can,\nstarting with Python 2.2 be controlled by optional arguments to\n``compile()`` --- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also:\n\n **PEP 236** - Back to the __future__\n The original proposal for the __future__ mechanism.\n',
+ 'import': '\nThe ``import`` statement\n************************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nImport statements are executed in two steps: (1) find a module, and\ninitialize it if necessary; (2) define a name or names in the local\nnamespace (of the scope where the ``import`` statement occurs). The\nstatement comes in two forms differing on whether it uses the ``from``\nkeyword. The first form (without ``from``) repeats these steps for\neach identifier in the list. The form with ``from`` performs step (1)\nonce, and then performs step (2) repeatedly.\n\nTo understand how step (1) occurs, one must first understand how\nPython handles hierarchical naming of modules. To help organize\nmodules and provide a hierarchy in naming, Python has a concept of\npackages. A package can contain other packages and modules while\nmodules cannot contain other modules or packages. From a file system\nperspective, packages are directories and modules are files. The\noriginal specification for packages is still available to read,\nalthough minor details have changed since the writing of that\ndocument.\n\nOnce the name of the module is known (unless otherwise specified, the\nterm "module" will refer to both packages and modules), searching for\nthe module or package can begin. The first place checked is\n``sys.modules``, the cache of all modules that have been imported\npreviously. If the module is found there then it is used in step (2)\nof import.\n\nIf the module is not found in the cache, then ``sys.meta_path`` is\nsearched (the specification for ``sys.meta_path`` can be found in\n**PEP 302**). The object is a list of *finder* objects which are\nqueried in order as to whether they know how to load the module by\ncalling their ``find_module()`` method with the name of the module. If\nthe module happens to be contained within a package (as denoted by the\nexistence of a dot in the name), then a second argument to\n``find_module()`` is given as the value of the ``__path__`` attribute\nfrom the parent package (everything up to the last dot in the name of\nthe module being imported). If a finder can find the module it returns\na *loader* (discussed later) or returns ``None``.\n\nIf none of the finders on ``sys.meta_path`` are able to find the\nmodule then some implicitly defined finders are queried.\nImplementations of Python vary in what implicit meta path finders are\ndefined. The one they all do define, though, is one that handles\n``sys.path_hooks``, ``sys.path_importer_cache``, and ``sys.path``.\n\nThe implicit finder searches for the requested module in the "paths"\nspecified in one of two places ("paths" do not have to be file system\npaths). If the module being imported is supposed to be contained\nwithin a package then the second argument passed to ``find_module()``,\n``__path__`` on the parent package, is used as the source of paths. If\nthe module is not contained in a package then ``sys.path`` is used as\nthe source of paths.\n\nOnce the source of paths is chosen it is iterated over to find a\nfinder that can handle that path. 
The dict at\n``sys.path_importer_cache`` caches finders for paths and is checked\nfor a finder. If the path does not have a finder cached then\n``sys.path_hooks`` is searched by calling each object in the list with\na single argument of the path, returning a finder or raises\n``ImportError``. If a finder is returned then it is cached in\n``sys.path_importer_cache`` and then used for that path entry. If no\nfinder can be found but the path exists then a value of ``None`` is\nstored in ``sys.path_importer_cache`` to signify that an implicit,\nfile-based finder that handles modules stored as individual files\nshould be used for that path. If the path does not exist then a finder\nwhich always returns ``None`` is placed in the cache for the path.\n\nIf no finder can find the module then ``ImportError`` is raised.\nOtherwise some finder returned a loader whose ``load_module()`` method\nis called with the name of the module to load (see **PEP 302** for the\noriginal definition of loaders). A loader has several responsibilities\nto perform on a module it loads. First, if the module already exists\nin ``sys.modules`` (a possibility if the loader is called outside of\nthe import machinery) then it is to use that module for initialization\nand not a new module. But if the module does not exist in\n``sys.modules`` then it is to be added to that dict before\ninitialization begins. If an error occurs during loading of the module\nand it was added to ``sys.modules`` it is to be removed from the dict.\nIf an error occurs but the module was already in ``sys.modules`` it is\nleft in the dict.\n\nThe loader must set several attributes on the module. ``__name__`` is\nto be set to the name of the module. ``__file__`` is to be the "path"\nto the file unless the module is built-in (and thus listed in\n``sys.builtin_module_names``) in which case the attribute is not set.\nIf what is being imported is a package then ``__path__`` is to be set\nto a list of paths to be searched when looking for modules and\npackages contained within the package being imported. ``__package__``\nis optional but should be set to the name of package that contains the\nmodule or package (the empty string is used for module not contained\nin a package). ``__loader__`` is also optional but should be set to\nthe loader object that is loading the module.\n\nIf an error occurs during loading then the loader raises\n``ImportError`` if some other exception is not already being\npropagated. Otherwise the loader returns the module that was loaded\nand initialized.\n\nWhen step (1) finishes without raising an exception, step (2) can\nbegin.\n\nThe first form of ``import`` statement binds the module name in the\nlocal namespace to the module object, and then goes on to import the\nnext identifier, if any. If the module name is followed by ``as``,\nthe name following ``as`` is used as the local name for the module.\n\nThe ``from`` form does not bind the module name: it goes through the\nlist of identifiers, looks each one of them up in the module found in\nstep (1), and binds the name in the local namespace to the object thus\nfound. As with the first form of ``import``, an alternate local name\ncan be supplied by specifying "``as`` localname". If a name is not\nfound, ``ImportError`` is raised. 
If the list of identifiers is\nreplaced by a star (``\'*\'``), all public names defined in the module\nare bound in the local namespace of the ``import`` statement..\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named ``__all__``; if defined, it\nmust be a sequence of strings which are names defined or imported by\nthat module. The names given in ``__all__`` are all considered public\nand are required to exist. If ``__all__`` is not defined, the set of\npublic names includes all names found in the module\'s namespace which\ndo not begin with an underscore character (``\'_\'``). ``__all__``\nshould contain the entire public API. It is intended to avoid\naccidentally exporting items that are not part of the API (such as\nlibrary modules which were imported and used within the module).\n\nThe ``from`` form with ``*`` may only occur in a module scope. If the\nwild card form of import --- ``import *`` --- is used in a function\nand the function contains or is a nested block with free variables,\nthe compiler will raise a ``SyntaxError``.\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after ``from``\nyou can specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n``from . import mod`` from a module in the ``pkg`` package then you\nwill end up importing ``pkg.mod``. If you execute ``from ..subpkg2\nimport mod`` from within ``pkg.subpkg1`` you will import\n``pkg.subpkg2.mod``. The specification for relative imports is\ncontained within **PEP 328**.\n\n``importlib.import_module()`` is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python. The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language. It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 2.6 are ``unicode_literals``,\n``print_function``, ``absolute_import``, ``division``, ``generators``,\n``nested_scopes`` and ``with_statement``. 
``generators``,\n``with_statement``, ``nested_scopes`` are redundant in Python version\n2.6 and above because they are always enabled.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module ``__future__``, described later, and it\nwill be imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by an ``exec`` statement or calls to the built-in\nfunctions ``compile()`` and ``execfile()`` that occur in a module\n``M`` containing a future statement will, by default, use the new\nsyntax or semantics associated with the future statement. This can,\nstarting with Python 2.2 be controlled by optional arguments to\n``compile()`` --- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also:\n\n **PEP 236** - Back to the __future__\n The original proposal for the __future__ mechanism.\n',
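For illustration, a minimal Python 2.7 sketch of a future statement in action, assuming nothing beyond the standard ``__future__`` machinery described above:

    from __future__ import division   # future statement: true division applies to this whole module

    print 7 / 2      # 3.5 under the future semantics (would be 3 without the import)
    print 7 // 2     # 3; floor division is unaffected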
'in': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe forms ``<>`` and ``!=`` are equivalent; for consistency with C,\n``!=`` is preferred; where ``!=`` is mentioned below ``<>`` is also\naccepted. The ``<>`` spelling is considered obsolescent.\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. Otherwise,\nobjects of different types *always* compare unequal, and are ordered\nconsistently but arbitrarily. You can control comparison behavior of\nobjects of non-built-in types by defining a ``__cmp__`` method or rich\ncomparison methods like ``__gt__``, described in section *Special\nmethod names*.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the ``in`` and ``not in``\noperators. In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n Unicode and 8-bit strings are fully interoperable in this behavior.\n [4]\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``cmp([1,2,x], [1,2,y])`` returns\n the same as ``cmp(x,y)``. If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, ``[1,2] <\n [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. 
[6]\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nThe operators ``in`` and ``not in`` test for collection membership.\n``x in s`` evaluates to true if *x* is a member of the collection *s*,\nand false otherwise. ``x not in s`` returns the negation of ``x in\ns``. The collection membership test has traditionally been bound to\nsequences; an object is a member of a collection if the collection is\na sequence and contains an element equal to that object. However, it\nmakes sense for many other object types to support membership tests\nwithout being a sequence. In particular, dictionaries (for keys) and\nsets support membership testing.\n\nFor the list and tuple types, ``x in y`` is true if and only if there\nexists an index *i* such that ``x == y[i]`` is true.\n\nFor the Unicode and string types, ``x in y`` is true if and only if\n*x* is a substring of *y*. An equivalent test is ``y.find(x) != -1``.\nNote, *x* and *y* need not be the same type; consequently, ``u\'ab\' in\n\'abc\'`` will return ``True``. Empty strings are always considered to\nbe a substring of any other string, so ``"" in "abc"`` will return\n``True``.\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength ``1``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` but do\ndefine ``__iter__()``, ``x in y`` is true if some value ``z`` with ``x\n== z`` is produced while iterating over ``y``. If an exception is\nraised during the iteration, it is as if ``in`` raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n``__getitem__()``, ``x in y`` is true if and only if there is a non-\nnegative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise an ``IndexError`` exception. (If any other\nexception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse true value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. [7]\n',
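A short interactive sketch of chaining and membership, assuming a stock CPython 2.7 session:

    >>> x = 5
    >>> 1 < x <= 10              # chained: (1 < x) and (x <= 10), with x evaluated once
    True
    >>> 'ab' in 'abc'            # substring test for string types
    True
    >>> 2 in {1: 'a', 2: 'b'}    # dictionaries test membership against their keys
    True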
'integers': '\nInteger and long integer literals\n*********************************\n\nInteger and long integer literals are described by the following\nlexical definitions:\n\n longinteger ::= integer ("l" | "L")\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"\n octinteger ::= "0" ("o" | "O") octdigit+ | "0" octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n nonzerodigit ::= "1"..."9"\n octdigit ::= "0"..."7"\n bindigit ::= "0" | "1"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n\nAlthough both lower case ``\'l\'`` and upper case ``\'L\'`` are allowed as\nsuffix for long integers, it is strongly recommended to always use\n``\'L\'``, since the letter ``\'l\'`` looks too much like the digit\n``\'1\'``.\n\nPlain integer literals that are above the largest representable plain\ninteger (e.g., 2147483647 when using 32-bit arithmetic) are accepted\nas if they were long integers instead. [1] There is no limit for long\ninteger literals apart from what can be stored in available memory.\n\nSome examples of plain integer literals (first row) and long integer\nliterals (second and third rows):\n\n 7 2147483647 0177\n 3L 79228162514264337593543950336L 0377L 0x100000000L\n 79228162514264337593543950336 0xdeadbeef\n',
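A few literal spellings at the interactive prompt, as a hedged illustration of the grammar above:

    >>> 0x10, 0o20, 0b10000, 0177    # hex, new-style octal and binary spell 16; 0177 is old-style octal for 127
    (16, 16, 16, 127)
    >>> 5L                           # explicit long literal; the upper-case suffix is the recommended form
    5L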
- 'lambda': '\nLambdas\n*******\n\n lambda_form ::= "lambda" [parameter_list]: expression\n old_lambda_form ::= "lambda" [parameter_list]: old_expression\n\nLambda forms (lambda expressions) have the same syntactic position as\nexpressions. They are a shorthand to create anonymous functions; the\nexpression ``lambda arguments: expression`` yields a function object.\nThe unnamed object behaves like a function object defined with\n\n def name(arguments):\n return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda forms cannot contain\nstatements.\n',
- 'lists': '\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | list_comprehension] "]"\n list_comprehension ::= expression list_for\n list_for ::= "for" target_list "in" old_expression_list [list_iter]\n old_expression_list ::= old_expression [("," old_expression)+ [","]]\n old_expression ::= or_test | old_lambda_form\n list_iter ::= list_for | list_if\n list_if ::= "if" old_expression [list_iter]\n\nA list display yields a new list object. Its contents are specified\nby providing either a list of expressions or a list comprehension.\nWhen a comma-separated list of expressions is supplied, its elements\nare evaluated from left to right and placed into the list object in\nthat order. When a list comprehension is supplied, it consists of a\nsingle expression followed by at least one ``for`` clause and zero or\nmore ``for`` or ``if`` clauses. In this case, the elements of the new\nlist are those that would be produced by considering each of the\n``for`` or ``if`` clauses a block, nesting from left to right, and\nevaluating the expression to produce a list element each time the\ninnermost block is reached [1].\n',
+ 'lambda': '\nLambdas\n*******\n\n lambda_expr ::= "lambda" [parameter_list]: expression\n old_lambda_expr ::= "lambda" [parameter_list]: old_expression\n\nLambda expressions (sometimes called lambda forms) have the same\nsyntactic position as expressions. They are a shorthand to create\nanonymous functions; the expression ``lambda arguments: expression``\nyields a function object. The unnamed object behaves like a function\nobject defined with\n\n def name(arguments):\n return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda expressions cannot contain\nstatements.\n',
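A minimal sketch of lambda expressions in a Python 2.7 session:

    >>> add = lambda a, b: a + b           # behaves like: def add(a, b): return a + b
    >>> add(2, 3)
    5
    >>> sorted(['bb', 'a', 'ccc'], key=lambda s: len(s))
    ['a', 'bb', 'ccc']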
+ 'lists': '\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | list_comprehension] "]"\n list_comprehension ::= expression list_for\n list_for ::= "for" target_list "in" old_expression_list [list_iter]\n old_expression_list ::= old_expression [("," old_expression)+ [","]]\n old_expression ::= or_test | old_lambda_expr\n list_iter ::= list_for | list_if\n list_if ::= "if" old_expression [list_iter]\n\nA list display yields a new list object. Its contents are specified\nby providing either a list of expressions or a list comprehension.\nWhen a comma-separated list of expressions is supplied, its elements\nare evaluated from left to right and placed into the list object in\nthat order. When a list comprehension is supplied, it consists of a\nsingle expression followed by at least one ``for`` clause and zero or\nmore ``for`` or ``if`` clauses. In this case, the elements of the new\nlist are those that would be produced by considering each of the\n``for`` or ``if`` clauses a block, nesting from left to right, and\nevaluating the expression to produce a list element each time the\ninnermost block is reached [1].\n',
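For illustration, two list displays evaluated interactively, one plain and one with nested clauses:

    >>> [x * x for x in range(5) if x % 2 == 0]      # single expression, one for clause, one if clause
    [0, 4, 16]
    >>> [(x, c) for x in range(2) for c in 'ab']     # clauses nest left to right
    [(0, 'a'), (0, 'b'), (1, 'a'), (1, 'b')]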
'naming': "\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the '**-c**' option) is a code block. The file read by the\nbuilt-in function ``execfile()`` is a code block. The string argument\npassed to the built-in function ``eval()`` and to the ``exec``\nstatement is a code block. The expression read and evaluated by the\nbuilt-in function ``input()`` is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block's execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block's *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a ``NameError`` exception is raised.\nIf the name refers to a local variable that has not been bound, a\n``UnboundLocalError`` exception is raised. ``UnboundLocalError`` is a\nsubclass of ``NameError``.\n\nThe following constructs bind names: formal parameters to functions,\n``import`` statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, ``for`` loop header, in the\nsecond position of an ``except`` clause header or after ``as`` in a\n``with`` statement. The ``import`` statement of the form ``from ...\nimport *`` binds all names defined in the imported module, except\nthose beginning with an underscore. This form may only be used at the\nmodule level.\n\nA target occurring in a ``del`` statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). 
It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a ``SyntaxError``.\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module ``__builtin__``. The global namespace is searched\nfirst. If the name is not found there, the builtins namespace is\nsearched. The global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name ``__builtins__`` in its\nglobal namespace; this should be a dictionary or a module (in the\nlatter case the module's dictionary is used). By default, when in the\n``__main__`` module, ``__builtins__`` is the built-in module\n``__builtin__`` (note: no 's'); when in any other module,\n``__builtins__`` is an alias for the dictionary of the ``__builtin__``\nmodule itself. ``__builtins__`` can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n``__builtins__``; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should ``import``\nthe ``__builtin__`` (no 's') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n``__main__``.\n\nThe ``global`` statement has the same scope as a name binding\noperation in the same block. If the nearest enclosing scope for a\nfree variable contains a global statement, the free variable is\ntreated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. 
An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nIf ``exec`` is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n``SyntaxError`` unless the exec explicitly specifies the local\nnamespace for the ``exec``. (In other words, ``exec obj`` would be\nillegal, but ``exec obj in ns`` would be legal.)\n\nThe ``eval()``, ``execfile()``, and ``input()`` functions and the\n``exec`` statement do not have access to the full environment for\nresolving names. Names may be resolved in the local and global\nnamespaces of the caller. Free variables are not resolved in the\nnearest enclosing namespace, but in the global namespace. [1] The\n``exec`` statement and the ``eval()`` and ``execfile()`` functions\nhave optional arguments to override the global and local namespace.\nIf only one namespace is specified, it is used for both.\n",
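A small sketch of the "a binding anywhere in the block makes the name local everywhere in the block" rule, assuming a standard CPython 2.7 interpreter:

    >>> x = 10
    >>> def f():
    ...     print x        # x is bound later in f, so it is local throughout the block
    ...     x = 20
    ...
    >>> f()
    Traceback (most recent call last):
      ...
    UnboundLocalError: local variable 'x' referenced before assignment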
'numbers': "\nNumeric literals\n****************\n\nThere are four types of numeric literals: plain integers, long\nintegers, floating point numbers, and imaginary numbers. There are no\ncomplex literals (complex numbers can be formed by adding a real\nnumber and an imaginary number).\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator '``-``' and\nthe literal ``1``.\n",
'numeric-types': '\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``//``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``). For\n instance, to evaluate the expression ``x + y``, where *x* is an\n instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()`` (described below). Note\n that ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator (``/``) is implemented by these methods. The\n ``__truediv__()`` method is used when ``__future__.division`` is in\n effect, otherwise ``__div__()`` is used. If only one of these two\n methods is defined, the object will not support division in the\n alternate context; ``TypeError`` will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``) with\n reflected (swapped) operands. These functions are only called if\n the left operand does not support the corresponding operation and\n the operands are of different types. [2] For instance, to evaluate\n the expression ``x - y``, where *y* is an instance of a class that\n has an ``__rsub__()`` method, ``y.__rsub__(x)`` is called if\n ``x.__sub__(y)`` returns *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. 
This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``long()``, and ``float()``. Should return a value of\n the appropriate type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions ``oct()`` and ``hex()``.\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing). Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or ``None`` if conversion is impossible. When\n the common type would be the type of ``other``, it is sufficient to\n return ``None``, since the interpreter will also ask the other\n object to attempt a coercion (but sometimes, if the implementation\n of the other type cannot be changed, it is useful to do the\n conversion to the other type here). A return value of\n ``NotImplemented`` is equivalent to returning ``None``.\n',
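To make the binary and reflected hooks concrete, here is a hedged sketch using a hypothetical ``Meters`` wrapper class (the class name and behaviour are illustrative only):

    class Meters(object):
        """Toy numeric wrapper showing __add__ and __radd__ (hypothetical example)."""
        def __init__(self, value):
            self.value = value
        def __add__(self, other):
            if isinstance(other, Meters):
                return Meters(self.value + other.value)
            if isinstance(other, (int, long, float)):
                return Meters(self.value + other)
            return NotImplemented            # lets Python try other.__radd__(self) instead
        __radd__ = __add__                   # addition is symmetric for this type
        def __repr__(self):
            return 'Meters(%r)' % self.value

    print Meters(2) + 3      # Meters(5)
    print 3 + Meters(2)      # Meters(5), reached via __radd__ after int's __add__ returns NotImplemented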
'objects': '\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'``is``\' operator compares the\nidentity of two objects; the ``id()`` function returns an integer\nrepresenting its identity (currently implemented as its address). An\nobject\'s *type* is also unchangeable. [1] An object\'s type determines\nthe operations that the object supports (e.g., "does it have a\nlength?") and also defines the possible values for objects of that\ntype. The ``type()`` function returns an object\'s type (which is an\nobject itself). The *value* of some objects can change. Objects\nwhose value can change are said to be *mutable*; objects whose value\nis unchangeable once they are created are called *immutable*. (The\nvalue of an immutable container object that contains a reference to a\nmutable object can change when the latter\'s value is changed; however\nthe container is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the ``gc`` module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. Do not depend\non immediate finalization of objects when they become unreachable (ex:\nalways close files).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'``try``...``except``\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a ``close()`` method. Programs\nare strongly recommended to explicitly close such objects. The\n\'``try``...``finally``\' statement provides a convenient way to do\nthis.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. 
In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. E.g., after ``a = 1; b =\n1``, ``a`` and ``b`` may or may not refer to the same object with the\nvalue one, depending on the implementation, but after ``c = []; d =\n[]``, ``c`` and ``d`` are guaranteed to refer to two different,\nunique, newly created empty lists. (Note that ``c = d = []`` assigns\nthe same object to both ``c`` and ``d``.)\n',
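A quick identity sketch; note that the result for small integers is a CPython caching detail and is not guaranteed by the language:

    >>> c = []; d = []
    >>> c is d                # two list displays always create distinct mutable objects
    False
    >>> a = 1; b = 1
    >>> a is b                # may be True in CPython (small-int caching), but is implementation-defined
    True
    >>> e = f = []            # one object bound to two names
    >>> e is f
    True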
- 'operator-summary': '\nSummary\n*******\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section *Comparisons* --- and exponentiation, which groups\nfrom right to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| ``lambda`` | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| ``if`` -- ``else`` | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| ``or`` | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| ``and`` | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| ``not`` *x* | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``in``, ``not`` ``in``, ``is``, ``is not``, | Comparisons, including membership |\n| ``<``, ``<=``, ``>``, ``>=``, ``<>``, ``!=``, | tests and identity tests, |\n| ``==`` | |\n+-------------------------------------------------+---------------------------------------+\n| ``|`` | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| ``^`` | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| ``&`` | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| ``<<``, ``>>`` | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| ``+``, ``-`` | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| ``*``, ``/``, ``//``, ``%`` | Multiplication, division, remainder |\n| | [8] |\n+-------------------------------------------------+---------------------------------------+\n| ``+x``, ``-x``, ``~x`` | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``**`` | Exponentiation [9] |\n+-------------------------------------------------+---------------------------------------+\n| ``x[index]``, ``x[index:index]``, | Subscription, slicing, call, |\n| ``x(arguments...)``, ``x.attribute`` | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| ``(expressions...)``, ``[expressions...]``, | Binding or tuple display, list |\n| ``{key:datum...}``, ```expressions...``` | display, dictionary display, string |\n| | conversion |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] In Python 2.3 and later releases, a list comprehension "leaks" the\n control variables of each ``for`` it contains into the containing\n scope. 
However, this behavior is deprecated, and relying on it\n will not work in Python 3.0\n\n[2] While ``abs(x%y) < abs(y)`` is true mathematically, for floats it\n may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that ``-1e-100 % 1e100`` have the same\n sign as ``1e100``, the computed result is ``-1e-100 + 1e100``,\n which is numerically exactly equal to ``1e100``. The function\n ``math.fmod()`` returns a result whose sign matches the sign of\n the first argument instead, and so returns ``-1e-100`` in this\n case. Which approach is more appropriate depends on the\n application.\n\n[3] If x is very close to an exact integer multiple of y, it\'s\n possible for ``floor(x/y)`` to be one larger than ``(x-x%y)/y``\n due to rounding. In such cases, Python returns the latter result,\n in order to preserve that ``divmod(x,y)[0] * y + x % y`` be very\n close to ``x``.\n\n[4] While comparisons between unicode strings make sense at the byte\n level, they may be counter-intuitive to users. For example, the\n strings ``u"\\u00C7"`` and ``u"\\u0043\\u0327"`` compare differently,\n even though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using ``unicodedata.normalize()``.\n\n[5] The implementation computes this efficiently, without constructing\n lists or sorting.\n\n[6] Earlier versions of Python used lexicographic comparison of the\n sorted (key, value) lists, but this was very expensive for the\n common case of comparing for equality. An even earlier version of\n Python compared dictionaries by identity only, but this caused\n surprises because people expected to be able to test a dictionary\n for emptiness by comparing it to ``{}``.\n\n[7] Due to automatic garbage-collection, free lists, and the dynamic\n nature of descriptors, you may notice seemingly unusual behaviour\n in certain uses of the ``is`` operator, like those involving\n comparisons between instance methods, or constants. Check their\n documentation for more info.\n\n[8] The ``%`` operator is also used for string formatting; the same\n precedence applies.\n\n[9] The power operator ``**`` binds less tightly than an arithmetic or\n bitwise unary operator on its right, that is, ``2**-1`` is\n ``0.5``.\n',
+ 'operator-summary': '\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section *Comparisons* --- and exponentiation, which groups\nfrom right to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| ``lambda`` | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| ``if`` -- ``else`` | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| ``or`` | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| ``and`` | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| ``not`` ``x`` | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``in``, ``not in``, ``is``, ``is not``, ``<``, | Comparisons, including membership |\n| ``<=``, ``>``, ``>=``, ``<>``, ``!=``, ``==`` | tests and identity tests |\n+-------------------------------------------------+---------------------------------------+\n| ``|`` | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| ``^`` | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| ``&`` | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| ``<<``, ``>>`` | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| ``+``, ``-`` | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| ``*``, ``/``, ``//``, ``%`` | Multiplication, division, remainder |\n| | [8] |\n+-------------------------------------------------+---------------------------------------+\n| ``+x``, ``-x``, ``~x`` | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| ``**`` | Exponentiation [9] |\n+-------------------------------------------------+---------------------------------------+\n| ``x[index]``, ``x[index:index]``, | Subscription, slicing, call, |\n| ``x(arguments...)``, ``x.attribute`` | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| ``(expressions...)``, ``[expressions...]``, | Binding or tuple display, list |\n| ``{key: value...}``, ```expressions...``` | display, dictionary display, string |\n| | conversion |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] In Python 2.3 and later releases, a list comprehension "leaks" the\n control variables of each ``for`` it contains into the containing\n scope. 
However, this behavior is deprecated, and relying on it\n will not work in Python 3.\n\n[2] While ``abs(x%y) < abs(y)`` is true mathematically, for floats it\n may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that ``-1e-100 % 1e100`` have the same\n sign as ``1e100``, the computed result is ``-1e-100 + 1e100``,\n which is numerically exactly equal to ``1e100``. The function\n ``math.fmod()`` returns a result whose sign matches the sign of\n the first argument instead, and so returns ``-1e-100`` in this\n case. Which approach is more appropriate depends on the\n application.\n\n[3] If x is very close to an exact integer multiple of y, it\'s\n possible for ``floor(x/y)`` to be one larger than ``(x-x%y)/y``\n due to rounding. In such cases, Python returns the latter result,\n in order to preserve that ``divmod(x,y)[0] * y + x % y`` be very\n close to ``x``.\n\n[4] While comparisons between unicode strings make sense at the byte\n level, they may be counter-intuitive to users. For example, the\n strings ``u"\\u00C7"`` and ``u"\\u0043\\u0327"`` compare differently,\n even though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using ``unicodedata.normalize()``.\n\n[5] The implementation computes this efficiently, without constructing\n lists or sorting.\n\n[6] Earlier versions of Python used lexicographic comparison of the\n sorted (key, value) lists, but this was very expensive for the\n common case of comparing for equality. An even earlier version of\n Python compared dictionaries by identity only, but this caused\n surprises because people expected to be able to test a dictionary\n for emptiness by comparing it to ``{}``.\n\n[7] Due to automatic garbage-collection, free lists, and the dynamic\n nature of descriptors, you may notice seemingly unusual behaviour\n in certain uses of the ``is`` operator, like those involving\n comparisons between instance methods, or constants. Check their\n documentation for more info.\n\n[8] The ``%`` operator is also used for string formatting; the same\n precedence applies.\n\n[9] The power operator ``**`` binds less tightly than an arithmetic or\n bitwise unary operator on its right, that is, ``2**-1`` is\n ``0.5``.\n',
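A few interactive examples of how the precedence table above resolves common expressions:

    >>> 2 + 3 * 4            # * binds more tightly than +
    14
    >>> -1 ** 2              # ** binds more tightly than unary minus: -(1 ** 2)
    -1
    >>> not 1 in [1, 2]      # comparisons bind more tightly than not: not (1 in [1, 2])
    False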
'pass': '\nThe ``pass`` statement\n**********************\n\n pass_stmt ::= "pass"\n\n``pass`` is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n',
'power': '\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): ``-1**2`` results in ``-1``.\n\nThe power operator has the same semantics as the built-in ``pow()``\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type. The result type is that of the\narguments after coercion.\n\nWith mixed operand types, the coercion rules for binary arithmetic\noperators apply. For int and long int operands, the result has the\nsame type as the operands (after coercion) unless the second argument\nis negative; in that case, all arguments are converted to float and a\nfloat result is delivered. For example, ``10**2`` returns ``100``, but\n``10**-2`` returns ``0.01``. (This last feature was added in Python\n2.2. In Python 2.1 and before, if both arguments were of integer types\nand the second argument was negative, an exception was raised).\n\nRaising ``0.0`` to a negative power results in a\n``ZeroDivisionError``. Raising a negative number to a fractional power\nresults in a ``ValueError``.\n',
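A brief sketch of the integer/float result rule for ``**`` in Python 2.7:

    >>> 10 ** 2, 10 ** -2    # a negative exponent gives a float result for integer operands
    (100, 0.01)
    >>> 2 ** -1
    0.5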
+ 'print': '\nThe ``print`` statement\n***********************\n\n print_stmt ::= "print" ([expression ("," expression)* [","]]\n | ">>" expression [("," expression)+ [","]])\n\n``print`` evaluates each expression in turn and writes the resulting\nobject to standard output (see below). If an object is not a string,\nit is first converted to a string using the rules for string\nconversions. The (resulting or original) string is then written. A\nspace is written before each object is (converted and) written, unless\nthe output system believes it is positioned at the beginning of a\nline. This is the case (1) when no characters have yet been written\nto standard output, (2) when the last character written to standard\noutput is a whitespace character except ``\' \'``, or (3) when the last\nwrite operation on standard output was not a ``print`` statement. (In\nsome cases it may be functional to write an empty string to standard\noutput for this reason.)\n\nNote: Objects which act like file objects but which are not the built-in\n file objects often do not properly emulate this aspect of the file\n object\'s behavior, so it is best not to rely on this.\n\nA ``\'\\n\'`` character is written at the end, unless the ``print``\nstatement ends with a comma. This is the only action if the statement\ncontains just the keyword ``print``.\n\nStandard output is defined as the file object named ``stdout`` in the\nbuilt-in module ``sys``. If no such object exists, or if it does not\nhave a ``write()`` method, a ``RuntimeError`` exception is raised.\n\n``print`` also has an extended form, defined by the second portion of\nthe syntax described above. This form is sometimes referred to as\n"``print`` chevron." In this form, the first expression after the\n``>>`` must evaluate to a "file-like" object, specifically an object\nthat has a ``write()`` method as described above. With this extended\nform, the subsequent expressions are printed to this file object. If\nthe first expression evaluates to ``None``, then ``sys.stdout`` is\nused as the file for output.\n',
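A minimal Python 2 script sketch of the plain and chevron forms of ``print``:

    import sys

    print 'x =', 3                    # writes "x = 3" and a trailing newline
    print 'partial',                  # the trailing comma suppresses the newline
    print 'line'                      # continues the same line: "partial line"
    print >> sys.stderr, 'oops'       # extended "chevron" form: write to any object with a write() method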
'raise': '\nThe ``raise`` statement\n***********************\n\n raise_stmt ::= "raise" [expression ["," expression ["," expression]]]\n\nIf no expressions are present, ``raise`` re-raises the last exception\nthat was active in the current scope. If no exception is active in\nthe current scope, a ``TypeError`` exception is raised indicating that\nthis is an error (if running under IDLE, a ``Queue.Empty`` exception\nis raised instead).\n\nOtherwise, ``raise`` evaluates the expressions to get three objects,\nusing ``None`` as the value of omitted expressions. The first two\nobjects are used to determine the *type* and *value* of the exception.\n\nIf the first object is an instance, the type of the exception is the\nclass of the instance, the instance itself is the value, and the\nsecond object must be ``None``.\n\nIf the first object is a class, it becomes the type of the exception.\nThe second object is used to determine the exception value: If it is\nan instance of the class, the instance becomes the exception value. If\nthe second object is a tuple, it is used as the argument list for the\nclass constructor; if it is ``None``, an empty argument list is used,\nand any other object is treated as a single argument to the\nconstructor. The instance so created by calling the constructor is\nused as the exception value.\n\nIf a third object is present and not ``None``, it must be a traceback\nobject (see section *The standard type hierarchy*), and it is\nsubstituted instead of the current location as the place where the\nexception occurred. If the third object is present and not a\ntraceback object or ``None``, a ``TypeError`` exception is raised.\nThe three-expression form of ``raise`` is useful to re-raise an\nexception transparently in an except clause, but ``raise`` with no\nexpressions should be preferred if the exception to be re-raised was\nthe most recently active exception in the current scope.\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information about handling exceptions is in section\n*The try statement*.\n',
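A hedged sketch of the three-expression form, using a hypothetical helper to re-raise with the original traceback:

    import sys

    def reraise_with_traceback():
        try:
            1 / 0
        except ZeroDivisionError:
            # three-expression form: new type and value, original traceback preserved
            raise RuntimeError, 'division failed', sys.exc_info()[2]

    try:
        reraise_with_traceback()
    except RuntimeError, exc:
        print 'caught:', exc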
'return': '\nThe ``return`` statement\n************************\n\n return_stmt ::= "return" [expression_list]\n\n``return`` may only occur syntactically nested in a function\ndefinition, not within a nested class definition.\n\nIf an expression list is present, it is evaluated, else ``None`` is\nsubstituted.\n\n``return`` leaves the current function call with the expression list\n(or ``None``) as return value.\n\nWhen ``return`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nleaving the function.\n\nIn a generator function, the ``return`` statement is not allowed to\ninclude an ``expression_list``. In that context, a bare ``return``\nindicates that the generator is done and will cause ``StopIteration``\nto be raised.\n',
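A short sketch of ``return`` interacting with ``finally`` and with generators:

    def f():
        try:
            return 'result'      # the finally clause still runs before control leaves f
        finally:
            print 'cleaning up'

    def gen():
        yield 1
        return                   # bare return ends the generator (StopIteration for the caller)

    print f()                    # prints "cleaning up", then "result"
    print list(gen())            # [1]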
'sequence-types': "\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. (For backwards compatibility, the method\n``__getslice__()`` (see below) can also be defined to handle simple,\nbut not extended slices.) It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``has_key()``,\n``get()``, ``clear()``, ``setdefault()``, ``iterkeys()``,\n``itervalues()``, ``iteritems()``, ``pop()``, ``popitem()``,\n``copy()``, and ``update()`` behaving similar to those for Python's\nstandard dictionary objects. The ``UserDict`` module provides a\n``DictMixin`` class to help create those methods from a base set of\n``__getitem__()``, ``__setitem__()``, ``__delitem__()``, and\n``keys()``. Mutable sequences should provide methods ``append()``,\n``count()``, ``index()``, ``extend()``, ``insert()``, ``pop()``,\n``remove()``, ``reverse()`` and ``sort()``, like Python standard list\nobjects. Finally, sequence types should implement addition (meaning\nconcatenation) and multiplication (meaning repetition) by defining the\nmethods ``__add__()``, ``__radd__()``, ``__iadd__()``, ``__mul__()``,\n``__rmul__()`` and ``__imul__()`` described below; they should not\ndefine ``__coerce__()`` or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should be equivalent of ``has_key()``;\nfor sequences, it should search through the values. It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``iterkeys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn't define a ``__nonzero__()`` method and whose\n ``__len__()`` method returns zero is considered to be false in a\n Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. 
This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``iterkeys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` built-in will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). Objects that\n support the sequence protocol should only provide\n ``__reversed__()`` if they can provide an implementation that is\n more efficient than the one provided by ``reversed()``.\n\n New in version 2.6.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don't define ``__contains__()``, the membership\n test first tries iteration via ``__iter__()``, then the old\n sequence iteration protocol via ``__getitem__()``, see *this\n section in the language reference*.\n",
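As an illustration of the sequence protocol described above, here is a hedged sketch using a hypothetical read-only ``Squares`` class; iteration and membership fall back to ``__getitem__()`` because no ``__iter__()`` or ``__contains__()`` is defined:

    class Squares(object):
        """Read-only sequence of the first n perfect squares (hypothetical example)."""
        def __init__(self, n):
            self.n = n
        def __len__(self):
            return self.n
        def __getitem__(self, index):
            if not 0 <= index < self.n:
                raise IndexError(index)   # lets for-loops and the in operator detect the end
            return index * index

    squares = Squares(5)
    print len(squares)            # 5
    print list(squares)           # [0, 1, 4, 9, 16]
    print 9 in squares            # True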
'shifting': '\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept plain or long integers as arguments. The\narguments are converted to a common type. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as division by ``pow(2, n)``. A\nleft shift by *n* bits is defined as multiplication with ``pow(2,\nn)``. Negative shift counts raise a ``ValueError`` exception.\n\nNote: In the current implementation, the right-hand operand is required to\n be at most ``sys.maxsize``. If the right-hand operand is larger\n than ``sys.maxsize`` an ``OverflowError`` exception is raised.\n',
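A quick interactive sketch of the shift semantics, including the negative-count error:

    >>> 1 << 4               # multiplication by pow(2, 4)
    16
    >>> -17 >> 2             # floor division by pow(2, 2)
    -5
    >>> 1 << -1
    Traceback (most recent call last):
      ...
    ValueError: negative shift count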
'slicings': '\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or ``del`` statements. The syntax for a\nslicing:\n\n slicing ::= simple_slicing | extended_slicing\n simple_slicing ::= primary "[" short_slice "]"\n extended_slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice | ellipsis\n proper_slice ::= short_slice | long_slice\n short_slice ::= [lower_bound] ":" [upper_bound]\n long_slice ::= short_slice ":" [stride]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n ellipsis ::= "..."\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice nor ellipses). Similarly, when the slice\nlist has exactly one short slice and no trailing comma, the\ninterpretation as a simple slicing takes priority over that as an\nextended slicing.\n\nThe semantics for a simple slicing are as follows. The primary must\nevaluate to a sequence object. The lower and upper bound expressions,\nif present, must evaluate to plain integers; defaults are zero and the\n``sys.maxint``, respectively. If either bound is negative, the\nsequence\'s length is added to it. The slicing now selects all items\nwith index *k* such that ``i <= k < j`` where *i* and *j* are the\nspecified lower and upper bounds. This may be an empty sequence. It\nis not an error if *i* or *j* lie outside the range of valid indexes\n(such items don\'t exist so they aren\'t selected).\n\nThe semantics for an extended slicing are as follows. The primary\nmust evaluate to a mapping object, and it is indexed with a key that\nis constructed from the slice list, as follows. If the slice list\ncontains at least one comma, the key is a tuple containing the\nconversion of the slice items; otherwise, the conversion of the lone\nslice item is the key. The conversion of a slice item that is an\nexpression is that expression. The conversion of an ellipsis slice\nitem is the built-in ``Ellipsis`` object. The conversion of a proper\nslice is a slice object (see section *The standard type hierarchy*)\nwhose ``start``, ``stop`` and ``step`` attributes are the values of\nthe expressions given as lower bound, upper bound and stride,\nrespectively, substituting ``None`` for missing expressions.\n',
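For illustration, simple and extended slicings in a Python 2.7 session:

    >>> s = 'abcdef'
    >>> s[1:4]               # simple slicing: items with index k such that 1 <= k < 4
    'bcd'
    >>> s[-2:]               # a negative bound has the sequence length added to it
    'ef'
    >>> range(10)[1:8:2]     # extended slicing with a stride
    [1, 3, 5, 7]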
'specialattrs': '\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the ``dir()`` built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object\'s\n (writable) attributes.\n\nobject.__methods__\n\n Deprecated since version 2.2: Use the built-in function ``dir()``\n to get a list of an object\'s attributes. This attribute is no\n longer available.\n\nobject.__members__\n\n Deprecated since version 2.2: Use the built-in function ``dir()``\n to get a list of an object\'s attributes. This attribute is no\n longer available.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\nclass.__name__\n\n The name of the class or type.\n\nThe following attributes are only supported by *new-style class*es.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. It is called at class\n instantiation, and its result is stored in ``__mro__``.\n\nclass.__subclasses__()\n\n Each new-style class keeps a list of weak references to its\n immediate subclasses. This method returns a list of all those\n references still alive. Example:\n\n >>> int.__subclasses__()\n [<type \'bool\'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found in\n the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list ``[1, 2]`` is considered equal to\n ``[1.0, 2.0]``, and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n operands.\n\n[4] Cased characters are those with general category property being\n one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase), or "Lt"\n (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a singleton\n tuple whose only element is the tuple to be formatted.\n\n[6] The advantage of leaving the newline on is that returning an empty\n string is then an unambiguous EOF indication. It is also possible\n (in cases where it might matter, for example, if you want to make\n an exact copy of a file while scanning its lines) to tell whether\n the last line of a file ended in a newline or not (yes this\n happens!).\n',
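# Editorial note: a brief doctest-style sketch (not part of the generated
# topic data above) showing the special attributes listed in the
# 'specialattrs' topic; the class names are invented for the example:
#
#     >>> class Base(object): pass
#     ...
#     >>> class Derived(Base): pass
#     ...
#     >>> d = Derived()
#     >>> d.x = 1
#     >>> d.__dict__
#     {'x': 1}
#     >>> d.__class__ is Derived
#     True
#     >>> Derived.__bases__ == (Base,)
#     True
#     >>> [c.__name__ for c in Derived.__mro__]
#     ['Derived', 'Base', 'object']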
- 'specialnames': '\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named ``__getitem__()``, and ``x`` is an instance of this\nclass, then ``x[i]`` is roughly equivalent to ``x.__getitem__(i)`` for\nold-style classes and ``type(x).__getitem__(x, i)`` for new-style\nclasses. Except where mentioned, attempts to execute an operation\nraise an exception when no appropriate method is defined (typically\n``AttributeError`` or ``TypeError``).\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n``NodeList`` interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) 
for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_traceback`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.exc_traceback`` or ``sys.last_traceback``. Circular\n references which are garbage are detected when the option cycle\n detector is enabled (it\'s on by default), but can only be cleaned\n up if there are no Python-level ``__del__()`` methods involved.\n Refer to the documentation for the ``gc`` module for more\n information about how ``__del__()`` methods are handled by the\n cycle detector, particularly the description of the ``garbage``\n value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\n See also the *-R* command-line option.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function and by string\n conversions (reverse quotes) to compute the "official" string\n representation of an object. If at all possible, this should look\n like a valid Python expression that could be used to recreate an\n object with the same value (given an appropriate environment). If\n this is not possible, a string of the form ``<...some useful\n description...>`` should be returned. The return value must be a\n string object. 
If a class defines ``__repr__()`` but not\n ``__str__()``, then ``__repr__()`` is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the ``str()`` built-in function and by the ``print``\n statement to compute the "informal" string representation of an\n object. This differs from ``__repr__()`` in that it does not have\n to be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to ``__cmp__()`` below. The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` and\n ``x<>y`` call ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and\n ``x>=y`` calls ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if ``self < other``,\n zero if ``self == other``, a positive integer if ``self > other``.\n If no ``__cmp__()``, ``__eq__()`` or ``__ne__()`` operation is\n defined, class instances are compared by object identity\n ("address"). See also the description of ``__hash__()`` for some\n important notes on creating *hashable* objects which support custom\n comparison operations and are usable as dictionary keys. 
(Note: the\n restriction that exceptions are not propagated by ``__cmp__()`` has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define a ``__cmp__()`` or ``__eq__()`` method\n it should not define a ``__hash__()`` operation either; if it\n defines ``__cmp__()`` or ``__eq__()`` but not ``__hash__()``, its\n instances will not be usable in hashed collections. If a class\n defines mutable objects and implements a ``__cmp__()`` or\n ``__eq__()`` method, it should not implement ``__hash__()``, since\n hashable collection implementations require that an object\'s hash\n value is immutable (if the object\'s hash value changes, it will be\n in the wrong hash bucket).\n\n User-defined classes have ``__cmp__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns ``id(x)``.\n\n Classes which inherit a ``__hash__()`` method from a parent class\n but change the meaning of ``__cmp__()`` or ``__eq__()`` such that\n the hash value returned is no longer appropriate (e.g. by switching\n to a value-based concept of equality instead of the default\n identity based equality) can explicitly flag themselves as being\n unhashable by setting ``__hash__ = None`` in the class definition.\n Doing so means that not only will instances of the class raise an\n appropriate ``TypeError`` when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking ``isinstance(obj, collections.Hashable)``\n (unlike classes which define their own ``__hash__()`` to explicitly\n raise ``TypeError``).\n\n Changed in version 2.5: ``__hash__()`` may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: ``__hash__`` may now be set to ``None`` to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``, or their integer\n equivalents ``0`` or ``1``. When this method is not defined,\n ``__len__()`` is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither ``__len__()`` nor ``__nonzero__()``, all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement ``unicode()`` built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. 
it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should not simply execute ``self.name = value`` --- this would\n cause a recursive call to itself. Instead, it should insert the\n value in the dictionary of instance attributes, e.g.,\n ``self.__dict__[name] = value``. For new-style classes, rather\n than accessing the instance dictionary, it should call the base\n class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n-------------------------------------------\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. See *Special method lookup for new-style\n classes*.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' ``__dict__``.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). 
*owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass ``object()`` or\n``type()``).\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to a new-style object instance, ``a.x`` is transformed\n into the call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a new-style class, ``A.x`` is transformed into the\n call: ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, obj.__class__)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. 
This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises ``AttributeError``. If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding ``\'__dict__\'`` to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n Changed in version 2.3: Previously, adding ``\'__weakref__\'`` to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``long``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. 
Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, new-style classes are constructed using ``type()``. A\nclass definition is read into a separate namespace and the value of\nclass name is bound to the result of ``type(name, bases, dict)``.\n\nWhen the class definition is read, if *__metaclass__* is defined then\nthe callable assigned to it will be called instead of ``type()``. This\nallows classes or functions to be written which monitor or alter the\nclass creation process:\n\n* Modifying the class dictionary prior to the class being created.\n\n* Returning an instance of another class -- essentially performing the\n role of a factory function.\n\nThese steps will have to be performed in the metaclass\'s ``__new__()``\nmethod -- ``type.__new__()`` can then be called from this method to\ncreate a class with different properties. This example adds a new\nelement to the class dictionary before creating the class:\n\n class metacls(type):\n     def __new__(mcs, name, bases, dict):\n         dict[\'foo\'] = \'metacls was here\'\n         return type.__new__(mcs, name, bases, dict)\n\nYou can of course also override other class methods (or add new\nmethods); for example defining a custom ``__call__()`` method in the\nmetaclass allows custom behavior when the class is called, e.g. not\nalways creating a new instance.\n\n__metaclass__\n\n This variable can be any callable accepting arguments for ``name``,\n ``bases``, and ``dict``. Upon class creation, the callable is used\n instead of the built-in ``type()``.\n\n New in version 2.2.\n\nThe appropriate metaclass is determined by the following precedence\nrules:\n\n* If ``dict[\'__metaclass__\']`` exists, it is used.\n\n* Otherwise, if there is at least one base class, its metaclass is\n used (this looks for a *__class__* attribute first and if not found,\n uses its type).\n\n* Otherwise, if a global variable named __metaclass__ exists, it is\n used.\n\n* Otherwise, the old-style, classic metaclass (types.ClassType) is\n used.\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\n\nCustomizing instance and subclass checks\n========================================\n\nNew in version 2.6.\n\nThe following methods are used to override the default behavior of the\n``isinstance()`` and ``issubclass()`` built-in functions.\n\nIn particular, the metaclass ``abc.ABCMeta`` implements these methods\nin order to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n ``isinstance(instance, class)``.\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. 
If defined, called to implement\n ``issubclass(subclass, class)``.\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n **PEP 3119** - Introducing Abstract Base Classes\n Includes the specification for customizing ``isinstance()`` and\n ``issubclass()`` behavior through ``__instancecheck__()`` and\n ``__subclasscheck__()``, with motivation for this functionality\n in the context of adding Abstract Base Classes (see the ``abc``\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. (For backwards compatibility, the method\n``__getslice__()`` (see below) can also be defined to handle simple,\nbut not extended slices.) It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``has_key()``,\n``get()``, ``clear()``, ``setdefault()``, ``iterkeys()``,\n``itervalues()``, ``iteritems()``, ``pop()``, ``popitem()``,\n``copy()``, and ``update()`` behaving similar to those for Python\'s\nstandard dictionary objects. The ``UserDict`` module provides a\n``DictMixin`` class to help create those methods from a base set of\n``__getitem__()``, ``__setitem__()``, ``__delitem__()``, and\n``keys()``. Mutable sequences should provide methods ``append()``,\n``count()``, ``index()``, ``extend()``, ``insert()``, ``pop()``,\n``remove()``, ``reverse()`` and ``sort()``, like Python standard list\nobjects. Finally, sequence types should implement addition (meaning\nconcatenation) and multiplication (meaning repetition) by defining the\nmethods ``__add__()``, ``__radd__()``, ``__iadd__()``, ``__mul__()``,\n``__rmul__()`` and ``__imul__()`` described below; they should not\ndefine ``__coerce__()`` or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should be equivalent of ``has_key()``;\nfor sequences, it should search through the values. It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``iterkeys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. 
Also, an object\n that doesn\'t define a ``__nonzero__()`` method and whose\n ``__len__()`` method returns zero is considered to be false in a\n Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``iterkeys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` built-in will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). Objects that\n support the sequence protocol should only provide\n ``__reversed__()`` if they can provide an implementation that is\n more efficient than the one provided by ``reversed()``.\n\n New in version 2.6.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. 
For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define ``__contains__()``, the membership\n test first tries iteration via ``__iter__()``, then the old\n sequence iteration protocol via ``__getitem__()``, see *this\n section in the language reference*.\n\n\nAdditional methods for emulation of sequence types\n==================================================\n\nThe following optional methods can be defined to further emulate\nsequence objects. Immutable sequences methods should at most only\ndefine ``__getslice__()``; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n Deprecated since version 2.0: Support slice objects as parameters\n to the ``__getitem__()`` method. (However, built-in types in\n CPython currently still implement ``__getslice__()``. Therefore,\n you have to override it in derived classes when implementing\n slicing.)\n\n Called to implement evaluation of ``self[i:j]``. The returned\n object should be of the same type as *self*. Note that missing *i*\n or *j* in the slice expression are replaced by zero or\n ``sys.maxint``, respectively. If negative indexes are used in the\n slice, the length of the sequence is added to that index. If the\n instance does not implement the ``__len__()`` method, an\n ``AttributeError`` is raised. No guarantee is made that indexes\n adjusted this way are not still negative. Indexes which are\n greater than the length of the sequence are not modified. If no\n ``__getslice__()`` is found, a slice object is created instead, and\n passed to ``__getitem__()`` instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n Called to implement assignment to ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``.\n\n This method is deprecated. If no ``__setslice__()`` is found, or\n for extended slicing of the form ``self[i:j:k]``, a slice object is\n created, and passed to ``__setitem__()``, instead of\n ``__setslice__()`` being called.\n\nobject.__delslice__(self, i, j)\n\n Called to implement deletion of ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``. This method is deprecated. If no\n ``__delslice__()`` is found, or for extended slicing of the form\n ``self[i:j:k]``, a slice object is created, and passed to\n ``__delitem__()``, instead of ``__delslice__()`` being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available. 
For slice\noperations involving extended slice notation, or in absence of the\nslice methods, ``__getitem__()``, ``__setitem__()`` or\n``__delitem__()`` is called with a slice object as argument.\n\nThe following example demonstrates how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n``__getitem__()``, ``__setitem__()`` and ``__delitem__()`` support\nslice objects as arguments):\n\n class MyClass:\n     ...\n     def __getitem__(self, index):\n         ...\n     def __setitem__(self, index, value):\n         ...\n     def __delitem__(self, index):\n         ...\n\n     if sys.version_info < (2, 0):\n         # They won\'t be defined if version is at least 2.0 final\n\n         def __getslice__(self, i, j):\n             return self[max(0, i):max(0, j):]\n         def __setslice__(self, i, j, seq):\n             self[max(0, i):max(0, j):] = seq\n         def __delslice__(self, i, j):\n             del self[max(0, i):max(0, j):]\n     ...\n\nNote the calls to ``max()``; these are necessary because of the\nhandling of negative indices before the ``__*slice__()`` methods are\ncalled. When negative indexes are used, the ``__*item__()`` methods\nreceive them as provided, but the ``__*slice__()`` methods get a\n"cooked" form of the index values. For each negative index value, the\nlength of the sequence is added to the index before calling the method\n(which may still result in a negative index); this is the customary\nhandling of negative indexes by the built-in sequence types, and the\n``__*item__()`` methods are expected to do this as well. However,\nsince they should already be doing that, negative indexes cannot be\npassed in; they must be constrained to the bounds of the sequence\nbefore being passed to the ``__*item__()`` methods. Calling ``max(0,\ni)`` conveniently returns the proper value.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``//``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``). For\n instance, to evaluate the expression ``x + y``, where *x* is an\n instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()`` (described below). Note\n that ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator (``/``) is implemented by these methods. The\n ``__truediv__()`` method is used when ``__future__.division`` is in\n effect, otherwise ``__div__()`` is used. 
If only one of these two\n methods is defined, the object will not support division in the\n alternate context; ``TypeError`` will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``) with\n reflected (swapped) operands. These functions are only called if\n the left operand does not support the corresponding operation and\n the operands are of different types. [2] For instance, to evaluate\n the expression ``x - y``, where *y* is an instance of a class that\n has an ``__rsub__()`` method, ``y.__rsub__(x)`` is called if\n ``x.__sub__(y)`` returns *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``long()``, and ``float()``. Should return a value of\n the appropriate type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions ``oct()`` and ``hex()``.\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing). 
Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or ``None`` if conversion is impossible. When\n the common type would be the type of ``other``, it is sufficient to\n return ``None``, since the interpreter will also ask the other\n object to attempt a coercion (but sometimes, if the implementation\n of the other type cannot be changed, it is useful to do the\n conversion to the other type here). A return value of\n ``NotImplemented`` is equivalent to returning ``None``.\n\n\nCoercion rules\n==============\n\nThis section used to document the rules for coercion. As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. In Python 3.0, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n mode operations on types that don\'t define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from ``object``) never invoke the\n ``__coerce__()`` method in response to a binary operator; the only\n time ``__coerce__()`` is invoked is when the built-in function\n ``coerce()`` is called.\n\n* For most intents and purposes, an operator that returns\n ``NotImplemented`` is treated the same as one that is not\n implemented at all.\n\n* Below, ``__op__()`` and ``__rop__()`` are used to signify the\n generic method names corresponding to an operator; ``__iop__()`` is\n used for the corresponding in-place operator. For example, for the\n operator \'``+``\', ``__add__()`` and ``__radd__()`` are used for the\n left and right variant of the binary operator, and ``__iadd__()``\n for the in-place variant.\n\n* For objects *x* and *y*, first ``x.__op__(y)`` is tried. If this is\n not implemented or returns ``NotImplemented``, ``y.__rop__(x)`` is\n tried. If this is also not implemented or returns\n ``NotImplemented``, a ``TypeError`` exception is raised. But see\n the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base\'s ``__rop__()`` method, the right operand\'s ``__rop__()``\n method is tried *before* the left operand\'s ``__op__()`` method.\n\n This is done so that a subclass can completely override binary\n operators. Otherwise, the left operand\'s ``__op__()`` method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is called\n before that type\'s ``__op__()`` or ``__rop__()`` method is called,\n but no sooner. If the coercion returns an object of a different\n type for the operand whose coercion is invoked, part of the process\n is redone using the new object.\n\n* When an in-place operator (like \'``+=``\') is used, if the left\n operand implements ``__iop__()``, it is invoked without any\n coercion. 
When the operation falls back to ``__op__()`` and/or\n ``__rop__()``, the normal coercion rules apply.\n\n* In ``x + y``, if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In ``x * y``, if one operand is a sequence that implements sequence\n repetition, and the other is an integer (``int`` or ``long``),\n sequence repetition is invoked.\n\n* Rich comparisons (implemented by methods ``__eq__()`` and so on)\n never use coercion. Three-way comparison (implemented by\n ``__cmp__()``) does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types ``int``,\n ``long``, ``float``, and ``complex`` do not use coercion. All these\n types implement a ``__coerce__()`` method, for use by the built-in\n ``coerce()`` function.\n\n Changed in version 2.7.\n\n\nWith Statement Context Managers\n===============================\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nSpecial method lookup for old-style classes\n===========================================\n\nFor old-style classes, special methods are always looked up in exactly\nthe same way as any other method or attribute. This is the case\nregardless of whether the method is being looked up explicitly as in\n``x.__getitem__(i)`` or implicitly as in ``x[i]``.\n\nThis behaviour means that special methods may exhibit different\nbehaviour for different instances of a single old-style class if the\nappropriate special attributes are set differently:\n\n >>> class C:\n ... 
pass\n ...\n >>> c1 = C()\n >>> c2 = C()\n >>> c1.__len__ = lambda: 5\n >>> c2.__len__ = lambda: 9\n >>> len(c1)\n 5\n >>> len(c2)\n 9\n\n\nSpecial method lookup for new-style classes\n===========================================\n\nFor new-style classes, implicit invocations of special methods are\nonly guaranteed to work correctly if defined on an object\'s type, not\nin the object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception (unlike the equivalent example\nwith old-style classes):\n\n >>> class C(object):\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as ``__hash__()`` and ``__repr__()`` that are implemented\nby all objects, including type objects. If the implicit lookup of\nthese methods used the conventional lookup process, they would fail\nwhen invoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe ``__getattribute__()`` method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print "Metaclass getattribute invoked"\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object):\n ... __metaclass__ = Meta\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print "Class getattribute invoked"\n ... return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the ``__getattribute__()`` machinery in this fashion\nprovides significant scope for speed optimisations within the\ninterpreter, at the cost of some flexibility in the handling of\nspecial methods (the special method *must* be set on the class object\nitself in order to be consistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type, under\n certain controlled conditions. It generally isn\'t a good idea\n though, since it can lead to some very strange behaviour if it is\n handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as ``__add__()``) fails the operation is\n not supported, which is why the reflected method is not called.\n',
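# Editorial note: a minimal data-descriptor sketch (not part of the generated
# topic data above) illustrating the "Implementing Descriptors" and "Invoking
# Descriptors" subsections of the 'specialnames' topic; the class and
# attribute names here are invented for the example:
#
#     >>> class Upper(object):
#     ...     """Data descriptor: stores a value, returns it uppercased."""
#     ...     def __get__(self, instance, owner):
#     ...         if instance is None:
#     ...             return self          # accessed on the class itself
#     ...         return instance._value.upper()
#     ...     def __set__(self, instance, value):
#     ...         instance._value = value
#     ...
#     >>> class Word(object):
#     ...     text = Upper()               # descriptor lives in the class dict
#     ...
#     >>> w = Word()
#     >>> w.text = 'hello'                 # type(w).__dict__['text'].__set__(w, 'hello')
#     >>> w.text                           # type(w).__dict__['text'].__get__(w, Word)
#     'HELLO'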
- 'string-methods': '\nString Methods\n**************\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. Some of them are also available on\n``bytearray`` objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange* section. To output formatted strings use\ntemplate strings or the ``%`` operator described in the *String\nFormatting Operations* section. Also, see the ``re`` module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. The default is\n ``\'strict\'``, meaning that encoding errors raise ``UnicodeError``.\n Other possible values are ``\'ignore\'``, ``\'replace\'`` and any other\n name registered via ``codecs.register_error()``, see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n ``\'strict\'``, meaning that encoding errors raise a\n ``UnicodeError``. Other possible values are ``\'ignore\'``,\n ``\'replace\'``, ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and\n any other name registered via ``codecs.register_error()``, see\n section *Codec Base Classes*. For a list of possible encodings, see\n section *Standard Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for ``\'xmlcharrefreplace\'`` and\n ``\'backslashreplace\'`` and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. 
This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\n Note: The ``find()`` method should be used only if you need to know the\n position of *sub*. To check if *sub* is a substring or not, use\n the ``in`` operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3.0,\n and should be preferred to the ``%`` formatting described in\n *String Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). 
The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. 
If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified, then there is no limit\n on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),\n s)\n\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the ``maketrans()`` helper function in the ``string``\n module to create a translation table. For string objects, set the\n *table* argument to ``None`` for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a ``None`` *table* argument.\n\n For Unicode objects, the ``translate()`` method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or ``None``. Unmapped characters\n are left untouched. Characters mapped to ``None`` are deleted.\n Note, a more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see ``encodings.cp1251``\n for an example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that ``str.upper().isupper()`` might\n be ``False`` if ``s`` contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to ``len(s)``.\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return ``True`` if there are only numeric characters in S,\n ``False`` otherwise. Numeric characters include digit characters,\n and all characters that have the Unicode numeric value property,\n e.g. U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return ``True`` if there are only decimal characters in S,\n ``False`` otherwise. Decimal characters include digit characters,\n and all characters that can be used to form decimal-radix numbers,\n e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n',
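A brief usage sketch, assuming an ordinary Python 2 byte string, that
combines several of the methods described above:

   >>> s = '  Hello, World  '
   >>> s.strip().lower().replace('world', 'python')
   'hello, python'
   >>> 'www.example.com'.partition('.')
   ('www', '.', 'example.com')
   >>> '-'.join(['a', 'b', 'c'])
   'a-b-c'
   >>> '42'.zfill(5)
   '00042'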
- 'strings': '\nString literals\n***************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "ur" | "R" | "U" | "UR" | "Ur" | "uR"\n | "b" | "B" | "br" | "Br" | "bR" | "BR"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'"\n | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | escapeseq\n longstringitem ::= longstringchar | escapeseq\n shortstringchar ::= <any source character except "\\" or newline or the quote>\n longstringchar ::= <any source character except "\\">\n escapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the ``stringprefix`` and the rest of\nthe string literal. The source character set is defined by the\nencoding declaration; it is ASCII if no encoding declaration is given\nin the source file; see section *Encoding declarations*.\n\nIn plain English: String literals can be enclosed in matching single\nquotes (``\'``) or double quotes (``"``). They can also be enclosed in\nmatching groups of three single or double quotes (these are generally\nreferred to as *triple-quoted strings*). The backslash (``\\``)\ncharacter is used to escape characters that otherwise have a special\nmeaning, such as newline, backslash itself, or the quote character.\nString literals may optionally be prefixed with a letter ``\'r\'`` or\n``\'R\'``; such strings are called *raw strings* and use different rules\nfor interpreting backslash escape sequences. A prefix of ``\'u\'`` or\n``\'U\'`` makes the string a Unicode string. Unicode strings use the\nUnicode character set as defined by the Unicode Consortium and ISO\n10646. Some additional escape sequences, described below, are\navailable in Unicode strings. A prefix of ``\'b\'`` or ``\'B\'`` is\nignored in Python 2; it indicates that the literal should become a\nbytes literal in Python 3 (e.g. when code is automatically converted\nwith 2to3). A ``\'u\'`` or ``\'b\'`` prefix may be followed by an ``\'r\'``\nprefix.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either ``\'`` or ``"``.)\n\nUnless an ``\'r\'`` or ``\'R\'`` prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. 
The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| ``\\newline`` | Ignored | |\n+-------------------+-----------------------------------+---------+\n| ``\\\\`` | Backslash (``\\``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\\'`` | Single quote (``\'``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\"`` | Double quote (``"``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\a`` | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| ``\\b`` | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| ``\\f`` | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\n`` | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\N{name}`` | Character named *name* in the | |\n| | Unicode database (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\r`` | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| ``\\t`` | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| ``\\uxxxx`` | Character with 16-bit hex value | (1) |\n| | *xxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\Uxxxxxxxx`` | Character with 32-bit hex value | (2) |\n| | *xxxxxxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\v`` | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| ``\\ooo`` | Character with octal value *ooo* | (3,5) |\n+-------------------+-----------------------------------+---------+\n| ``\\xhh`` | Character with hex value *hh* | (4,5) |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. Individual code units which form parts of a surrogate pair can be\n encoded using this escape sequence.\n\n2. Any Unicode character can be encoded this way, but characters\n outside the Basic Multilingual Plane (BMP) will be encoded using a\n surrogate pair if Python is compiled to use 16-bit code units (the\n default). Individual code units which form parts of a surrogate\n pair can be encoded using this escape sequence.\n\n3. As in Standard C, up to three octal digits are accepted.\n\n4. Unlike in Standard C, exactly two hex digits are required.\n\n5. In a string literal, hexadecimal and octal escapes denote the byte\n with the given value; it is not necessary that the byte encodes a\n character in the source character set. In a Unicode literal, these\n escapes denote a Unicode character with the given value.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) 
It is also\nimportant to note that the escape sequences marked as "(Unicode only)"\nin the table above fall into the category of unrecognized escapes for\nnon-Unicode string literals.\n\nWhen an ``\'r\'`` or ``\'R\'`` prefix is present, a character following a\nbackslash is included in the string without change, and *all\nbackslashes are left in the string*. For example, the string literal\n``r"\\n"`` consists of two characters: a backslash and a lowercase\n``\'n\'``. String quotes can be escaped with a backslash, but the\nbackslash remains in the string; for example, ``r"\\""`` is a valid\nstring literal consisting of two characters: a backslash and a double\nquote; ``r"\\"`` is not a valid string literal (even a raw string\ncannot end in an odd number of backslashes). Specifically, *a raw\nstring cannot end in a single backslash* (since the backslash would\nescape the following quote character). Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n\nWhen an ``\'r\'`` or ``\'R\'`` prefix is used in conjunction with a\n``\'u\'`` or ``\'U\'`` prefix, then the ``\\uXXXX`` and ``\\UXXXXXXXX``\nescape sequences are processed while *all other backslashes are left\nin the string*. For example, the string literal ``ur"\\u0062\\n"``\nconsists of three Unicode characters: \'LATIN SMALL LETTER B\', \'REVERSE\nSOLIDUS\', and \'LATIN SMALL LETTER N\'. Backslashes can be escaped with\na preceding backslash; however, both remain in the string. As a\nresult, ``\\uXXXX`` escape sequences are only recognized when there are\nan odd number of backslashes.\n',
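A short sketch, assuming a Python 2 interpreter, contrasting the ordinary,
raw and Unicode literal forms described above (the path shown is purely
illustrative):

   >>> len('\n'), len(r'\n')
   (1, 2)
   >>> u'\u0062'
   u'b'
   >>> print r"C:\some\path"
   C:\some\path
   >>> ur"\u0062\n"
   u'b\\n'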
+ 'specialnames': '\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named ``__getitem__()``, and ``x`` is an instance of this\nclass, then ``x[i]`` is roughly equivalent to ``x.__getitem__(i)`` for\nold-style classes and ``type(x).__getitem__(x, i)`` for new-style\nclasses. Except where mentioned, attempts to execute an operation\nraise an exception when no appropriate method is defined (typically\n``AttributeError`` or ``TypeError``).\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n``NodeList`` interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) 
for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_traceback`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.exc_traceback`` or ``sys.last_traceback``. Circular\n references which are garbage are detected when the option cycle\n detector is enabled (it\'s on by default), but can only be cleaned\n up if there are no Python-level ``__del__()`` methods involved.\n Refer to the documentation for the ``gc`` module for more\n information about how ``__del__()`` methods are handled by the\n cycle detector, particularly the description of the ``garbage``\n value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\n See also the *-R* command-line option.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function and by string\n conversions (reverse quotes) to compute the "official" string\n representation of an object. If at all possible, this should look\n like a valid Python expression that could be used to recreate an\n object with the same value (given an appropriate environment). If\n this is not possible, a string of the form ``<...some useful\n description...>`` should be returned. The return value must be a\n string object. 
If a class defines ``__repr__()`` but not\n ``__str__()``, then ``__repr__()`` is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the ``str()`` built-in function and by the ``print``\n statement to compute the "informal" string representation of an\n object. This differs from ``__repr__()`` in that it does not have\n to be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to ``__cmp__()`` below. The\n correspondence between operator symbols and method names is as\n follows: ``x<y`` calls ``x.__lt__(y)``, ``x<=y`` calls\n ``x.__le__(y)``, ``x==y`` calls ``x.__eq__(y)``, ``x!=y`` and\n ``x<>y`` call ``x.__ne__(y)``, ``x>y`` calls ``x.__gt__(y)``, and\n ``x>=y`` calls ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if ``self < other``,\n zero if ``self == other``, a positive integer if ``self > other``.\n If no ``__cmp__()``, ``__eq__()`` or ``__ne__()`` operation is\n defined, class instances are compared by object identity\n ("address"). See also the description of ``__hash__()`` for some\n important notes on creating *hashable* objects which support custom\n comparison operations and are usable as dictionary keys. 
(Note: the\n restriction that exceptions are not propagated by ``__cmp__()`` has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define a ``__cmp__()`` or ``__eq__()`` method\n it should not define a ``__hash__()`` operation either; if it\n defines ``__cmp__()`` or ``__eq__()`` but not ``__hash__()``, its\n instances will not be usable in hashed collections. If a class\n defines mutable objects and implements a ``__cmp__()`` or\n ``__eq__()`` method, it should not implement ``__hash__()``, since\n hashable collection implementations require that a object\'s hash\n value is immutable (if the object\'s hash value changes, it will be\n in the wrong hash bucket).\n\n User-defined classes have ``__cmp__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns a result derived from\n ``id(x)``.\n\n Classes which inherit a ``__hash__()`` method from a parent class\n but change the meaning of ``__cmp__()`` or ``__eq__()`` such that\n the hash value returned is no longer appropriate (e.g. by switching\n to a value-based concept of equality instead of the default\n identity based equality) can explicitly flag themselves as being\n unhashable by setting ``__hash__ = None`` in the class definition.\n Doing so means that not only will instances of the class raise an\n appropriate ``TypeError`` when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking ``isinstance(obj, collections.Hashable)``\n (unlike classes which define their own ``__hash__()`` to explicitly\n raise ``TypeError``).\n\n Changed in version 2.5: ``__hash__()`` may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: ``__hash__`` may now be set to ``None`` to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``, or their integer\n equivalents ``0`` or ``1``. When this method is not defined,\n ``__len__()`` is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither ``__len__()`` nor ``__nonzero__()``, all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement ``unicode()`` built-in; should return a Unicode\n object. 
When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of ``x.name``)\nfor class instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for ``self``). ``name`` is the attribute name.\n This method should return the (computed) attribute value or raise\n an ``AttributeError`` exception.\n\n Note that if the attribute is found through the normal mechanism,\n ``__getattr__()`` is not called. (This is an intentional asymmetry\n between ``__getattr__()`` and ``__setattr__()``.) This is done both\n for efficiency reasons and because otherwise ``__getattr__()``\n would have no way to access other attributes of the instance. Note\n that at least for instance variables, you can fake total control by\n not inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n ``__getattribute__()`` method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If ``__setattr__()`` wants to assign to an instance attribute, it\n should not simply execute ``self.name = value`` --- this would\n cause a recursive call to itself. Instead, it should insert the\n value in the dictionary of instance attributes, e.g.,\n ``self.__dict__[name] = value``. For new-style classes, rather\n than accessing the instance dictionary, it should call the base\n class method with the same name, for example,\n ``object.__setattr__(self, name, value)``.\n\nobject.__delattr__(self, name)\n\n Like ``__setattr__()`` but for attribute deletion instead of\n assignment. This should only be implemented if ``del obj.name`` is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n-------------------------------------------\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines\n ``__getattr__()``, the latter will not be called unless\n ``__getattribute__()`` either calls it explicitly or raises an\n ``AttributeError``. This method should return the (computed)\n attribute value or raise an ``AttributeError`` exception. In order\n to avoid infinite recursion in this method, its implementation\n should always call the base class method with the same name to\n access any attributes it needs, for example,\n ``object.__getattribute__(self, name)``.\n\n Note: This method may still be bypassed when looking up special methods\n as the result of implicit invocation via language syntax or\n built-in functions. 
See *Special method lookup for new-style\n classes*.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' ``__dict__``.\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or ``None`` when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an\n ``AttributeError`` exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: ``__get__()``, ``__set__()``, and\n``__delete__()``. If any of those methods are defined for an object,\nit is said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, ``a.x`` has a\nlookup chain starting with ``a.__dict__[\'x\']``, then\n``type(a).__dict__[\'x\']``, and continuing through the base classes of\n``type(a)`` excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass ``object()`` or\n``type()``).\n\nThe starting point for descriptor invocation is a binding, ``a.x``.\nHow the arguments are assembled depends on ``a``:\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: ``x.__get__(a)``.\n\nInstance Binding\n If binding to a new-style object instance, ``a.x`` is transformed\n into the call: ``type(a).__dict__[\'x\'].__get__(a, type(a))``.\n\nClass Binding\n If binding to a new-style class, ``A.x`` is transformed into the\n call: ``A.__dict__[\'x\'].__get__(None, A)``.\n\nSuper Binding\n If ``a`` is an instance of ``super``, then the binding ``super(B,\n obj).m()`` searches ``obj.__class__.__mro__`` for the base class\n ``A`` immediately preceding ``B`` and then invokes the descriptor\n with the call: ``A.__dict__[\'m\'].__get__(obj, obj.__class__)``.\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of ``__get__()``, ``__set__()`` and ``__delete__()``.\nIf it does not define ``__get__()``, then accessing the attribute will\nreturn the descriptor object itself unless there is a value in the\nobject\'s instance dictionary. 
If the descriptor defines ``__set__()``\nand/or ``__delete__()``, it is a data descriptor; if it defines\nneither, it is a non-data descriptor. Normally, data descriptors\ndefine both ``__get__()`` and ``__set__()``, while non-data\ndescriptors have just the ``__get__()`` method. Data descriptors with\n``__set__()`` and ``__get__()`` defined always override a redefinition\nin an instance dictionary. In contrast, non-data descriptors can be\noverridden by instances.\n\nPython methods (including ``staticmethod()`` and ``classmethod()``)\nare implemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe ``property()`` function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises ``AttributeError``. If\n dynamic assignment of new variables is desired, then add\n ``\'__dict__\'`` to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding ``\'__dict__\'`` to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes defining\n *__slots__* do not support weak references to its instances. If weak\n reference support is needed, then add ``\'__weakref__\'`` to the\n sequence of strings in the *__slots__* declaration.\n\n Changed in version 2.3: Previously, adding ``\'__weakref__\'`` to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. 
As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the instance\n variable defined by the base class slot is inaccessible (except by\n retrieving its descriptor directly from the base class). This\n renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as ``long``, ``str`` and\n ``tuple``.\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings may\n also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, new-style classes are constructed using ``type()``. A\nclass definition is read into a separate namespace and the value of\nclass name is bound to the result of ``type(name, bases, dict)``.\n\nWhen the class definition is read, if *__metaclass__* is defined then\nthe callable assigned to it will be called instead of ``type()``. This\nallows classes or functions to be written which monitor or alter the\nclass creation process:\n\n* Modifying the class dictionary prior to the class being created.\n\n* Returning an instance of another class -- essentially performing the\n role of a factory function.\n\nThese steps will have to be performed in the metaclass\'s ``__new__()``\nmethod -- ``type.__new__()`` can then be called from this method to\ncreate a class with different properties. This example adds a new\nelement to the class dictionary before creating the class:\n\n class metacls(type):\n def __new__(mcs, name, bases, dict):\n dict[\'foo\'] = \'metacls was here\'\n return type.__new__(mcs, name, bases, dict)\n\nYou can of course also override other class methods (or add new\nmethods); for example defining a custom ``__call__()`` method in the\nmetaclass allows custom behavior when the class is called, e.g. not\nalways creating a new instance.\n\n__metaclass__\n\n This variable can be any callable accepting arguments for ``name``,\n ``bases``, and ``dict``. Upon class creation, the callable is used\n instead of the built-in ``type()``.\n\n New in version 2.2.\n\nThe appropriate metaclass is determined by the following precedence\nrules:\n\n* If ``dict[\'__metaclass__\']`` exists, it is used.\n\n* Otherwise, if there is at least one base class, its metaclass is\n used (this looks for a *__class__* attribute first and if not found,\n uses its type).\n\n* Otherwise, if a global variable named __metaclass__ exists, it is\n used.\n\n* Otherwise, the old-style, classic metaclass (types.ClassType) is\n used.\n\nThe potential uses for metaclasses are boundless. 
Some ideas that have\nbeen explored including logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\n\nCustomizing instance and subclass checks\n========================================\n\nNew in version 2.6.\n\nThe following methods are used to override the default behavior of the\n``isinstance()`` and ``issubclass()`` built-in functions.\n\nIn particular, the metaclass ``abc.ABCMeta`` implements these methods\nin order to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n ``isinstance(instance, class)``.\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n ``issubclass(subclass, class)``.\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n **PEP 3119** - Introducing Abstract Base Classes\n Includes the specification for customizing ``isinstance()`` and\n ``issubclass()`` behavior through ``__instancecheck__()`` and\n ``__subclasscheck__()``, with motivation for this functionality\n in the context of adding Abstract Base Classes (see the ``abc``\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, ``x(arg1, arg2, ...)`` is a shorthand for\n ``x.__call__(arg1, arg2, ...)``.\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which ``0 <= k < N``\nwhere *N* is the length of the sequence, or slice objects, which\ndefine a range of items. (For backwards compatibility, the method\n``__getslice__()`` (see below) can also be defined to handle simple,\nbut not extended slices.) It is also recommended that mappings provide\nthe methods ``keys()``, ``values()``, ``items()``, ``has_key()``,\n``get()``, ``clear()``, ``setdefault()``, ``iterkeys()``,\n``itervalues()``, ``iteritems()``, ``pop()``, ``popitem()``,\n``copy()``, and ``update()`` behaving similar to those for Python\'s\nstandard dictionary objects. The ``UserDict`` module provides a\n``DictMixin`` class to help create those methods from a base set of\n``__getitem__()``, ``__setitem__()``, ``__delitem__()``, and\n``keys()``. Mutable sequences should provide methods ``append()``,\n``count()``, ``index()``, ``extend()``, ``insert()``, ``pop()``,\n``remove()``, ``reverse()`` and ``sort()``, like Python standard list\nobjects. 
Finally, sequence types should implement addition (meaning\nconcatenation) and multiplication (meaning repetition) by defining the\nmethods ``__add__()``, ``__radd__()``, ``__iadd__()``, ``__mul__()``,\n``__rmul__()`` and ``__imul__()`` described below; they should not\ndefine ``__coerce__()`` or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n``__contains__()`` method to allow efficient use of the ``in``\noperator; for mappings, ``in`` should be equivalent of ``has_key()``;\nfor sequences, it should search through the values. It is further\nrecommended that both mappings and sequences implement the\n``__iter__()`` method to allow efficient iteration through the\ncontainer; for mappings, ``__iter__()`` should be the same as\n``iterkeys()``; for sequences, it should iterate through the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function ``len()``. Should return\n the length of the object, an integer ``>=`` 0. Also, an object\n that doesn\'t define a ``__nonzero__()`` method and whose\n ``__len__()`` method returns zero is considered to be false in a\n Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of ``self[key]``. For sequence\n types, the accepted keys should be integers and slice objects.\n Note that the special interpretation of negative indexes (if the\n class wishes to emulate a sequence type) is up to the\n ``__getitem__()`` method. If *key* is of an inappropriate type,\n ``TypeError`` may be raised; if of a value outside the set of\n indexes for the sequence (after any special interpretation of\n negative values), ``IndexError`` should be raised. For mapping\n types, if *key* is missing (not in the container), ``KeyError``\n should be raised.\n\n Note: ``for`` loops expect that an ``IndexError`` will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the ``__getitem__()`` method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of ``self[key]``. Same note as for\n ``__getitem__()``. This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the ``__getitem__()``\n method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method ``iterkeys()``.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the ``reversed()`` built-in to implement\n reverse iteration. 
It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the ``__reversed__()`` method is not provided, the\n ``reversed()`` built-in will fall back to using the sequence\n protocol (``__len__()`` and ``__getitem__()``). Objects that\n support the sequence protocol should only provide\n ``__reversed__()`` if they can provide an implementation that is\n more efficient than the one provided by ``reversed()``.\n\n New in version 2.6.\n\nThe membership test operators (``in`` and ``not in``) are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define ``__contains__()``, the membership\n test first tries iteration via ``__iter__()``, then the old\n sequence iteration protocol via ``__getitem__()``, see *this\n section in the language reference*.\n\n\nAdditional methods for emulation of sequence types\n==================================================\n\nThe following optional methods can be defined to further emulate\nsequence objects. Immutable sequences methods should at most only\ndefine ``__getslice__()``; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n Deprecated since version 2.0: Support slice objects as parameters\n to the ``__getitem__()`` method. (However, built-in types in\n CPython currently still implement ``__getslice__()``. Therefore,\n you have to override it in derived classes when implementing\n slicing.)\n\n Called to implement evaluation of ``self[i:j]``. The returned\n object should be of the same type as *self*. Note that missing *i*\n or *j* in the slice expression are replaced by zero or\n ``sys.maxint``, respectively. If negative indexes are used in the\n slice, the length of the sequence is added to that index. If the\n instance does not implement the ``__len__()`` method, an\n ``AttributeError`` is raised. No guarantee is made that indexes\n adjusted this way are not still negative. Indexes which are\n greater than the length of the sequence are not modified. If no\n ``__getslice__()`` is found, a slice object is created instead, and\n passed to ``__getitem__()`` instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n Called to implement assignment to ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``.\n\n This method is deprecated. If no ``__setslice__()`` is found, or\n for extended slicing of the form ``self[i:j:k]``, a slice object is\n created, and passed to ``__setitem__()``, instead of\n ``__setslice__()`` being called.\n\nobject.__delslice__(self, i, j)\n\n Called to implement deletion of ``self[i:j]``. Same notes for *i*\n and *j* as for ``__getslice__()``. This method is deprecated. If no\n ``__delslice__()`` is found, or for extended slicing of the form\n ``self[i:j:k]``, a slice object is created, and passed to\n ``__delitem__()``, instead of ``__delslice__()`` being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available. 
For slice\noperations involving extended slice notation, or in absence of the\nslice methods, ``__getitem__()``, ``__setitem__()`` or\n``__delitem__()`` is called with a slice object as argument.\n\nThe following example demonstrate how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n``__getitem__()``, ``__setitem__()`` and ``__delitem__()`` support\nslice objects as arguments):\n\n class MyClass:\n ...\n def __getitem__(self, index):\n ...\n def __setitem__(self, index, value):\n ...\n def __delitem__(self, index):\n ...\n\n if sys.version_info < (2, 0):\n # They won\'t be defined if version is at least 2.0 final\n\n def __getslice__(self, i, j):\n return self[max(0, i):max(0, j):]\n def __setslice__(self, i, j, seq):\n self[max(0, i):max(0, j):] = seq\n def __delslice__(self, i, j):\n del self[max(0, i):max(0, j):]\n ...\n\nNote the calls to ``max()``; these are necessary because of the\nhandling of negative indices before the ``__*slice__()`` methods are\ncalled. When negative indexes are used, the ``__*item__()`` methods\nreceive them as provided, but the ``__*slice__()`` methods get a\n"cooked" form of the index values. For each negative index value, the\nlength of the sequence is added to the index before calling the method\n(which may still result in a negative index); this is the customary\nhandling of negative indexes by the built-in sequence types, and the\n``__*item__()`` methods are expected to do this as well. However,\nsince they should already be doing that, negative indexes cannot be\npassed in; they must be constrained to the bounds of the sequence\nbefore being passed to the ``__*item__()`` methods. Calling ``max(0,\ni)`` conveniently returns the proper value.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``//``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``). For\n instance, to evaluate the expression ``x + y``, where *x* is an\n instance of a class that has an ``__add__()`` method,\n ``x.__add__(y)`` is called. The ``__divmod__()`` method should be\n the equivalent to using ``__floordiv__()`` and ``__mod__()``; it\n should not be related to ``__truediv__()`` (described below). Note\n that ``__pow__()`` should be defined to accept an optional third\n argument if the ternary version of the built-in ``pow()`` function\n is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return ``NotImplemented``.\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator (``/``) is implemented by these methods. The\n ``__truediv__()`` method is used when ``__future__.division`` is in\n effect, otherwise ``__div__()`` is used. 
If only one of these two\n methods is defined, the object will not support division in the\n alternate context; ``TypeError`` will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations (``+``, ``-``, ``*``, ``/``, ``%``, ``divmod()``,\n ``pow()``, ``**``, ``<<``, ``>>``, ``&``, ``^``, ``|``) with\n reflected (swapped) operands. These functions are only called if\n the left operand does not support the corresponding operation and\n the operands are of different types. [2] For instance, to evaluate\n the expression ``x - y``, where *y* is an instance of a class that\n has an ``__rsub__()`` method, ``y.__rsub__(x)`` is called if\n ``x.__sub__(y)`` returns *NotImplemented*.\n\n Note that ternary ``pow()`` will not try calling ``__rpow__()``\n (the coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left operand\'s\n type and that subclass provides the reflected method for the\n operation, this method will be called before the left operand\'s\n non-reflected method. This behavior allows subclasses to\n override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments (``+=``, ``-=``, ``*=``, ``/=``, ``//=``, ``%=``,\n ``**=``, ``<<=``, ``>>=``, ``&=``, ``^=``, ``|=``). These methods\n should attempt to do the operation in-place (modifying *self*) and\n return the result (which could be, but does not have to be,\n *self*). If a specific method is not defined, the augmented\n assignment falls back to the normal methods. For instance, to\n execute the statement ``x += y``, where *x* is an instance of a\n class that has an ``__iadd__()`` method, ``x.__iadd__(y)`` is\n called. If *x* is an instance of a class that does not define a\n ``__iadd__()`` method, ``x.__add__(y)`` and ``y.__radd__(x)`` are\n considered, as with the evaluation of ``x + y``.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations (``-``, ``+``,\n ``abs()`` and ``~``).\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions ``complex()``,\n ``int()``, ``long()``, and ``float()``. Should return a value of\n the appropriate type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions ``oct()`` and ``hex()``.\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement ``operator.index()``. Also called whenever\n Python needs an integer object (such as in slicing). 
Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or ``None`` if conversion is impossible. When\n the common type would be the type of ``other``, it is sufficient to\n return ``None``, since the interpreter will also ask the other\n object to attempt a coercion (but sometimes, if the implementation\n of the other type cannot be changed, it is useful to do the\n conversion to the other type here). A return value of\n ``NotImplemented`` is equivalent to returning ``None``.\n\n\nCoercion rules\n==============\n\nThis section used to document the rules for coercion. As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. In Python 3, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n mode operations on types that don\'t define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from ``object``) never invoke the\n ``__coerce__()`` method in response to a binary operator; the only\n time ``__coerce__()`` is invoked is when the built-in function\n ``coerce()`` is called.\n\n* For most intents and purposes, an operator that returns\n ``NotImplemented`` is treated the same as one that is not\n implemented at all.\n\n* Below, ``__op__()`` and ``__rop__()`` are used to signify the\n generic method names corresponding to an operator; ``__iop__()`` is\n used for the corresponding in-place operator. For example, for the\n operator \'``+``\', ``__add__()`` and ``__radd__()`` are used for the\n left and right variant of the binary operator, and ``__iadd__()``\n for the in-place variant.\n\n* For objects *x* and *y*, first ``x.__op__(y)`` is tried. If this is\n not implemented or returns ``NotImplemented``, ``y.__rop__(x)`` is\n tried. If this is also not implemented or returns\n ``NotImplemented``, a ``TypeError`` exception is raised. But see\n the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base\'s ``__rop__()`` method, the right operand\'s ``__rop__()``\n method is tried *before* the left operand\'s ``__op__()`` method.\n\n This is done so that a subclass can completely override binary\n operators. Otherwise, the left operand\'s ``__op__()`` method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is called\n before that type\'s ``__op__()`` or ``__rop__()`` method is called,\n but no sooner. If the coercion returns an object of a different\n type for the operand whose coercion is invoked, part of the process\n is redone using the new object.\n\n* When an in-place operator (like \'``+=``\') is used, if the left\n operand implements ``__iop__()``, it is invoked without any\n coercion. 
When the operation falls back to ``__op__()`` and/or\n ``__rop__()``, the normal coercion rules apply.\n\n* In ``x + y``, if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In ``x * y``, if one operand is a sequence that implements sequence\n repetition, and the other is an integer (``int`` or ``long``),\n sequence repetition is invoked.\n\n* Rich comparisons (implemented by methods ``__eq__()`` and so on)\n never use coercion. Three-way comparison (implemented by\n ``__cmp__()``) does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types ``int``,\n ``long``, ``float``, and ``complex`` do not use coercion. All these\n types implement a ``__coerce__()`` method, for use by the built-in\n ``coerce()`` function.\n\n Changed in version 2.7.\n\n\nWith Statement Context Managers\n===============================\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nSpecial method lookup for old-style classes\n===========================================\n\nFor old-style classes, special methods are always looked up in exactly\nthe same way as any other method or attribute. This is the case\nregardless of whether the method is being looked up explicitly as in\n``x.__getitem__(i)`` or implicitly as in ``x[i]``.\n\nThis behaviour means that special methods may exhibit different\nbehaviour for different instances of a single old-style class if the\nappropriate special attributes are set differently:\n\n >>> class C:\n ... 
pass\n ...\n >>> c1 = C()\n >>> c2 = C()\n >>> c1.__len__ = lambda: 5\n >>> c2.__len__ = lambda: 9\n >>> len(c1)\n 5\n >>> len(c2)\n 9\n\n\nSpecial method lookup for new-style classes\n===========================================\n\nFor new-style classes, implicit invocations of special methods are\nonly guaranteed to work correctly if defined on an object\'s type, not\nin the object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception (unlike the equivalent example\nwith old-style classes):\n\n >>> class C(object):\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as ``__hash__()`` and ``__repr__()`` that are implemented\nby all objects, including type objects. If the implicit lookup of\nthese methods used the conventional lookup process, they would fail\nwhen invoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe ``__getattribute__()`` method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print "Metaclass getattribute invoked"\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object):\n ... __metaclass__ = Meta\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print "Class getattribute invoked"\n ... return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the ``__getattribute__()`` machinery in this fashion\nprovides significant scope for speed optimisations within the\ninterpreter, at the cost of some flexibility in the handling of\nspecial methods (the special method *must* be set on the class object\nitself in order to be consistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type, under\n certain controlled conditions. It generally isn\'t a good idea\n though, since it can lead to some very strange behaviour if it is\n handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as ``__add__()``) fails the operation is\n not supported, which is why the reflected method is not called.\n',
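The "With Statement Context Managers" section above describes the ``__enter__()``/``__exit__()`` protocol but gives no example. As an informal sketch only (the class name ``SuppressKeyError`` is hypothetical, not part of the standard library), a context manager that swallows ``KeyError`` and lets every other exception propagate could look like this:

   class SuppressKeyError(object):
       def __enter__(self):
           # The return value is bound to the target of an "as" clause, if any.
           return self
       def __exit__(self, exc_type, exc_value, traceback):
           # Returning a true value suppresses the exception raised in the
           # "with" block; returning None (false) lets it propagate normally.
           return exc_type is not None and issubclass(exc_type, KeyError)

   >>> d = {}
   >>> with SuppressKeyError():
   ...     d['missing']
   ...
   >>> # the KeyError raised inside the block was suppressed

Note that, per the lookup rules for new-style classes described above, ``__enter__()`` and ``__exit__()`` must be defined on the class itself; attaching them to an individual instance would not be honoured by the ``with`` statement.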
+ 'string-methods': '\nString Methods\n**************\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. Some of them are also available on\n``bytearray`` objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange* section. To output formatted strings use\ntemplate strings or the ``%`` operator described in the *String\nFormatting Operations* section. Also, see the ``re`` module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. The default is\n ``\'strict\'``, meaning that encoding errors raise ``UnicodeError``.\n Other possible values are ``\'ignore\'``, ``\'replace\'`` and any other\n name registered via ``codecs.register_error()``, see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n ``\'strict\'``, meaning that encoding errors raise a\n ``UnicodeError``. Other possible values are ``\'ignore\'``,\n ``\'replace\'``, ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and\n any other name registered via ``codecs.register_error()``, see\n section *Codec Base Classes*. For a list of possible encodings, see\n section *Standard Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for ``\'xmlcharrefreplace\'`` and\n ``\'backslashreplace\'`` and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. 
If the character is a\n tab (``\\t``), one or more space characters are inserted in the\n result until the current column is equal to the next tab position.\n (The tab character itself is not copied.) If the character is a\n newline (``\\n``) or return (``\\r``), it is copied and the current\n column is reset to zero. Any other character is copied unchanged\n and the current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01 012 0123 01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01 012 0123 01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\n Note: The ``find()`` method should be used only if you need to know the\n position of *sub*. To check if *sub* is a substring or not, use\n the ``in`` operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3,\n and should be preferred to the ``%`` formatting described in\n *String Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is 
locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified or ``-1``, then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n For example, ``\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()`` returns\n ``[\'ab c\', \'\', \'de fg\', \'kl\']``, while the same call with\n ``splitlines(True)`` returns ``[\'ab c\\n\', \'\\n\', \'de fg\\r\',\n \'kl\\r\\n\']``.\n\n Unlike ``split()`` when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. 
The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the ``maketrans()`` helper function in the ``string``\n module to create a translation table. For string objects, set the\n *table* argument to ``None`` for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a ``None`` *table* argument.\n\n For Unicode objects, the ``translate()`` method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or ``None``. Unmapped characters\n are left untouched. Characters mapped to ``None`` are deleted.\n Note, a more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see ``encodings.cp1251``\n for an example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that ``str.upper().isupper()`` might\n be ``False`` if ``s`` contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to ``len(s)``.\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return ``True`` if there are only numeric characters in S,\n ``False`` otherwise. Numeric characters include digit characters,\n and all characters that have the Unicode numeric value property,\n e.g. 
U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return ``True`` if there are only decimal characters in S,\n ``False`` otherwise. Decimal characters include digit characters,\n and all characters that can be used to form decimal-radix numbers,\n e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n',
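As an informal supplement to the descriptions above, a few interactive examples (the literal values are only illustrative; the results follow from the documented behaviour of each method):

   >>> 'key=value=pair'.partition('=')
   ('key', '=', 'value=pair')
   >>> 'key=value=pair'.rpartition('=')
   ('key=value', '=', 'pair')
   >>> 'a,b,c'.rsplit(',', 1)
   ['a,b', 'c']
   >>> '-42'.zfill(6)
   '-00042'
   >>> 'ham, spam'.startswith(('ham', 'eggs'))
   True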
+ 'strings': '\nString literals\n***************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "ur" | "R" | "U" | "UR" | "Ur" | "uR"\n | "b" | "B" | "br" | "Br" | "bR" | "BR"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'"\n | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | escapeseq\n longstringitem ::= longstringchar | escapeseq\n shortstringchar ::= <any source character except "\\" or newline or the quote>\n longstringchar ::= <any source character except "\\">\n escapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the ``stringprefix`` and the rest of\nthe string literal. The source character set is defined by the\nencoding declaration; it is ASCII if no encoding declaration is given\nin the source file; see section *Encoding declarations*.\n\nIn plain English: String literals can be enclosed in matching single\nquotes (``\'``) or double quotes (``"``). They can also be enclosed in\nmatching groups of three single or double quotes (these are generally\nreferred to as *triple-quoted strings*). The backslash (``\\``)\ncharacter is used to escape characters that otherwise have a special\nmeaning, such as newline, backslash itself, or the quote character.\nString literals may optionally be prefixed with a letter ``\'r\'`` or\n``\'R\'``; such strings are called *raw strings* and use different rules\nfor interpreting backslash escape sequences. A prefix of ``\'u\'`` or\n``\'U\'`` makes the string a Unicode string. Unicode strings use the\nUnicode character set as defined by the Unicode Consortium and ISO\n10646. Some additional escape sequences, described below, are\navailable in Unicode strings. A prefix of ``\'b\'`` or ``\'B\'`` is\nignored in Python 2; it indicates that the literal should become a\nbytes literal in Python 3 (e.g. when code is automatically converted\nwith 2to3). A ``\'u\'`` or ``\'b\'`` prefix may be followed by an ``\'r\'``\nprefix.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either ``\'`` or ``"``.)\n\nUnless an ``\'r\'`` or ``\'R\'`` prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. 
The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| ``\\newline`` | Ignored | |\n+-------------------+-----------------------------------+---------+\n| ``\\\\`` | Backslash (``\\``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\\'`` | Single quote (``\'``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\"`` | Double quote (``"``) | |\n+-------------------+-----------------------------------+---------+\n| ``\\a`` | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| ``\\b`` | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| ``\\f`` | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\n`` | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| ``\\N{name}`` | Character named *name* in the | |\n| | Unicode database (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\r`` | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| ``\\t`` | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| ``\\uxxxx`` | Character with 16-bit hex value | (1) |\n| | *xxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\Uxxxxxxxx`` | Character with 32-bit hex value | (2) |\n| | *xxxxxxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| ``\\v`` | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| ``\\ooo`` | Character with octal value *ooo* | (3,5) |\n+-------------------+-----------------------------------+---------+\n| ``\\xhh`` | Character with hex value *hh* | (4,5) |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. Individual code units which form parts of a surrogate pair can be\n encoded using this escape sequence.\n\n2. Any Unicode character can be encoded this way, but characters\n outside the Basic Multilingual Plane (BMP) will be encoded using a\n surrogate pair if Python is compiled to use 16-bit code units (the\n default).\n\n3. As in Standard C, up to three octal digits are accepted.\n\n4. Unlike in Standard C, exactly two hex digits are required.\n\n5. In a string literal, hexadecimal and octal escapes denote the byte\n with the given value; it is not necessary that the byte encodes a\n character in the source character set. In a Unicode literal, these\n escapes denote a Unicode character with the given value.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) It is also\nimportant to note that the escape sequences marked as "(Unicode only)"\nin the table above fall into the category of unrecognized escapes for\nnon-Unicode string literals.\n\nWhen an ``\'r\'`` or ``\'R\'`` prefix is present, a character following a\nbackslash is included in the string without change, and *all\nbackslashes are left in the string*. 
For example, the string literal\n``r"\\n"`` consists of two characters: a backslash and a lowercase\n``\'n\'``. String quotes can be escaped with a backslash, but the\nbackslash remains in the string; for example, ``r"\\""`` is a valid\nstring literal consisting of two characters: a backslash and a double\nquote; ``r"\\"`` is not a valid string literal (even a raw string\ncannot end in an odd number of backslashes). Specifically, *a raw\nstring cannot end in a single backslash* (since the backslash would\nescape the following quote character). Note also that a single\nbackslash followed by a newline is interpreted as those two characters\nas part of the string, *not* as a line continuation.\n\nWhen an ``\'r\'`` or ``\'R\'`` prefix is used in conjunction with a\n``\'u\'`` or ``\'U\'`` prefix, then the ``\\uXXXX`` and ``\\UXXXXXXXX``\nescape sequences are processed while *all other backslashes are left\nin the string*. For example, the string literal ``ur"\\u0062\\n"``\nconsists of three Unicode characters: \'LATIN SMALL LETTER B\', \'REVERSE\nSOLIDUS\', and \'LATIN SMALL LETTER N\'. Backslashes can be escaped with\na preceding backslash; however, both remain in the string. As a\nresult, ``\\uXXXX`` escape sequences are only recognized when there are\nan odd number of backslashes.\n',
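A few informal interactive examples consistent with the rules above (the particular literals are illustrative only):

   >>> len('\n'), len(r'\n')        # a raw string keeps the backslash
   (1, 2)
   >>> '\q'                         # unrecognized escape: backslash is left in
   '\\q'
   >>> u'\N{LATIN SMALL LETTER A}'  # named Unicode escape (Unicode only)
   u'a'
   >>> len("""one
   ... two""")                      # unescaped newline inside triple quotes
   7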
'subscriptions': '\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object of a sequence or mapping type.\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to a\nplain integer. If this value is negative, the length of the sequence\nis added to it (so that, e.g., ``x[-1]`` selects the last item of\n``x``.) The resulting value must be a nonnegative integer less than\nthe number of items in the sequence, and the subscription selects the\nitem whose index is that value (counting from zero).\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n',
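For illustration (the values are hypothetical), subscription of a sequence and of a mapping as described above:

   >>> s = 'Python'
   >>> s[0], s[-1]            # negative index: the length of s is added first
   ('P', 'n')
   >>> d = {('a', 1): 'tuple key'}
   >>> d['a', 1]              # an expression list of two items forms a tuple
   'tuple key'
   >>> 'abc'[3]               # index must be less than the number of items
   Traceback (most recent call last):
     File "<stdin>", line 1, in <module>
   IndexError: string index out of range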
'truth': "\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an ``if`` or\n``while`` condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* ``None``\n\n* ``False``\n\n* zero of any numeric type, for example, ``0``, ``0L``, ``0.0``,\n ``0j``.\n\n* any empty sequence, for example, ``''``, ``()``, ``[]``.\n\n* any empty mapping, for example, ``{}``.\n\n* instances of user-defined classes, if the class defines a\n ``__nonzero__()`` or ``__len__()`` method, when that method returns\n the integer zero or ``bool`` value ``False``. [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn ``0`` or ``False`` for false and ``1`` or ``True`` for true,\nunless otherwise stated. (Important exception: the Boolean operations\n``or`` and ``and`` always return one of their operands.)\n",
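As a sketch of the rules above (the class ``Empty`` is a hypothetical example):

   >>> bool(None), bool(0), bool(''), bool([]), bool({})
   (False, False, False, False, False)
   >>> class Empty(object):
   ...     def __len__(self):
   ...         return 0
   ...
   >>> bool(Empty())          # __len__() returning zero makes instances false
   False
   >>> 0 or 'default'         # "or" returns one of its operands, not a bool
   'default'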
- 'try': '\nThe ``try`` statement\n*********************\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n``try``...``except``...``finally`` did not work. ``try``...``except``\nhad to be nested in ``try``...``finally``.\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object, a tuple containing an item compatible with the\nexception, or, in the (deprecated) case of string exceptions, is the\nraised string itself (note that the object identities must match, i.e.\nit must be the same string object, not just a string with the same\nvalue).\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the ``sys`` module:\n``sys.exc_type`` receives the object identifying the exception;\n``sys.exc_value`` receives the exception\'s parameter;\n``sys.exc_traceback`` receives a traceback object (see section *The\nstandard type hierarchy*) identifying the point in the program where\nthe exception occurred. These details are also available through the\n``sys.exc_info()`` function, which returns a tuple ``(exc_type,\nexc_value, exc_traceback)``. Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. 
[2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is lost. The exception information is not available to the\nprogram during execution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n',
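The following sketch (the function name ``divide`` is hypothetical) illustrates the order in which the ``except``, ``else`` and ``finally`` clauses described above run:

   >>> def divide(a, b):
   ...     try:
   ...         result = a / b
   ...     except ZeroDivisionError:
   ...         print "caught ZeroDivisionError"
   ...     else:
   ...         print "result is", result
   ...     finally:
   ...         print "finally clause always runs"
   ...
   >>> divide(8, 2)
   result is 4
   finally clause always runs
   >>> divide(8, 0)
   caught ZeroDivisionError
   finally clause always runs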
- 'types': '\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.).\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name ``None``.\n It is used to signify the absence of a value in many situations,\n e.g., it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``NotImplemented``. Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``Ellipsis``. It is used to indicate the presence of the ``...``\n syntax in a slice. Its truth value is true.\n\n``numbers.Number``\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n ``numbers.Integral``\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are three types of integers:\n\n Plain integers\n These represent numbers in the range -2147483648 through\n 2147483647. (The range may be larger on machines with a\n larger natural word size, but not smaller.) When the result\n of an operation would fall outside this range, the result is\n normally returned as a long integer (in some cases, the\n exception ``OverflowError`` is raised instead). For the\n purpose of shift and mask operations, integers are assumed to\n have a binary, 2\'s complement notation using 32 or more bits,\n and hiding no bits from the user (i.e., all 4294967296\n different bit patterns correspond to different values).\n\n Long integers\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans\n These represent the truth values False and True. The two\n objects representing the values False and True are the only\n Boolean objects. 
The Boolean type is a subtype of plain\n integers, and Boolean values behave like the values 0 and 1,\n respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ``"False"`` or\n ``"True"`` are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers and the least surprises when\n switching between the plain and long integer domains. Any\n operation, if it yields a result in the plain integer domain,\n will yield the same result in the long integer domain or when\n using mixed operands. The switch between domains is transparent\n to the programmer.\n\n ``numbers.Real`` (``float``)\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these is\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n ``numbers.Complex``\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number ``z`` can be retrieved through the read-only\n attributes ``z.real`` and ``z.imag``.\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function ``len()`` returns the number of\n items of a sequence. When the length of a sequence is *n*, the\n index set contains the numbers 0, 1, ..., *n*-1. Item *i* of\n sequence *a* is selected by ``a[i]``.\n\n Sequences also support slicing: ``a[i:j]`` selects all items with\n index *k* such that *i* ``<=`` *k* ``<`` *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: ``a[i:j:k]`` selects all items of *a* with index *x*\n where ``x = i + n*k``, *n* ``>=`` ``0`` and *i* ``<=`` *x* ``<``\n *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n The items of a string are characters. There is no separate\n character type; a character is represented by a string of one\n item. Characters represent (at least) 8-bit bytes. The\n built-in functions ``chr()`` and ``ord()`` convert between\n characters and nonnegative integers representing the byte\n values. Bytes with the values 0-127 usually represent the\n corresponding ASCII values, but the interpretation of values\n is up to the program. 
The string data type is also used to\n represent arrays of bytes, e.g., to hold data read from a\n file.\n\n (On systems whose native character set is not ASCII, strings\n may use EBCDIC in their internal representation, provided the\n functions ``chr()`` and ``ord()`` implement a mapping between\n ASCII and EBCDIC, and string comparison preserves the ASCII\n order. Or perhaps someone can propose a better rule?)\n\n Unicode\n The items of a Unicode object are Unicode code units. A\n Unicode code unit is represented by a Unicode object of one\n item and can hold either a 16-bit or 32-bit value\n representing a Unicode ordinal (the maximum value for the\n ordinal is given in ``sys.maxunicode``, and depends on how\n Python is configured at compile time). Surrogate pairs may\n be present in the Unicode object, and will be reported as two\n separate items. The built-in functions ``unichr()`` and\n ``ord()`` convert between code units and nonnegative integers\n representing the Unicode ordinals as defined in the Unicode\n Standard 3.0. Conversion from and to other encodings are\n possible through the Unicode method ``encode()`` and the\n built-in function ``unicode()``.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and ``del`` (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in ``bytearray()`` constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module ``array`` provides an additional example of\n a mutable sequence type.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function ``len()``\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n ``set()`` constructor and can be modified afterwards by several\n methods, such as ``add()``.\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in ``frozenset()`` constructor. 
As a frozenset is\n immutable and *hashable*, it can be used again as an element of\n another set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation ``a[k]`` selects the item indexed by\n ``k`` from the mapping ``a``; this can be used in expressions and\n as the target of assignments or ``del`` statements. The built-in\n function ``len()`` returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``) then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the ``{...}``\n notation (see section *Dictionary displays*).\n\n The extension modules ``dbm``, ``gdbm``, and ``bsddb`` provide\n additional examples of mapping types.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +-------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +=========================+=================================+=============+\n | ``func_doc`` | The function\'s documentation | Writable |\n | | string, or ``None`` if | |\n | | unavailable | |\n +-------------------------+---------------------------------+-------------+\n | ``__doc__`` | Another way of spelling | Writable |\n | | ``func_doc`` | |\n +-------------------------+---------------------------------+-------------+\n | ``func_name`` | The function\'s name | Writable |\n +-------------------------+---------------------------------+-------------+\n | ``__name__`` | Another way of spelling | Writable |\n | | ``func_name`` | |\n +-------------------------+---------------------------------+-------------+\n | ``__module__`` | The name of the module the | Writable |\n | | function was defined in, or | |\n | | ``None`` if unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_defaults`` | A tuple containing default | Writable |\n | | argument values for those | |\n | | arguments that have defaults, | |\n | | or ``None`` if no arguments | |\n | | have a default value | |\n +-------------------------+---------------------------------+-------------+\n | ``func_code`` | The code object representing | Writable |\n | | the compiled function body. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_globals`` | A reference to the dictionary | Read-only |\n | | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. 
| |\n +-------------------------+---------------------------------+-------------+\n | ``func_dict`` | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +-------------------------+---------------------------------+-------------+\n | ``func_closure`` | ``None`` or a tuple of cells | Read-only |\n | | that contain bindings for the | |\n | | function\'s free variables. | |\n +-------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Changed in version 2.4: ``func_name`` is now writable.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n User-defined methods\n A user-defined method object combines a class, a class instance\n (or ``None``) and any callable object (normally a user-defined\n function).\n\n Special read-only attributes: ``im_self`` is the class instance\n object, ``im_func`` is the function object; ``im_class`` is the\n class of ``im_self`` for bound methods or the class that asked\n for the method for unbound methods; ``__doc__`` is the method\'s\n documentation (same as ``im_func.__doc__``); ``__name__`` is the\n method name (same as ``im_func.__name__``); ``__module__`` is\n the name of the module the method was defined in, or ``None`` if\n unavailable.\n\n Changed in version 2.2: ``im_self`` used to refer to the class\n that defined the method.\n\n Changed in version 2.6: For 3.0 forward-compatibility,\n ``im_func`` is also available as ``__func__``, and ``im_self``\n as ``__self__``.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object, an unbound\n user-defined method object, or a class method object. When the\n attribute is a user-defined method object, a new method object\n is only created if the class from which it is being retrieved is\n the same as, or a derived class of, the class stored in the\n original method object; otherwise, the original method object is\n used as it is.\n\n When a user-defined method object is created by retrieving a\n user-defined function object from a class, its ``im_self``\n attribute is ``None`` and the method object is said to be\n unbound. When one is created by retrieving a user-defined\n function object from a class via one of its instances, its\n ``im_self`` attribute is the instance, and the method object is\n said to be bound. 
In either case, the new method\'s ``im_class``\n attribute is the class from which the retrieval takes place, and\n its ``im_func`` attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the ``im_func``\n attribute of the new instance is not the original method object\n but its ``im_func`` attribute.\n\n When a user-defined method object is created by retrieving a\n class method object from a class or instance, its ``im_self``\n attribute is the class itself (the same as the ``im_class``\n attribute), and its ``im_func`` attribute is the function object\n underlying the class method.\n\n When an unbound user-defined method object is called, the\n underlying function (``im_func``) is called, with the\n restriction that the first argument must be an instance of the\n proper class (``im_class``) or of a derived class thereof.\n\n When a bound user-defined method object is called, the\n underlying function (``im_func``) is called, inserting the class\n instance (``im_self``) in front of the argument list. For\n instance, when ``C`` is a class which contains a definition for\n a function ``f()``, and ``x`` is an instance of ``C``, calling\n ``x.f(1)`` is equivalent to calling ``C.f(x, 1)``.\n\n When a user-defined method object is derived from a class method\n object, the "class instance" stored in ``im_self`` will actually\n be the class itself, so that calling either ``x.f(1)`` or\n ``C.f(1)`` is equivalent to calling ``f(C,1)`` where ``f`` is\n the underlying function.\n\n Note that the transformation from function object to (unbound or\n bound) method object happens each time the attribute is\n retrieved from the class or instance. In some cases, a fruitful\n optimization is to assign the attribute to a local variable and\n call that local variable. Also notice that this transformation\n only happens for user-defined functions; other callable objects\n (and all non-callable objects) are retrieved without\n transformation. It is also important to note that user-defined\n functions which are attributes of a class instance are not\n converted to bound methods; this *only* happens when the\n function is an attribute of the class.\n\n Generator functions\n A function or method which uses the ``yield`` statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s ``next()`` method will cause the function to\n execute until it provides a value using the ``yield`` statement.\n When the function executes a ``return`` statement or falls off\n the end, a ``StopIteration`` exception is raised and the\n iterator will have reached the end of the set of values to be\n returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are ``len()`` and ``math.sin()``\n (``math`` is a standard built-in module). The number and type of\n the arguments are determined by the C function. 
Special read-\n only attributes: ``__doc__`` is the function\'s documentation\n string, or ``None`` if unavailable; ``__name__`` is the\n function\'s name; ``__self__`` is set to ``None`` (but see the\n next item); ``__module__`` is the name of the module the\n function was defined in or ``None`` if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n ``alist.append()``, assuming *alist* is a list object. In this\n case, the special read-only attribute ``__self__`` is set to the\n object denoted by *alist*.\n\n Class Types\n Class types, or "new-style classes," are callable. These\n objects normally act as factories for new instances of\n themselves, but variations are possible for class types that\n override ``__new__()``. The arguments of the call are passed to\n ``__new__()`` and, in the typical case, to ``__init__()`` to\n initialize the new instance.\n\n Classic Classes\n Class objects are described below. When a class object is\n called, a new class instance (also described below) is created\n and returned. This implies a call to the class\'s ``__init__()``\n method if it has one. Any arguments are passed on to the\n ``__init__()`` method. If there is no ``__init__()`` method,\n the class must be called without arguments.\n\n Class instances\n Class instances are described below. Class instances are\n callable only when the class has a ``__call__()`` method;\n ``x(arguments)`` is a shorthand for ``x.__call__(arguments)``.\n\nModules\n Modules are imported by the ``import`` statement (see section *The\n import statement*). A module object has a namespace implemented by\n a dictionary object (this is the dictionary referenced by the\n func_globals attribute of functions defined in the module).\n Attribute references are translated to lookups in this dictionary,\n e.g., ``m.x`` is equivalent to ``m.__dict__["x"]``. A module object\n does not contain the code object used to initialize the module\n (since it isn\'t needed once the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., ``m.x = 1`` is equivalent to ``m.__dict__["x"] = 1``.\n\n Special read-only attribute: ``__dict__`` is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: ``__name__`` is the module\'s\n name; ``__doc__`` is the module\'s documentation string, or ``None``\n if unavailable; ``__file__`` is the pathname of the file from which\n the module was loaded, if it was loaded from a file. The\n ``__file__`` attribute is not present for C modules that are\n statically linked into the interpreter; for extension modules\n loaded dynamically from a shared library, it is the pathname of the\n shared library file.\n\nClasses\n Both class types (new-style classes) and class objects (old-\n style/classic classes) are typically created by class definitions\n (see section *Class definitions*). A class has a namespace\n implemented by a dictionary object. 
Class attribute references are\n translated to lookups in this dictionary, e.g., ``C.x`` is\n translated to ``C.__dict__["x"]`` (although for new-style classes\n in particular there are a number of hooks which allow for other\n means of locating attributes). When the attribute name is not found\n there, the attribute search continues in the base classes. For\n old-style classes, the search is depth-first, left-to-right in the\n order of occurrence in the base class list. New-style classes use\n the more complex C3 method resolution order which behaves correctly\n even in the presence of \'diamond\' inheritance structures where\n there are multiple inheritance paths leading back to a common\n ancestor. Additional details on the C3 MRO used by new-style\n classes can be found in the documentation accompanying the 2.3\n release at http://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class ``C``, say) would yield\n a user-defined function object or an unbound user-defined method\n object whose associated class is either ``C`` or one of its base\n classes, it is transformed into an unbound user-defined method\n object whose ``im_class`` attribute is ``C``. When it would yield a\n class method object, it is transformed into a bound user-defined\n method object whose ``im_class`` and ``im_self`` attributes are\n both ``C``. When it would yield a static method object, it is\n transformed into the object wrapped by the static method object.\n See section *Implementing Descriptors* for another way in which\n attributes retrieved from a class may differ from those actually\n contained in its ``__dict__`` (note that only new-style classes\n support descriptors).\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: ``__name__`` is the class name; ``__module__``\n is the module name in which the class was defined; ``__dict__`` is\n the dictionary containing the class\'s namespace; ``__bases__`` is a\n tuple (possibly empty or a singleton) containing the base classes,\n in the order of their occurrence in the base class list;\n ``__doc__`` is the class\'s documentation string, or None if\n undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object or an unbound user-defined method object whose\n associated class is the class (call it ``C``) of the instance for\n which the attribute reference was initiated or one of its bases, it\n is transformed into a bound user-defined method object whose\n ``im_class`` attribute is ``C`` and whose ``im_self`` attribute is\n the instance. Static method and class method objects are also\n transformed, as if they had been retrieved from class ``C``; see\n above under "Classes". See section *Implementing Descriptors* for\n another way in which attributes of a class retrieved via its\n instances may differ from the objects actually stored in the\n class\'s ``__dict__``. 
If no class attribute is found, and the\n object\'s class has a ``__getattr__()`` method, that is called to\n satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n ``__setattr__()`` or ``__delattr__()`` method, this is called\n instead of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: ``__dict__`` is the attribute dictionary;\n ``__class__`` is the instance\'s class.\n\nFiles\n A file object represents an open file. File objects are created by\n the ``open()`` built-in function, and also by ``os.popen()``,\n ``os.fdopen()``, and the ``makefile()`` method of socket objects\n (and perhaps by other functions or methods provided by extension\n modules). The objects ``sys.stdin``, ``sys.stdout`` and\n ``sys.stderr`` are initialized to file objects corresponding to the\n interpreter\'s standard input, output and error streams. See *File\n Objects* for complete documentation of file objects.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). 
Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: ``co_name`` gives the function\n name; ``co_argcount`` is the number of positional arguments\n (including arguments with default values); ``co_nlocals`` is the\n number of local variables used by the function (including\n arguments); ``co_varnames`` is a tuple containing the names of\n the local variables (starting with the argument names);\n ``co_cellvars`` is a tuple containing the names of local\n variables that are referenced by nested functions;\n ``co_freevars`` is a tuple containing the names of free\n variables; ``co_code`` is a string representing the sequence of\n bytecode instructions; ``co_consts`` is a tuple containing the\n literals used by the bytecode; ``co_names`` is a tuple\n containing the names used by the bytecode; ``co_filename`` is\n the filename from which the code was compiled;\n ``co_firstlineno`` is the first line number of the function;\n ``co_lnotab`` is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); ``co_stacksize`` is the required stack size\n (including local variables); ``co_flags`` is an integer encoding\n a number of flags for the interpreter.\n\n The following flag bits are defined for ``co_flags``: bit\n ``0x04`` is set if the function uses the ``*arguments`` syntax\n to accept an arbitrary number of positional arguments; bit\n ``0x08`` is set if the function uses the ``**keywords`` syntax\n to accept arbitrary keyword arguments; bit ``0x20`` is set if\n the function is a generator.\n\n Future feature declarations (``from __future__ import\n division``) also use bits in ``co_flags`` to indicate whether a\n code object was compiled with a particular feature enabled: bit\n ``0x2000`` is set if the function was compiled with future\n division enabled; bits ``0x10`` and ``0x1000`` were used in\n earlier versions of Python.\n\n Other bits in ``co_flags`` are reserved for internal use.\n\n If a code object represents a function, the first item in\n ``co_consts`` is the documentation string of the function, or\n ``None`` if undefined.\n\n Frame objects\n Frame objects represent execution frames. They may occur in\n traceback objects (see below).\n\n Special read-only attributes: ``f_back`` is to the previous\n stack frame (towards the caller), or ``None`` if this is the\n bottom stack frame; ``f_code`` is the code object being executed\n in this frame; ``f_locals`` is the dictionary used to look up\n local variables; ``f_globals`` is used for global variables;\n ``f_builtins`` is used for built-in (intrinsic) names;\n ``f_restricted`` is a flag indicating whether the function is\n executing in restricted execution mode; ``f_lasti`` gives the\n precise instruction (this is an index into the bytecode string\n of the code object).\n\n Special writable attributes: ``f_trace``, if not ``None``, is a\n function called at the start of each source code line (this is\n used by the debugger); ``f_exc_type``, ``f_exc_value``,\n ``f_exc_traceback`` represent the last exception raised in the\n parent frame provided another exception was ever raised in the\n current frame (in all other cases they are None); ``f_lineno``\n is the current line number of the frame --- writing to this from\n within a trace function jumps to the given line (only for the\n bottom-most frame). 
A debugger can implement a Jump command\n (aka Set Next Statement) by writing to f_lineno.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as ``sys.exc_traceback``,\n and also as the third item of the tuple returned by\n ``sys.exc_info()``. The latter is the preferred interface,\n since it works correctly when the program is using multiple\n threads. When the program contains no suitable handler, the\n stack trace is written (nicely formatted) to the standard error\n stream; if the interpreter is interactive, it is also made\n available to the user as ``sys.last_traceback``.\n\n Special read-only attributes: ``tb_next`` is the next level in\n the stack trace (towards the frame where the exception\n occurred), or ``None`` if there is no next level; ``tb_frame``\n points to the execution frame of the current level;\n ``tb_lineno`` gives the line number where the exception\n occurred; ``tb_lasti`` indicates the precise instruction. The\n line number and last instruction in the traceback may differ\n from the line number of its frame object if the exception\n occurred in a ``try`` statement with no matching except clause\n or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices when *extended slice\n syntax* is used. This is a slice using two colons, or multiple\n slices or ellipses separated by commas, e.g., ``a[i:j:step]``,\n ``a[i:j, k:l]``, or ``a[..., i:j]``. They are also created by\n the built-in ``slice()`` function.\n\n Special read-only attributes: ``start`` is the lower bound;\n ``stop`` is the upper bound; ``step`` is the step value; each is\n ``None`` if omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the extended slice that the slice\n object would describe if applied to a sequence of *length*\n items. It returns a tuple of three integers; respectively\n these are the *start* and *stop* indices and the *step* or\n stride length of the slice. Missing or out-of-bounds indices\n are handled in a manner consistent with regular slices.\n\n New in version 2.3.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n ``staticmethod()`` constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". 
Class method objects are created\n by the built-in ``classmethod()`` constructor.\n',
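The entry above describes how attribute retrieval turns functions into bound and unbound method objects with ``im_self``, ``im_func`` and ``im_class`` set. As a rough illustrative sketch only (an editorial addition, not part of the topics.py entry itself, and assuming the Python 2 semantics it describes), an interactive session might look like this:

    >>> class C(object):
    ...     def f(self, arg):
    ...         return arg
    ...
    >>> C.f                          # retrieval from the class gives an unbound method
    <unbound method C.f>
    >>> x = C()
    >>> x.f.im_self is x             # retrieval via an instance binds it to that instance
    True
    >>> x.f.im_func is C.__dict__['f']
    True
    >>> x.f(1) == C.f(x, 1)          # the bound call inserts the instance as the first argument
    True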
+ 'try': '\nThe ``try`` statement\n*********************\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n``try``...``except``...``finally`` did not work. ``try``...``except``\nhad to be nested in ``try``...``finally``.\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object, or a tuple containing an item compatible with the\nexception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the ``sys`` module:\n``sys.exc_type`` receives the object identifying the exception;\n``sys.exc_value`` receives the exception\'s parameter;\n``sys.exc_traceback`` receives a traceback object (see section *The\nstandard type hierarchy*) identifying the point in the program where\nthe exception occurred. These details are also available through the\n``sys.exc_info()`` function, which returns a tuple ``(exc_type,\nexc_value, exc_traceback)``. Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. 
If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is discarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the ``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nThe return value of a function is determined by the last ``return``\nstatement executed. Since the ``finally`` clause always executes, a\n``return`` statement executed in the ``finally`` clause will always be\nthe last one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n',
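As a small sketch of the behaviour described above (not part of the 'try' entry itself; it assumes the Python 2 ``print`` statement and the ``sys.exc_info()`` interface mentioned there), the ``else`` clause runs only when no exception occurred, while the ``finally`` clause runs in either case:

    >>> import sys
    >>> try:
    ...     1 / 0
    ... except ZeroDivisionError:
    ...     exc_type, exc_value, exc_tb = sys.exc_info()
    ...     print exc_type.__name__
    ... else:
    ...     print 'no exception'
    ... finally:
    ...     print 'cleanup'
    ...
    ZeroDivisionError
    cleanup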
+ 'types': '\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.).\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name ``None``.\n It is used to signify the absence of a value in many situations,\n e.g., it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``NotImplemented``. Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``Ellipsis``. It is used to indicate the presence of the ``...``\n syntax in a slice. Its truth value is true.\n\n``numbers.Number``\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n ``numbers.Integral``\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are three types of integers:\n\n Plain integers\n These represent numbers in the range -2147483648 through\n 2147483647. (The range may be larger on machines with a\n larger natural word size, but not smaller.) When the result\n of an operation would fall outside this range, the result is\n normally returned as a long integer (in some cases, the\n exception ``OverflowError`` is raised instead). For the\n purpose of shift and mask operations, integers are assumed to\n have a binary, 2\'s complement notation using 32 or more bits,\n and hiding no bits from the user (i.e., all 4294967296\n different bit patterns correspond to different values).\n\n Long integers\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans\n These represent the truth values False and True. The two\n objects representing the values ``False`` and ``True`` are\n the only Boolean objects. 
The Boolean type is a subtype of\n plain integers, and Boolean values behave like the values 0\n and 1, respectively, in almost all contexts, the exception\n being that when converted to a string, the strings\n ``"False"`` or ``"True"`` are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers and the least surprises when\n switching between the plain and long integer domains. Any\n operation, if it yields a result in the plain integer domain,\n will yield the same result in the long integer domain or when\n using mixed operands. The switch between domains is transparent\n to the programmer.\n\n ``numbers.Real`` (``float``)\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these is\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n ``numbers.Complex``\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number ``z`` can be retrieved through the read-only\n attributes ``z.real`` and ``z.imag``.\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function ``len()`` returns the number of\n items of a sequence. When the length of a sequence is *n*, the\n index set contains the numbers 0, 1, ..., *n*-1. Item *i* of\n sequence *a* is selected by ``a[i]``.\n\n Sequences also support slicing: ``a[i:j]`` selects all items with\n index *k* such that *i* ``<=`` *k* ``<`` *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: ``a[i:j:k]`` selects all items of *a* with index *x*\n where ``x = i + n*k``, *n* ``>=`` ``0`` and *i* ``<=`` *x* ``<``\n *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n The items of a string are characters. There is no separate\n character type; a character is represented by a string of one\n item. Characters represent (at least) 8-bit bytes. The\n built-in functions ``chr()`` and ``ord()`` convert between\n characters and nonnegative integers representing the byte\n values. Bytes with the values 0-127 usually represent the\n corresponding ASCII values, but the interpretation of values\n is up to the program. 
The string data type is also used to\n represent arrays of bytes, e.g., to hold data read from a\n file.\n\n (On systems whose native character set is not ASCII, strings\n may use EBCDIC in their internal representation, provided the\n functions ``chr()`` and ``ord()`` implement a mapping between\n ASCII and EBCDIC, and string comparison preserves the ASCII\n order. Or perhaps someone can propose a better rule?)\n\n Unicode\n The items of a Unicode object are Unicode code units. A\n Unicode code unit is represented by a Unicode object of one\n item and can hold either a 16-bit or 32-bit value\n representing a Unicode ordinal (the maximum value for the\n ordinal is given in ``sys.maxunicode``, and depends on how\n Python is configured at compile time). Surrogate pairs may\n be present in the Unicode object, and will be reported as two\n separate items. The built-in functions ``unichr()`` and\n ``ord()`` convert between code units and nonnegative integers\n representing the Unicode ordinals as defined in the Unicode\n Standard 3.0. Conversion from and to other encodings are\n possible through the Unicode method ``encode()`` and the\n built-in function ``unicode()``.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and ``del`` (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in ``bytearray()`` constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module ``array`` provides an additional example of\n a mutable sequence type.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function ``len()``\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n ``set()`` constructor and can be modified afterwards by several\n methods, such as ``add()``.\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in ``frozenset()`` constructor. 
As a frozenset is\n immutable and *hashable*, it can be used again as an element of\n another set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation ``a[k]`` selects the item indexed by\n ``k`` from the mapping ``a``; this can be used in expressions and\n as the target of assignments or ``del`` statements. The built-in\n function ``len()`` returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``) then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the ``{...}``\n notation (see section *Dictionary displays*).\n\n The extension modules ``dbm``, ``gdbm``, and ``bsddb`` provide\n additional examples of mapping types.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +-------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +=========================+=================================+=============+\n | ``__doc__`` | The function\'s documentation | Writable |\n | ``func_doc`` | string, or ``None`` if | |\n | | unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | ``__name__`` | The function\'s name. | Writable |\n | ``func_name`` | | |\n +-------------------------+---------------------------------+-------------+\n | ``__module__`` | The name of the module the | Writable |\n | | function was defined in, or | |\n | | ``None`` if unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | ``__defaults__`` | A tuple containing default | Writable |\n | ``func_defaults`` | argument values for those | |\n | | arguments that have defaults, | |\n | | or ``None`` if no arguments | |\n | | have a default value. | |\n +-------------------------+---------------------------------+-------------+\n | ``__code__`` | The code object representing | Writable |\n | ``func_code`` | the compiled function body. | |\n +-------------------------+---------------------------------+-------------+\n | ``__globals__`` | A reference to the dictionary | Read-only |\n | ``func_globals`` | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. | |\n +-------------------------+---------------------------------+-------------+\n | ``__dict__`` | The namespace supporting | Writable |\n | ``func_dict`` | arbitrary function attributes. 
| |\n +-------------------------+---------------------------------+-------------+\n | ``__closure__`` | ``None`` or a tuple of cells | Read-only |\n | ``func_closure`` | that contain bindings for the | |\n | | function\'s free variables. | |\n +-------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Changed in version 2.4: ``func_name`` is now writable.\n\n Changed in version 2.6: The double-underscore attributes\n ``__closure__``, ``__code__``, ``__defaults__``, and\n ``__globals__`` were introduced as aliases for the corresponding\n ``func_*`` attributes for forwards compatibility with Python 3.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n User-defined methods\n A user-defined method object combines a class, a class instance\n (or ``None``) and any callable object (normally a user-defined\n function).\n\n Special read-only attributes: ``im_self`` is the class instance\n object, ``im_func`` is the function object; ``im_class`` is the\n class of ``im_self`` for bound methods or the class that asked\n for the method for unbound methods; ``__doc__`` is the method\'s\n documentation (same as ``im_func.__doc__``); ``__name__`` is the\n method name (same as ``im_func.__name__``); ``__module__`` is\n the name of the module the method was defined in, or ``None`` if\n unavailable.\n\n Changed in version 2.2: ``im_self`` used to refer to the class\n that defined the method.\n\n Changed in version 2.6: For Python 3 forward-compatibility,\n ``im_func`` is also available as ``__func__``, and ``im_self``\n as ``__self__``.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object, an unbound\n user-defined method object, or a class method object. When the\n attribute is a user-defined method object, a new method object\n is only created if the class from which it is being retrieved is\n the same as, or a derived class of, the class stored in the\n original method object; otherwise, the original method object is\n used as it is.\n\n When a user-defined method object is created by retrieving a\n user-defined function object from a class, its ``im_self``\n attribute is ``None`` and the method object is said to be\n unbound. When one is created by retrieving a user-defined\n function object from a class via one of its instances, its\n ``im_self`` attribute is the instance, and the method object is\n said to be bound. 
In either case, the new method\'s ``im_class``\n attribute is the class from which the retrieval takes place, and\n its ``im_func`` attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the ``im_func``\n attribute of the new instance is not the original method object\n but its ``im_func`` attribute.\n\n When a user-defined method object is created by retrieving a\n class method object from a class or instance, its ``im_self``\n attribute is the class itself, and its ``im_func`` attribute is\n the function object underlying the class method.\n\n When an unbound user-defined method object is called, the\n underlying function (``im_func``) is called, with the\n restriction that the first argument must be an instance of the\n proper class (``im_class``) or of a derived class thereof.\n\n When a bound user-defined method object is called, the\n underlying function (``im_func``) is called, inserting the class\n instance (``im_self``) in front of the argument list. For\n instance, when ``C`` is a class which contains a definition for\n a function ``f()``, and ``x`` is an instance of ``C``, calling\n ``x.f(1)`` is equivalent to calling ``C.f(x, 1)``.\n\n When a user-defined method object is derived from a class method\n object, the "class instance" stored in ``im_self`` will actually\n be the class itself, so that calling either ``x.f(1)`` or\n ``C.f(1)`` is equivalent to calling ``f(C,1)`` where ``f`` is\n the underlying function.\n\n Note that the transformation from function object to (unbound or\n bound) method object happens each time the attribute is\n retrieved from the class or instance. In some cases, a fruitful\n optimization is to assign the attribute to a local variable and\n call that local variable. Also notice that this transformation\n only happens for user-defined functions; other callable objects\n (and all non-callable objects) are retrieved without\n transformation. It is also important to note that user-defined\n functions which are attributes of a class instance are not\n converted to bound methods; this *only* happens when the\n function is an attribute of the class.\n\n Generator functions\n A function or method which uses the ``yield`` statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s ``next()`` method will cause the function to\n execute until it provides a value using the ``yield`` statement.\n When the function executes a ``return`` statement or falls off\n the end, a ``StopIteration`` exception is raised and the\n iterator will have reached the end of the set of values to be\n returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are ``len()`` and ``math.sin()``\n (``math`` is a standard built-in module). The number and type of\n the arguments are determined by the C function. 
Special read-\n only attributes: ``__doc__`` is the function\'s documentation\n string, or ``None`` if unavailable; ``__name__`` is the\n function\'s name; ``__self__`` is set to ``None`` (but see the\n next item); ``__module__`` is the name of the module the\n function was defined in or ``None`` if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n ``alist.append()``, assuming *alist* is a list object. In this\n case, the special read-only attribute ``__self__`` is set to the\n object denoted by *alist*.\n\n Class Types\n Class types, or "new-style classes," are callable. These\n objects normally act as factories for new instances of\n themselves, but variations are possible for class types that\n override ``__new__()``. The arguments of the call are passed to\n ``__new__()`` and, in the typical case, to ``__init__()`` to\n initialize the new instance.\n\n Classic Classes\n Class objects are described below. When a class object is\n called, a new class instance (also described below) is created\n and returned. This implies a call to the class\'s ``__init__()``\n method if it has one. Any arguments are passed on to the\n ``__init__()`` method. If there is no ``__init__()`` method,\n the class must be called without arguments.\n\n Class instances\n Class instances are described below. Class instances are\n callable only when the class has a ``__call__()`` method;\n ``x(arguments)`` is a shorthand for ``x.__call__(arguments)``.\n\nModules\n Modules are imported by the ``import`` statement (see section *The\n import statement*). A module object has a namespace implemented by\n a dictionary object (this is the dictionary referenced by the\n func_globals attribute of functions defined in the module).\n Attribute references are translated to lookups in this dictionary,\n e.g., ``m.x`` is equivalent to ``m.__dict__["x"]``. A module object\n does not contain the code object used to initialize the module\n (since it isn\'t needed once the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., ``m.x = 1`` is equivalent to ``m.__dict__["x"] = 1``.\n\n Special read-only attribute: ``__dict__`` is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: ``__name__`` is the module\'s\n name; ``__doc__`` is the module\'s documentation string, or ``None``\n if unavailable; ``__file__`` is the pathname of the file from which\n the module was loaded, if it was loaded from a file. The\n ``__file__`` attribute is not present for C modules that are\n statically linked into the interpreter; for extension modules\n loaded dynamically from a shared library, it is the pathname of the\n shared library file.\n\nClasses\n Both class types (new-style classes) and class objects (old-\n style/classic classes) are typically created by class definitions\n (see section *Class definitions*). A class has a namespace\n implemented by a dictionary object. 
Class attribute references are\n translated to lookups in this dictionary, e.g., ``C.x`` is\n translated to ``C.__dict__["x"]`` (although for new-style classes\n in particular there are a number of hooks which allow for other\n means of locating attributes). When the attribute name is not found\n there, the attribute search continues in the base classes. For\n old-style classes, the search is depth-first, left-to-right in the\n order of occurrence in the base class list. New-style classes use\n the more complex C3 method resolution order which behaves correctly\n even in the presence of \'diamond\' inheritance structures where\n there are multiple inheritance paths leading back to a common\n ancestor. Additional details on the C3 MRO used by new-style\n classes can be found in the documentation accompanying the 2.3\n release at http://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class ``C``, say) would yield\n a user-defined function object or an unbound user-defined method\n object whose associated class is either ``C`` or one of its base\n classes, it is transformed into an unbound user-defined method\n object whose ``im_class`` attribute is ``C``. When it would yield a\n class method object, it is transformed into a bound user-defined\n method object whose ``im_self`` attribute is ``C``. When it would\n yield a static method object, it is transformed into the object\n wrapped by the static method object. See section *Implementing\n Descriptors* for another way in which attributes retrieved from a\n class may differ from those actually contained in its ``__dict__``\n (note that only new-style classes support descriptors).\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: ``__name__`` is the class name; ``__module__``\n is the module name in which the class was defined; ``__dict__`` is\n the dictionary containing the class\'s namespace; ``__bases__`` is a\n tuple (possibly empty or a singleton) containing the base classes,\n in the order of their occurrence in the base class list;\n ``__doc__`` is the class\'s documentation string, or None if\n undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object or an unbound user-defined method object whose\n associated class is the class (call it ``C``) of the instance for\n which the attribute reference was initiated or one of its bases, it\n is transformed into a bound user-defined method object whose\n ``im_class`` attribute is ``C`` and whose ``im_self`` attribute is\n the instance. Static method and class method objects are also\n transformed, as if they had been retrieved from class ``C``; see\n above under "Classes". See section *Implementing Descriptors* for\n another way in which attributes of a class retrieved via its\n instances may differ from the objects actually stored in the\n class\'s ``__dict__``. 
If no class attribute is found, and the\n object\'s class has a ``__getattr__()`` method, that is called to\n satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n ``__setattr__()`` or ``__delattr__()`` method, this is called\n instead of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: ``__dict__`` is the attribute dictionary;\n ``__class__`` is the instance\'s class.\n\nFiles\n A file object represents an open file. File objects are created by\n the ``open()`` built-in function, and also by ``os.popen()``,\n ``os.fdopen()``, and the ``makefile()`` method of socket objects\n (and perhaps by other functions or methods provided by extension\n modules). The objects ``sys.stdin``, ``sys.stdout`` and\n ``sys.stderr`` are initialized to file objects corresponding to the\n interpreter\'s standard input, output and error streams. See *File\n Objects* for complete documentation of file objects.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). 
Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: ``co_name`` gives the function\n name; ``co_argcount`` is the number of positional arguments\n (including arguments with default values); ``co_nlocals`` is the\n number of local variables used by the function (including\n arguments); ``co_varnames`` is a tuple containing the names of\n the local variables (starting with the argument names);\n ``co_cellvars`` is a tuple containing the names of local\n variables that are referenced by nested functions;\n ``co_freevars`` is a tuple containing the names of free\n variables; ``co_code`` is a string representing the sequence of\n bytecode instructions; ``co_consts`` is a tuple containing the\n literals used by the bytecode; ``co_names`` is a tuple\n containing the names used by the bytecode; ``co_filename`` is\n the filename from which the code was compiled;\n ``co_firstlineno`` is the first line number of the function;\n ``co_lnotab`` is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); ``co_stacksize`` is the required stack size\n (including local variables); ``co_flags`` is an integer encoding\n a number of flags for the interpreter.\n\n The following flag bits are defined for ``co_flags``: bit\n ``0x04`` is set if the function uses the ``*arguments`` syntax\n to accept an arbitrary number of positional arguments; bit\n ``0x08`` is set if the function uses the ``**keywords`` syntax\n to accept arbitrary keyword arguments; bit ``0x20`` is set if\n the function is a generator.\n\n Future feature declarations (``from __future__ import\n division``) also use bits in ``co_flags`` to indicate whether a\n code object was compiled with a particular feature enabled: bit\n ``0x2000`` is set if the function was compiled with future\n division enabled; bits ``0x10`` and ``0x1000`` were used in\n earlier versions of Python.\n\n Other bits in ``co_flags`` are reserved for internal use.\n\n If a code object represents a function, the first item in\n ``co_consts`` is the documentation string of the function, or\n ``None`` if undefined.\n\n Frame objects\n Frame objects represent execution frames. They may occur in\n traceback objects (see below).\n\n Special read-only attributes: ``f_back`` is to the previous\n stack frame (towards the caller), or ``None`` if this is the\n bottom stack frame; ``f_code`` is the code object being executed\n in this frame; ``f_locals`` is the dictionary used to look up\n local variables; ``f_globals`` is used for global variables;\n ``f_builtins`` is used for built-in (intrinsic) names;\n ``f_restricted`` is a flag indicating whether the function is\n executing in restricted execution mode; ``f_lasti`` gives the\n precise instruction (this is an index into the bytecode string\n of the code object).\n\n Special writable attributes: ``f_trace``, if not ``None``, is a\n function called at the start of each source code line (this is\n used by the debugger); ``f_exc_type``, ``f_exc_value``,\n ``f_exc_traceback`` represent the last exception raised in the\n parent frame provided another exception was ever raised in the\n current frame (in all other cases they are None); ``f_lineno``\n is the current line number of the frame --- writing to this from\n within a trace function jumps to the given line (only for the\n bottom-most frame). 
A debugger can implement a Jump command\n (aka Set Next Statement) by writing to f_lineno.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as ``sys.exc_traceback``,\n and also as the third item of the tuple returned by\n ``sys.exc_info()``. The latter is the preferred interface,\n since it works correctly when the program is using multiple\n threads. When the program contains no suitable handler, the\n stack trace is written (nicely formatted) to the standard error\n stream; if the interpreter is interactive, it is also made\n available to the user as ``sys.last_traceback``.\n\n Special read-only attributes: ``tb_next`` is the next level in\n the stack trace (towards the frame where the exception\n occurred), or ``None`` if there is no next level; ``tb_frame``\n points to the execution frame of the current level;\n ``tb_lineno`` gives the line number where the exception\n occurred; ``tb_lasti`` indicates the precise instruction. The\n line number and last instruction in the traceback may differ\n from the line number of its frame object if the exception\n occurred in a ``try`` statement with no matching except clause\n or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices when *extended slice\n syntax* is used. This is a slice using two colons, or multiple\n slices or ellipses separated by commas, e.g., ``a[i:j:step]``,\n ``a[i:j, k:l]``, or ``a[..., i:j]``. They are also created by\n the built-in ``slice()`` function.\n\n Special read-only attributes: ``start`` is the lower bound;\n ``stop`` is the upper bound; ``step`` is the step value; each is\n ``None`` if omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the extended slice that the slice\n object would describe if applied to a sequence of *length*\n items. It returns a tuple of three integers; respectively\n these are the *start* and *stop* indices and the *step* or\n stride length of the slice. Missing or out-of-bounds indices\n are handled in a manner consistent with regular slices.\n\n New in version 2.3.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n ``staticmethod()`` constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". 
Class method objects are created\n by the built-in ``classmethod()`` constructor.\n',
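A brief sketch of the behaviour documented in this topic, assuming ordinary CPython 2.7 semantics and using only the attributes named above (``co_flags``, ``func_code``, ``slice.indices()``); it is an illustration, not part of the topic text:

    def f(*args, **kwargs):        # uses the *arguments and **keywords syntax
        yield args, kwargs         # and is a generator

    flags = f.func_code.co_flags
    print bool(flags & 0x04)       # True: *arguments bit
    print bool(flags & 0x08)       # True: **keywords bit
    print bool(flags & 0x20)       # True: generator bit

    s = slice(-3, None)            # the slice object behind seq[-3:]
    print s.indices(10)            # (7, 10, 1): resolved start, stop, step for length 10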
'typesfunctions': '\nFunctions\n*********\n\nFunction objects are created by function definitions. The only\noperation on a function object is to call it: ``func(argument-list)``.\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n',
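As a small, hedged illustration of the single supported operation (the call) and the two flavours mentioned above, under standard CPython 2.7 behaviour:

    def add(a, b):
        return a + b               # a user-defined function object

    print add(2, 3)                # 5 -- calling is the only operation
    print type(add)                # <type 'function'>
    print type(len)                # <type 'builtin_function_or_method'>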
- 'typesmapping': '\nMapping Types --- ``dict``\n**************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built\nin ``list``, ``set``, and ``tuple`` classes, and the ``collections``\nmodule.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as ``1`` and ``1.0``) then they can be used interchangeably to\nindex the same dictionary entry. (Note however, that since computers\nstore floating-point numbers as approximations it is usually unwise to\nuse them as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of\n``key: value`` pairs within braces, for example: ``{\'jack\': 4098,\n\'sjoerd\': 4127}`` or ``{4098: \'jack\', 4127: \'sjoerd\'}``, or by the\n``dict`` constructor.\n\nclass class dict([arg])\n\n Return a new dictionary initialized from an optional positional\n argument or from a set of keyword arguments. If no arguments are\n given, return a new empty dictionary. If the positional argument\n *arg* is a mapping object, return a dictionary mapping the same\n keys to the same values as does the mapping object. Otherwise the\n positional argument must be a sequence, a container that supports\n iteration, or an iterator object. The elements of the argument\n must each also be of one of those kinds, and each must in turn\n contain exactly two objects. The first is used as a key in the new\n dictionary, and the second as the key\'s value. If a given key is\n seen more than once, the last value associated with it is retained\n in the new dictionary.\n\n If keyword arguments are given, the keywords themselves with their\n associated values are added as items to the dictionary. If a key is\n specified both in the positional argument and as a keyword\n argument, the value associated with the keyword is retained in the\n dictionary. For example, these all return a dictionary equal to\n ``{"one": 1, "two": 2}``:\n\n * ``dict(one=1, two=2)``\n\n * ``dict({\'one\': 1, \'two\': 2})``\n\n * ``dict(zip((\'one\', \'two\'), (1, 2)))``\n\n * ``dict([[\'two\', 2], [\'one\', 1]])``\n\n The first example only works for keys that are valid Python\n identifiers; the others work with any valid keys.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for building a dictionary from\n keyword arguments added.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a ``KeyError`` if\n *key* is not in the map.\n\n New in version 2.5: If a subclass of dict defines a method\n ``__missing__()``, if the key *key* is not present, the\n ``d[key]`` operation calls that method with the key *key* as\n argument. The ``d[key]`` operation then returns or raises\n whatever is returned or raised by the ``__missing__(key)`` call\n if the key is not present. No other operations or methods invoke\n ``__missing__()``. If ``__missing__()`` is not defined,\n ``KeyError`` is raised. 
``__missing__()`` must be a method; it\n cannot be an instance variable. For an example, see\n ``collections.defaultdict``.\n\n d[key] = value\n\n Set ``d[key]`` to *value*.\n\n del d[key]\n\n Remove ``d[key]`` from *d*. Raises a ``KeyError`` if *key* is\n not in the map.\n\n key in d\n\n Return ``True`` if *d* has a key *key*, else ``False``.\n\n New in version 2.2.\n\n key not in d\n\n Equivalent to ``not key in d``.\n\n New in version 2.2.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for ``iterkeys()``.\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n ``fromkeys()`` is a class method that returns a new dictionary.\n *value* defaults to ``None``.\n\n New in version 2.3.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to ``None``,\n so that this method never raises a ``KeyError``.\n\n has_key(key)\n\n Test for the presence of *key* in the dictionary. ``has_key()``\n is deprecated in favor of ``key in d``.\n\n items()\n\n Return a copy of the dictionary\'s list of ``(key, value)``\n pairs.\n\n **CPython implementation detail:** Keys and values are listed in\n an arbitrary order which is non-random, varies across Python\n implementations, and depends on the dictionary\'s history of\n insertions and deletions.\n\n If ``items()``, ``keys()``, ``values()``, ``iteritems()``,\n ``iterkeys()``, and ``itervalues()`` are called with no\n intervening modifications to the dictionary, the lists will\n directly correspond. This allows the creation of ``(value,\n key)`` pairs using ``zip()``: ``pairs = zip(d.values(),\n d.keys())``. The same relationship holds for the ``iterkeys()``\n and ``itervalues()`` methods: ``pairs = zip(d.itervalues(),\n d.iterkeys())`` provides the same value for ``pairs``. Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.iteritems()]``.\n\n iteritems()\n\n Return an iterator over the dictionary\'s ``(key, value)`` pairs.\n See the note for ``dict.items()``.\n\n Using ``iteritems()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n iterkeys()\n\n Return an iterator over the dictionary\'s keys. See the note for\n ``dict.items()``.\n\n Using ``iterkeys()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n itervalues()\n\n Return an iterator over the dictionary\'s values. See the note\n for ``dict.items()``.\n\n Using ``itervalues()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n keys()\n\n Return a copy of the dictionary\'s list of keys. See the note\n for ``dict.items()``.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a ``KeyError`` is raised.\n\n New in version 2.3.\n\n popitem()\n\n Remove and return an arbitrary ``(key, value)`` pair from the\n dictionary.\n\n ``popitem()`` is useful to destructively iterate over a\n dictionary, as often used in set algorithms. 
If the dictionary\n is empty, calling ``popitem()`` raises a ``KeyError``.\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to ``None``.\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return ``None``.\n\n ``update()`` accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: ``d.update(red=1,\n blue=2)``.\n\n Changed in version 2.4: Allowed the argument to be an iterable\n of key/value pairs and allowed keyword arguments.\n\n values()\n\n Return a copy of the dictionary\'s list of values. See the note\n for ``dict.items()``.\n\n viewitems()\n\n Return a new view of the dictionary\'s items (``(key, value)``\n pairs). See below for documentation of view objects.\n\n New in version 2.7.\n\n viewkeys()\n\n Return a new view of the dictionary\'s keys. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n viewvalues()\n\n Return a new view of the dictionary\'s values. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by ``dict.viewkeys()``, ``dict.viewvalues()`` and\n``dict.viewitems()`` are *view objects*. They provide a dynamic view\non the dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of ``(key, value)``) in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of ``(value, key)`` pairs\n using ``zip()``: ``pairs = zip(d.values(), d.keys())``. Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.items()]``.\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a ``RuntimeError`` or fail to iterate over all entries.\n\nx in dictview\n\n Return ``True`` if *x* is in the underlying dictionary\'s keys,\n values or items (in the latter case, *x* should be a ``(key,\n value)`` tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that (key, value) pairs are unique and\nhashable, then the items view is also set-like. (Values views are not\ntreated as set-like since the entries are generally not unique.) 
Then\nthese set operations are available ("other" refers either to another\nview or a set):\n\ndictview & other\n\n Return the intersection of the dictview and the other object as a\n new set.\n\ndictview | other\n\n Return the union of the dictview and the other object as a new set.\n\ndictview - other\n\n Return the difference between the dictview and the other object\n (all elements in *dictview* that aren\'t in *other*) as a new set.\n\ndictview ^ other\n\n Return the symmetric difference (all elements either in *dictview*\n or *other*, but not in both) of the dictview and the other object\n as a new set.\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.viewkeys()\n >>> values = dishes.viewvalues()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n',
- 'typesmethods': "\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as ``append()`` on\nlists) and class instance methods. Built-in methods are described\nwith the types that support them.\n\nThe implementation adds two special read-only attributes to class\ninstance methods: ``m.im_self`` is the object on which the method\noperates, and ``m.im_func`` is the function implementing the method.\nCalling ``m(arg-1, arg-2, ..., arg-n)`` is completely equivalent to\ncalling ``m.im_func(m.im_self, arg-1, arg-2, ..., arg-n)``.\n\nClass instance methods are either *bound* or *unbound*, referring to\nwhether the method was accessed through an instance or a class,\nrespectively. When a method is unbound, its ``im_self`` attribute\nwill be ``None`` and if called, an explicit ``self`` object must be\npassed as the first argument. In this case, ``self`` must be an\ninstance of the unbound method's class (or a subclass of that class),\notherwise a ``TypeError`` is raised.\n\nLike function objects, methods objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object (``meth.im_func``), setting method\nattributes on either bound or unbound methods is disallowed.\nAttempting to set a method attribute results in a ``TypeError`` being\nraised. In order to set a method attribute, you need to explicitly\nset it on the underlying function object:\n\n class C:\n def method(self):\n pass\n\n c = C()\n c.method.im_func.whoami = 'my name is c'\n\nSee *The standard type hierarchy* for more information.\n",
+ 'typesmapping': '\nMapping Types --- ``dict``\n**************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built\nin ``list``, ``set``, and ``tuple`` classes, and the ``collections``\nmodule.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as ``1`` and ``1.0``) then they can be used interchangeably to\nindex the same dictionary entry. (Note however, that since computers\nstore floating-point numbers as approximations it is usually unwise to\nuse them as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of\n``key: value`` pairs within braces, for example: ``{\'jack\': 4098,\n\'sjoerd\': 4127}`` or ``{4098: \'jack\', 4127: \'sjoerd\'}``, or by the\n``dict`` constructor.\n\nclass class dict(**kwarg)\nclass class dict(mapping, **kwarg)\nclass class dict(iterable, **kwarg)\n\n Return a new dictionary initialized from an optional positional\n argument and a possibly empty set of keyword arguments.\n\n If no positional argument is given, an empty dictionary is created.\n If a positional argument is given and it is a mapping object, a\n dictionary is created with the same key-value pairs as the mapping\n object. Otherwise, the positional argument must be an *iterable*\n object. Each item in the iterable must itself be an iterable with\n exactly two objects. The first object of each item becomes a key\n in the new dictionary, and the second object the corresponding\n value. If a key occurs more than once, the last value for that key\n becomes the corresponding value in the new dictionary.\n\n If keyword arguments are given, the keyword arguments and their\n values are added to the dictionary created from the positional\n argument. If a key being added is already present, the value from\n the keyword argument replaces the value from the positional\n argument.\n\n To illustrate, the following examples all return a dictionary equal\n to ``{"one": 1, "two": 2, "three": 3}``:\n\n >>> a = dict(one=1, two=2, three=3)\n >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n >>> a == b == c == d == e\n True\n\n Providing keyword arguments as in the first example only works for\n keys that are valid Python identifiers. Otherwise, any valid keys\n can be used.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for building a dictionary from\n keyword arguments added.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a ``KeyError`` if\n *key* is not in the map.\n\n New in version 2.5: If a subclass of dict defines a method\n ``__missing__()``, if the key *key* is not present, the\n ``d[key]`` operation calls that method with the key *key* as\n argument. 
The ``d[key]`` operation then returns or raises\n whatever is returned or raised by the ``__missing__(key)`` call\n if the key is not present. No other operations or methods invoke\n ``__missing__()``. If ``__missing__()`` is not defined,\n ``KeyError`` is raised. ``__missing__()`` must be a method; it\n cannot be an instance variable. For an example, see\n ``collections.defaultdict``.\n\n d[key] = value\n\n Set ``d[key]`` to *value*.\n\n del d[key]\n\n Remove ``d[key]`` from *d*. Raises a ``KeyError`` if *key* is\n not in the map.\n\n key in d\n\n Return ``True`` if *d* has a key *key*, else ``False``.\n\n New in version 2.2.\n\n key not in d\n\n Equivalent to ``not key in d``.\n\n New in version 2.2.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for ``iterkeys()``.\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n ``fromkeys()`` is a class method that returns a new dictionary.\n *value* defaults to ``None``.\n\n New in version 2.3.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to ``None``,\n so that this method never raises a ``KeyError``.\n\n has_key(key)\n\n Test for the presence of *key* in the dictionary. ``has_key()``\n is deprecated in favor of ``key in d``.\n\n items()\n\n Return a copy of the dictionary\'s list of ``(key, value)``\n pairs.\n\n **CPython implementation detail:** Keys and values are listed in\n an arbitrary order which is non-random, varies across Python\n implementations, and depends on the dictionary\'s history of\n insertions and deletions.\n\n If ``items()``, ``keys()``, ``values()``, ``iteritems()``,\n ``iterkeys()``, and ``itervalues()`` are called with no\n intervening modifications to the dictionary, the lists will\n directly correspond. This allows the creation of ``(value,\n key)`` pairs using ``zip()``: ``pairs = zip(d.values(),\n d.keys())``. The same relationship holds for the ``iterkeys()``\n and ``itervalues()`` methods: ``pairs = zip(d.itervalues(),\n d.iterkeys())`` provides the same value for ``pairs``. Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.iteritems()]``.\n\n iteritems()\n\n Return an iterator over the dictionary\'s ``(key, value)`` pairs.\n See the note for ``dict.items()``.\n\n Using ``iteritems()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n iterkeys()\n\n Return an iterator over the dictionary\'s keys. See the note for\n ``dict.items()``.\n\n Using ``iterkeys()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n itervalues()\n\n Return an iterator over the dictionary\'s values. See the note\n for ``dict.items()``.\n\n Using ``itervalues()`` while adding or deleting entries in the\n dictionary may raise a ``RuntimeError`` or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n keys()\n\n Return a copy of the dictionary\'s list of keys. See the note\n for ``dict.items()``.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. 
If *default* is not given and *key* is\n not in the dictionary, a ``KeyError`` is raised.\n\n New in version 2.3.\n\n popitem()\n\n Remove and return an arbitrary ``(key, value)`` pair from the\n dictionary.\n\n ``popitem()`` is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling ``popitem()`` raises a ``KeyError``.\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to ``None``.\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return ``None``.\n\n ``update()`` accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: ``d.update(red=1,\n blue=2)``.\n\n Changed in version 2.4: Allowed the argument to be an iterable\n of key/value pairs and allowed keyword arguments.\n\n values()\n\n Return a copy of the dictionary\'s list of values. See the note\n for ``dict.items()``.\n\n viewitems()\n\n Return a new view of the dictionary\'s items (``(key, value)``\n pairs). See below for documentation of view objects.\n\n New in version 2.7.\n\n viewkeys()\n\n Return a new view of the dictionary\'s keys. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n viewvalues()\n\n Return a new view of the dictionary\'s values. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by ``dict.viewkeys()``, ``dict.viewvalues()`` and\n``dict.viewitems()`` are *view objects*. They provide a dynamic view\non the dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of ``(key, value)``) in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of ``(value, key)`` pairs\n using ``zip()``: ``pairs = zip(d.values(), d.keys())``. Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.items()]``.\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a ``RuntimeError`` or fail to iterate over all entries.\n\nx in dictview\n\n Return ``True`` if *x* is in the underlying dictionary\'s keys,\n values or items (in the latter case, *x* should be a ``(key,\n value)`` tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that (key, value) pairs are unique and\nhashable, then the items view is also set-like. (Values views are not\ntreated as set-like since the entries are generally not unique.) 
Then\nthese set operations are available ("other" refers either to another\nview or a set):\n\ndictview & other\n\n Return the intersection of the dictview and the other object as a\n new set.\n\ndictview | other\n\n Return the union of the dictview and the other object as a new set.\n\ndictview - other\n\n Return the difference between the dictview and the other object\n (all elements in *dictview* that aren\'t in *other*) as a new set.\n\ndictview ^ other\n\n Return the symmetric difference (all elements either in *dictview*\n or *other*, but not in both) of the dictview and the other object\n as a new set.\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.viewkeys()\n >>> values = dishes.viewvalues()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n',
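Since the ``d[key]`` entry above defers to ``collections.defaultdict`` for an example of ``__missing__()``, here is a minimal hand-rolled sketch; the class name ``Tally`` and the key are purely illustrative:

    class Tally(dict):
        def __missing__(self, key):
            return 0               # lookups of absent keys yield 0 instead of KeyError

    t = Tally()
    t['spam'] += 1                 # the read falls back to __missing__, then assigns 1
    print t                        # {'spam': 1}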
+ 'typesmethods': '\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as ``append()`` on\nlists) and class instance methods. Built-in methods are described\nwith the types that support them.\n\nThe implementation adds two special read-only attributes to class\ninstance methods: ``m.im_self`` is the object on which the method\noperates, and ``m.im_func`` is the function implementing the method.\nCalling ``m(arg-1, arg-2, ..., arg-n)`` is completely equivalent to\ncalling ``m.im_func(m.im_self, arg-1, arg-2, ..., arg-n)``.\n\nClass instance methods are either *bound* or *unbound*, referring to\nwhether the method was accessed through an instance or a class,\nrespectively. When a method is unbound, its ``im_self`` attribute\nwill be ``None`` and if called, an explicit ``self`` object must be\npassed as the first argument. In this case, ``self`` must be an\ninstance of the unbound method\'s class (or a subclass of that class),\notherwise a ``TypeError`` is raised.\n\nLike function objects, methods objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object (``meth.im_func``), setting method\nattributes on either bound or unbound methods is disallowed.\nAttempting to set an attribute on a method results in an\n``AttributeError`` being raised. In order to set a method attribute,\nyou need to explicitly set it on the underlying function object:\n\n >>> class C:\n ... def method(self):\n ... pass\n ...\n >>> c = C()\n >>> c.method.whoami = \'my name is method\' # can\'t set on the method\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n AttributeError: \'instancemethod\' object has no attribute \'whoami\'\n >>> c.method.im_func.whoami = \'my name is method\'\n >>> c.method.whoami\n \'my name is method\'\n\nSee *The standard type hierarchy* for more information.\n',
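A short sketch of the bound/unbound distinction and the ``im_self``/``im_func`` attributes described above, assuming ordinary CPython 2.7 behaviour:

    class C(object):
        def method(self):
            return 'hello'

    c = C()
    print c.method.im_self is c                   # True: bound method, self is fixed
    print C.method.im_self is None                # True: unbound method
    print C.method(c)                             # 'hello' -- explicit self required
    print c.method.im_func is C.method.im_func    # True: same underlying function object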
'typesmodules': "\nModules\n*******\n\nThe only special operation on a module is attribute access:\n``m.name``, where *m* is a module and *name* accesses a name defined\nin *m*'s symbol table. Module attributes can be assigned to. (Note\nthat the ``import`` statement is not, strictly speaking, an operation\non a module object; ``import foo`` does not require a module object\nnamed *foo* to exist, rather it requires an (external) *definition*\nfor a module named *foo* somewhere.)\n\nA special attribute of every module is ``__dict__``. This is the\ndictionary containing the module's symbol table. Modifying this\ndictionary will actually change the module's symbol table, but direct\nassignment to the ``__dict__`` attribute is not possible (you can\nwrite ``m.__dict__['a'] = 1``, which defines ``m.a`` to be ``1``, but\nyou can't write ``m.__dict__ = {}``). Modifying ``__dict__`` directly\nis not recommended.\n\nModules built into the interpreter are written like this: ``<module\n'sys' (built-in)>``. If loaded from a file, they are written as\n``<module 'os' from '/usr/local/lib/pythonX.Y/os.pyc'>``.\n",
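A minimal sketch of module attribute access and the ``__dict__`` behaviour described above; the module name ``'example'`` and the attributes ``a`` and ``b`` are illustrative only:

    import imp
    m = imp.new_module('example')  # a fresh, empty module object
    m.a = 1                        # attribute assignment updates the symbol table
    print m.__dict__['a']          # 1
    m.__dict__['b'] = 2            # modifying __dict__ works, though it is discouraged
    print m.b                      # 2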
- 'typesseq': '\nSequence Types --- ``str``, ``unicode``, ``list``, ``tuple``, ``bytearray``, ``buffer``, ``xrange``\n***************************************************************************************************\n\nThere are seven sequence types: strings, Unicode strings, lists,\ntuples, bytearrays, buffers, and xrange objects.\n\nFor other containers see the built in ``dict`` and ``set`` classes,\nand the ``collections`` module.\n\nString literals are written in single or double quotes: ``\'xyzzy\'``,\n``"frobozz"``. See *String literals* for more about string literals.\nUnicode strings are much like strings, but are specified in the syntax\nusing a preceding ``\'u\'`` character: ``u\'abc\'``, ``u"def"``. In\naddition to the functionality described here, there are also string-\nspecific methods described in the *String Methods* section. Lists are\nconstructed with square brackets, separating items with commas: ``[a,\nb, c]``. Tuples are constructed by the comma operator (not within\nsquare brackets), with or without enclosing parentheses, but an empty\ntuple must have the enclosing parentheses, such as ``a, b, c`` or\n``()``. A single item tuple must have a trailing comma, such as\n``(d,)``.\n\nBytearray objects are created with the built-in function\n``bytearray()``.\n\nBuffer objects are not directly supported by Python syntax, but can be\ncreated by calling the built-in function ``buffer()``. They don\'t\nsupport concatenation or repetition.\n\nObjects of type xrange are similar to buffers in that there is no\nspecific syntax to create them, but they are created using the\n``xrange()`` function. They don\'t support slicing, concatenation or\nrepetition, and using ``in``, ``not in``, ``min()`` or ``max()`` on\nthem is inefficient.\n\nMost sequence types support the following operations. The ``in`` and\n``not in`` operations have the same priorities as the comparison\noperations. The ``+`` and ``*`` operations have the same priority as\nthe corresponding numeric operations. [3] Additional methods are\nprovided for *Mutable Sequence Types*.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). 
In the table,\n*s* and *t* are sequences of the same type; *n*, *i* and *j* are\nintegers:\n\n+--------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+====================+==================================+============+\n| ``x in s`` | ``True`` if an item of *s* is | (1) |\n| | equal to *x*, else ``False`` | |\n+--------------------+----------------------------------+------------+\n| ``x not in s`` | ``False`` if an item of *s* is | (1) |\n| | equal to *x*, else ``True`` | |\n+--------------------+----------------------------------+------------+\n| ``s + t`` | the concatenation of *s* and *t* | (6) |\n+--------------------+----------------------------------+------------+\n| ``s * n, n * s`` | *n* shallow copies of *s* | (2) |\n| | concatenated | |\n+--------------------+----------------------------------+------------+\n| ``s[i]`` | *i*th item of *s*, origin 0 | (3) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j]`` | slice of *s* from *i* to *j* | (3)(4) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j:k]`` | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+--------------------+----------------------------------+------------+\n| ``len(s)`` | length of *s* | |\n+--------------------+----------------------------------+------------+\n| ``min(s)`` | smallest item of *s* | |\n+--------------------+----------------------------------+------------+\n| ``max(s)`` | largest item of *s* | |\n+--------------------+----------------------------------+------------+\n| ``s.index(i)`` | index of the first occurence of | |\n| | *i* in *s* | |\n+--------------------+----------------------------------+------------+\n| ``s.count(i)`` | total number of occurences of | |\n| | *i* in *s* | |\n+--------------------+----------------------------------+------------+\n\nSequence types also support comparisons. In particular, tuples and\nlists are compared lexicographically by comparing corresponding\nelements. This means that to compare equal, every element must compare\nequal and the two sequences must be of the same type and have the same\nlength. (For full details see *Comparisons* in the language\nreference.)\n\nNotes:\n\n1. When *s* is a string or Unicode string object the ``in`` and ``not\n in`` operations act like a substring test. In Python versions\n before 2.3, *x* had to be a string of length 1. In Python 2.3 and\n beyond, *x* may be a string of any length.\n\n2. Values of *n* less than ``0`` are treated as ``0`` (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that ``[[]]`` is a one-element list containing\n an empty list, so all three elements of ``[[]] * 3`` are (pointers\n to) this single empty list. Modifying any of the elements of\n ``lists`` modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of the\n string: ``len(s) + i`` or ``len(s) + j`` is substituted. But note\n that ``-0`` is still ``0``.\n\n4. 
The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that ``i <= k < j``. If *i* or *j* is\n greater than ``len(s)``, use ``len(s)``. If *i* is omitted or\n ``None``, use ``0``. If *j* is omitted or ``None``, use\n ``len(s)``. If *i* is greater than or equal to *j*, the slice is\n empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index ``x = i + n*k`` such that ``0 <= n <\n (j-i)/k``. In other words, the indices are ``i``, ``i+k``,\n ``i+2*k``, ``i+3*k`` and so on, stopping when *j* is reached (but\n never including *j*). If *i* or *j* is greater than ``len(s)``,\n use ``len(s)``. If *i* or *j* are omitted or ``None``, they become\n "end" values (which end depends on the sign of *k*). Note, *k*\n cannot be zero. If *k* is ``None``, it is treated like ``1``.\n\n6. **CPython implementation detail:** If *s* and *t* are both strings,\n some Python implementations such as CPython can usually perform an\n in-place optimization for assignments of the form ``s = s + t`` or\n ``s += t``. When applicable, this optimization makes quadratic\n run-time much less likely. This optimization is both version and\n implementation dependent. For performance sensitive code, it is\n preferable to use the ``str.join()`` method which assures\n consistent linear concatenation performance across versions and\n implementations.\n\n Changed in version 2.4: Formerly, string concatenation never\n occurred in-place.\n\n\nString Methods\n==============\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. Some of them are also available on\n``bytearray`` objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange* section. To output formatted strings use\ntemplate strings or the ``%`` operator described in the *String\nFormatting Operations* section. Also, see the ``re`` module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. The default is\n ``\'strict\'``, meaning that encoding errors raise ``UnicodeError``.\n Other possible values are ``\'ignore\'``, ``\'replace\'`` and any other\n name registered via ``codecs.register_error()``, see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. 
The default for *errors* is\n ``\'strict\'``, meaning that encoding errors raise a\n ``UnicodeError``. Other possible values are ``\'ignore\'``,\n ``\'replace\'``, ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and\n any other name registered via ``codecs.register_error()``, see\n section *Codec Base Classes*. For a list of possible encodings, see\n section *Standard Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for ``\'xmlcharrefreplace\'`` and\n ``\'backslashreplace\'`` and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\n Note: The ``find()`` method should be used only if you need to know the\n position of *sub*. To check if *sub* is a substring or not, use\n the ``in`` operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. 
Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3.0,\n and should be preferred to the ``%`` formatting described in\n *String Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. 
If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified, then there is no limit\n on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. 
Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),\n s)\n\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the ``maketrans()`` helper function in the ``string``\n module to create a translation table. For string objects, set the\n *table* argument to ``None`` for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a ``None`` *table* argument.\n\n For Unicode objects, the ``translate()`` method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or ``None``. Unmapped characters\n are left untouched. 
Characters mapped to ``None`` are deleted.\n Note, a more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see ``encodings.cp1251``\n for an example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that ``str.upper().isupper()`` might\n be ``False`` if ``s`` contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to ``len(s)``.\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return ``True`` if there are only numeric characters in S,\n ``False`` otherwise. Numeric characters include digit characters,\n and all characters that have the Unicode numeric value property,\n e.g. U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return ``True`` if there are only decimal characters in S,\n ``False`` otherwise. Decimal characters include digit characters,\n and all characters that can be used to form decimal-radix numbers,\n e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\n\nString Formatting Operations\n============================\n\nString and Unicode objects have one unique built-in operation: the\n``%`` operator (modulo). This is also known as the string\n*formatting* or *interpolation* operator. Given ``format % values``\n(where *format* is a string or Unicode object), ``%`` conversion\nspecifications in *format* are replaced with zero or more elements of\n*values*. The effect is similar to the using ``sprintf()`` in the C\nlanguage. If *format* is a Unicode object, or if any of the objects\nbeing converted using the ``%s`` conversion are Unicode objects, the\nresult will also be a Unicode object.\n\nIf *format* requires a single argument, *values* may be a single non-\ntuple object. [5] Otherwise, *values* must be a tuple with exactly\nthe number of items specified by the format string, or a single\nmapping object (for example, a dictionary).\n\nA conversion specifier contains two or more characters and has the\nfollowing components, which must occur in this order:\n\n1. The ``\'%\'`` character, which marks the start of the specifier.\n\n2. Mapping key (optional), consisting of a parenthesised sequence of\n characters (for example, ``(somename)``).\n\n3. Conversion flags (optional), which affect the result of some\n conversion types.\n\n4. Minimum field width (optional). If specified as an ``\'*\'``\n (asterisk), the actual width is read from the next element of the\n tuple in *values*, and the object to convert comes after the\n minimum field width and optional precision.\n\n5. Precision (optional), given as a ``\'.\'`` (dot) followed by the\n precision. If specified as ``\'*\'`` (an asterisk), the actual width\n is read from the next element of the tuple in *values*, and the\n value to convert comes after the precision.\n\n6. Length modifier (optional).\n\n7. 
Conversion type.\n\nWhen the right argument is a dictionary (or other mapping type), then\nthe formats in the string *must* include a parenthesised mapping key\ninto that dictionary inserted immediately after the ``\'%\'`` character.\nThe mapping key selects the value to be formatted from the mapping.\nFor example:\n\n>>> print \'%(language)s has %(number)03d quote types.\' % \\\n... {"language": "Python", "number": 2}\nPython has 002 quote types.\n\nIn this case no ``*`` specifiers may occur in a format (since they\nrequire a sequential parameter list).\n\nThe conversion flag characters are:\n\n+-----------+-----------------------------------------------------------------------+\n| Flag | Meaning |\n+===========+=======================================================================+\n| ``\'#\'`` | The value conversion will use the "alternate form" (where defined |\n| | below). |\n+-----------+-----------------------------------------------------------------------+\n| ``\'0\'`` | The conversion will be zero padded for numeric values. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'-\'`` | The converted value is left adjusted (overrides the ``\'0\'`` |\n| | conversion if both are given). |\n+-----------+-----------------------------------------------------------------------+\n| ``\' \'`` | (a space) A blank should be left before a positive number (or empty |\n| | string) produced by a signed conversion. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'+\'`` | A sign character (``\'+\'`` or ``\'-\'``) will precede the conversion |\n| | (overrides a "space" flag). |\n+-----------+-----------------------------------------------------------------------+\n\nA length modifier (``h``, ``l``, or ``L``) may be present, but is\nignored as it is not necessary for Python -- so e.g. ``%ld`` is\nidentical to ``%d``.\n\nThe conversion types are:\n\n+--------------+-------------------------------------------------------+---------+\n| Conversion | Meaning | Notes |\n+==============+=======================================================+=========+\n| ``\'d\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'i\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'o\'`` | Signed octal value. | (1) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'u\'`` | Obsolete type -- it is identical to ``\'d\'``. | (7) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'x\'`` | Signed hexadecimal (lowercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'X\'`` | Signed hexadecimal (uppercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'e\'`` | Floating point exponential format (lowercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'E\'`` | Floating point exponential format (uppercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'f\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'F\'`` | Floating point decimal format. 
| (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'g\'`` | Floating point format. Uses lowercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'G\'`` | Floating point format. Uses uppercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'c\'`` | Single character (accepts integer or single character | |\n| | string). | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'r\'`` | String (converts any Python object using ``repr()``). | (5) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'s\'`` | String (converts any Python object using ``str()``). | (6) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'%\'`` | No argument is converted, results in a ``\'%\'`` | |\n| | character in the result. | |\n+--------------+-------------------------------------------------------+---------+\n\nNotes:\n\n1. The alternate form causes a leading zero (``\'0\'``) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n2. The alternate form causes a leading ``\'0x\'`` or ``\'0X\'`` (depending\n on whether the ``\'x\'`` or ``\'X\'`` format was used) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n3. The alternate form causes the result to always contain a decimal\n point, even if no digits follow it.\n\n The precision determines the number of digits after the decimal\n point and defaults to 6.\n\n4. The alternate form causes the result to always contain a decimal\n point, and trailing zeroes are not removed as they would otherwise\n be.\n\n The precision determines the number of significant digits before\n and after the decimal point and defaults to 6.\n\n5. The ``%r`` conversion was added in Python 2.0.\n\n The precision determines the maximal number of characters used.\n\n6. If the object or format provided is a ``unicode`` string, the\n resulting string will also be ``unicode``.\n\n The precision determines the maximal number of characters used.\n\n7. See **PEP 237**.\n\nSince Python strings have an explicit length, ``%s`` conversions do\nnot assume that ``\'\\0\'`` is the end of the string.\n\nChanged in version 2.7: ``%f`` conversions for numbers whose absolute\nvalue is over 1e50 are no longer replaced by ``%g`` conversions.\n\nAdditional string operations are defined in standard modules\n``string`` and ``re``.\n\n\nXRange Type\n===========\n\nThe ``xrange`` type is an immutable sequence which is commonly used\nfor looping. The advantage of the ``xrange`` type is that an\n``xrange`` object will always take the same amount of memory, no\nmatter the size of the range it represents. There are no consistent\nperformance advantages.\n\nXRange objects have very little behavior: they only support indexing,\niteration, and the ``len()`` function.\n\n\nMutable Sequence Types\n======================\n\nList and ``bytearray`` objects support additional operations that\nallow in-place modification of the object. 
Other mutable sequence\ntypes (when added to the language) should also support these\noperations. Strings and tuples are immutable sequence types: such\nobjects cannot be modified once created. The following operations are\ndefined on mutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | (2) |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*\'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (4) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (6) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])`` | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted multiple\n parameters and implicitly joined them into a tuple; this no longer\n works in Python 2.0. Use of this misfeature has been deprecated\n since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises ``ValueError`` when *x* is not found in *s*. When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the list length is added, as for slice indices. 
If it is\n still negative, it is truncated to zero, as for slice indices.\n\n Changed in version 2.3: Previously, ``index()`` didn\'t have\n arguments for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n ``insert()`` method, the list length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The ``pop()`` method is only supported by the list and array types.\n The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n7. The ``sort()`` and ``reverse()`` methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don\'t return the\n sorted or reversed list.\n\n8. The ``sort()`` method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: ``cmp=lambda x,y:\n cmp(x.lower(), y.lower())``. The default value is ``None``.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. Use\n ``functools.cmp_to_key()`` to convert an old-style *cmp* function\n to a *key* function.\n\n Changed in version 2.3: Support for ``None`` as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the ``sort()`` method is guaranteed to be\n stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python 2.3 and newer makes the\n list appear empty for the duration, and raises ``ValueError`` if\n it can detect that the list has been mutated during a sort.\n',
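As a hedged illustration of the ``str.translate()``/``string.maketrans()`` behaviour described in the topic text above (the sample string and mapping are arbitrary, not drawn from the upstream docs), a Python 2.7 session would look roughly like this:

   >>> import string
   >>> # maketrans() builds the 256-character table that translate() expects
   >>> table = string.maketrans('aeiou', '*****')
   >>> 'read this short text'.translate(table)
   'r**d th*s sh*rt t*xt'
   >>> # the optional deletechars argument removes characters before mapping
   >>> 'read this short text'.translate(table, 'rst')
   '**d h* h* *x'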
- 'typesseq-mutable': "\nMutable Sequence Types\n**********************\n\nList and ``bytearray`` objects support additional operations that\nallow in-place modification of the object. Other mutable sequence\ntypes (when added to the language) should also support these\noperations. Strings and tuples are immutable sequence types: such\nobjects cannot be modified once created. The following operations are\ndefined on mutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | (2) |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (4) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (6) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])`` | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted multiple\n parameters and implicitly joined them into a tuple; this no longer\n works in Python 2.0. Use of this misfeature has been deprecated\n since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises ``ValueError`` when *x* is not found in *s*. 
When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the list length is added, as for slice indices. If it is\n still negative, it is truncated to zero, as for slice indices.\n\n Changed in version 2.3: Previously, ``index()`` didn't have\n arguments for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n ``insert()`` method, the list length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The ``pop()`` method is only supported by the list and array types.\n The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n7. The ``sort()`` and ``reverse()`` methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don't return the\n sorted or reversed list.\n\n8. The ``sort()`` method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: ``cmp=lambda x,y:\n cmp(x.lower(), y.lower())``. The default value is ``None``.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. Use\n ``functools.cmp_to_key()`` to convert an old-style *cmp* function\n to a *key* function.\n\n Changed in version 2.3: Support for ``None`` as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the ``sort()`` method is guaranteed to be\n stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python 2.3 and newer makes the\n list appear empty for the duration, and raises ``ValueError`` if\n it can detect that the list has been mutated during a sort.\n",
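The *key*/*reverse* arguments to ``sort()`` covered in note 8 above can be seen in a short interactive sketch (the list contents are invented for illustration; the output is what CPython 2.7 prints):

   >>> words = ['banana', 'Apple', 'cherry']
   >>> words.sort()                             # default sort: 'A' < 'b' in ASCII
   >>> words
   ['Apple', 'banana', 'cherry']
   >>> words.sort(key=str.lower, reverse=True)  # case-insensitive, descending
   >>> words
   ['cherry', 'banana', 'Apple']
   >>> print words.sort()                       # sorts in place and returns None
   None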
+ 'typesseq': '\nSequence Types --- ``str``, ``unicode``, ``list``, ``tuple``, ``bytearray``, ``buffer``, ``xrange``\n***************************************************************************************************\n\nThere are seven sequence types: strings, Unicode strings, lists,\ntuples, bytearrays, buffers, and xrange objects.\n\nFor other containers see the built in ``dict`` and ``set`` classes,\nand the ``collections`` module.\n\nString literals are written in single or double quotes: ``\'xyzzy\'``,\n``"frobozz"``. See *String literals* for more about string literals.\nUnicode strings are much like strings, but are specified in the syntax\nusing a preceding ``\'u\'`` character: ``u\'abc\'``, ``u"def"``. In\naddition to the functionality described here, there are also string-\nspecific methods described in the *String Methods* section. Lists are\nconstructed with square brackets, separating items with commas: ``[a,\nb, c]``. Tuples are constructed by the comma operator (not within\nsquare brackets), with or without enclosing parentheses, but an empty\ntuple must have the enclosing parentheses, such as ``a, b, c`` or\n``()``. A single item tuple must have a trailing comma, such as\n``(d,)``.\n\nBytearray objects are created with the built-in function\n``bytearray()``.\n\nBuffer objects are not directly supported by Python syntax, but can be\ncreated by calling the built-in function ``buffer()``. They don\'t\nsupport concatenation or repetition.\n\nObjects of type xrange are similar to buffers in that there is no\nspecific syntax to create them, but they are created using the\n``xrange()`` function. They don\'t support slicing, concatenation or\nrepetition, and using ``in``, ``not in``, ``min()`` or ``max()`` on\nthem is inefficient.\n\nMost sequence types support the following operations. The ``in`` and\n``not in`` operations have the same priorities as the comparison\noperations. The ``+`` and ``*`` operations have the same priority as\nthe corresponding numeric operations. [3] Additional methods are\nprovided for *Mutable Sequence Types*.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). 
In the table,\n*s* and *t* are sequences of the same type; *n*, *i* and *j* are\nintegers:\n\n+--------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+====================+==================================+============+\n| ``x in s`` | ``True`` if an item of *s* is | (1) |\n| | equal to *x*, else ``False`` | |\n+--------------------+----------------------------------+------------+\n| ``x not in s`` | ``False`` if an item of *s* is | (1) |\n| | equal to *x*, else ``True`` | |\n+--------------------+----------------------------------+------------+\n| ``s + t`` | the concatenation of *s* and *t* | (6) |\n+--------------------+----------------------------------+------------+\n| ``s * n, n * s`` | *n* shallow copies of *s* | (2) |\n| | concatenated | |\n+--------------------+----------------------------------+------------+\n| ``s[i]`` | *i*th item of *s*, origin 0 | (3) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j]`` | slice of *s* from *i* to *j* | (3)(4) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j:k]`` | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+--------------------+----------------------------------+------------+\n| ``len(s)`` | length of *s* | |\n+--------------------+----------------------------------+------------+\n| ``min(s)`` | smallest item of *s* | |\n+--------------------+----------------------------------+------------+\n| ``max(s)`` | largest item of *s* | |\n+--------------------+----------------------------------+------------+\n| ``s.index(x)`` | index of the first occurrence of | |\n| | *x* in *s* | |\n+--------------------+----------------------------------+------------+\n| ``s.count(x)`` | total number of occurrences of | |\n| | *x* in *s* | |\n+--------------------+----------------------------------+------------+\n\nSequence types also support comparisons. In particular, tuples and\nlists are compared lexicographically by comparing corresponding\nelements. This means that to compare equal, every element must compare\nequal and the two sequences must be of the same type and have the same\nlength. (For full details see *Comparisons* in the language\nreference.)\n\nNotes:\n\n1. When *s* is a string or Unicode string object the ``in`` and ``not\n in`` operations act like a substring test. In Python versions\n before 2.3, *x* had to be a string of length 1. In Python 2.3 and\n beyond, *x* may be a string of any length.\n\n2. Values of *n* less than ``0`` are treated as ``0`` (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that ``[[]]`` is a one-element list containing\n an empty list, so all three elements of ``[[]] * 3`` are (pointers\n to) this single empty list. Modifying any of the elements of\n ``lists`` modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of the\n string: ``len(s) + i`` or ``len(s) + j`` is substituted. But note\n that ``-0`` is still ``0``.\n\n4. 
The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that ``i <= k < j``. If *i* or *j* is\n greater than ``len(s)``, use ``len(s)``. If *i* is omitted or\n ``None``, use ``0``. If *j* is omitted or ``None``, use\n ``len(s)``. If *i* is greater than or equal to *j*, the slice is\n empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index ``x = i + n*k`` such that ``0 <= n <\n (j-i)/k``. In other words, the indices are ``i``, ``i+k``,\n ``i+2*k``, ``i+3*k`` and so on, stopping when *j* is reached (but\n never including *j*). If *i* or *j* is greater than ``len(s)``,\n use ``len(s)``. If *i* or *j* are omitted or ``None``, they become\n "end" values (which end depends on the sign of *k*). Note, *k*\n cannot be zero. If *k* is ``None``, it is treated like ``1``.\n\n6. **CPython implementation detail:** If *s* and *t* are both strings,\n some Python implementations such as CPython can usually perform an\n in-place optimization for assignments of the form ``s = s + t`` or\n ``s += t``. When applicable, this optimization makes quadratic\n run-time much less likely. This optimization is both version and\n implementation dependent. For performance sensitive code, it is\n preferable to use the ``str.join()`` method which assures\n consistent linear concatenation performance across versions and\n implementations.\n\n Changed in version 2.4: Formerly, string concatenation never\n occurred in-place.\n\n\nString Methods\n==============\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. Some of them are also available on\n``bytearray`` objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange* section. To output formatted strings use\ntemplate strings or the ``%`` operator described in the *String\nFormatting Operations* section. Also, see the ``re`` module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. The default is\n ``\'strict\'``, meaning that encoding errors raise ``UnicodeError``.\n Other possible values are ``\'ignore\'``, ``\'replace\'`` and any other\n name registered via ``codecs.register_error()``, see section *Codec\n Base Classes*.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. 
The default for *errors* is\n ``\'strict\'``, meaning that encoding errors raise a\n ``UnicodeError``. Other possible values are ``\'ignore\'``,\n ``\'replace\'``, ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and\n any other name registered via ``codecs.register_error()``, see\n section *Codec Base Classes*. For a list of possible encodings, see\n section *Standard Encodings*.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for ``\'xmlcharrefreplace\'`` and\n ``\'backslashreplace\'`` and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab (``\\t``), one or more space characters are inserted in the\n result until the current column is equal to the next tab position.\n (The tab character itself is not copied.) If the character is a\n newline (``\\n``) or return (``\\r``), it is copied and the current\n column is reset to zero. Any other character is copied unchanged\n and the current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01 012 0123 01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01 012 0123 01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\n Note: The ``find()`` method should be used only if you need to know the\n position of *sub*. To check if *sub* is a substring or not, use\n the ``in`` operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. 
Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3,\n and should be preferred to the ``%`` formatting described in\n *String Formatting Operations* in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. 
If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified or ``-1``, then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. 
Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n For example, ``\'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()`` returns\n ``[\'ab c\', \'\', \'de fg\', \'kl\']``, while the same call with\n ``splitlines(True)`` returns ``[\'ab c\\n\', \'\\n\', \'de fg\\r\',\n \'kl\\r\\n\']``.\n\n Unlike ``split()`` when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the ``maketrans()`` helper function in the ``string``\n module to create a translation table. 
For string objects, set the\n *table* argument to ``None`` for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a ``None`` *table* argument.\n\n For Unicode objects, the ``translate()`` method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or ``None``. Unmapped characters\n are left untouched. Characters mapped to ``None`` are deleted.\n Note, a more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see ``encodings.cp1251``\n for an example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that ``str.upper().isupper()`` might\n be ``False`` if ``s`` contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to ``len(s)``.\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return ``True`` if there are only numeric characters in S,\n ``False`` otherwise. Numeric characters include digit characters,\n and all characters that have the Unicode numeric value property,\n e.g. U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return ``True`` if there are only decimal characters in S,\n ``False`` otherwise. Decimal characters include digit characters,\n and all characters that can be used to form decimal-radix numbers,\n e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\n\nString Formatting Operations\n============================\n\nString and Unicode objects have one unique built-in operation: the\n``%`` operator (modulo). This is also known as the string\n*formatting* or *interpolation* operator. Given ``format % values``\n(where *format* is a string or Unicode object), ``%`` conversion\nspecifications in *format* are replaced with zero or more elements of\n*values*. The effect is similar to the using ``sprintf()`` in the C\nlanguage. If *format* is a Unicode object, or if any of the objects\nbeing converted using the ``%s`` conversion are Unicode objects, the\nresult will also be a Unicode object.\n\nIf *format* requires a single argument, *values* may be a single non-\ntuple object. [5] Otherwise, *values* must be a tuple with exactly\nthe number of items specified by the format string, or a single\nmapping object (for example, a dictionary).\n\nA conversion specifier contains two or more characters and has the\nfollowing components, which must occur in this order:\n\n1. The ``\'%\'`` character, which marks the start of the specifier.\n\n2. Mapping key (optional), consisting of a parenthesised sequence of\n characters (for example, ``(somename)``).\n\n3. Conversion flags (optional), which affect the result of some\n conversion types.\n\n4. Minimum field width (optional). 
If specified as an ``\'*\'``\n (asterisk), the actual width is read from the next element of the\n tuple in *values*, and the object to convert comes after the\n minimum field width and optional precision.\n\n5. Precision (optional), given as a ``\'.\'`` (dot) followed by the\n precision. If specified as ``\'*\'`` (an asterisk), the actual width\n is read from the next element of the tuple in *values*, and the\n value to convert comes after the precision.\n\n6. Length modifier (optional).\n\n7. Conversion type.\n\nWhen the right argument is a dictionary (or other mapping type), then\nthe formats in the string *must* include a parenthesised mapping key\ninto that dictionary inserted immediately after the ``\'%\'`` character.\nThe mapping key selects the value to be formatted from the mapping.\nFor example:\n\n>>> print \'%(language)s has %(number)03d quote types.\' % \\\n... {"language": "Python", "number": 2}\nPython has 002 quote types.\n\nIn this case no ``*`` specifiers may occur in a format (since they\nrequire a sequential parameter list).\n\nThe conversion flag characters are:\n\n+-----------+-----------------------------------------------------------------------+\n| Flag | Meaning |\n+===========+=======================================================================+\n| ``\'#\'`` | The value conversion will use the "alternate form" (where defined |\n| | below). |\n+-----------+-----------------------------------------------------------------------+\n| ``\'0\'`` | The conversion will be zero padded for numeric values. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'-\'`` | The converted value is left adjusted (overrides the ``\'0\'`` |\n| | conversion if both are given). |\n+-----------+-----------------------------------------------------------------------+\n| ``\' \'`` | (a space) A blank should be left before a positive number (or empty |\n| | string) produced by a signed conversion. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'+\'`` | A sign character (``\'+\'`` or ``\'-\'``) will precede the conversion |\n| | (overrides a "space" flag). |\n+-----------+-----------------------------------------------------------------------+\n\nA length modifier (``h``, ``l``, or ``L``) may be present, but is\nignored as it is not necessary for Python -- so e.g. ``%ld`` is\nidentical to ``%d``.\n\nThe conversion types are:\n\n+--------------+-------------------------------------------------------+---------+\n| Conversion | Meaning | Notes |\n+==============+=======================================================+=========+\n| ``\'d\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'i\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'o\'`` | Signed octal value. | (1) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'u\'`` | Obsolete type -- it is identical to ``\'d\'``. | (7) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'x\'`` | Signed hexadecimal (lowercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'X\'`` | Signed hexadecimal (uppercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'e\'`` | Floating point exponential format (lowercase). 
| (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'E\'`` | Floating point exponential format (uppercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'f\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'F\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'g\'`` | Floating point format. Uses lowercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'G\'`` | Floating point format. Uses uppercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'c\'`` | Single character (accepts integer or single character | |\n| | string). | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'r\'`` | String (converts any Python object using *repr()*). | (5) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'s\'`` | String (converts any Python object using ``str()``). | (6) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'%\'`` | No argument is converted, results in a ``\'%\'`` | |\n| | character in the result. | |\n+--------------+-------------------------------------------------------+---------+\n\nNotes:\n\n1. The alternate form causes a leading zero (``\'0\'``) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n2. The alternate form causes a leading ``\'0x\'`` or ``\'0X\'`` (depending\n on whether the ``\'x\'`` or ``\'X\'`` format was used) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n3. The alternate form causes the result to always contain a decimal\n point, even if no digits follow it.\n\n The precision determines the number of digits after the decimal\n point and defaults to 6.\n\n4. The alternate form causes the result to always contain a decimal\n point, and trailing zeroes are not removed as they would otherwise\n be.\n\n The precision determines the number of significant digits before\n and after the decimal point and defaults to 6.\n\n5. The ``%r`` conversion was added in Python 2.0.\n\n The precision determines the maximal number of characters used.\n\n6. If the object or format provided is a ``unicode`` string, the\n resulting string will also be ``unicode``.\n\n The precision determines the maximal number of characters used.\n\n7. See **PEP 237**.\n\nSince Python strings have an explicit length, ``%s`` conversions do\nnot assume that ``\'\\0\'`` is the end of the string.\n\nChanged in version 2.7: ``%f`` conversions for numbers whose absolute\nvalue is over 1e50 are no longer replaced by ``%g`` conversions.\n\nAdditional string operations are defined in standard modules\n``string`` and ``re``.\n\n\nXRange Type\n===========\n\nThe ``xrange`` type is an immutable sequence which is commonly used\nfor looping. 
The advantage of the ``xrange`` type is that an\n``xrange`` object will always take the same amount of memory, no\nmatter the size of the range it represents. There are no consistent\nperformance advantages.\n\nXRange objects have very little behavior: they only support indexing,\niteration, and the ``len()`` function.\n\n\nMutable Sequence Types\n======================\n\nList and ``bytearray`` objects support additional operations that\nallow in-place modification of the object. Other mutable sequence\ntypes (when added to the language) should also support these\noperations. Strings and tuples are immutable sequence types: such\nobjects cannot be modified once created. The following operations are\ndefined on mutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | (2) |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*\'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (4) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (6) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])`` | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. 
The C implementation of Python has historically accepted multiple\n parameters and implicitly joined them into a tuple; this no longer\n works in Python 2.0. Use of this misfeature has been deprecated\n since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises ``ValueError`` when *x* is not found in *s*. When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the list length is added, as for slice indices. If it is\n still negative, it is truncated to zero, as for slice indices.\n\n Changed in version 2.3: Previously, ``index()`` didn\'t have\n arguments for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n ``insert()`` method, the list length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The ``pop()`` method\'s optional argument *i* defaults to ``-1``, so\n that by default the last item is removed and returned.\n\n7. The ``sort()`` and ``reverse()`` methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don\'t return the\n sorted or reversed list.\n\n8. The ``sort()`` method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: ``cmp=lambda x,y:\n cmp(x.lower(), y.lower())``. The default value is ``None``.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. Use\n ``functools.cmp_to_key()`` to convert an old-style *cmp* function\n to a *key* function.\n\n Changed in version 2.3: Support for ``None`` as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the ``sort()`` method is guaranteed to be\n stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python 2.3 and newer makes the\n list appear empty for the duration, and raises ``ValueError`` if\n it can detect that the list has been mutated during a sort.\n',
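How the specifier components enumerated above (flags, minimum width, precision, mapping keys, ``'*'``) combine is easiest to see interactively; a minimal sketch with made-up values, assuming a CPython 2.7 interpreter:

   >>> '%08.3f' % 3.14159              # '0' flag, width 8, precision 3
   '0003.142'
   >>> '%-8s|' % 'left'                # '-' flag: left adjust within width 8
   'left    |'
   >>> '%+d, % d' % (42, 42)           # '+' forces a sign; ' ' reserves a blank
   '+42,  42'
   >>> '%(name)s uses %(count)d flags' % {'name': 'Python', 'count': 5}
   'Python uses 5 flags'
   >>> '%*.*f' % (10, 2, 3.14159)      # '*' reads width and precision from *values*
   '      3.14'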
+ 'typesseq-mutable': "\nMutable Sequence Types\n**********************\n\nList and ``bytearray`` objects support additional operations that\nallow in-place modification of the object. Other mutable sequence\ntypes (when added to the language) should also support these\noperations. Strings and tuples are immutable sequence types: such\nobjects cannot be modified once created. The following operations are\ndefined on mutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | (2) |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (4) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (6) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])`` | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted multiple\n parameters and implicitly joined them into a tuple; this no longer\n works in Python 2.0. Use of this misfeature has been deprecated\n since Python 1.4.\n\n3. *x* can be any iterable object.\n\n4. Raises ``ValueError`` when *x* is not found in *s*. 
When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the list length is added, as for slice indices. If it is\n still negative, it is truncated to zero, as for slice indices.\n\n Changed in version 2.3: Previously, ``index()`` didn't have\n arguments for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n ``insert()`` method, the list length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The ``pop()`` method's optional argument *i* defaults to ``-1``, so\n that by default the last item is removed and returned.\n\n7. The ``sort()`` and ``reverse()`` methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don't return the\n sorted or reversed list.\n\n8. The ``sort()`` method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: ``cmp=lambda x,y:\n cmp(x.lower(), y.lower())``. The default value is ``None``.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. Use\n ``functools.cmp_to_key()`` to convert an old-style *cmp* function\n to a *key* function.\n\n Changed in version 2.3: Support for ``None`` as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the ``sort()`` method is guaranteed to be\n stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python 2.3 and newer makes the\n list appear empty for the duration, and raises ``ValueError`` if\n it can detect that the list has been mutated during a sort.\n",
'unary': '\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary ``-`` (minus) operator yields the negation of its numeric\nargument.\n\nThe unary ``+`` (plus) operator yields its numeric argument unchanged.\n\nThe unary ``~`` (invert) operator yields the bitwise inversion of its\nplain or long integer argument. The bitwise inversion of ``x`` is\ndefined as ``-(x+1)``. It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n``TypeError`` exception is raised.\n',
'while': '\nThe ``while`` statement\n***********************\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n',
'with': '\nThe ``with`` statement\n**********************\n\nNew in version 2.5.\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the ``with_item``)\n is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the ``with`` statement is only allowed when the\n ``with_statement`` feature has been enabled. It is always enabled\n in Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n',
- 'yield': '\nThe ``yield`` statement\n***********************\n\n yield_stmt ::= yield_expression\n\nThe ``yield`` statement is only used when defining a generator\nfunction, and is only used in the body of the generator function.\nUsing a ``yield`` statement in a function definition is sufficient to\ncause that definition to create a generator function instead of a\nnormal function.\n\nWhen a generator function is called, it returns an iterator known as a\ngenerator iterator, or more commonly, a generator. The body of the\ngenerator function is executed by calling the generator\'s ``next()``\nmethod repeatedly until it raises an exception.\n\nWhen a ``yield`` statement is executed, the state of the generator is\nfrozen and the value of ``expression_list`` is returned to\n``next()``\'s caller. By "frozen" we mean that all local state is\nretained, including the current bindings of local variables, the\ninstruction pointer, and the internal evaluation stack: enough\ninformation is saved so that the next time ``next()`` is invoked, the\nfunction can proceed exactly as if the ``yield`` statement were just\nanother external call.\n\nAs of Python version 2.5, the ``yield`` statement is now allowed in\nthe ``try`` clause of a ``try`` ... ``finally`` construct. If the\ngenerator is not resumed before it is finalized (by reaching a zero\nreference count or by being garbage collected), the generator-\niterator\'s ``close()`` method will be called, allowing any pending\n``finally`` clauses to execute.\n\nNote: In Python 2.2, the ``yield`` statement was only allowed when the\n ``generators`` feature has been enabled. This ``__future__`` import\n statement was used to enable the feature:\n\n from __future__ import generators\n\nSee also:\n\n **PEP 0255** - Simple Generators\n The proposal for adding generators and the ``yield`` statement\n to Python.\n\n **PEP 0342** - Coroutines via Enhanced Generators\n The proposal that, among other generator enhancements, proposed\n allowing ``yield`` to appear inside a ``try`` ... ``finally``\n block.\n'}
+ 'yield': '\nThe ``yield`` statement\n***********************\n\n yield_stmt ::= yield_expression\n\nThe ``yield`` statement is only used when defining a generator\nfunction, and is only used in the body of the generator function.\nUsing a ``yield`` statement in a function definition is sufficient to\ncause that definition to create a generator function instead of a\nnormal function.\n\nWhen a generator function is called, it returns an iterator known as a\ngenerator iterator, or more commonly, a generator. The body of the\ngenerator function is executed by calling the generator\'s ``next()``\nmethod repeatedly until it raises an exception.\n\nWhen a ``yield`` statement is executed, the state of the generator is\nfrozen and the value of ``expression_list`` is returned to\n``next()``\'s caller. By "frozen" we mean that all local state is\nretained, including the current bindings of local variables, the\ninstruction pointer, and the internal evaluation stack: enough\ninformation is saved so that the next time ``next()`` is invoked, the\nfunction can proceed exactly as if the ``yield`` statement were just\nanother external call.\n\nAs of Python version 2.5, the ``yield`` statement is now allowed in\nthe ``try`` clause of a ``try`` ... ``finally`` construct. If the\ngenerator is not resumed before it is finalized (by reaching a zero\nreference count or by being garbage collected), the generator-\niterator\'s ``close()`` method will be called, allowing any pending\n``finally`` clauses to execute.\n\nFor full details of ``yield`` semantics, refer to the *Yield\nexpressions* section.\n\nNote: In Python 2.2, the ``yield`` statement was only allowed when the\n ``generators`` feature has been enabled. This ``__future__`` import\n statement was used to enable the feature:\n\n from __future__ import generators\n\nSee also:\n\n **PEP 0255** - Simple Generators\n The proposal for adding generators and the ``yield`` statement\n to Python.\n\n **PEP 0342** - Coroutines via Enhanced Generators\n The proposal that, among other generator enhancements, proposed\n allowing ``yield`` to appear inside a ``try`` ... ``finally``\n block.\n'}
diff --git a/Lib/random.py b/Lib/random.py
index 36b9565..3f96a37 100644
--- a/Lib/random.py
+++ b/Lib/random.py
@@ -108,7 +108,9 @@ class Random(_random.Random):
if a is None:
try:
- a = long(_hexlify(_urandom(16)), 16)
+ # Seed with enough bytes to span the 19937 bit
+ # state space for the Mersenne Twister
+ a = long(_hexlify(_urandom(2500)), 16)
except NotImplementedError:
import time
a = long(time.time() * 256) # use fractional seconds
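The comment above motivates the larger seed: the Mersenne Twister keeps 19937 bits of state, so 16 bytes of entropy (128 bits) can reach only a tiny fraction of it, while 2500 bytes comfortably cover it. A back-of-the-envelope check (an illustrative sketch, not part of the patch):

    # 2500 bytes of entropy give 20000 bits, which exceeds the 19937-bit
    # Mersenne Twister state; the old 16-byte seed covered only 128 bits.
    state_bits = 19937
    print(16 * 8 >= state_bits)     # False
    print(2500 * 8 >= state_bits)   # True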
@@ -170,29 +172,28 @@ class Random(_random.Random):
## -------------------- integer methods -------------------
- def randrange(self, start, stop=None, step=1, int=int, default=None,
- maxwidth=1L<<BPF):
+ def randrange(self, start, stop=None, step=1, _int=int, _maxwidth=1L<<BPF):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
endpoint; in Python this is usually not what you want.
- Do not supply the 'int', 'default', and 'maxwidth' arguments.
+
"""
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
- istart = int(start)
+ istart = _int(start)
if istart != start:
raise ValueError, "non-integer arg 1 for randrange()"
- if stop is default:
+ if stop is None:
if istart > 0:
- if istart >= maxwidth:
+ if istart >= _maxwidth:
return self._randbelow(istart)
- return int(self.random() * istart)
+ return _int(self.random() * istart)
raise ValueError, "empty range for randrange()"
# stop argument supplied.
- istop = int(stop)
+ istop = _int(stop)
if istop != stop:
raise ValueError, "non-integer stop for randrange()"
width = istop - istart
@@ -210,14 +211,14 @@ class Random(_random.Random):
# a long, but we're supposed to return an int (for backward
# compatibility).
- if width >= maxwidth:
- return int(istart + self._randbelow(width))
- return int(istart + int(self.random()*width))
+ if width >= _maxwidth:
+ return _int(istart + self._randbelow(width))
+ return _int(istart + _int(self.random()*width))
if step == 1:
raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width)
# Non-unit step argument supplied.
- istep = int(step)
+ istep = _int(step)
if istep != step:
raise ValueError, "non-integer step for randrange()"
if istep > 0:
@@ -230,9 +231,9 @@ class Random(_random.Random):
if n <= 0:
raise ValueError, "empty range for randrange()"
- if n >= maxwidth:
+ if n >= _maxwidth:
return istart + istep*self._randbelow(n)
- return istart + istep*int(self.random() * n)
+ return istart + istep*_int(self.random() * n)
def randint(self, a, b):
"""Return random integer in range [a, b], including both end points.
@@ -240,7 +241,7 @@ class Random(_random.Random):
return self.randrange(a, b+1)
- def _randbelow(self, n, _log=_log, int=int, _maxwidth=1L<<BPF,
+ def _randbelow(self, n, _log=_log, _int=int, _maxwidth=1L<<BPF,
_Method=_MethodType, _BuiltinMethod=_BuiltinMethodType):
"""Return a random int in the range [0,n)
@@ -257,7 +258,7 @@ class Random(_random.Random):
# has not been overridden or if a new getrandbits() was supplied.
# This assures that the two methods correspond.
if type(self.random) is _BuiltinMethod or type(getrandbits) is _Method:
- k = int(1.00001 + _log(n-1, 2.0)) # 2**k > n-1 > 2**(k-2)
+ k = _int(1.00001 + _log(n-1, 2.0)) # 2**k > n-1 > 2**(k-2)
r = getrandbits(k)
while r >= n:
r = getrandbits(k)
@@ -265,7 +266,7 @@ class Random(_random.Random):
if n >= _maxwidth:
_warn("Underlying random() generator does not supply \n"
"enough bits to choose from a population range this large")
- return int(self.random() * n)
+ return _int(self.random() * n)
## -------------------- sequence methods -------------------
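_randbelow() draws a fixed number of random bits and rejects values that fall outside [0, n), which keeps the result unbiased (taking a modulus instead would favour small values). A standalone sketch of the same rejection idea, not the stdlib implementation:

    import random

    def randbelow_unbiased(n):
        # Draw enough bits to cover [0, n) and retry until the value
        # actually falls in range; no modulus, hence no bias.
        k = n.bit_length()
        r = random.getrandbits(k)
        while r >= n:
            r = random.getrandbits(k)
        return r

    print(0 <= randbelow_unbiased(1000) < 1000)   # True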
@@ -273,18 +274,20 @@ class Random(_random.Random):
"""Choose a random element from a non-empty sequence."""
return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty
- def shuffle(self, x, random=None, int=int):
+ def shuffle(self, x, random=None):
"""x, random=random.random -> shuffle list x in place; return None.
Optional arg random is a 0-argument function returning a random
float in [0.0, 1.0); by default, the standard random.random.
+
"""
if random is None:
random = self.random
+ _int = int
for i in reversed(xrange(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
- j = int(random() * (i+1))
+ j = _int(random() * (i+1))
x[i], x[j] = x[j], x[i]
def sample(self, population, k):
@@ -368,7 +371,10 @@ class Random(_random.Random):
"""
u = self.random()
- c = 0.5 if mode is None else (mode - low) / (high - low)
+ try:
+ c = 0.5 if mode is None else (mode - low) / (high - low)
+ except ZeroDivisionError:
+ return low
if u > c:
u = 1.0 - u
c = 1.0 - c
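The hunk above guards triangular() against a zero-length interval: when high equals low, the mode fraction (mode - low) / (high - low) would divide by zero, and the patched code returns low instead. A quick check, assuming an interpreter that includes this fix:

    import random
    # With an explicit mode and low == high, older versions raise
    # ZeroDivisionError; the patched version simply returns low.
    print(random.triangular(2.5, 2.5, 2.5))   # 2.5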
@@ -457,27 +463,25 @@ class Random(_random.Random):
if kappa <= 1e-6:
return TWOPI * random()
- a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
- b = (a - _sqrt(2.0 * a))/(2.0 * kappa)
- r = (1.0 + b * b)/(2.0 * b)
+ s = 0.5 / kappa
+ r = s + _sqrt(1.0 + s * s)
while 1:
u1 = random()
-
z = _cos(_pi * u1)
- f = (1.0 + r * z)/(r + z)
- c = kappa * (r - f)
+ d = z / (r + z)
u2 = random()
-
- if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c):
+ if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
break
+ q = 1.0 / r
+ f = (q + z) / (1.0 + q * z)
u3 = random()
if u3 > 0.5:
- theta = (mu % TWOPI) + _acos(f)
+ theta = (mu + _acos(f)) % TWOPI
else:
- theta = (mu % TWOPI) - _acos(f)
+ theta = (mu - _acos(f)) % TWOPI
return theta
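The rewritten von Mises sampler reduces theta modulo 2*pi as its last step, so every sample lands in [0, 2*pi) even when mu itself lies outside that range. A small check, assuming an interpreter with this change:

    import math
    import random
    # mu = 7.0 is larger than 2*pi, yet the results are wrapped into range.
    samples = [random.vonmisesvariate(7.0, 4.0) for _ in range(1000)]
    print(all(0.0 <= s < 2 * math.pi for s in samples))   # True on patched versions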
diff --git a/Lib/re.py b/Lib/re.py
index aab5d37..9b01f3e 100644
--- a/Lib/re.py
+++ b/Lib/re.py
@@ -225,11 +225,13 @@ _MAXCACHE = 100
def _compile(*key):
# internal: compile pattern
- cachekey = (type(key[0]),) + key
- p = _cache.get(cachekey)
- if p is not None:
- return p
pattern, flags = key
+ bypass_cache = flags & DEBUG
+ if not bypass_cache:
+ cachekey = (type(key[0]),) + key
+ p = _cache.get(cachekey)
+ if p is not None:
+ return p
if isinstance(pattern, _pattern_type):
if flags:
raise ValueError('Cannot process flags argument with a compiled pattern')
@@ -240,9 +242,10 @@ def _compile(*key):
p = sre_compile.compile(pattern, flags)
except error, v:
raise error, v # invalid expression
- if len(_cache) >= _MAXCACHE:
- _cache.clear()
- _cache[cachekey] = p
+ if not bypass_cache:
+ if len(_cache) >= _MAXCACHE:
+ _cache.clear()
+ _cache[cachekey] = p
return p
def _compile_repl(*key):
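With this change, patterns compiled with re.DEBUG neither consult nor populate the module-level cache, so the debug dump is printed on every compile. A hedged illustration that peeks at the private _cache purely for demonstration:

    import re
    re.purge()                    # empty the private pattern cache
    re.compile('a+b', re.DEBUG)   # prints the parse tree; not cached after this change
    print(len(re._cache))         # 0 on patched versions
    re.compile('a+b')
    print(len(re._cache))         # 1: normal compiles are cached as before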
diff --git a/Lib/rfc822.py b/Lib/rfc822.py
index 3b29a6a..b65d8da 100644
--- a/Lib/rfc822.py
+++ b/Lib/rfc822.py
@@ -212,7 +212,7 @@ class Message:
You may override this method if your application wants to bend the
rules, e.g. to strip trailing whitespace, or to recognize MH template
separators ('--------'). For convenience (e.g. for code reading from
- sockets) a line consisting of \r\n also matches.
+ sockets) a line consisting of \\r\\n also matches.
"""
return line in _blanklines
diff --git a/Lib/rlcompleter.py b/Lib/rlcompleter.py
index 53de296..6e4bd12 100644
--- a/Lib/rlcompleter.py
+++ b/Lib/rlcompleter.py
@@ -1,13 +1,11 @@
-"""Word completion for GNU readline 2.0.
+"""Word completion for GNU readline.
-This requires the latest extension to the readline module. The completer
-completes keywords, built-ins and globals in a selectable namespace (which
-defaults to __main__); when completing NAME.NAME..., it evaluates (!) the
-expression up to the last dot and completes its attributes.
+The completer completes keywords, built-ins and globals in a selectable
+namespace (which defaults to __main__); when completing NAME.NAME..., it
+evaluates (!) the expression up to the last dot and completes its attributes.
-It's very cool to do "import sys" type "sys.", hit the
-completion key (twice), and see the list of names defined by the
-sys module!
+It's very cool to do "import sys" type "sys.", hit the completion key (twice),
+and see the list of names defined by the sys module!
Tip: to use the tab key as the completion key, call
@@ -15,18 +13,16 @@ Tip: to use the tab key as the completion key, call
Notes:
-- Exceptions raised by the completer function are *ignored* (and
-generally cause the completion to fail). This is a feature -- since
-readline sets the tty device in raw (or cbreak) mode, printing a
-traceback wouldn't work well without some complicated hoopla to save,
-reset and restore the tty state.
+- Exceptions raised by the completer function are *ignored* (and generally cause
+ the completion to fail). This is a feature -- since readline sets the tty
+ device in raw (or cbreak) mode, printing a traceback wouldn't work well
+ without some complicated hoopla to save, reset and restore the tty state.
-- The evaluation of the NAME.NAME... form may cause arbitrary
-application defined code to be executed if an object with a
-__getattr__ hook is found. Since it is the responsibility of the
-application (or the user) to enable this feature, I consider this an
-acceptable risk. More complicated expressions (e.g. function calls or
-indexing operations) are *not* evaluated.
+- The evaluation of the NAME.NAME... form may cause arbitrary application
+ defined code to be executed if an object with a __getattr__ hook is found.
+ Since it is the responsibility of the application (or the user) to enable this
+ feature, I consider this an acceptable risk. More complicated expressions
+ (e.g. function calls or indexing operations) are *not* evaluated.
- GNU readline is also used by the built-in functions input() and
raw_input(), and thus these also benefit/suffer from the completer
@@ -35,7 +31,7 @@ specifying its own completer function and using raw_input() for all
its input.
- When the original stdin is not a tty device, GNU readline is never
-used, and this module (and the readline module) are silently inactive.
+ used, and this module (and the readline module) are silently inactive.
"""
@@ -120,7 +116,7 @@ class Completer:
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
- evaluatable in self.namespace, it will be evaluated and its attributes
+ evaluable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
diff --git a/Lib/robotparser.py b/Lib/robotparser.py
index 1722863..b46b753 100644
--- a/Lib/robotparser.py
+++ b/Lib/robotparser.py
@@ -7,7 +7,8 @@
2) PSF license for Python 2.2
The robots.txt Exclusion Protocol is implemented as specified in
- http://info.webcrawler.com/mak/projects/robots/norobots-rfc.html
+ http://www.robotstxt.org/norobots-rfc.txt
+
"""
import urlparse
import urllib
@@ -60,7 +61,7 @@ class RobotFileParser:
self.errcode = opener.errcode
if self.errcode in (401, 403):
self.disallow_all = True
- elif self.errcode >= 400:
+ elif self.errcode >= 400 and self.errcode < 500:
self.allow_all = True
elif self.errcode == 200 and lines:
self.parse(lines)
@@ -86,6 +87,7 @@ class RobotFileParser:
linenumber = 0
entry = Entry()
+ self.modified()
for line in lines:
linenumber += 1
if not line:
@@ -131,6 +133,14 @@ class RobotFileParser:
return False
if self.allow_all:
return True
+
+ # Until the robots.txt file has been read or found not
+ # to exist, we must assume that no url is allowable.
+ # This prevents false positives when a user erroneously
+ # calls can_fetch() before calling read().
+ if not self.last_checked:
+ return False
+
# search for given user agent matches
# the first match counts
parsed_url = urlparse.urlparse(urllib.unquote(url))
@@ -160,6 +170,7 @@ class RuleLine:
if path == '' and not allowance:
# an empty value means allow all
allowance = True
+ path = urlparse.urlunparse(urlparse.urlparse(path))
self.path = urllib.quote(path)
self.allowance = allowance
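The new last_checked guard makes can_fetch() deny access until robots.txt has actually been read, instead of silently allowing everything. A minimal sketch (the module is robotparser on Python 2, urllib.robotparser on Python 3; the example.com URL is purely illustrative):

    import robotparser

    rp = robotparser.RobotFileParser()
    rp.set_url("http://www.example.com/robots.txt")
    # read() has not been called, so nothing is known about the site;
    # after this change can_fetch() refuses rather than allowing by default.
    print(rp.can_fetch("*", "http://www.example.com/page.html"))   # False on patched versions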
diff --git a/Lib/runpy.py b/Lib/runpy.py
index f635c4b..c4d7cc2 100644
--- a/Lib/runpy.py
+++ b/Lib/runpy.py
@@ -200,7 +200,7 @@ def _get_importer(path_name):
pass
else:
# The following check looks a bit odd. The trick is that
- # NullImporter throws ImportError if the supplied path is a
+ # NullImporter raises ImportError if the supplied path is a
# *valid* directory entry (and hence able to be handled
# by the standard import machinery)
try:
diff --git a/Lib/shutil.py b/Lib/shutil.py
index be83251..e12f791 100644
--- a/Lib/shutil.py
+++ b/Lib/shutil.py
@@ -102,8 +102,10 @@ def copystat(src, dst):
try:
os.chflags(dst, st.st_flags)
except OSError, why:
- if (not hasattr(errno, 'EOPNOTSUPP') or
- why.errno != errno.EOPNOTSUPP):
+ for err in 'EOPNOTSUPP', 'ENOTSUP':
+ if hasattr(errno, err) and why.errno == getattr(errno, err):
+ break
+ else:
raise
def copy(src, dst):
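The copystat() change relies on Python's for/else idiom: the else clause runs only when the loop finishes without break, i.e. when the error code matches none of the tolerated "operation not supported" values, and only then is the exception re-raised. A standalone sketch of the same pattern (the helper name is illustrative, not from the patch):

    import errno

    TOLERATED = ('EOPNOTSUPP', 'ENOTSUP')   # either name may be missing on some platforms

    def ignore_unsupported(exc):
        # break on a tolerated code (swallow the error); the else clause
        # fires only if the loop never breaks, and re-raises.
        for name in TOLERATED:
            if hasattr(errno, name) and exc.errno == getattr(errno, name):
                break
        else:
            raise exc

    try:
        ignore_unsupported(OSError(errno.EPERM, 'not a tolerated code'))
    except OSError as e:
        print(e.errno == errno.EPERM)   # True: anything else is re-raised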
@@ -201,7 +203,7 @@ def copytree(src, dst, symlinks=False, ignore=None):
# Copying file access times may fail on Windows
pass
else:
- errors.extend((src, dst, str(why)))
+ errors.append((src, dst, str(why)))
if errors:
raise Error, errors
@@ -257,7 +259,8 @@ def rmtree(path, ignore_errors=False, onerror=None):
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
- return os.path.basename(path.rstrip(os.path.sep))
+ sep = os.path.sep + (os.path.altsep or '')
+ return os.path.basename(path.rstrip(sep))
def move(src, dst):
"""Recursively move a file or directory to another location. This is
@@ -444,17 +447,15 @@ def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
zip_filename, base_dir)
if not dry_run:
- zip = zipfile.ZipFile(zip_filename, "w",
- compression=zipfile.ZIP_DEFLATED)
-
- for dirpath, dirnames, filenames in os.walk(base_dir):
- for name in filenames:
- path = os.path.normpath(os.path.join(dirpath, name))
- if os.path.isfile(path):
- zip.write(path, path)
- if logger is not None:
- logger.info("adding '%s'", path)
- zip.close()
+ with zipfile.ZipFile(zip_filename, "w",
+ compression=zipfile.ZIP_DEFLATED) as zf:
+ for dirpath, dirnames, filenames in os.walk(base_dir):
+ for name in filenames:
+ path = os.path.normpath(os.path.join(dirpath, name))
+ if os.path.isfile(path):
+ zf.write(path, path)
+ if logger is not None:
+ logger.info("adding '%s'", path)
return zip_filename
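The rewrite uses zipfile.ZipFile as a context manager (supported since Python 2.7), so the archive is closed even if adding a member fails part-way. A minimal sketch of the same pattern:

    import os
    import zipfile

    with zipfile.ZipFile("demo.zip", "w", compression=zipfile.ZIP_DEFLATED) as zf:
        # The archive is closed when the block exits, even on error.
        zf.writestr("hello.txt", "hello world\n")

    with zipfile.ZipFile("demo.zip") as zf:
        print(zf.namelist())   # ['hello.txt']
    os.remove("demo.zip")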
diff --git a/Lib/site.py b/Lib/site.py
index 54e5154..d2e18f1 100644
--- a/Lib/site.py
+++ b/Lib/site.py
@@ -114,18 +114,6 @@ def removeduppaths():
sys.path[:] = L
return known_paths
-# XXX This should not be part of site.py, since it is needed even when
-# using the -S option for Python. See http://www.python.org/sf/586680
-def addbuilddir():
- """Append ./build/lib.<platform> in case we're running in the build dir
- (especially for Guido :-)"""
- from sysconfig import get_platform
- s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
- if hasattr(sys, 'gettotalrefcount'):
- s += '-pydebug'
- s = os.path.join(os.path.dirname(sys.path.pop()), s)
- sys.path.append(s)
-
def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
@@ -448,7 +436,7 @@ def setcopyright():
for supporting Python development. See www.python.org for more information.""")
here = os.path.dirname(os.__file__)
__builtin__.license = _Printer(
- "license", "See http://www.python.org/%.3s/license.html" % sys.version,
+ "license", "See http://www.python.org/psf/license/",
["LICENSE.txt", "LICENSE"],
[os.path.join(here, os.pardir), here, os.curdir])
@@ -537,9 +525,6 @@ def main():
abs__file__()
known_paths = removeduppaths()
- if (os.name == "posix" and sys.path and
- os.path.basename(sys.path[-1]) == "Modules"):
- addbuilddir()
if ENABLE_USER_SITE is None:
ENABLE_USER_SITE = check_enableusersite()
known_paths = addusersitepackages(known_paths)
diff --git a/Lib/smtplib.py b/Lib/smtplib.py
index 07582f6..7f07840 100755
--- a/Lib/smtplib.py
+++ b/Lib/smtplib.py
@@ -237,10 +237,12 @@ class SMTP:
If specified, `host' is the name of the remote host to which to
connect. If specified, `port' specifies the port to which to connect.
- By default, smtplib.SMTP_PORT is used. An SMTPConnectError is raised
- if the specified `host' doesn't respond correctly. If specified,
- `local_hostname` is used as the FQDN of the local host. By default,
- the local hostname is found using socket.getfqdn().
+ By default, smtplib.SMTP_PORT is used. If a host is specified the
+ connect method is called, and if it returns anything other than a
+ success code an SMTPConnectError is raised. If specified,
+ `local_hostname` is used as the FQDN of the local host for the
+ HELO/EHLO command. Otherwise, the local hostname is found using
+ socket.getfqdn().
"""
self.timeout = timeout
@@ -276,12 +278,12 @@ class SMTP:
"""
self.debuglevel = debuglevel
- def _get_socket(self, port, host, timeout):
+ def _get_socket(self, host, port, timeout):
# This makes it simpler for SMTP_SSL to use the SMTP connect code
# and just alter the socket connection bit.
if self.debuglevel > 0:
print>>stderr, 'connect:', (host, port)
- return socket.create_connection((port, host), timeout)
+ return socket.create_connection((host, port), timeout)
def connect(self, host='localhost', port=0):
"""Connect to a host on a given port.
@@ -758,12 +760,15 @@ class SMTP:
if _have_ssl:
class SMTP_SSL(SMTP):
- """ This is a subclass derived from SMTP that connects over an SSL encrypted
- socket (to use this class you need a socket module that was compiled with SSL
- support). If host is not specified, '' (the local host) is used. If port is
- omitted, the standard SMTP-over-SSL port (465) is used. keyfile and certfile
- are also optional - they can contain a PEM formatted private key and
- certificate chain file for the SSL connection.
+ """ This is a subclass derived from SMTP that connects over an SSL
+ encrypted socket (to use this class you need a socket module that was
+ compiled with SSL support). If host is not specified, '' (the local
+ host) is used. If port is omitted, the standard SMTP-over-SSL port
+ (465) is used. local_hostname has the same meaning as it does in the
+ SMTP class. keyfile and certfile are also optional - they can contain
+ a PEM formatted private key and certificate chain file for the SSL
+ connection.
+
"""
default_port = SMTP_SSL_PORT
@@ -794,9 +799,10 @@ class LMTP(SMTP):
"""LMTP - Local Mail Transfer Protocol
The LMTP protocol, which is very similar to ESMTP, is heavily based
- on the standard SMTP client. It's common to use Unix sockets for LMTP,
- so our connect() method must support that as well as a regular
- host:port server. To specify a Unix socket, you must use an absolute
+ on the standard SMTP client. It's common to use Unix sockets for
+ LMTP, so our connect() method must support that as well as a regular
+ host:port server. local_hostname has the same meaning as it does in
+ the SMTP class. To specify a Unix socket, you must use an absolute
path as the host, starting with a '/'.
Authentication is supported, using the regular SMTP mechanism. When
@@ -818,13 +824,13 @@ class LMTP(SMTP):
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(host)
- except socket.error, msg:
+ except socket.error:
if self.debuglevel > 0:
print>>stderr, 'connect fail:', host
if self.sock:
self.sock.close()
self.sock = None
- raise socket.error, msg
+ raise
(code, msg) = self.getreply()
if self.debuglevel > 0:
print>>stderr, "connect:", msg
diff --git a/Lib/socket.py b/Lib/socket.py
index bd364e7..aac04f6 100644
--- a/Lib/socket.py
+++ b/Lib/socket.py
@@ -319,8 +319,8 @@ class _fileobject(object):
self._wbuf.append(data)
self._wbuf_len += len(data)
if (self._wbufsize == 0 or
- self._wbufsize == 1 and '\n' in data or
- self._wbuf_len >= self._wbufsize):
+ (self._wbufsize == 1 and '\n' in data) or
+ (self._wbufsize > 1 and self._wbuf_len >= self._wbufsize)):
self.flush()
def writelines(self, list):
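In the old condition the grouping was already a or (b and c) or d, because "and" binds tighter than "or"; the parentheses only make that explicit, while the substantive fix is the added _wbufsize > 1 guard so line-buffered mode flushes on newlines rather than on buffer size. A two-line reminder of the precedence rule (unrelated to the socket internals themselves):

    # 'and' binds tighter than 'or':
    print(True or False and False)     # True: parsed as True or (False and False)
    print((True or False) and False)   # False: explicit grouping changes the result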
diff --git a/Lib/sqlite3/dbapi2.py b/Lib/sqlite3/dbapi2.py
index 7eb28e8..0d4dcaf 100644
--- a/Lib/sqlite3/dbapi2.py
+++ b/Lib/sqlite3/dbapi2.py
@@ -1,4 +1,4 @@
-#-*- coding: ISO-8859-1 -*-
+# -*- coding: iso-8859-1 -*-
# pysqlite2/dbapi2.py: the DB-API 2.0 interface
#
# Copyright (C) 2004-2005 Gerhard Häring <gh@ghaering.de>
@@ -21,6 +21,7 @@
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
+import collections
import datetime
import time
@@ -51,6 +52,7 @@ version_info = tuple([int(x) for x in version.split(".")])
sqlite_version_info = tuple([int(x) for x in sqlite_version.split(".")])
Binary = buffer
+collections.Sequence.register(Row)
def register_adapters_and_converters():
def adapt_date(val):
@@ -68,7 +70,7 @@ def register_adapters_and_converters():
timepart_full = timepart.split(".")
hours, minutes, seconds = map(int, timepart_full[0].split(":"))
if len(timepart_full) == 2:
- microseconds = int(timepart_full[1])
+ microseconds = int('{:0<6.6}'.format(timepart_full[1].decode()))
else:
microseconds = 0
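The new format spec '0<6.6' left-aligns the fractional-seconds digits in a field of width 6, filling with '0' on the right, while the .6 precision truncates anything longer; so '456' becomes '456000' and '123456789' becomes '123456'. A quick illustration:

    # fill '0', left-align, minimum width 6, at most 6 characters kept
    print('{:0<6.6}'.format('456'))         # 456000
    print('{:0<6.6}'.format('123456789'))   # 123456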
diff --git a/Lib/sqlite3/dump.py b/Lib/sqlite3/dump.py
index da6be68..e5c5ef2 100644
--- a/Lib/sqlite3/dump.py
+++ b/Lib/sqlite3/dump.py
@@ -25,9 +25,10 @@ def _iterdump(connection):
FROM "sqlite_master"
WHERE "sql" NOT NULL AND
"type" == 'table'
+ ORDER BY "name"
"""
schema_res = cu.execute(q)
- for table_name, type, sql in sorted(schema_res.fetchall()):
+ for table_name, type, sql in schema_res.fetchall():
if table_name == 'sqlite_sequence':
yield('DELETE FROM "sqlite_sequence";')
elif table_name == 'sqlite_stat1':
@@ -42,7 +43,7 @@ def _iterdump(connection):
# qtable,
# sql.replace("''")))
else:
- yield('{0};'.format(sql))
+ yield('%s;' % sql)
# Build the insert statement for each row of the current table
table_name_ident = table_name.replace('"', '""')
@@ -53,7 +54,7 @@ def _iterdump(connection):
",".join("""'||quote("{0}")||'""".format(col.replace('"', '""')) for col in column_names))
query_res = cu.execute(q)
for row in query_res:
- yield("{0};".format(row[0]))
+ yield("%s;" % row[0])
# Now when the type is 'index', 'trigger', or 'view'
q = """
@@ -64,6 +65,6 @@ def _iterdump(connection):
"""
schema_res = cu.execute(q)
for name, type, sql in schema_res.fetchall():
- yield('{0};'.format(sql))
+ yield('%s;' % sql)
yield('COMMIT;')
diff --git a/Lib/sqlite3/test/dump.py b/Lib/sqlite3/test/dump.py
index 2e9b436..b7de810 100644
--- a/Lib/sqlite3/test/dump.py
+++ b/Lib/sqlite3/test/dump.py
@@ -29,6 +29,8 @@ class DumpTests(unittest.TestCase):
,
"INSERT INTO \"t1\" VALUES(2,'foo2',30,30);"
,
+ u"INSERT INTO \"t1\" VALUES(3,'f\xc3\xb6',40,10);"
+ ,
"CREATE TABLE t2(id integer, t2_i1 integer, " \
"t2_i2 integer, primary key (id)," \
"foreign key(t2_i1) references t1(t1_i1));"
@@ -49,6 +51,27 @@ class DumpTests(unittest.TestCase):
[self.assertEqual(expected_sqls[i], actual_sqls[i])
for i in xrange(len(expected_sqls))]
+ def CheckUnorderableRow(self):
+ # iterdump() should be able to cope with unorderable row types (issue #15545)
+ class UnorderableRow:
+ def __init__(self, cursor, row):
+ self.row = row
+ def __getitem__(self, index):
+ return self.row[index]
+ self.cx.row_factory = UnorderableRow
+ CREATE_ALPHA = """CREATE TABLE "alpha" ("one");"""
+ CREATE_BETA = """CREATE TABLE "beta" ("two");"""
+ expected = [
+ "BEGIN TRANSACTION;",
+ CREATE_ALPHA,
+ CREATE_BETA,
+ "COMMIT;"
+ ]
+ self.cu.execute(CREATE_BETA)
+ self.cu.execute(CREATE_ALPHA)
+ got = list(self.cx.iterdump())
+ self.assertEqual(expected, got)
+
def suite():
return unittest.TestSuite(unittest.makeSuite(DumpTests, "Check"))
diff --git a/Lib/sqlite3/test/factory.py b/Lib/sqlite3/test/factory.py
index 52854be..0813a13 100644
--- a/Lib/sqlite3/test/factory.py
+++ b/Lib/sqlite3/test/factory.py
@@ -23,6 +23,7 @@
import unittest
import sqlite3 as sqlite
+from collections import Sequence
class MyConnection(sqlite.Connection):
def __init__(self, *args, **kwargs):
@@ -47,9 +48,7 @@ class ConnectionFactoryTests(unittest.TestCase):
self.con.close()
def CheckIsInstance(self):
- self.assertTrue(isinstance(self.con,
- MyConnection),
- "connection is not instance of MyConnection")
+ self.assertIsInstance(self.con, MyConnection)
class CursorFactoryTests(unittest.TestCase):
def setUp(self):
@@ -60,9 +59,7 @@ class CursorFactoryTests(unittest.TestCase):
def CheckIsInstance(self):
cur = self.con.cursor(factory=MyCursor)
- self.assertTrue(isinstance(cur,
- MyCursor),
- "cursor is not instance of MyCursor")
+ self.assertIsInstance(cur, MyCursor)
class RowFactoryTestsBackwardsCompat(unittest.TestCase):
def setUp(self):
@@ -72,9 +69,7 @@ class RowFactoryTestsBackwardsCompat(unittest.TestCase):
cur = self.con.cursor(factory=MyCursor)
cur.execute("select 4+5 as foo")
row = cur.fetchone()
- self.assertTrue(isinstance(row,
- dict),
- "row is not instance of dict")
+ self.assertIsInstance(row, dict)
cur.close()
def tearDown(self):
@@ -87,28 +82,42 @@ class RowFactoryTests(unittest.TestCase):
def CheckCustomFactory(self):
self.con.row_factory = lambda cur, row: list(row)
row = self.con.execute("select 1, 2").fetchone()
- self.assertTrue(isinstance(row,
- list),
- "row is not instance of list")
+ self.assertIsInstance(row, list)
def CheckSqliteRowIndex(self):
self.con.row_factory = sqlite.Row
row = self.con.execute("select 1 as a, 2 as b").fetchone()
- self.assertTrue(isinstance(row,
- sqlite.Row),
- "row is not instance of sqlite.Row")
+ self.assertIsInstance(row, sqlite.Row)
col1, col2 = row["a"], row["b"]
- self.assertTrue(col1 == 1, "by name: wrong result for column 'a'")
- self.assertTrue(col2 == 2, "by name: wrong result for column 'a'")
+ self.assertEqual(col1, 1, "by name: wrong result for column 'a'")
+ self.assertEqual(col2, 2, "by name: wrong result for column 'a'")
col1, col2 = row["A"], row["B"]
- self.assertTrue(col1 == 1, "by name: wrong result for column 'A'")
- self.assertTrue(col2 == 2, "by name: wrong result for column 'B'")
-
- col1, col2 = row[0], row[1]
- self.assertTrue(col1 == 1, "by index: wrong result for column 0")
- self.assertTrue(col2 == 2, "by index: wrong result for column 1")
+ self.assertEqual(col1, 1, "by name: wrong result for column 'A'")
+ self.assertEqual(col2, 2, "by name: wrong result for column 'B'")
+
+ self.assertEqual(row[0], 1, "by index: wrong result for column 0")
+ self.assertEqual(row[0L], 1, "by index: wrong result for column 0")
+ self.assertEqual(row[1], 2, "by index: wrong result for column 1")
+ self.assertEqual(row[1L], 2, "by index: wrong result for column 1")
+ self.assertEqual(row[-1], 2, "by index: wrong result for column -1")
+ self.assertEqual(row[-1L], 2, "by index: wrong result for column -1")
+ self.assertEqual(row[-2], 1, "by index: wrong result for column -2")
+ self.assertEqual(row[-2L], 1, "by index: wrong result for column -2")
+
+ with self.assertRaises(IndexError):
+ row['c']
+ with self.assertRaises(IndexError):
+ row[2]
+ with self.assertRaises(IndexError):
+ row[2L]
+ with self.assertRaises(IndexError):
+ row[-3]
+ with self.assertRaises(IndexError):
+ row[-3L]
+ with self.assertRaises(IndexError):
+ row[2**1000]
def CheckSqliteRowIter(self):
"""Checks if the row object is iterable"""
@@ -122,6 +131,7 @@ class RowFactoryTests(unittest.TestCase):
self.con.row_factory = sqlite.Row
row = self.con.execute("select 1 as a, 2 as b").fetchone()
t = tuple(row)
+ self.assertEqual(t, (row['a'], row['b']))
def CheckSqliteRowAsDict(self):
"""Checks if the row object can be correctly converted to a dictionary"""
@@ -138,8 +148,8 @@ class RowFactoryTests(unittest.TestCase):
row_2 = self.con.execute("select 1 as a, 2 as b").fetchone()
row_3 = self.con.execute("select 1 as a, 3 as b").fetchone()
- self.assertTrue(row_1 == row_1)
- self.assertTrue(row_1 == row_2)
+ self.assertEqual(row_1, row_1)
+ self.assertEqual(row_1, row_2)
self.assertTrue(row_2 != row_3)
self.assertFalse(row_1 != row_1)
@@ -151,6 +161,15 @@ class RowFactoryTests(unittest.TestCase):
self.assertNotEqual(row_1, row_3)
self.assertNotEqual(hash(row_1), hash(row_3))
+ def CheckSqliteRowAsSequence(self):
+ """ Checks if the row object can act like a sequence """
+ self.con.row_factory = sqlite.Row
+ row = self.con.execute("select 1 as a, 2 as b").fetchone()
+
+ as_tuple = tuple(row)
+ self.assertEqual(list(reversed(row)), list(reversed(as_tuple)))
+ self.assertIsInstance(row, Sequence)
+
def tearDown(self):
self.con.close()
@@ -161,20 +180,20 @@ class TextFactoryTests(unittest.TestCase):
def CheckUnicode(self):
austria = unicode("Österreich", "latin1")
row = self.con.execute("select ?", (austria,)).fetchone()
- self.assertTrue(type(row[0]) == unicode, "type of row[0] must be unicode")
+ self.assertEqual(type(row[0]), unicode, "type of row[0] must be unicode")
def CheckString(self):
self.con.text_factory = str
austria = unicode("Österreich", "latin1")
row = self.con.execute("select ?", (austria,)).fetchone()
- self.assertTrue(type(row[0]) == str, "type of row[0] must be str")
- self.assertTrue(row[0] == austria.encode("utf-8"), "column must equal original data in UTF-8")
+ self.assertEqual(type(row[0]), str, "type of row[0] must be str")
+ self.assertEqual(row[0], austria.encode("utf-8"), "column must equal original data in UTF-8")
def CheckCustom(self):
self.con.text_factory = lambda x: unicode(x, "utf-8", "ignore")
austria = unicode("Österreich", "latin1")
row = self.con.execute("select ?", (austria.encode("latin1"),)).fetchone()
- self.assertTrue(type(row[0]) == unicode, "type of row[0] must be unicode")
+ self.assertEqual(type(row[0]), unicode, "type of row[0] must be unicode")
self.assertTrue(row[0].endswith(u"reich"), "column must contain original data")
def CheckOptimizedUnicode(self):
@@ -183,8 +202,8 @@ class TextFactoryTests(unittest.TestCase):
germany = unicode("Deutchland")
a_row = self.con.execute("select ?", (austria,)).fetchone()
d_row = self.con.execute("select ?", (germany,)).fetchone()
- self.assertTrue(type(a_row[0]) == unicode, "type of non-ASCII row must be unicode")
- self.assertTrue(type(d_row[0]) == str, "type of ASCII-only row must be str")
+ self.assertEqual(type(a_row[0]), unicode, "type of non-ASCII row must be unicode")
+ self.assertEqual(type(d_row[0]), str, "type of ASCII-only row must be str")
def tearDown(self):
self.con.close()
diff --git a/Lib/sqlite3/test/hooks.py b/Lib/sqlite3/test/hooks.py
index b798e74..16f217d 100644
--- a/Lib/sqlite3/test/hooks.py
+++ b/Lib/sqlite3/test/hooks.py
@@ -76,6 +76,25 @@ class CollationTests(unittest.TestCase):
except sqlite.OperationalError, e:
self.assertEqual(e.args[0].lower(), "no such collation sequence: mycoll")
+ def CheckCollationReturnsLargeInteger(self):
+ def mycoll(x, y):
+ # reverse order
+ return -((x > y) - (x < y)) * 2**32
+ con = sqlite.connect(":memory:")
+ con.create_collation("mycoll", mycoll)
+ sql = """
+ select x from (
+ select 'a' as x
+ union
+ select 'b' as x
+ union
+ select 'c' as x
+ ) order by x collate mycoll
+ """
+ result = con.execute(sql).fetchall()
+ self.assertEqual(result, [('c',), ('b',), ('a',)],
+ msg="the expected order was not returned")
+
def CheckCollationRegisterTwice(self):
"""
Register two different collation functions under the same name.
@@ -143,7 +162,7 @@ class ProgressTests(unittest.TestCase):
create table bar (a, b)
""")
second_count = len(progress_calls)
- self.assertTrue(first_count > second_count)
+ self.assertGreaterEqual(first_count, second_count)
def CheckCancelOperation(self):
"""
diff --git a/Lib/sqlite3/test/regression.py b/Lib/sqlite3/test/regression.py
index eec2fcd..5b7759c 100644
--- a/Lib/sqlite3/test/regression.py
+++ b/Lib/sqlite3/test/regression.py
@@ -1,4 +1,4 @@
-#-*- coding: ISO-8859-1 -*-
+#-*- coding: iso-8859-1 -*-
# pysqlite2/test/regression.py: pysqlite regression tests
#
# Copyright (C) 2006-2007 Gerhard Häring <gh@ghaering.de>
@@ -159,7 +159,8 @@ class RegressionTests(unittest.TestCase):
def CheckCursorConstructorCallCheck(self):
"""
- Verifies that cursor methods check wether base class __init__ was called.
+ Verifies that cursor methods check whether base class __init__ was
+ called.
"""
class Cursor(sqlite.Cursor):
def __init__(self, con):
@@ -177,7 +178,8 @@ class RegressionTests(unittest.TestCase):
def CheckConnectionConstructorCallCheck(self):
"""
- Verifies that connection methods check wether base class __init__ was called.
+ Verifies that connection methods check whether base class __init__ was
+ called.
"""
class Connection(sqlite.Connection):
def __init__(self, name):
@@ -285,6 +287,37 @@ class RegressionTests(unittest.TestCase):
cur.executemany("insert into b (baz) values (?)",
((i,) for i in foo()))
+ def CheckConvertTimestampMicrosecondPadding(self):
+ """
+ http://bugs.python.org/issue14720
+
+ The microsecond parsing of convert_timestamp() should pad with zeros,
+ since the microsecond string "456" actually represents "456000".
+ """
+
+ con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_DECLTYPES)
+ cur = con.cursor()
+ cur.execute("CREATE TABLE t (x TIMESTAMP)")
+
+ # Microseconds should be 456000
+ cur.execute("INSERT INTO t (x) VALUES ('2012-04-04 15:06:00.456')")
+
+ # Microseconds should be truncated to 123456
+ cur.execute("INSERT INTO t (x) VALUES ('2012-04-04 15:06:00.123456789')")
+
+ cur.execute("SELECT * FROM t")
+ values = [x[0] for x in cur.fetchall()]
+
+ self.assertEqual(values, [
+ datetime.datetime(2012, 4, 4, 15, 6, 0, 456000),
+ datetime.datetime(2012, 4, 4, 15, 6, 0, 123456),
+ ])
+
+ def CheckInvalidIsolationLevelType(self):
+ # isolation level is a string, not an integer
+ self.assertRaises(TypeError,
+ sqlite.connect, ":memory:", isolation_level=123)
+
def suite():
regression_suite = unittest.makeSuite(RegressionTests, "Check")
diff --git a/Lib/sqlite3/test/types.py b/Lib/sqlite3/test/types.py
index c5ab39b..400a4f2 100644
--- a/Lib/sqlite3/test/types.py
+++ b/Lib/sqlite3/test/types.py
@@ -244,7 +244,7 @@ class DeclTypesTests(unittest.TestCase):
self.assertEqual(type(value), float)
def CheckNumber2(self):
- """Checks wether converter names are cut off at '(' characters"""
+ """Checks whether converter names are cut off at '(' characters"""
self.cur.execute("insert into test(n2) values (5)")
value = self.cur.execute("select n2 from test").fetchone()[0]
# if the converter is not used, it's an int instead of a float
diff --git a/Lib/sqlite3/test/userfunctions.py b/Lib/sqlite3/test/userfunctions.py
index 2db3a61..634812d 100644
--- a/Lib/sqlite3/test/userfunctions.py
+++ b/Lib/sqlite3/test/userfunctions.py
@@ -374,14 +374,15 @@ class AggregateTests(unittest.TestCase):
val = cur.fetchone()[0]
self.assertEqual(val, 60)
-def authorizer_cb(action, arg1, arg2, dbname, source):
- if action != sqlite.SQLITE_SELECT:
- return sqlite.SQLITE_DENY
- if arg2 == 'c2' or arg1 == 't2':
- return sqlite.SQLITE_DENY
- return sqlite.SQLITE_OK
-
class AuthorizerTests(unittest.TestCase):
+ @staticmethod
+ def authorizer_cb(action, arg1, arg2, dbname, source):
+ if action != sqlite.SQLITE_SELECT:
+ return sqlite.SQLITE_DENY
+ if arg2 == 'c2' or arg1 == 't2':
+ return sqlite.SQLITE_DENY
+ return sqlite.SQLITE_OK
+
def setUp(self):
self.con = sqlite.connect(":memory:")
self.con.executescript("""
@@ -394,12 +395,12 @@ class AuthorizerTests(unittest.TestCase):
# For our security test:
self.con.execute("select c2 from t2")
- self.con.set_authorizer(authorizer_cb)
+ self.con.set_authorizer(self.authorizer_cb)
def tearDown(self):
pass
- def CheckTableAccess(self):
+ def test_table_access(self):
try:
self.con.execute("select * from t2")
except sqlite.DatabaseError, e:
@@ -408,7 +409,7 @@ class AuthorizerTests(unittest.TestCase):
return
self.fail("should have raised an exception due to missing privileges")
- def CheckColumnAccess(self):
+ def test_column_access(self):
try:
self.con.execute("select c2 from t1")
except sqlite.DatabaseError, e:
@@ -417,11 +418,46 @@ class AuthorizerTests(unittest.TestCase):
return
self.fail("should have raised an exception due to missing privileges")
+class AuthorizerRaiseExceptionTests(AuthorizerTests):
+ @staticmethod
+ def authorizer_cb(action, arg1, arg2, dbname, source):
+ if action != sqlite.SQLITE_SELECT:
+ raise ValueError
+ if arg2 == 'c2' or arg1 == 't2':
+ raise ValueError
+ return sqlite.SQLITE_OK
+
+class AuthorizerIllegalTypeTests(AuthorizerTests):
+ @staticmethod
+ def authorizer_cb(action, arg1, arg2, dbname, source):
+ if action != sqlite.SQLITE_SELECT:
+ return 0.0
+ if arg2 == 'c2' or arg1 == 't2':
+ return 0.0
+ return sqlite.SQLITE_OK
+
+class AuthorizerLargeIntegerTests(AuthorizerTests):
+ @staticmethod
+ def authorizer_cb(action, arg1, arg2, dbname, source):
+ if action != sqlite.SQLITE_SELECT:
+ return 2**32
+ if arg2 == 'c2' or arg1 == 't2':
+ return 2**32
+ return sqlite.SQLITE_OK
+
+
def suite():
function_suite = unittest.makeSuite(FunctionTests, "Check")
aggregate_suite = unittest.makeSuite(AggregateTests, "Check")
- authorizer_suite = unittest.makeSuite(AuthorizerTests, "Check")
- return unittest.TestSuite((function_suite, aggregate_suite, authorizer_suite))
+ authorizer_suite = unittest.makeSuite(AuthorizerTests)
+ return unittest.TestSuite((
+ function_suite,
+ aggregate_suite,
+ authorizer_suite,
+ unittest.makeSuite(AuthorizerRaiseExceptionTests),
+ unittest.makeSuite(AuthorizerIllegalTypeTests),
+ unittest.makeSuite(AuthorizerLargeIntegerTests),
+ ))
def test():
runner = unittest.TextTestRunner()
diff --git a/Lib/sre_compile.py b/Lib/sre_compile.py
index 2a0c745..471753e 100644
--- a/Lib/sre_compile.py
+++ b/Lib/sre_compile.py
@@ -276,10 +276,10 @@ def _mk_bitmap(bits):
# set is constructed. Then, this bitmap is sliced into chunks of 256
# characters, duplicate chunks are eliminated, and each chunk is
# given a number. In the compiled expression, the charset is
-# represented by a 16-bit word sequence, consisting of one word for
-# the number of different chunks, a sequence of 256 bytes (128 words)
+# represented by a 32-bit word sequence, consisting of one word for
+# the number of different chunks, a sequence of 256 bytes (64 words)
# of chunk numbers indexed by their original chunk position, and a
-# sequence of chunks (16 words each).
+# sequence of 256-bit chunks (8 words each).
# Compression is normally good: in a typical charset, large ranges of
# Unicode will be either completely excluded (e.g. if only cyrillic
@@ -294,7 +294,7 @@ def _mk_bitmap(bits):
# In UCS-4 mode, the BIGCHARSET opcode still supports only subsets
# of the basic multilingual plane; an efficient representation
-# for all of UTF-16 has not yet been developed. This means,
+# for all of Unicode has not yet been developed. This means,
# in particular, that negated charsets cannot be represented as
# bigcharsets.
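The corrected comment assumes 32-bit SRE code words: the 256 one-byte chunk indices then occupy 256 / 4 = 64 words, and each 256-bit (32-byte) chunk bitmap occupies 8 words; the old comment still described a 16-bit layout. A quick arithmetic check (CODESIZE is expected to be 4 on builds that include the wider code words):

    import _sre
    print(_sre.CODESIZE)                 # 4 on builds with 32-bit code words
    print(256 // _sre.CODESIZE)          # 64 words for the 256 chunk-index bytes
    print((256 // 8) // _sre.CODESIZE)   # 8 words for one 256-bit chunk bitmap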
@@ -343,7 +343,7 @@ def _optimize_unicode(charset, fixup):
else:
code = 'I'
# Convert block indices to byte array of 256 bytes
- mapping = array.array('b', mapping).tostring()
+ mapping = array.array('B', mapping).tostring()
# Convert byte array to word array
mapping = array.array(code, mapping)
assert mapping.itemsize == _sre.CODESIZE
@@ -354,8 +354,6 @@ def _optimize_unicode(charset, fixup):
def _simple(av):
# check if av is a "simple" operator
lo, hi = av[2].getwidth()
- if lo == 0 and hi == MAXREPEAT:
- raise error, "nothing to repeat"
return lo == hi == 1 and av[2][0][0] != SUBPATTERN
def _compile_info(code, pattern, flags):
diff --git a/Lib/sre_constants.py b/Lib/sre_constants.py
index 1863f48..69224e2 100644
--- a/Lib/sre_constants.py
+++ b/Lib/sre_constants.py
@@ -15,9 +15,11 @@
MAGIC = 20031017
-# max code word in this release
-
-MAXREPEAT = 65535
+try:
+ from _sre import MAXREPEAT
+except ImportError:
+ import _sre
+ MAXREPEAT = _sre.MAXREPEAT = 65535
# SRE standard exception (access as sre.error)
# should this really be here?
diff --git a/Lib/sre_parse.py b/Lib/sre_parse.py
index 182d1eb..0a361ab 100644
--- a/Lib/sre_parse.py
+++ b/Lib/sre_parse.py
@@ -141,12 +141,12 @@ class SubPattern:
# determine the width (min, max) for this subpattern
if self.width:
return self.width
- lo = hi = 0L
+ lo = hi = 0
UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
for op, av in self.data:
if op is BRANCH:
- i = sys.maxint
+ i = MAXREPEAT - 1
j = 0
for av in av[1]:
l, h = av.getwidth()
@@ -164,14 +164,14 @@ class SubPattern:
hi = hi + j
elif op in REPEATCODES:
i, j = av[2].getwidth()
- lo = lo + long(i) * av[0]
- hi = hi + long(j) * av[1]
+ lo = lo + i * av[0]
+ hi = hi + j * av[1]
elif op in UNITCODES:
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
- self.width = int(min(lo, sys.maxint)), int(min(hi, sys.maxint))
+ self.width = min(lo, MAXREPEAT - 1), min(hi, MAXREPEAT)
return self.width
class Tokenizer:
@@ -228,7 +228,7 @@ def _class_escape(source, escape):
if code:
return code
code = CATEGORIES.get(escape)
- if code:
+ if code and code[0] == IN:
return code
try:
c = escape[1:2]
@@ -498,10 +498,14 @@ def _parse(source, state):
continue
if lo:
min = int(lo)
+ if min >= MAXREPEAT:
+ raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
- if max < min:
- raise error, "bad repeat interval"
+ if max >= MAXREPEAT:
+ raise OverflowError("the repetition number is too large")
+ if max < min:
+ raise error("bad repeat interval")
else:
raise error, "not supported"
# figure out which item to repeat
@@ -541,8 +545,11 @@ def _parse(source, state):
break
name = name + char
group = 1
+ if not name:
+ raise error("missing group name")
if not isname(name):
- raise error, "bad character in group name"
+ raise error("bad character in group name %r" %
+ name)
elif sourcematch("="):
# named backreference
name = ""
@@ -553,11 +560,15 @@ def _parse(source, state):
if char == ")":
break
name = name + char
+ if not name:
+ raise error("missing group name")
if not isname(name):
- raise error, "bad character in group name"
+ raise error("bad character in backref group name "
+ "%r" % name)
gid = state.groupdict.get(name)
if gid is None:
- raise error, "unknown group name"
+ msg = "unknown group name: {0!r}".format(name)
+ raise error(msg)
subpatternappend((GROUPREF, gid))
continue
else:
@@ -605,10 +616,13 @@ def _parse(source, state):
break
condname = condname + char
group = 2
+ if not condname:
+ raise error("missing group name")
if isname(condname):
condgroup = state.groupdict.get(condname)
if condgroup is None:
- raise error, "unknown group name"
+ msg = "unknown group name: {0!r}".format(condname)
+ raise error(msg)
else:
try:
condgroup = int(condname)
@@ -723,7 +737,7 @@ def parse_template(source, pattern):
break
name = name + char
if not name:
- raise error, "bad group name"
+ raise error, "missing group name"
try:
index = int(name)
if index < 0:
@@ -734,7 +748,8 @@ def parse_template(source, pattern):
try:
index = pattern.groupindex[name]
except KeyError:
- raise IndexError, "unknown group name"
+ msg = "unknown group name: {0!r}".format(name)
+ raise IndexError(msg)
a((MARK, index))
elif c == "0":
if s.next in OCTDIGITS:
diff --git a/Lib/ssl.py b/Lib/ssl.py
index 1951a62..666cea3 100644
--- a/Lib/ssl.py
+++ b/Lib/ssl.py
@@ -89,6 +89,7 @@ else:
from socket import socket, _fileobject, _delegate_methods, error as socket_error
from socket import getnameinfo as _getnameinfo
+from socket import SOL_SOCKET, SO_TYPE, SOCK_STREAM
import base64 # for DER-to-PEM translation
import errno
@@ -108,6 +109,10 @@ class SSLSocket(socket):
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True, ciphers=None):
+ # Can't use sock.type as other flags (such as SOCK_NONBLOCK) get
+ # mixed in.
+ if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM:
+ raise NotImplementedError("only stream sockets are supported")
socket.__init__(self, _sock=sock._sock)
# The initializer for socket overrides the methods send(), recv(), etc.
# in the instancce, which we don't need -- but we want to provide the
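The new guard queries the socket type through getsockopt() because sock.type can have creation flags (e.g. SOCK_NONBLOCK on Linux) OR'ed into it, whereas SO_TYPE reports the plain type. A minimal sketch:

    import socket

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # SO_TYPE is the unadorned type, which is what the patch compares
    # against SOCK_STREAM before wrapping the socket in SSL.
    print(s.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE) == socket.SOCK_STREAM)   # True
    s.close()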
@@ -313,17 +318,19 @@ class SSLSocket(socket):
self.cert_reqs, self.ssl_version,
self.ca_certs, self.ciphers)
try:
- socket.connect(self, addr)
- if self.do_handshake_on_connect:
- self.do_handshake()
- except socket_error as e:
if return_errno:
- return e.errno
+ rc = socket.connect_ex(self, addr)
else:
- self._sslobj = None
- raise e
- self._connected = True
- return 0
+ rc = None
+ socket.connect(self, addr)
+ if not rc:
+ if self.do_handshake_on_connect:
+ self.do_handshake()
+ self._connected = True
+ return rc
+ except socket_error:
+ self._sslobj = None
+ raise
def connect(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
@@ -342,17 +349,21 @@ class SSLSocket(socket):
SSL channel, and the address of the remote client."""
newsock, addr = socket.accept(self)
- return (SSLSocket(newsock,
- keyfile=self.keyfile,
- certfile=self.certfile,
- server_side=True,
- cert_reqs=self.cert_reqs,
- ssl_version=self.ssl_version,
- ca_certs=self.ca_certs,
- ciphers=self.ciphers,
- do_handshake_on_connect=self.do_handshake_on_connect,
- suppress_ragged_eofs=self.suppress_ragged_eofs),
- addr)
+ try:
+ return (SSLSocket(newsock,
+ keyfile=self.keyfile,
+ certfile=self.certfile,
+ server_side=True,
+ cert_reqs=self.cert_reqs,
+ ssl_version=self.ssl_version,
+ ca_certs=self.ca_certs,
+ ciphers=self.ciphers,
+ do_handshake_on_connect=self.do_handshake_on_connect,
+ suppress_ragged_eofs=self.suppress_ragged_eofs),
+ addr)
+ except socket_error as e:
+ newsock.close()
+ raise e
def makefile(self, mode='r', bufsize=-1):
diff --git a/Lib/string.py b/Lib/string.py
index 9c0ebe1..9727803 100644
--- a/Lib/string.py
+++ b/Lib/string.py
@@ -601,12 +601,12 @@ class Formatter(object):
def convert_field(self, value, conversion):
# do any conversion on the resulting object
- if conversion == 'r':
- return repr(value)
+ if conversion is None:
+ return value
elif conversion == 's':
return str(value)
- elif conversion is None:
- return value
+ elif conversion == 'r':
+ return repr(value)
raise ValueError("Unknown conversion specifier {0!s}".format(conversion))
diff --git a/Lib/subprocess.py b/Lib/subprocess.py
index f0ef30e..ce47b5e 100644
--- a/Lib/subprocess.py
+++ b/Lib/subprocess.py
@@ -2,8 +2,6 @@
#
# For more information about this module, see PEP 324.
#
-# This module should remain compatible with Python 2.2, see PEP 291.
-#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
@@ -13,7 +11,7 @@ r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
-intends to replace several other, older modules and functions, like:
+intends to replace several older modules and functions:
os.system
os.spawn*
@@ -145,7 +143,7 @@ Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
-from the childs point of view.
+from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
@@ -482,6 +480,37 @@ def _eintr_retry_call(func, *args):
raise
+# XXX This function is only used by multiprocessing and the test suite,
+# but it's here so that it can be imported when Python is compiled without
+# threads.
+
+def _args_from_interpreter_flags():
+ """Return a list of command-line arguments reproducing the current
+ settings in sys.flags and sys.warnoptions."""
+ flag_opt_map = {
+ 'debug': 'd',
+ # 'inspect': 'i',
+ # 'interactive': 'i',
+ 'optimize': 'O',
+ 'dont_write_bytecode': 'B',
+ 'no_user_site': 's',
+ 'no_site': 'S',
+ 'ignore_environment': 'E',
+ 'verbose': 'v',
+ 'bytes_warning': 'b',
+ 'hash_randomization': 'R',
+ 'py3k_warning': '3',
+ }
+ args = []
+ for flag, opt in flag_opt_map.items():
+ v = getattr(sys.flags, flag)
+ if v > 0:
+ args.append('-' + opt * v)
+ for opt in sys.warnoptions:
+ args.append('-W' + opt)
+ return args
+
+
def call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete, then
return the returncode attribute.
@@ -616,6 +645,8 @@ def list2cmdline(seq):
class Popen(object):
+ _child_created = False # Set here since __del__ checks it
+
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
@@ -624,7 +655,6 @@ class Popen(object):
"""Create new Popen instance."""
_cleanup()
- self._child_created = False
if not isinstance(bufsize, (int, long)):
raise TypeError("bufsize must be an integer")
@@ -669,14 +699,29 @@ class Popen(object):
(p2cread, p2cwrite,
c2pread, c2pwrite,
- errread, errwrite) = self._get_handles(stdin, stdout, stderr)
+ errread, errwrite), to_close = self._get_handles(stdin, stdout, stderr)
- self._execute_child(args, executable, preexec_fn, close_fds,
- cwd, env, universal_newlines,
- startupinfo, creationflags, shell,
- p2cread, p2cwrite,
- c2pread, c2pwrite,
- errread, errwrite)
+ try:
+ self._execute_child(args, executable, preexec_fn, close_fds,
+ cwd, env, universal_newlines,
+ startupinfo, creationflags, shell, to_close,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+ except Exception:
+ # Preserve original exception in case os.close raises.
+ exc_type, exc_value, exc_trace = sys.exc_info()
+
+ for fd in to_close:
+ try:
+ if mswindows:
+ fd.Close()
+ else:
+ os.close(fd)
+ except EnvironmentError:
+ pass
+
+ raise exc_type, exc_value, exc_trace
if mswindows:
if p2cwrite is not None:
@@ -706,11 +751,11 @@ class Popen(object):
return data
- def __del__(self, _maxint=sys.maxint, _active=_active):
+ def __del__(self, _maxint=sys.maxint):
# If __init__ hasn't had a chance to execute (e.g. if it
# was passed an undeclared keyword argument), we don't
# have a _child_created attribute at all.
- if not getattr(self, '_child_created', False):
+ if not self._child_created:
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
@@ -766,8 +811,9 @@ class Popen(object):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
+ to_close = set()
if stdin is None and stdout is None and stderr is None:
- return (None, None, None, None, None, None)
+ return (None, None, None, None, None, None), to_close
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
@@ -785,6 +831,10 @@ class Popen(object):
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
+ # We just duplicated the handle, it has to be closed at the end
+ to_close.add(p2cread)
+ if stdin == PIPE:
+ to_close.add(p2cwrite)
if stdout is None:
c2pwrite = _subprocess.GetStdHandle(_subprocess.STD_OUTPUT_HANDLE)
@@ -798,6 +848,10 @@ class Popen(object):
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
+ # We just duplicated the handle, it has to be closed at the end
+ to_close.add(c2pwrite)
+ if stdout == PIPE:
+ to_close.add(c2pread)
if stderr is None:
errwrite = _subprocess.GetStdHandle(_subprocess.STD_ERROR_HANDLE)
@@ -813,10 +867,14 @@ class Popen(object):
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
+ # We just duplicated the handle, it has to be closed at the end
+ to_close.add(errwrite)
+ if stderr == PIPE:
+ to_close.add(errread)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
- errread, errwrite)
+ errread, errwrite), to_close
def _make_inheritable(self, handle):
@@ -845,7 +903,7 @@ class Popen(object):
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
- startupinfo, creationflags, shell,
+ startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
@@ -884,6 +942,10 @@ class Popen(object):
# kill children.
creationflags |= _subprocess.CREATE_NEW_CONSOLE
+ def _close_in_parent(fd):
+ fd.Close()
+ to_close.remove(fd)
+
# Start the process
try:
hp, ht, pid, tid = _subprocess.CreateProcess(executable, args,
@@ -908,11 +970,11 @@ class Popen(object):
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread is not None:
- p2cread.Close()
+ _close_in_parent(p2cread)
if c2pwrite is not None:
- c2pwrite.Close()
+ _close_in_parent(c2pwrite)
if errwrite is not None:
- errwrite.Close()
+ _close_in_parent(errwrite)
# Retain the process handle, but close the thread handle
self._child_created = True
@@ -1016,7 +1078,17 @@ class Popen(object):
def terminate(self):
"""Terminates the process
"""
- _subprocess.TerminateProcess(self._handle, 1)
+ try:
+ _subprocess.TerminateProcess(self._handle, 1)
+ except OSError as e:
+ # ERROR_ACCESS_DENIED (winerror 5) is received when the
+ # process already died.
+ if e.winerror != 5:
+ raise
+ rc = _subprocess.GetExitCodeProcess(self._handle)
+ if rc == _subprocess.STILL_ACTIVE:
+ raise
+ self.returncode = rc
kill = terminate
@@ -1028,6 +1100,7 @@ class Popen(object):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
+ to_close = set()
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
@@ -1036,6 +1109,7 @@ class Popen(object):
pass
elif stdin == PIPE:
p2cread, p2cwrite = self.pipe_cloexec()
+ to_close.update((p2cread, p2cwrite))
elif isinstance(stdin, int):
p2cread = stdin
else:
@@ -1046,6 +1120,7 @@ class Popen(object):
pass
elif stdout == PIPE:
c2pread, c2pwrite = self.pipe_cloexec()
+ to_close.update((c2pread, c2pwrite))
elif isinstance(stdout, int):
c2pwrite = stdout
else:
@@ -1056,6 +1131,7 @@ class Popen(object):
pass
elif stderr == PIPE:
errread, errwrite = self.pipe_cloexec()
+ to_close.update((errread, errwrite))
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
@@ -1066,7 +1142,7 @@ class Popen(object):
return (p2cread, p2cwrite,
c2pread, c2pwrite,
- errread, errwrite)
+ errread, errwrite), to_close
def _set_cloexec_flag(self, fd, cloexec=True):
@@ -1110,7 +1186,7 @@ class Popen(object):
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
- startupinfo, creationflags, shell,
+ startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
@@ -1129,6 +1205,10 @@ class Popen(object):
if executable is None:
executable = args[0]
+ def _close_in_parent(fd):
+ os.close(fd)
+ to_close.remove(fd)
+
# For transferring possible exec failure from child to parent
# The first char specifies the exception type: 0 means
# OSError, 1 means some other error.
@@ -1187,16 +1267,17 @@ class Popen(object):
os.close(fd)
closed.add(fd)
- # Close all other fds, if asked for
- if close_fds:
- self._close_fds(but=errpipe_write)
-
if cwd is not None:
os.chdir(cwd)
if preexec_fn:
preexec_fn()
+ # Close all other fds, if asked for - after
+ # preexec_fn(), which may open FDs.
+ if close_fds:
+ self._close_fds(but=errpipe_write)
+
if env is None:
os.execvp(executable, args)
else:
@@ -1222,17 +1303,17 @@ class Popen(object):
# be sure the FD is closed no matter what
os.close(errpipe_write)
- if p2cread is not None and p2cwrite is not None:
- os.close(p2cread)
- if c2pwrite is not None and c2pread is not None:
- os.close(c2pwrite)
- if errwrite is not None and errread is not None:
- os.close(errwrite)
-
# Wait for exec to fail or succeed; possibly raising exception
# Exception limited to 1M
data = _eintr_retry_call(os.read, errpipe_read, 1048576)
finally:
+ if p2cread is not None and p2cwrite is not None:
+ _close_in_parent(p2cread)
+ if c2pwrite is not None and c2pread is not None:
+ _close_in_parent(c2pwrite)
+ if errwrite is not None and errread is not None:
+ _close_in_parent(errwrite)
+
# be sure the FD is closed no matter what
os.close(errpipe_read)
@@ -1243,9 +1324,6 @@ class Popen(object):
if e.errno != errno.ECHILD:
raise
child_exception = pickle.loads(data)
- for fd in (p2cwrite, c2pread, errread):
- if fd is not None:
- os.close(fd)
raise child_exception
@@ -1253,7 +1331,7 @@ class Popen(object):
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
# This method is called (indirectly) by __del__, so it cannot
- # refer to anything outside of its local scope."""
+ # refer to anything outside of its local scope.
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
@@ -1264,7 +1342,7 @@ class Popen(object):
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
- _WNOHANG=os.WNOHANG, _os_error=os.error):
+ _WNOHANG=os.WNOHANG, _os_error=os.error, _ECHILD=errno.ECHILD):
"""Check if child process has terminated. Returns returncode
attribute.
@@ -1277,16 +1355,23 @@ class Popen(object):
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
- except _os_error:
+ except _os_error as e:
if _deadstate is not None:
self.returncode = _deadstate
+ if e.errno == _ECHILD:
+ # This happens if SIGCLD is set to be ignored or
+ # waiting for child processes has otherwise been
+ # disabled for our process. This child is dead, we
+ # can't get the status.
+ # http://bugs.python.org/issue15756
+ self.returncode = 0
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
- if self.returncode is None:
+ while self.returncode is None:
try:
pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
@@ -1295,8 +1380,12 @@ class Popen(object):
# This happens if SIGCLD is set to be ignored or waiting
# for child processes has otherwise been disabled for our
# process. This child is dead, we can't get the status.
+ pid = self.pid
sts = 0
- self._handle_exitstatus(sts)
+ # Check the pid and loop as waitpid has been known to return
+ # 0 even without WNOHANG in odd situations. issue14396.
+ if pid == self.pid:
+ self._handle_exitstatus(sts)
return self.returncode
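
Of the subprocess.py changes above, _args_from_interpreter_flags() is the piece
with the clearest user-visible story: it rebuilds command-line switches that
reproduce the parent's sys.flags and sys.warnoptions so helpers such as
multiprocessing can relaunch the interpreter consistently. A hedged sketch of
the same idea using only public APIs (the flag subset chosen here is ours):

    import sys
    import subprocess

    args = []
    if sys.flags.optimize:                 # -O / -OO
        args.append('-' + 'O' * sys.flags.optimize)
    if sys.flags.dont_write_bytecode:      # -B
        args.append('-B')
    args.extend('-W' + opt for opt in sys.warnoptions)

    cmd = [sys.executable] + args + ['-c', 'import sys; print(sys.flags.optimize)']
    print(subprocess.check_output(cmd))
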
diff --git a/Lib/sunau.py b/Lib/sunau.py
index a04d8c0..b53044d 100644
--- a/Lib/sunau.py
+++ b/Lib/sunau.py
@@ -203,6 +203,10 @@ class Au_read:
break
else:
self._info = ''
+ try:
+ self._data_pos = file.tell()
+ except (AttributeError, IOError):
+ self._data_pos = None
def getfp(self):
return self._file
@@ -220,7 +224,7 @@ class Au_read:
if self._data_size == AUDIO_UNKNOWN_SIZE:
return AUDIO_UNKNOWN_SIZE
if self._encoding in _simple_encodings:
- return self._data_size / self._framesize
+ return self._data_size // self._framesize
return 0 # XXX--must do some arithmetic here
def getcomptype(self):
@@ -255,7 +259,8 @@ class Au_read:
if nframes == AUDIO_UNKNOWN_SIZE:
data = self._file.read()
else:
- data = self._file.read(nframes * self._framesize * self._nchannels)
+ data = self._file.read(nframes * self._framesize)
+ self._soundpos += len(data) // self._framesize
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
import audioop
data = audioop.ulaw2lin(data, self._sampwidth)
@@ -263,8 +268,10 @@ class Au_read:
return None # XXX--not implemented yet
def rewind(self):
+ if self._data_pos is None:
+ raise IOError('cannot seek')
+ self._file.seek(self._data_pos)
self._soundpos = 0
- self._file.seek(self._hdr_size)
def tell(self):
return self._soundpos
@@ -272,7 +279,9 @@ class Au_read:
def setpos(self, pos):
if pos < 0 or pos > self.getnframes():
raise Error, 'position not in range'
- self._file.seek(pos * self._framesize + self._hdr_size)
+ if self._data_pos is None:
+ raise IOError('cannot seek')
+ self._file.seek(self._data_pos + pos * self._framesize)
self._soundpos = pos
def close(self):
@@ -382,10 +391,10 @@ class Au_write:
def writeframesraw(self, data):
self._ensure_header_written()
- nframes = len(data) / self._framesize
if self._comptype == 'ULAW':
import audioop
data = audioop.lin2ulaw(data, self._sampwidth)
+ nframes = len(data) // self._framesize
self._file.write(data)
self._nframeswritten = self._nframeswritten + nframes
self._datawritten = self._datawritten + len(data)
@@ -397,12 +406,15 @@ class Au_write:
self._patchheader()
def close(self):
- self._ensure_header_written()
- if self._nframeswritten != self._nframes or \
- self._datalength != self._datawritten:
- self._patchheader()
- self._file.flush()
- self._file = None
+ if self._file:
+ try:
+ self._ensure_header_written()
+ if self._nframeswritten != self._nframes or \
+ self._datalength != self._datawritten:
+ self._patchheader()
+ self._file.flush()
+ finally:
+ self._file = None
#
# private methods
@@ -445,6 +457,10 @@ class Au_write:
length = AUDIO_UNKNOWN_SIZE
else:
length = self._nframes * self._framesize
+ try:
+ self._form_length_pos = self._file.tell()
+ except (AttributeError, IOError):
+ self._form_length_pos = None
_write_u32(self._file, length)
self._datalength = length
_write_u32(self._file, encoding)
@@ -454,7 +470,9 @@ class Au_write:
self._file.write('\0'*(header_size - len(self._info) - 24))
def _patchheader(self):
- self._file.seek(8)
+ if self._form_length_pos is None:
+ raise IOError('cannot seek')
+ self._file.seek(self._form_length_pos)
_write_u32(self._file, self._datawritten)
self._datalength = self._datawritten
self._file.seek(0, 2)
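
The sunau.py changes above remember where the audio data starts (_data_pos) so
rewind() and setpos() work even when the stream does not begin at offset zero,
and they raise IOError('cannot seek') instead of silently mis-seeking on
unseekable files. A small round-trip sketch against the public API (the file
name and sample data are ours):

    import os
    import sunau
    import tempfile

    path = os.path.join(tempfile.gettempdir(), 'sunau_demo.au')
    w = sunau.open(path, 'wb')
    w.setnchannels(1)
    w.setsampwidth(2)
    w.setframerate(8000)
    w.writeframes(b'\x00\x01' * 100)       # 100 two-byte frames
    w.close()

    r = sunau.open(path, 'rb')
    first = r.readframes(10)               # frames 0..9
    r.setpos(5)                            # seeks relative to the data start
    assert r.readframes(5) == first[10:]   # frames 5..9 again
    r.rewind()
    assert r.tell() == 0
    r.close()
    os.remove(path)
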
diff --git a/Lib/symbol.py b/Lib/symbol.py
index cf179e5..b4d4e13 100755
--- a/Lib/symbol.py
+++ b/Lib/symbol.py
@@ -7,7 +7,7 @@
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
-# python Lib/symbol.py
+# ./python Lib/symbol.py
#--start constants--
single_input = 256
diff --git a/Lib/symtable.py b/Lib/symtable.py
index ca73f58..0ba9d1a 100644
--- a/Lib/symtable.py
+++ b/Lib/symtable.py
@@ -10,10 +10,7 @@ import weakref
__all__ = ["symtable", "SymbolTable", "Class", "Function", "Symbol"]
def symtable(code, filename, compile_type):
- raw = _symtable.symtable(code, filename, compile_type)
- for top in raw.itervalues():
- if top.name == 'top':
- break
+ top = _symtable.symtable(code, filename, compile_type)
return _newSymbolTable(top, filename)
class SymbolTableFactory:
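
The symtable.py simplification above relies on the C-level _symtable.symtable()
now returning the top-level table directly instead of a dict that had to be
searched for the entry named 'top'. The public interface is unchanged; a brief
usage sketch:

    import symtable

    top = symtable.symtable("x = 1\ndef f(y):\n    return x + y\n",
                            "<demo>", "exec")
    print(top.get_name())                              # 'top'
    print([t.get_name() for t in top.get_children()])  # ['f']
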
diff --git a/Lib/sysconfig.py b/Lib/sysconfig.py
index 6314cfe..aa69351 100644
--- a/Lib/sysconfig.py
+++ b/Lib/sysconfig.py
@@ -116,6 +116,10 @@ if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
+# set for cross builds
+if "_PYTHON_PROJECT_BASE" in os.environ:
+ # the build directory for posix builds
+ _PROJECT_BASE = os.path.normpath(os.path.abspath("."))
def is_python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)):
@@ -274,9 +278,10 @@ def _get_makefile_filename():
return os.path.join(_PROJECT_BASE, "Makefile")
return os.path.join(get_path('platstdlib'), "config", "Makefile")
-
-def _init_posix(vars):
- """Initialize the module as appropriate for POSIX systems."""
+def _generate_posix_vars():
+ """Generate the Python module containing build-time variables."""
+ import pprint
+ vars = {}
# load the installed Makefile:
makefile = _get_makefile_filename()
try:
@@ -304,6 +309,49 @@ def _init_posix(vars):
if _PYTHON_BUILD:
vars['LDSHARED'] = vars['BLDSHARED']
+ # There's a chicken-and-egg situation on OS X with regards to the
+ # _sysconfigdata module after the changes introduced by #15298:
+ # get_config_vars() is called by get_platform() as part of the
+ # `make pybuilddir.txt` target -- which is a precursor to the
+ # _sysconfigdata.py module being constructed. Unfortunately,
+ # get_config_vars() eventually calls _init_posix(), which attempts
+ # to import _sysconfigdata, which we won't have built yet. In order
+ # for _init_posix() to work, if we're on Darwin, just mock up the
+ # _sysconfigdata module manually and populate it with the build vars.
+ # This is more than sufficient for ensuring the subsequent call to
+ # get_platform() succeeds.
+ name = '_sysconfigdata'
+ if 'darwin' in sys.platform:
+ import imp
+ module = imp.new_module(name)
+ module.build_time_vars = vars
+ sys.modules[name] = module
+
+ pybuilddir = 'build/lib.%s-%s' % (get_platform(), sys.version[:3])
+ if hasattr(sys, "gettotalrefcount"):
+ pybuilddir += '-pydebug'
+ try:
+ os.makedirs(pybuilddir)
+ except OSError:
+ pass
+ destfile = os.path.join(pybuilddir, name + '.py')
+
+ with open(destfile, 'wb') as f:
+ f.write('# system configuration generated and used by'
+ ' the sysconfig module\n')
+ f.write('build_time_vars = ')
+ pprint.pprint(vars, stream=f)
+
+ # Create file used for sys.path fixup -- see Modules/getpath.c
+ with open('pybuilddir.txt', 'w') as f:
+ f.write(pybuilddir)
+
+def _init_posix(vars):
+ """Initialize the module as appropriate for POSIX systems."""
+ # _sysconfigdata is generated at build time, see _generate_posix_vars()
+ from _sysconfigdata import build_time_vars
+ vars.update(build_time_vars)
+
def _init_non_posix(vars):
"""Initialize the module as appropriate for NT"""
# set basic install directories
@@ -445,64 +493,11 @@ def get_config_vars(*args):
srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
_CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
+ # OS X platforms require special customization to handle
+ # multi-architecture, multi-os-version installers
if sys.platform == 'darwin':
- kernel_version = os.uname()[2] # Kernel version (8.4.3)
- major_version = int(kernel_version.split('.')[0])
-
- if major_version < 8:
- # On Mac OS X before 10.4, check if -arch and -isysroot
- # are in CFLAGS or LDFLAGS and remove them if they are.
- # This is needed when building extensions on a 10.3 system
- # using a universal build of python.
- for key in ('LDFLAGS', 'BASECFLAGS',
- # a number of derived variables. These need to be
- # patched up as well.
- 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
- flags = _CONFIG_VARS[key]
- flags = re.sub('-arch\s+\w+\s', ' ', flags)
- flags = re.sub('-isysroot [^ \t]*', ' ', flags)
- _CONFIG_VARS[key] = flags
- else:
- # Allow the user to override the architecture flags using
- # an environment variable.
- # NOTE: This name was introduced by Apple in OSX 10.5 and
- # is used by several scripting languages distributed with
- # that OS release.
- if 'ARCHFLAGS' in os.environ:
- arch = os.environ['ARCHFLAGS']
- for key in ('LDFLAGS', 'BASECFLAGS',
- # a number of derived variables. These need to be
- # patched up as well.
- 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
-
- flags = _CONFIG_VARS[key]
- flags = re.sub('-arch\s+\w+\s', ' ', flags)
- flags = flags + ' ' + arch
- _CONFIG_VARS[key] = flags
-
- # If we're on OSX 10.5 or later and the user tries to
- # compiles an extension using an SDK that is not present
- # on the current machine it is better to not use an SDK
- # than to fail.
- #
- # The major usecase for this is users using a Python.org
- # binary installer on OSX 10.6: that installer uses
- # the 10.4u SDK, but that SDK is not installed by default
- # when you install Xcode.
- #
- CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
- m = re.search('-isysroot\s+(\S+)', CFLAGS)
- if m is not None:
- sdk = m.group(1)
- if not os.path.exists(sdk):
- for key in ('LDFLAGS', 'BASECFLAGS',
- # a number of derived variables. These need to be
- # patched up as well.
- 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
-
- flags = _CONFIG_VARS[key]
- flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags)
- _CONFIG_VARS[key] = flags
+ import _osx_support
+ _osx_support.customize_config_vars(_CONFIG_VARS)
if args:
vals = []
@@ -560,6 +555,10 @@ def get_platform():
return 'win-ia64'
return sys.platform
+ # Set for cross builds explicitly
+ if "_PYTHON_HOST_PLATFORM" in os.environ:
+ return os.environ["_PYTHON_HOST_PLATFORM"]
+
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
@@ -600,94 +599,38 @@ def get_platform():
if m:
release = m.group()
elif osname[:6] == "darwin":
- #
- # For our purposes, we'll assume that the system version from
- # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
- # to. This makes the compatibility story a bit more sane because the
- # machine is going to compile and link as if it were
- # MACOSX_DEPLOYMENT_TARGET.
- cfgvars = get_config_vars()
- macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
-
- if 1:
- # Always calculate the release of the running machine,
- # needed to determine if we can build fat binaries or not.
-
- macrelease = macver
- # Get the system version. Reading this plist is a documented
- # way to get the system version (see the documentation for
- # the Gestalt Manager)
- try:
- f = open('/System/Library/CoreServices/SystemVersion.plist')
- except IOError:
- # We're on a plain darwin box, fall back to the default
- # behaviour.
- pass
- else:
- try:
- m = re.search(
- r'<key>ProductUserVisibleVersion</key>\s*' +
- r'<string>(.*?)</string>', f.read())
- if m is not None:
- macrelease = '.'.join(m.group(1).split('.')[:2])
- # else: fall back to the default behaviour
- finally:
- f.close()
-
- if not macver:
- macver = macrelease
-
- if macver:
- release = macver
- osname = "macosx"
-
- if (macrelease + '.') >= '10.4.' and \
- '-arch' in get_config_vars().get('CFLAGS', '').strip():
- # The universal build will build fat binaries, but not on
- # systems before 10.4
- #
- # Try to detect 4-way universal builds, those have machine-type
- # 'universal' instead of 'fat'.
-
- machine = 'fat'
- cflags = get_config_vars().get('CFLAGS')
-
- archs = re.findall('-arch\s+(\S+)', cflags)
- archs = tuple(sorted(set(archs)))
-
- if len(archs) == 1:
- machine = archs[0]
- elif archs == ('i386', 'ppc'):
- machine = 'fat'
- elif archs == ('i386', 'x86_64'):
- machine = 'intel'
- elif archs == ('i386', 'ppc', 'x86_64'):
- machine = 'fat3'
- elif archs == ('ppc64', 'x86_64'):
- machine = 'fat64'
- elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
- machine = 'universal'
- else:
- raise ValueError(
- "Don't know machine value for archs=%r"%(archs,))
-
- elif machine == 'i386':
- # On OSX the machine type returned by uname is always the
- # 32-bit variant, even if the executable architecture is
- # the 64-bit variant
- if sys.maxint >= 2**32:
- machine = 'x86_64'
-
- elif machine in ('PowerPC', 'Power_Macintosh'):
- # Pick a sane name for the PPC architecture.
- # See 'i386' case
- if sys.maxint >= 2**32:
- machine = 'ppc64'
- else:
- machine = 'ppc'
+ import _osx_support
+ osname, release, machine = _osx_support.get_platform_osx(
+ get_config_vars(),
+ osname, release, machine)
return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
return _PY_VERSION_SHORT
+
+
+def _print_dict(title, data):
+ for index, (key, value) in enumerate(sorted(data.items())):
+ if index == 0:
+ print '%s: ' % (title)
+ print '\t%s = "%s"' % (key, value)
+
+
+def _main():
+ """Display all information sysconfig contains."""
+ if '--generate-posix-vars' in sys.argv:
+ _generate_posix_vars()
+ return
+ print 'Platform: "%s"' % get_platform()
+ print 'Python version: "%s"' % get_python_version()
+ print 'Current installation scheme: "%s"' % _get_default_scheme()
+ print
+ _print_dict('Paths', get_paths())
+ print
+ _print_dict('Variables', get_config_vars())
+
+
+if __name__ == '__main__':
+ _main()
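
The sysconfig.py rework above splits the Makefile parsing into
_generate_posix_vars(), which writes a _sysconfigdata.py module at build time,
while _init_posix() simply imports it; get_platform() additionally honours the
_PYTHON_HOST_PLATFORM environment variable for cross builds. From user code the
module is consumed the same way as before; a brief sketch (the override value
shown is ours):

    import os
    import sysconfig

    print(sysconfig.get_platform())            # e.g. 'linux-x86_64'
    print(sysconfig.get_python_version())      # e.g. '2.7'
    print(sysconfig.get_config_var('LIBDIR'))

    # On POSIX builds the reported platform can be overridden for cross
    # compilation, per the hunk above.
    os.environ['_PYTHON_HOST_PLATFORM'] = 'linux-armv7l'
    print(sysconfig.get_platform())            # 'linux-armv7l'
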
diff --git a/Lib/tarfile.py b/Lib/tarfile.py
index bd73965..57ea877 100644
--- a/Lib/tarfile.py
+++ b/Lib/tarfile.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#-------------------------------------------------------------------
# tarfile.py
@@ -330,7 +329,7 @@ class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
- """Exception for unreadble tar archives."""
+ """Exception for unreadable tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
@@ -1509,10 +1508,11 @@ class TarFile(object):
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed, when TarFile is closed.
"""
- if len(mode) > 1 or mode not in "raw":
+ modes = {"r": "rb", "a": "r+b", "w": "wb"}
+ if mode not in modes:
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
- self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
+ self._mode = modes[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
@@ -1682,7 +1682,7 @@ class TarFile(object):
filemode = filemode or "r"
comptype = comptype or "tar"
- if filemode not in "rw":
+ if filemode not in ("r", "w"):
raise ValueError("mode must be 'r' or 'w'")
t = cls(name, filemode,
@@ -1691,7 +1691,7 @@ class TarFile(object):
t._extfileobj = False
return t
- elif mode in "aw":
+ elif mode in ("a", "w"):
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@@ -1700,7 +1700,7 @@ class TarFile(object):
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
- if len(mode) > 1 or mode not in "raw":
+ if mode not in ("r", "a", "w"):
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@@ -1709,7 +1709,7 @@ class TarFile(object):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
- if len(mode) > 1 or mode not in "rw":
+ if mode not in ("r", "w"):
raise ValueError("mode must be 'r' or 'w'")
try:
@@ -1726,7 +1726,9 @@ class TarFile(object):
gzip.GzipFile(name, mode, compresslevel, fileobj),
**kwargs)
except IOError:
- raise ReadError("not a gzip file")
+ if mode == 'r':
+ raise ReadError("not a gzip file")
+ raise
t._extfileobj = False
return t
@@ -1735,7 +1737,7 @@ class TarFile(object):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
- if len(mode) > 1 or mode not in "rw":
+ if mode not in ("r", "w"):
raise ValueError("mode must be 'r' or 'w'.")
try:
@@ -1751,7 +1753,9 @@ class TarFile(object):
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
- raise ReadError("not a bzip2 file")
+ if mode == 'r':
+ raise ReadError("not a bzip2 file")
+ raise
t._extfileobj = False
return t
@@ -1987,9 +1991,8 @@ class TarFile(object):
# Append the tar header and data to the archive.
if tarinfo.isreg():
- f = bltn_open(name, "rb")
- self.addfile(tarinfo, f)
- f.close()
+ with bltn_open(name, "rb") as f:
+ self.addfile(tarinfo, f)
elif tarinfo.isdir():
self.addfile(tarinfo)
@@ -2197,10 +2200,11 @@ class TarFile(object):
"""Make a file called targetpath.
"""
source = self.extractfile(tarinfo)
- target = bltn_open(targetpath, "wb")
- copyfileobj(source, target)
- source.close()
- target.close()
+ try:
+ with bltn_open(targetpath, "wb") as target:
+ copyfileobj(source, target)
+ finally:
+ source.close()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
@@ -2397,7 +2401,7 @@ class TarFile(object):
"""
if tarinfo.issym():
# Always search the entire archive.
- linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
+ linkname = "/".join(filter(None, (os.path.dirname(tarinfo.name), tarinfo.linkname)))
limit = None
else:
# Search the archive before the link, because a hard link is
@@ -2462,16 +2466,18 @@ class TarIter:
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
- if not self.tarfile._loaded:
+
+ if self.index == 0 and self.tarfile.firstmember is not None:
+ tarinfo = self.tarfile.next()
+ elif self.index < len(self.tarfile.members):
+ tarinfo = self.tarfile.members[self.index]
+ elif not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
- try:
- tarinfo = self.tarfile.members[self.index]
- except IndexError:
- raise StopIteration
+ raise StopIteration
self.index += 1
return tarinfo
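
Among the tarfile.py changes above, the mode handling is the most visible:
modes are validated against explicit tuples instead of substring checks, and
gzopen()/bz2open() translate failures into ReadError only when opening for
reading, letting write-side errors propagate unchanged. A short sketch of the
read-side behaviour (the throwaway file name is ours):

    import tarfile

    with open('not-a-tarball.bin', 'wb') as f:
        f.write(b'definitely not gzip data')

    try:
        tarfile.open('not-a-tarball.bin', 'r:gz')
    except tarfile.ReadError as e:
        print(e)          # "not a gzip file"
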
diff --git a/Lib/telnetlib.py b/Lib/telnetlib.py
index bae4ae7..88aa482 100644
--- a/Lib/telnetlib.py
+++ b/Lib/telnetlib.py
@@ -34,6 +34,7 @@ To do:
# Imported modules
+import errno
import sys
import socket
import select
@@ -205,6 +206,7 @@ class Telnet:
self.sb = 0 # flag for SB and SE sequence.
self.sbdataq = ''
self.option_callback = None
+ self._has_poll = hasattr(select, 'poll')
if host is not None:
self.open(host, port, timeout)
@@ -287,6 +289,63 @@ class Telnet:
is closed and no cooked data is available.
"""
+ if self._has_poll:
+ return self._read_until_with_poll(match, timeout)
+ else:
+ return self._read_until_with_select(match, timeout)
+
+ def _read_until_with_poll(self, match, timeout):
+ """Read until a given string is encountered or until timeout.
+
+ This method uses select.poll() to implement the timeout.
+ """
+ n = len(match)
+ call_timeout = timeout
+ if timeout is not None:
+ from time import time
+ time_start = time()
+ self.process_rawq()
+ i = self.cookedq.find(match)
+ if i < 0:
+ poller = select.poll()
+ poll_in_or_priority_flags = select.POLLIN | select.POLLPRI
+ poller.register(self, poll_in_or_priority_flags)
+ while i < 0 and not self.eof:
+ try:
+ # Poll takes its timeout in milliseconds.
+ ready = poller.poll(None if timeout is None
+ else 1000 * call_timeout)
+ except select.error as e:
+ if e.errno == errno.EINTR:
+ if timeout is not None:
+ elapsed = time() - time_start
+ call_timeout = timeout-elapsed
+ continue
+ raise
+ for fd, mode in ready:
+ if mode & poll_in_or_priority_flags:
+ i = max(0, len(self.cookedq)-n)
+ self.fill_rawq()
+ self.process_rawq()
+ i = self.cookedq.find(match, i)
+ if timeout is not None:
+ elapsed = time() - time_start
+ if elapsed >= timeout:
+ break
+ call_timeout = timeout-elapsed
+ poller.unregister(self)
+ if i >= 0:
+ i = i + n
+ buf = self.cookedq[:i]
+ self.cookedq = self.cookedq[i:]
+ return buf
+ return self.read_very_lazy()
+
+ def _read_until_with_select(self, match, timeout=None):
+ """Read until a given string is encountered or until timeout.
+
+ The timeout is implemented using select.select().
+ """
n = len(match)
self.process_rawq()
i = self.cookedq.find(match)
@@ -589,6 +648,80 @@ class Telnet:
results are non-deterministic and may depend on the I/O timing.
"""
+ if self._has_poll:
+ return self._expect_with_poll(list, timeout)
+ else:
+ return self._expect_with_select(list, timeout)
+
+ def _expect_with_poll(self, expect_list, timeout=None):
+ """Read until one of a list of regular expressions matches.
+
+ This method uses select.poll() to implement the timeout.
+ """
+ re = None
+ expect_list = expect_list[:]
+ indices = range(len(expect_list))
+ for i in indices:
+ if not hasattr(expect_list[i], "search"):
+ if not re: import re
+ expect_list[i] = re.compile(expect_list[i])
+ call_timeout = timeout
+ if timeout is not None:
+ from time import time
+ time_start = time()
+ self.process_rawq()
+ m = None
+ for i in indices:
+ m = expect_list[i].search(self.cookedq)
+ if m:
+ e = m.end()
+ text = self.cookedq[:e]
+ self.cookedq = self.cookedq[e:]
+ break
+ if not m:
+ poller = select.poll()
+ poll_in_or_priority_flags = select.POLLIN | select.POLLPRI
+ poller.register(self, poll_in_or_priority_flags)
+ while not m and not self.eof:
+ try:
+ ready = poller.poll(None if timeout is None
+ else 1000 * call_timeout)
+ except select.error as e:
+ if e.errno == errno.EINTR:
+ if timeout is not None:
+ elapsed = time() - time_start
+ call_timeout = timeout-elapsed
+ continue
+ raise
+ for fd, mode in ready:
+ if mode & poll_in_or_priority_flags:
+ self.fill_rawq()
+ self.process_rawq()
+ for i in indices:
+ m = expect_list[i].search(self.cookedq)
+ if m:
+ e = m.end()
+ text = self.cookedq[:e]
+ self.cookedq = self.cookedq[e:]
+ break
+ if timeout is not None:
+ elapsed = time() - time_start
+ if elapsed >= timeout:
+ break
+ call_timeout = timeout-elapsed
+ poller.unregister(self)
+ if m:
+ return (i, m, text)
+ text = self.read_very_lazy()
+ if not text and self.eof:
+ raise EOFError
+ return (-1, None, text)
+
+ def _expect_with_select(self, list, timeout=None):
+ """Read until one of a list of regular expressions matches.
+
+ The timeout is implemented using select.select().
+ """
re = None
list = list[:]
indices = range(len(list))
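
The telnetlib.py additions above switch read_until() and expect() to
select.poll() when the platform provides it, converting the timeout to
milliseconds and re-arming the wait with the remaining time after an EINTR.
A standalone sketch of that poll-with-deadline pattern on POSIX (the helper
name is ours; select.poll() is unavailable on Windows):

    import errno
    import os
    import select
    import time

    def wait_readable(fd, timeout):
        poller = select.poll()
        poller.register(fd, select.POLLIN | select.POLLPRI)
        deadline = None if timeout is None else time.time() + timeout
        while True:
            remaining = (None if deadline is None
                         else max(0, deadline - time.time()))
            try:
                # poll() takes milliseconds (or None to block forever).
                ready = poller.poll(None if remaining is None
                                    else 1000 * remaining)
            except select.error as e:
                if e.args[0] == errno.EINTR:
                    continue           # interrupted by a signal: retry
                raise
            return bool(ready)

    r, w = os.pipe()
    os.write(w, b'x')
    print(wait_readable(r, 1.0))       # True: data is already waiting
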
diff --git a/Lib/tempfile.py b/Lib/tempfile.py
index 2023280..c3246e5 100644
--- a/Lib/tempfile.py
+++ b/Lib/tempfile.py
@@ -29,6 +29,7 @@ __all__ = [
# Imports.
+import io as _io
import os as _os
import errno as _errno
from random import Random as _Random
@@ -193,15 +194,18 @@ def _get_default_tempdir():
name = namer.next()
filename = _os.path.join(dir, name)
try:
- fd = _os.open(filename, flags, 0600)
- fp = _os.fdopen(fd, 'w')
- fp.write('blat')
- fp.close()
- _os.unlink(filename)
- del fp, fd
+ fd = _os.open(filename, flags, 0o600)
+ try:
+ try:
+ with _io.open(fd, 'wb', closefd=False) as fp:
+ fp.write(b'blat')
+ finally:
+ _os.close(fd)
+ finally:
+ _os.unlink(filename)
return dir
- except (OSError, IOError), e:
- if e[0] != _errno.EEXIST:
+ except (OSError, IOError) as e:
+ if e.args[0] != _errno.EEXIST:
break # no point trying more names in this directory
pass
raise IOError, (_errno.ENOENT,
@@ -238,6 +242,10 @@ def _mkstemp_inner(dir, pre, suf, flags):
except OSError, e:
if e.errno == _errno.EEXIST:
continue # try again
+ if _os.name == 'nt' and e.errno == _errno.EACCES:
+ # On Windows, when a directory with the chosen name already
+ # exists, the EACCES error code is returned instead of EEXIST.
+ continue
raise
raise IOError, (_errno.EEXIST, "No usable temporary file name found")
@@ -452,8 +460,12 @@ def NamedTemporaryFile(mode='w+b', bufsize=-1, suffix="",
flags |= _os.O_TEMPORARY
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
- file = _os.fdopen(fd, mode, bufsize)
- return _TemporaryFileWrapper(file, name, delete)
+ try:
+ file = _os.fdopen(fd, mode, bufsize)
+ return _TemporaryFileWrapper(file, name, delete)
+ except:
+ _os.close(fd)
+ raise
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
# On non-POSIX and Cygwin systems, assume that we cannot unlink a file
@@ -546,10 +558,6 @@ class SpooledTemporaryFile:
def closed(self):
return self._file.closed
- @property
- def encoding(self):
- return self._file.encoding
-
def fileno(self):
self.rollover()
return self._file.fileno()
@@ -562,15 +570,17 @@ class SpooledTemporaryFile:
@property
def mode(self):
- return self._file.mode
+ try:
+ return self._file.mode
+ except AttributeError:
+ return self._TemporaryFileArgs[0]
@property
def name(self):
- return self._file.name
-
- @property
- def newlines(self):
- return self._file.newlines
+ try:
+ return self._file.name
+ except AttributeError:
+ return None
def next(self):
return self._file.next
@@ -610,4 +620,7 @@ class SpooledTemporaryFile:
return rv
def xreadlines(self, *args):
- return self._file.xreadlines(*args)
+ if hasattr(self._file, 'xreadlines'): # real file
+ return iter(self._file)
+ else: # StringIO()
+ return iter(self._file.readlines(*args))
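
The SpooledTemporaryFile changes above make .mode and .name degrade gracefully
while the data still lives in an in-memory StringIO: .mode falls back to the
saved constructor argument and .name to None, and both reflect the real file
once the data is rolled over to disk. A quick illustration:

    import tempfile

    f = tempfile.SpooledTemporaryFile(max_size=10, mode='w+b')
    print(f.name)            # None -- still backed by an in-memory StringIO
    print(f.mode)            # 'w+b', recovered from the saved constructor args
    f.write(b'x' * 100)      # exceeds max_size, so the data spills to disk
    print(f.mode)            # now the real file object's mode
    f.close()
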
diff --git a/Lib/test/audiodata/pluck-pcm16.aiff b/Lib/test/audiodata/pluck-pcm16.aiff
new file mode 100644
index 0000000..6c8c40d
--- /dev/null
+++ b/Lib/test/audiodata/pluck-pcm16.aiff
Binary files differ
diff --git a/Lib/test/audiodata/pluck-pcm16.au b/Lib/test/audiodata/pluck-pcm16.au
new file mode 100644
index 0000000..398f07f
--- /dev/null
+++ b/Lib/test/audiodata/pluck-pcm16.au
Binary files differ
diff --git a/Lib/test/audiodata/pluck-pcm16.wav b/Lib/test/audiodata/pluck-pcm16.wav
new file mode 100644
index 0000000..cb8627d
--- /dev/null
+++ b/Lib/test/audiodata/pluck-pcm16.wav
Binary files differ
diff --git a/Lib/test/audiodata/pluck-pcm24.aiff b/Lib/test/audiodata/pluck-pcm24.aiff
new file mode 100644
index 0000000..8eba145
--- /dev/null
+++ b/Lib/test/audiodata/pluck-pcm24.aiff
Binary files differ
diff --git a/Lib/test/audiodata/pluck-pcm24.wav b/Lib/test/audiodata/pluck-pcm24.wav
new file mode 100644
index 0000000..60d92c3
--- /dev/null
+++ b/Lib/test/audiodata/pluck-pcm24.wav
Binary files differ
diff --git a/Lib/test/audiodata/pluck-pcm32.aiff b/Lib/test/audiodata/pluck-pcm32.aiff
new file mode 100644
index 0000000..46ac037
--- /dev/null
+++ b/Lib/test/audiodata/pluck-pcm32.aiff
Binary files differ
diff --git a/Lib/test/audiodata/pluck-pcm32.au b/Lib/test/audiodata/pluck-pcm32.au
new file mode 100644
index 0000000..92ee596
--- /dev/null
+++ b/Lib/test/audiodata/pluck-pcm32.au
Binary files differ
diff --git a/Lib/test/audiodata/pluck-pcm32.wav b/Lib/test/audiodata/pluck-pcm32.wav
new file mode 100644
index 0000000..846628b
--- /dev/null
+++ b/Lib/test/audiodata/pluck-pcm32.wav
Binary files differ
diff --git a/Lib/test/audiodata/pluck-pcm8.aiff b/Lib/test/audiodata/pluck-pcm8.aiff
new file mode 100644
index 0000000..5de4f3b
--- /dev/null
+++ b/Lib/test/audiodata/pluck-pcm8.aiff
Binary files differ
diff --git a/Lib/test/audiodata/pluck-pcm8.au b/Lib/test/audiodata/pluck-pcm8.au
new file mode 100644
index 0000000..b7172c8
--- /dev/null
+++ b/Lib/test/audiodata/pluck-pcm8.au
Binary files differ
diff --git a/Lib/test/audiodata/pluck-pcm8.wav b/Lib/test/audiodata/pluck-pcm8.wav
new file mode 100644
index 0000000..bb28cb8
--- /dev/null
+++ b/Lib/test/audiodata/pluck-pcm8.wav
Binary files differ
diff --git a/Lib/test/audiodata/pluck-ulaw.aifc b/Lib/test/audiodata/pluck-ulaw.aifc
new file mode 100644
index 0000000..3085cf0
--- /dev/null
+++ b/Lib/test/audiodata/pluck-ulaw.aifc
Binary files differ
diff --git a/Lib/test/audiodata/pluck-ulaw.au b/Lib/test/audiodata/pluck-ulaw.au
new file mode 100644
index 0000000..1110353
--- /dev/null
+++ b/Lib/test/audiodata/pluck-ulaw.au
Binary files differ
diff --git a/Lib/test/audiotests.py b/Lib/test/audiotests.py
new file mode 100644
index 0000000..f4abd2a
--- /dev/null
+++ b/Lib/test/audiotests.py
@@ -0,0 +1,283 @@
+from test.test_support import findfile, TESTFN, unlink
+import unittest
+import array
+import io
+import pickle
+import sys
+import base64
+
+class UnseekableIO(file):
+ def tell(self):
+ raise io.UnsupportedOperation
+
+ def seek(self, *args, **kwargs):
+ raise io.UnsupportedOperation
+
+def fromhex(s):
+ return base64.b16decode(s.replace(' ', ''))
+
+def byteswap2(data):
+ a = array.array('h')
+ a.fromstring(data)
+ a.byteswap()
+ return a.tostring()
+
+def byteswap3(data):
+ ba = bytearray(data)
+ ba[::3] = data[2::3]
+ ba[2::3] = data[::3]
+ return bytes(ba)
+
+def byteswap4(data):
+ a = array.array('i')
+ a.fromstring(data)
+ a.byteswap()
+ return a.tostring()
+
+
+class AudioTests:
+ close_fd = False
+
+ def setUp(self):
+ self.f = self.fout = None
+
+ def tearDown(self):
+ if self.f is not None:
+ self.f.close()
+ if self.fout is not None:
+ self.fout.close()
+ unlink(TESTFN)
+
+ def check_params(self, f, nchannels, sampwidth, framerate, nframes,
+ comptype, compname):
+ self.assertEqual(f.getnchannels(), nchannels)
+ self.assertEqual(f.getsampwidth(), sampwidth)
+ self.assertEqual(f.getframerate(), framerate)
+ self.assertEqual(f.getnframes(), nframes)
+ self.assertEqual(f.getcomptype(), comptype)
+ self.assertEqual(f.getcompname(), compname)
+
+ params = f.getparams()
+ self.assertEqual(params,
+ (nchannels, sampwidth, framerate, nframes, comptype, compname))
+
+ dump = pickle.dumps(params)
+ self.assertEqual(pickle.loads(dump), params)
+
+
+class AudioWriteTests(AudioTests):
+
+ def create_file(self, testfile):
+ f = self.fout = self.module.open(testfile, 'wb')
+ f.setnchannels(self.nchannels)
+ f.setsampwidth(self.sampwidth)
+ f.setframerate(self.framerate)
+ f.setcomptype(self.comptype, self.compname)
+ return f
+
+ def check_file(self, testfile, nframes, frames):
+ f = self.module.open(testfile, 'rb')
+ try:
+ self.assertEqual(f.getnchannels(), self.nchannels)
+ self.assertEqual(f.getsampwidth(), self.sampwidth)
+ self.assertEqual(f.getframerate(), self.framerate)
+ self.assertEqual(f.getnframes(), nframes)
+ self.assertEqual(f.readframes(nframes), frames)
+ finally:
+ f.close()
+
+ def test_write_params(self):
+ f = self.create_file(TESTFN)
+ f.setnframes(self.nframes)
+ f.writeframes(self.frames)
+ self.check_params(f, self.nchannels, self.sampwidth, self.framerate,
+ self.nframes, self.comptype, self.compname)
+ f.close()
+
+ def test_write(self):
+ f = self.create_file(TESTFN)
+ f.setnframes(self.nframes)
+ f.writeframes(self.frames)
+ f.close()
+
+ self.check_file(TESTFN, self.nframes, self.frames)
+
+ def test_incompleted_write(self):
+ with open(TESTFN, 'wb') as testfile:
+ testfile.write(b'ababagalamaga')
+ f = self.create_file(testfile)
+ f.setnframes(self.nframes + 1)
+ f.writeframes(self.frames)
+ f.close()
+
+ with open(TESTFN, 'rb') as testfile:
+ self.assertEqual(testfile.read(13), b'ababagalamaga')
+ self.check_file(testfile, self.nframes, self.frames)
+
+ def test_multiple_writes(self):
+ with open(TESTFN, 'wb') as testfile:
+ testfile.write(b'ababagalamaga')
+ f = self.create_file(testfile)
+ f.setnframes(self.nframes)
+ framesize = self.nchannels * self.sampwidth
+ f.writeframes(self.frames[:-framesize])
+ f.writeframes(self.frames[-framesize:])
+ f.close()
+
+ with open(TESTFN, 'rb') as testfile:
+ self.assertEqual(testfile.read(13), b'ababagalamaga')
+ self.check_file(testfile, self.nframes, self.frames)
+
+ def test_overflowed_write(self):
+ with open(TESTFN, 'wb') as testfile:
+ testfile.write(b'ababagalamaga')
+ f = self.create_file(testfile)
+ f.setnframes(self.nframes - 1)
+ f.writeframes(self.frames)
+ f.close()
+
+ with open(TESTFN, 'rb') as testfile:
+ self.assertEqual(testfile.read(13), b'ababagalamaga')
+ self.check_file(testfile, self.nframes, self.frames)
+
+ def test_unseekable_read(self):
+ f = self.create_file(TESTFN)
+ f.setnframes(self.nframes)
+ f.writeframes(self.frames)
+ f.close()
+
+ with UnseekableIO(TESTFN, 'rb') as testfile:
+ self.check_file(testfile, self.nframes, self.frames)
+
+ def test_unseekable_write(self):
+ with UnseekableIO(TESTFN, 'wb') as testfile:
+ f = self.create_file(testfile)
+ f.setnframes(self.nframes)
+ f.writeframes(self.frames)
+ f.close()
+ self.fout = None
+
+ self.check_file(TESTFN, self.nframes, self.frames)
+
+ def test_unseekable_incompleted_write(self):
+ with UnseekableIO(TESTFN, 'wb') as testfile:
+ testfile.write(b'ababagalamaga')
+ f = self.create_file(testfile)
+ f.setnframes(self.nframes + 1)
+ try:
+ f.writeframes(self.frames)
+ except IOError:
+ pass
+ try:
+ f.close()
+ except IOError:
+ pass
+
+ with open(TESTFN, 'rb') as testfile:
+ self.assertEqual(testfile.read(13), b'ababagalamaga')
+ self.check_file(testfile, self.nframes + 1, self.frames)
+
+ def test_unseekable_overflowed_write(self):
+ with UnseekableIO(TESTFN, 'wb') as testfile:
+ testfile.write(b'ababagalamaga')
+ f = self.create_file(testfile)
+ f.setnframes(self.nframes - 1)
+ try:
+ f.writeframes(self.frames)
+ except IOError:
+ pass
+ try:
+ f.close()
+ except IOError:
+ pass
+
+ with open(TESTFN, 'rb') as testfile:
+ self.assertEqual(testfile.read(13), b'ababagalamaga')
+ framesize = self.nchannels * self.sampwidth
+ self.check_file(testfile, self.nframes - 1, self.frames[:-framesize])
+
+
+class AudioTestsWithSourceFile(AudioTests):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.sndfilepath = findfile(cls.sndfilename, subdir='audiodata')
+
+ def test_read_params(self):
+ f = self.f = self.module.open(self.sndfilepath)
+ #self.assertEqual(f.getfp().name, self.sndfilepath)
+ self.check_params(f, self.nchannels, self.sampwidth, self.framerate,
+ self.sndfilenframes, self.comptype, self.compname)
+
+ def test_close(self):
+ with open(self.sndfilepath, 'rb') as testfile:
+ f = self.f = self.module.open(testfile)
+ self.assertFalse(testfile.closed)
+ f.close()
+ self.assertEqual(testfile.closed, self.close_fd)
+ with open(TESTFN, 'wb') as testfile:
+ fout = self.fout = self.module.open(testfile, 'wb')
+ self.assertFalse(testfile.closed)
+ with self.assertRaises(self.module.Error):
+ fout.close()
+ self.assertEqual(testfile.closed, self.close_fd)
+ fout.close() # do nothing
+
+ def test_read(self):
+ framesize = self.nchannels * self.sampwidth
+ chunk1 = self.frames[:2 * framesize]
+ chunk2 = self.frames[2 * framesize: 4 * framesize]
+ f = self.f = self.module.open(self.sndfilepath)
+ self.assertEqual(f.readframes(0), b'')
+ self.assertEqual(f.tell(), 0)
+ self.assertEqual(f.readframes(2), chunk1)
+ f.rewind()
+ pos0 = f.tell()
+ self.assertEqual(pos0, 0)
+ self.assertEqual(f.readframes(2), chunk1)
+ pos2 = f.tell()
+ self.assertEqual(pos2, 2)
+ self.assertEqual(f.readframes(2), chunk2)
+ f.setpos(pos2)
+ self.assertEqual(f.readframes(2), chunk2)
+ f.setpos(pos0)
+ self.assertEqual(f.readframes(2), chunk1)
+ with self.assertRaises(self.module.Error):
+ f.setpos(-1)
+ with self.assertRaises(self.module.Error):
+ f.setpos(f.getnframes() + 1)
+
+ def test_copy(self):
+ f = self.f = self.module.open(self.sndfilepath)
+ fout = self.fout = self.module.open(TESTFN, 'wb')
+ fout.setparams(f.getparams())
+ i = 0
+ n = f.getnframes()
+ while n > 0:
+ i += 1
+ fout.writeframes(f.readframes(i))
+ n -= i
+ fout.close()
+ fout = self.fout = self.module.open(TESTFN, 'rb')
+ f.rewind()
+ self.assertEqual(f.getparams(), fout.getparams())
+ self.assertEqual(f.readframes(f.getnframes()),
+ fout.readframes(fout.getnframes()))
+
+ def test_read_not_from_start(self):
+ with open(TESTFN, 'wb') as testfile:
+ testfile.write(b'ababagalamaga')
+ with open(self.sndfilepath, 'rb') as f:
+ testfile.write(f.read())
+
+ with open(TESTFN, 'rb') as testfile:
+ self.assertEqual(testfile.read(13), b'ababagalamaga')
+ f = self.module.open(testfile, 'rb')
+ try:
+ self.assertEqual(f.getnchannels(), self.nchannels)
+ self.assertEqual(f.getsampwidth(), self.sampwidth)
+ self.assertEqual(f.getframerate(), self.framerate)
+ self.assertEqual(f.getnframes(), self.sndfilenframes)
+ self.assertEqual(f.readframes(self.nframes), self.frames)
+ finally:
+ f.close()
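
The new audiotests.py helpers above include byteswapN() functions that flip the
byte order of raw sample data so the same test vectors can be checked on both
little- and big-endian machines. A tiny standalone check of the 16-bit variant
(the helper is repeated here so the snippet runs on its own):

    import array

    def byteswap2(data):
        a = array.array('h')       # signed 16-bit samples
        a.fromstring(data)
        a.byteswap()
        return a.tostring()

    print(repr(byteswap2(b'\x01\x02\x03\x04')))   # '\x02\x01\x04\x03'
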
diff --git a/Lib/test/bad_coding3.py b/Lib/test/bad_coding3.py
new file mode 100644
index 0000000..77836d9
--- /dev/null
+++ b/Lib/test/bad_coding3.py
@@ -0,0 +1,2 @@
+# coding: string-escape
+\x70\x72\x69\x6e\x74\x20\x32\x2b\x32\x0a
diff --git a/Lib/test/crashers/buffer_mutate.py b/Lib/test/crashers/buffer_mutate.py
new file mode 100644
index 0000000..d68d7cc
--- /dev/null
+++ b/Lib/test/crashers/buffer_mutate.py
@@ -0,0 +1,30 @@
+#
+# The various methods of bufferobject.c (here buffer_subscript()) call
+# get_buf() before calling potentially more Python code (here via
+# PySlice_GetIndicesEx()). But get_buf() already returned a void*
+# pointer. This void* pointer can become invalid if the object
+# underlying the buffer is mutated (here a bytearray object).
+#
+# As usual, please keep in mind that the three "here" in the sentence
+# above are only examples. Each can be changed easily and lead to
+# another crasher.
+#
+# This crashes for me on Linux 32-bits with CPython 2.6 and 2.7
+# with a segmentation fault.
+#
+
+
+class PseudoIndex(object):
+ def __index__(self):
+ for c in "foobar"*n:
+ a.append(c)
+ return n * 4
+
+
+for n in range(1, 100000, 100):
+ a = bytearray("test"*n)
+ buf = buffer(a)
+
+ s = buf[:PseudoIndex():1]
+ #print repr(s)
+ #assert s == "test"*n
diff --git a/Lib/test/crashers/decref_before_assignment.py b/Lib/test/crashers/decref_before_assignment.py
new file mode 100644
index 0000000..b5b17fa
--- /dev/null
+++ b/Lib/test/crashers/decref_before_assignment.py
@@ -0,0 +1,44 @@
+"""
+General example for an attack against code like this:
+
+ Py_DECREF(obj->attr); obj->attr = ...;
+
+here in Module/_json.c:scanner_init().
+
+Explanation: if the first Py_DECREF() calls either a __del__ or a
+weakref callback, it will run while the 'obj' appears to have in
+'obj->attr' still the old reference to the object, but not holding
+the reference count any more.
+
+Status: progress has been made replacing these cases, but there is an
+infinite number of such cases.
+"""
+
+import _json, weakref
+
+class Ctx1(object):
+ encoding = "utf8"
+ strict = None
+ object_hook = None
+ object_pairs_hook = None
+ parse_float = None
+ parse_int = None
+ parse_constant = None
+
+class Foo(unicode):
+ pass
+
+def delete_me(*args):
+ print scanner.encoding.__dict__
+
+class Ctx2(Ctx1):
+ @property
+ def encoding(self):
+ global wref
+ f = Foo("utf8")
+ f.abc = globals()
+ wref = weakref.ref(f, delete_me)
+ return f
+
+scanner = _json.make_scanner(Ctx1())
+scanner.__init__(Ctx2())
diff --git a/Lib/test/crashers/recursive_call.py b/Lib/test/crashers/recursive_call.py
index 31c8963..31c8963 100644..100755
--- a/Lib/test/crashers/recursive_call.py
+++ b/Lib/test/crashers/recursive_call.py
diff --git a/Lib/test/curses_tests.py b/Lib/test/curses_tests.py
index 7dedbbc..7dedbbc 100644..100755
--- a/Lib/test/curses_tests.py
+++ b/Lib/test/curses_tests.py
diff --git a/Lib/test/imghdrdata/python.bmp b/Lib/test/imghdrdata/python.bmp
new file mode 100644
index 0000000..675f951
--- /dev/null
+++ b/Lib/test/imghdrdata/python.bmp
Binary files differ
diff --git a/Lib/test/imghdrdata/python.gif b/Lib/test/imghdrdata/python.gif
new file mode 100644
index 0000000..96fd9fe
--- /dev/null
+++ b/Lib/test/imghdrdata/python.gif
Binary files differ
diff --git a/Lib/test/imghdrdata/python.jpg b/Lib/test/imghdrdata/python.jpg
new file mode 100644
index 0000000..21222c0
--- /dev/null
+++ b/Lib/test/imghdrdata/python.jpg
Binary files differ
diff --git a/Lib/test/imghdrdata/python.pbm b/Lib/test/imghdrdata/python.pbm
new file mode 100644
index 0000000..1848ba7
--- /dev/null
+++ b/Lib/test/imghdrdata/python.pbm
@@ -0,0 +1,3 @@
+P4
+16 16
+ûñ¿úßÕ­±[ñ¥a_ÁX°°ðððð?ÿÿ \ No newline at end of file
diff --git a/Lib/test/imghdrdata/python.pgm b/Lib/test/imghdrdata/python.pgm
new file mode 100644
index 0000000..8349f2a
--- /dev/null
+++ b/Lib/test/imghdrdata/python.pgm
Binary files differ
diff --git a/Lib/test/imghdrdata/python.png b/Lib/test/imghdrdata/python.png
new file mode 100644
index 0000000..1a987f7
--- /dev/null
+++ b/Lib/test/imghdrdata/python.png
Binary files differ
diff --git a/Lib/test/imghdrdata/python.ppm b/Lib/test/imghdrdata/python.ppm
new file mode 100644
index 0000000..7d9cdb3
--- /dev/null
+++ b/Lib/test/imghdrdata/python.ppm
Binary files differ
diff --git a/Lib/test/imghdrdata/python.ras b/Lib/test/imghdrdata/python.ras
new file mode 100644
index 0000000..130e96f
--- /dev/null
+++ b/Lib/test/imghdrdata/python.ras
Binary files differ
diff --git a/Lib/test/imghdrdata/python.sgi b/Lib/test/imghdrdata/python.sgi
new file mode 100644
index 0000000..ffe9081
--- /dev/null
+++ b/Lib/test/imghdrdata/python.sgi
Binary files differ
diff --git a/Lib/test/imghdrdata/python.tiff b/Lib/test/imghdrdata/python.tiff
new file mode 100644
index 0000000..39d0bfc
--- /dev/null
+++ b/Lib/test/imghdrdata/python.tiff
Binary files differ
diff --git a/Lib/test/imghdrdata/python.xbm b/Lib/test/imghdrdata/python.xbm
new file mode 100644
index 0000000..cfbee2e
--- /dev/null
+++ b/Lib/test/imghdrdata/python.xbm
@@ -0,0 +1,6 @@
+#define python_width 16
+#define python_height 16
+static char python_bits[] = {
+ 0xDF, 0xFE, 0x8F, 0xFD, 0x5F, 0xFB, 0xAB, 0xFE, 0xB5, 0x8D, 0xDA, 0x8F,
+ 0xA5, 0x86, 0xFA, 0x83, 0x1A, 0x80, 0x0D, 0x80, 0x0D, 0x80, 0x0F, 0xE0,
+ 0x0F, 0xF8, 0x0F, 0xF8, 0x0F, 0xFC, 0xFF, 0xFF, };
diff --git a/Lib/test/inspect_fodder.py b/Lib/test/inspect_fodder.py
index afde2e2..5c87ae6 100644
--- a/Lib/test/inspect_fodder.py
+++ b/Lib/test/inspect_fodder.py
@@ -49,6 +49,8 @@ class StupidGit:
class MalodorousPervert(StupidGit):
pass
+Tit = MalodorousPervert
+
class ParrotDroppings:
pass
diff --git a/Lib/test/keycert.pem b/Lib/test/keycert.pem
index 2f46fcf..64318aa 100644
--- a/Lib/test/keycert.pem
+++ b/Lib/test/keycert.pem
@@ -1,32 +1,31 @@
------BEGIN RSA PRIVATE KEY-----
-MIICXwIBAAKBgQC8ddrhm+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9L
-opdJhTvbGfEj0DQs1IE8M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVH
-fhi/VwovESJlaBOp+WMnfhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQAB
-AoGBAK0FZpaKj6WnJZN0RqhhK+ggtBWwBnc0U/ozgKz2j1s3fsShYeiGtW6CK5nU
-D1dZ5wzhbGThI7LiOXDvRucc9n7vUgi0alqPQ/PFodPxAN/eEYkmXQ7W2k7zwsDA
-IUK0KUhktQbLu8qF/m8qM86ba9y9/9YkXuQbZ3COl5ahTZrhAkEA301P08RKv3KM
-oXnGU2UHTuJ1MAD2hOrPxjD4/wxA/39EWG9bZczbJyggB4RHu0I3NOSFjAm3HQm0
-ANOu5QK9owJBANgOeLfNNcF4pp+UikRFqxk5hULqRAWzVxVrWe85FlPm0VVmHbb/
-loif7mqjU8o1jTd/LM7RD9f2usZyE2psaw8CQQCNLhkpX3KO5kKJmS9N7JMZSc4j
-oog58yeYO8BBqKKzpug0LXuQultYv2K4veaIO04iL9VLe5z9S/Q1jaCHBBuXAkEA
-z8gjGoi1AOp6PBBLZNsncCvcV/0aC+1se4HxTNo2+duKSDnbq+ljqOM+E7odU+Nq
-ewvIWOG//e8fssd0mq3HywJBAJ8l/c8GVmrpFTx8r/nZ2Pyyjt3dH1widooDXYSV
-q6Gbf41Llo5sYAtmxdndTLASuHKecacTgZVhy0FryZpLKrU=
------END RSA PRIVATE KEY-----
+-----BEGIN PRIVATE KEY-----
+MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANtb0+YrKuxevGpm
+LrjaUhZSgz6zFAmuGFmKmUbdjmfv9zSmmdsQIksK++jK0Be9LeZy20j6ahOfuVa0
+ufEmPoP7Fy4hXegKZR9cCWcIe/A6H2xWF1IIJLRTLaU8ol/I7T+um5HD5AwAwNPP
+USNU0Eegmvp+xxWu3NX2m1Veot85AgMBAAECgYA3ZdZ673X0oexFlq7AAmrutkHt
+CL7LvwrpOiaBjhyTxTeSNWzvtQBkIU8DOI0bIazA4UreAFffwtvEuPmonDb3F+Iq
+SMAu42XcGyVZEl+gHlTPU9XRX7nTOXVt+MlRRRxL6t9GkGfUAXI3XxJDXW3c0vBK
+UL9xqD8cORXOfE06rQJBAP8mEX1ERkR64Ptsoe4281vjTlNfIbs7NMPkUnrn9N/Y
+BLhjNIfQ3HFZG8BTMLfX7kCS9D593DW5tV4Z9BP/c6cCQQDcFzCcVArNh2JSywOQ
+ZfTfRbJg/Z5Lt9Fkngv1meeGNPgIMLN8Sg679pAOOWmzdMO3V706rNPzSVMME7E5
+oPIfAkEA8pDddarP5tCvTTgUpmTFbakm0KoTZm2+FzHcnA4jRh+XNTjTOv98Y6Ik
+eO5d1ZnKXseWvkZncQgxfdnMqqpj5wJAcNq/RVne1DbYlwWchT2Si65MYmmJ8t+F
+0mcsULqjOnEMwf5e+ptq5LzwbyrHZYq5FNk7ocufPv/ZQrcSSC+cFwJBAKvOJByS
+x56qyGeZLOQlWS2JS3KJo59XuLFGqcbgN9Om9xFa41Yb4N9NvplFivsvZdw3m1Q/
+SPIXQuT8RMPDVNQ=
+-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
-MIICpzCCAhCgAwIBAgIJAP+qStv1cIGNMA0GCSqGSIb3DQEBBQUAMIGJMQswCQYD
-VQQGEwJVUzERMA8GA1UECBMIRGVsYXdhcmUxEzARBgNVBAcTCldpbG1pbmd0b24x
-IzAhBgNVBAoTGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0aW9uMQwwCgYDVQQLEwNT
-U0wxHzAdBgNVBAMTFnNvbWVtYWNoaW5lLnB5dGhvbi5vcmcwHhcNMDcwODI3MTY1
-NDUwWhcNMTMwMjE2MTY1NDUwWjCBiTELMAkGA1UEBhMCVVMxETAPBgNVBAgTCERl
-bGF3YXJlMRMwEQYDVQQHEwpXaWxtaW5ndG9uMSMwIQYDVQQKExpQeXRob24gU29m
-dHdhcmUgRm91bmRhdGlvbjEMMAoGA1UECxMDU1NMMR8wHQYDVQQDExZzb21lbWFj
-aGluZS5weXRob24ub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC8ddrh
-m+LutBvjYcQlnH21PPIseJ1JVG2HMmN2CmZk2YukO+9LopdJhTvbGfEj0DQs1IE8
-M+kTUyOmuKfVrFMKwtVeCJphrAnhoz7TYOuLBSqt7lVHfhi/VwovESJlaBOp+WMn
-fhcduPEYHYx/6cnVapIkZnLt30zu2um+DzA9jQIDAQABoxUwEzARBglghkgBhvhC
-AQEEBAMCBkAwDQYJKoZIhvcNAQEFBQADgYEAF4Q5BVqmCOLv1n8je/Jw9K669VXb
-08hyGzQhkemEBYQd6fzQ9A/1ZzHkJKb1P6yreOLSEh4KcxYPyrLRC1ll8nr5OlCx
-CMhKkTnR6qBsdNV0XtdU2+N25hqW+Ma4ZeqsN/iiJVCGNOZGnvQuvCAGWF8+J/f/
-iHkC6gGdBJhogs4=
+MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV
+BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u
+IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw
+MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH
+Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k
+YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw
+gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7
+6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt
+pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw
+FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd
+BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G
+lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1
+CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX
-----END CERTIFICATE-----
diff --git a/Lib/test/leakers/test_ctypes.py b/Lib/test/leakers/test_ctypes.py
index 0f9a2cd..7d7e9ff 100644
--- a/Lib/test/leakers/test_ctypes.py
+++ b/Lib/test/leakers/test_ctypes.py
@@ -1,6 +1,5 @@
# Taken from Lib/ctypes/test/test_keeprefs.py, PointerToStructure.test().
-# When this leak is fixed, remember to remove from Misc/build.sh LEAKY_TESTS.
from ctypes import Structure, c_int, POINTER
import gc
diff --git a/Lib/test/mp_fork_bomb.py b/Lib/test/mp_fork_bomb.py
new file mode 100644
index 0000000..72cea25
--- /dev/null
+++ b/Lib/test/mp_fork_bomb.py
@@ -0,0 +1,16 @@
+import multiprocessing
+
+def foo(conn):
+ conn.send("123")
+
+# Because "if __name__ == '__main__'" is missing, this will not work
+# correctly on Windows. However, we should get a RuntimeError rather
+# than the Windows equivalent of a fork bomb.
+
+r, w = multiprocessing.Pipe(False)
+p = multiprocessing.Process(target=foo, args=(w,))
+p.start()
+w.close()
+print(r.recv())
+r.close()
+p.join()
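
For reference, a minimal sketch (not part of the patch) of the guarded form
that the comment above alludes to; on Windows the child process re-imports
the module, so the spawning code has to sit under the __name__ guard:

    import multiprocessing

    def foo(conn):
        conn.send("123")

    if __name__ == '__main__':
        # Only the parent process executes this block; the re-importing
        # child on Windows skips it, so no new processes are spawned.
        r, w = multiprocessing.Pipe(False)
        p = multiprocessing.Process(target=foo, args=(w,))
        p.start()
        w.close()
        print(r.recv())
        r.close()
        p.join()
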
diff --git a/Lib/test/nullbytecert.pem b/Lib/test/nullbytecert.pem
new file mode 100644
index 0000000..447186c
--- /dev/null
+++ b/Lib/test/nullbytecert.pem
@@ -0,0 +1,90 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 0 (0x0)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=Oregon, L=Beaverton, O=Python Software Foundation, OU=Python Core Development, CN=null.python.org\x00example.org/emailAddress=python-dev@python.org
+ Validity
+ Not Before: Aug 7 13:11:52 2013 GMT
+ Not After : Aug 7 13:12:52 2013 GMT
+ Subject: C=US, ST=Oregon, L=Beaverton, O=Python Software Foundation, OU=Python Core Development, CN=null.python.org\x00example.org/emailAddress=python-dev@python.org
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:b5:ea:ed:c9:fb:46:7d:6f:3b:76:80:dd:3a:f3:
+ 03:94:0b:a7:a6:db:ec:1d:df:ff:23:74:08:9d:97:
+ 16:3f:a3:a4:7b:3e:1b:0e:96:59:25:03:a7:26:e2:
+ 88:a9:cf:79:cd:f7:04:56:b0:ab:79:32:6e:59:c1:
+ 32:30:54:eb:58:a8:cb:91:f0:42:a5:64:27:cb:d4:
+ 56:31:88:52:ad:cf:bd:7f:f0:06:64:1f:cc:27:b8:
+ a3:8b:8c:f3:d8:29:1f:25:0b:f5:46:06:1b:ca:02:
+ 45:ad:7b:76:0a:9c:bf:bb:b9:ae:0d:16:ab:60:75:
+ ae:06:3e:9c:7c:31:dc:92:2f:29:1a:e0:4b:0c:91:
+ 90:6c:e9:37:c5:90:d7:2a:d7:97:15:a3:80:8f:5d:
+ 7b:49:8f:54:30:d4:97:2c:1c:5b:37:b5:ab:69:30:
+ 68:43:d3:33:78:4b:02:60:f5:3c:44:80:a1:8f:e7:
+ f0:0f:d1:5e:87:9e:46:cf:62:fc:f9:bf:0c:65:12:
+ f1:93:c8:35:79:3f:c8:ec:ec:47:f5:ef:be:44:d5:
+ ae:82:1e:2d:9a:9f:98:5a:67:65:e1:74:70:7c:cb:
+ d3:c2:ce:0e:45:49:27:dc:e3:2d:d4:fb:48:0e:2f:
+ 9e:77:b8:14:46:c0:c4:36:ca:02:ae:6a:91:8c:da:
+ 2f:85
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints: critical
+ CA:FALSE
+ X509v3 Subject Key Identifier:
+ 88:5A:55:C0:52:FF:61:CD:52:A3:35:0F:EA:5A:9C:24:38:22:F7:5C
+ X509v3 Key Usage:
+ Digital Signature, Non Repudiation, Key Encipherment
+ X509v3 Subject Alternative Name:
+ *************************************************************
+ WARNING: The values for DNS, email and URI are WRONG. OpenSSL
+ doesn't print the text after a NULL byte.
+ *************************************************************
+ DNS:altnull.python.org, email:null@python.org, URI:http://null.python.org, IP Address:192.0.2.1, IP Address:2001:DB8:0:0:0:0:0:1
+ Signature Algorithm: sha1WithRSAEncryption
+ ac:4f:45:ef:7d:49:a8:21:70:8e:88:59:3e:d4:36:42:70:f5:
+ a3:bd:8b:d7:a8:d0:58:f6:31:4a:b1:a4:a6:dd:6f:d9:e8:44:
+ 3c:b6:0a:71:d6:7f:b1:08:61:9d:60:ce:75:cf:77:0c:d2:37:
+ 86:02:8d:5e:5d:f9:0f:71:b4:16:a8:c1:3d:23:1c:f1:11:b3:
+ 56:6e:ca:d0:8d:34:94:e6:87:2a:99:f2:ae:ae:cc:c2:e8:86:
+ de:08:a8:7f:c5:05:fa:6f:81:a7:82:e6:d0:53:9d:34:f4:ac:
+ 3e:40:fe:89:57:7a:29:a4:91:7e:0b:c6:51:31:e5:10:2f:a4:
+ 60:76:cd:95:51:1a:be:8b:a1:b0:fd:ad:52:bd:d7:1b:87:60:
+ d2:31:c7:17:c4:18:4f:2d:08:25:a3:a7:4f:b7:92:ca:e2:f5:
+ 25:f1:54:75:81:9d:b3:3d:61:a2:f7:da:ed:e1:c6:6f:2c:60:
+ 1f:d8:6f:c5:92:05:ab:c9:09:62:49:a9:14:ad:55:11:cc:d6:
+ 4a:19:94:99:97:37:1d:81:5f:8b:cf:a3:a8:96:44:51:08:3d:
+ 0b:05:65:12:eb:b6:70:80:88:48:72:4f:c6:c2:da:cf:cd:8e:
+ 5b:ba:97:2f:60:b4:96:56:49:5e:3a:43:76:63:04:be:2a:f6:
+ c1:ca:a9:94
+-----BEGIN CERTIFICATE-----
+MIIE2DCCA8CgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBxTELMAkGA1UEBhMCVVMx
+DzANBgNVBAgMBk9yZWdvbjESMBAGA1UEBwwJQmVhdmVydG9uMSMwIQYDVQQKDBpQ
+eXRob24gU29mdHdhcmUgRm91bmRhdGlvbjEgMB4GA1UECwwXUHl0aG9uIENvcmUg
+RGV2ZWxvcG1lbnQxJDAiBgNVBAMMG251bGwucHl0aG9uLm9yZwBleGFtcGxlLm9y
+ZzEkMCIGCSqGSIb3DQEJARYVcHl0aG9uLWRldkBweXRob24ub3JnMB4XDTEzMDgw
+NzEzMTE1MloXDTEzMDgwNzEzMTI1MlowgcUxCzAJBgNVBAYTAlVTMQ8wDQYDVQQI
+DAZPcmVnb24xEjAQBgNVBAcMCUJlYXZlcnRvbjEjMCEGA1UECgwaUHl0aG9uIFNv
+ZnR3YXJlIEZvdW5kYXRpb24xIDAeBgNVBAsMF1B5dGhvbiBDb3JlIERldmVsb3Bt
+ZW50MSQwIgYDVQQDDBtudWxsLnB5dGhvbi5vcmcAZXhhbXBsZS5vcmcxJDAiBgkq
+hkiG9w0BCQEWFXB5dGhvbi1kZXZAcHl0aG9uLm9yZzCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBALXq7cn7Rn1vO3aA3TrzA5QLp6bb7B3f/yN0CJ2XFj+j
+pHs+Gw6WWSUDpybiiKnPec33BFawq3kyblnBMjBU61ioy5HwQqVkJ8vUVjGIUq3P
+vX/wBmQfzCe4o4uM89gpHyUL9UYGG8oCRa17dgqcv7u5rg0Wq2B1rgY+nHwx3JIv
+KRrgSwyRkGzpN8WQ1yrXlxWjgI9de0mPVDDUlywcWze1q2kwaEPTM3hLAmD1PESA
+oY/n8A/RXoeeRs9i/Pm/DGUS8ZPINXk/yOzsR/XvvkTVroIeLZqfmFpnZeF0cHzL
+08LODkVJJ9zjLdT7SA4vnne4FEbAxDbKAq5qkYzaL4UCAwEAAaOB0DCBzTAMBgNV
+HRMBAf8EAjAAMB0GA1UdDgQWBBSIWlXAUv9hzVKjNQ/qWpwkOCL3XDALBgNVHQ8E
+BAMCBeAwgZAGA1UdEQSBiDCBhYIeYWx0bnVsbC5weXRob24ub3JnAGV4YW1wbGUu
+Y29tgSBudWxsQHB5dGhvbi5vcmcAdXNlckBleGFtcGxlLm9yZ4YpaHR0cDovL251
+bGwucHl0aG9uLm9yZwBodHRwOi8vZXhhbXBsZS5vcmeHBMAAAgGHECABDbgAAAAA
+AAAAAAAAAAEwDQYJKoZIhvcNAQEFBQADggEBAKxPRe99SaghcI6IWT7UNkJw9aO9
+i9eo0Fj2MUqxpKbdb9noRDy2CnHWf7EIYZ1gznXPdwzSN4YCjV5d+Q9xtBaowT0j
+HPERs1ZuytCNNJTmhyqZ8q6uzMLoht4IqH/FBfpvgaeC5tBTnTT0rD5A/olXeimk
+kX4LxlEx5RAvpGB2zZVRGr6LobD9rVK91xuHYNIxxxfEGE8tCCWjp0+3ksri9SXx
+VHWBnbM9YaL32u3hxm8sYB/Yb8WSBavJCWJJqRStVRHM1koZlJmXNx2BX4vPo6iW
+RFEIPQsFZRLrtnCAiEhyT8bC2s/Njlu6ly9gtJZWSV46Q3ZjBL4q9sHKqZQ=
+-----END CERTIFICATE-----
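
For reference, a small illustration (not part of the patch) of why the NUL
byte embedded in this certificate's names matters; it assumes a naive,
NUL-terminated comparison such as a C string API would perform:

    # Hypothetical example; the CN below is the one used in nullbytecert.pem.
    cn = u"null.python.org\x00example.org"
    print(repr(cn.split(u"\x00")[0]))   # u'null.python.org' -- what a C API sees
    print(cn == u"null.python.org")     # False -- the full value does not match
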
diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py
index c321f8e..1599893 100644
--- a/Lib/test/pickletester.py
+++ b/Lib/test/pickletester.py
@@ -6,7 +6,14 @@ import cStringIO
import pickletools
import copy_reg
-from test.test_support import TestFailed, have_unicode, TESTFN
+from test.test_support import TestFailed, verbose, have_unicode, TESTFN
+try:
+ from test.test_support import _2G, _1M, precisionbigmemtest
+except ImportError:
+ # this import might fail when run on older Python versions by test_xpickle
+ _2G = _1M = 0
+ def precisionbigmemtest(*args, **kwargs):
+ return lambda self: None
# Tests that try a number of pickle protocols should have a
# for proto in protocols:
@@ -502,10 +509,10 @@ class AbstractPickleTests(unittest.TestCase):
i = C()
i.attr = i
for proto in protocols:
- s = self.dumps(i, 2)
+ s = self.dumps(i, proto)
x = self.loads(s)
self.assertEqual(dir(x), dir(i))
- self.assertTrue(x.attr is x)
+ self.assertIs(x.attr, x)
def test_recursive_multi(self):
l = []
@@ -531,6 +538,8 @@ class AbstractPickleTests(unittest.TestCase):
"'abc\"", # open quote and close quote don't match
"'abc' ?", # junk after close quote
"'\\'", # trailing backslash
+ "'", # issue #17710
+ "' ", # issue #17710
# some tests of the quoting rules
#"'abc\"\''",
#"'\\\\a\'\'\'\\\'\\\\\''",
@@ -1143,30 +1152,34 @@ class AbstractPersistentPicklerTests(unittest.TestCase):
if isinstance(object, int) and object % 2 == 0:
self.id_count += 1
return str(object)
+ elif object == "test_false_value":
+ self.false_count += 1
+ return ""
else:
return None
def persistent_load(self, oid):
- self.load_count += 1
- object = int(oid)
- assert object % 2 == 0
- return object
+ if not oid:
+ self.load_false_count += 1
+ return "test_false_value"
+ else:
+ self.load_count += 1
+ object = int(oid)
+ assert object % 2 == 0
+ return object
def test_persistence(self):
- self.id_count = 0
- self.load_count = 0
- L = range(10)
- self.assertEqual(self.loads(self.dumps(L)), L)
- self.assertEqual(self.id_count, 5)
- self.assertEqual(self.load_count, 5)
-
- def test_bin_persistence(self):
- self.id_count = 0
- self.load_count = 0
- L = range(10)
- self.assertEqual(self.loads(self.dumps(L, 1)), L)
- self.assertEqual(self.id_count, 5)
- self.assertEqual(self.load_count, 5)
+ L = range(10) + ["test_false_value"]
+ for proto in protocols:
+ self.id_count = 0
+ self.false_count = 0
+ self.load_false_count = 0
+ self.load_count = 0
+ self.assertEqual(self.loads(self.dumps(L, proto)), L)
+ self.assertEqual(self.id_count, 5)
+ self.assertEqual(self.false_count, 1)
+ self.assertEqual(self.load_count, 5)
+ self.assertEqual(self.load_false_count, 1)
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
@@ -1280,3 +1293,31 @@ class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
f.write(pickled2)
f.seek(0)
self.assertEqual(unpickler.load(), data2)
+
+class BigmemPickleTests(unittest.TestCase):
+
+ # Memory requirements: 1 byte per character for input strings, 1 byte
+ # for pickled data, 1 byte for unpickled strings, 1 byte for internal
+ # buffer and 1 byte of free space for resizing of internal buffer.
+
+ @precisionbigmemtest(size=_2G + 100*_1M, memuse=5)
+ def test_huge_strlist(self, size):
+ chunksize = 2**20
+ data = []
+ while size > chunksize:
+ data.append('x' * chunksize)
+ size -= chunksize
+ chunksize += 1
+ data.append('y' * size)
+
+ try:
+ for proto in protocols:
+ try:
+ pickled = self.dumps(data, proto)
+ res = self.loads(pickled)
+ self.assertEqual(res, data)
+ finally:
+ res = None
+ pickled = None
+ finally:
+ data = None
diff --git a/Lib/test/pydoc_mod.py b/Lib/test/pydoc_mod.py
index 9c53324..aa93a33 100644
--- a/Lib/test/pydoc_mod.py
+++ b/Lib/test/pydoc_mod.py
@@ -15,6 +15,16 @@ class B(object):
NO_MEANING = "eggs"
pass
+class C(object):
+ def say_no(self):
+ return "no"
+ def get_answer(self):
+ """ Return say_no() """
+ return self.say_no()
+ def is_it_true(self):
+ """ Return self.get_answer() """
+ return self.get_answer()
+
def doc_func():
"""
This function solves all of the world's problems:
diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py
index 5044ce4..21709f7 100755
--- a/Lib/test/regrtest.py
+++ b/Lib/test/regrtest.py
@@ -32,7 +32,7 @@ Verbosity
Selecting tests
--r/--random -- randomize test execution order (see below)
+-r/--randomize -- randomize test execution order (see below)
--randseed -- pass a random seed to reproduce a previous random run
-f/--fromfile -- read names of tests to run from a file (see below)
-x/--exclude -- arguments are tests to *exclude*
@@ -158,6 +158,7 @@ import json
import os
import random
import re
+import shutil
import sys
import time
import traceback
@@ -258,7 +259,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvqxsSrf:lu:t:TD:NLR:FwWM:j:',
['help', 'verbose', 'verbose2', 'verbose3', 'quiet',
- 'exclude', 'single', 'slow', 'random', 'fromfile', 'findleaks',
+ 'exclude', 'single', 'slow', 'randomize', 'fromfile=', 'findleaks',
'use=', 'threshold=', 'trace', 'coverdir=', 'nocoverdir',
'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=',
'multiprocess=', 'slaveargs=', 'forever', 'header'])
@@ -540,6 +541,8 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
print stdout
if stderr:
print >>sys.stderr, stderr
+ sys.stdout.flush()
+ sys.stderr.flush()
if result[0] == INTERRUPTED:
assert result[1] == 'KeyboardInterrupt'
raise KeyboardInterrupt # What else?
@@ -758,7 +761,9 @@ class saved_test_environment:
# the corresponding method names.
resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
- 'os.environ', 'sys.path', 'asyncore.socket_map')
+ 'os.environ', 'sys.path', 'asyncore.socket_map',
+ 'test_support.TESTFN',
+ )
def get_sys_argv(self):
return id(sys.argv), sys.argv, sys.argv[:]
@@ -809,6 +814,21 @@ class saved_test_environment:
asyncore.close_all(ignore_all=True)
asyncore.socket_map.update(saved_map)
+ def get_test_support_TESTFN(self):
+ if os.path.isfile(test_support.TESTFN):
+ result = 'f'
+ elif os.path.isdir(test_support.TESTFN):
+ result = 'd'
+ else:
+ result = None
+ return result
+ def restore_test_support_TESTFN(self, saved_value):
+ if saved_value is None:
+ if os.path.isfile(test_support.TESTFN):
+ os.unlink(test_support.TESTFN)
+ elif os.path.isdir(test_support.TESTFN):
+ shutil.rmtree(test_support.TESTFN)
+
def resource_info(self):
for name in self.resources:
method_suffix = name.replace('.', '_')
@@ -924,7 +944,6 @@ def runtest_inner(test, verbose, quiet, huntrleaks=False):
return FAILED, test_time
def cleanup_test_droppings(testname, verbose):
- import shutil
import stat
import gc
diff --git a/Lib/test/sample_doctest_no_docstrings.py b/Lib/test/sample_doctest_no_docstrings.py
new file mode 100644
index 0000000..e4201ed
--- /dev/null
+++ b/Lib/test/sample_doctest_no_docstrings.py
@@ -0,0 +1,12 @@
+# This is a sample module used for testing doctest.
+#
+# This module is for testing how doctest handles a module with no
+# docstrings.
+
+
+class Foo(object):
+
+ # A class with no docstring.
+
+ def __init__(self):
+ pass
diff --git a/Lib/test/sample_doctest_no_doctests.py b/Lib/test/sample_doctest_no_doctests.py
new file mode 100644
index 0000000..7daa572
--- /dev/null
+++ b/Lib/test/sample_doctest_no_doctests.py
@@ -0,0 +1,15 @@
+"""This is a sample module used for testing doctest.
+
+This module is for testing how doctest handles a module with docstrings
+but no doctest examples.
+
+"""
+
+
+class Foo(object):
+ """A docstring with no doctest examples.
+
+ """
+
+ def __init__(self):
+ pass
diff --git a/Lib/test/script_helper.py b/Lib/test/script_helper.py
index 337854a..7f7c70e 100644
--- a/Lib/test/script_helper.py
+++ b/Lib/test/script_helper.py
@@ -10,7 +10,13 @@ import subprocess
import py_compile
import contextlib
import shutil
-import zipfile
+try:
+ import zipfile
+except ImportError:
+ # If Python is built without Unicode support, importing _io will
+ # fail, which, in turn, means that zipfile cannot be imported.
+ # Most of this module can then still be used.
+ pass
from test.test_support import strip_python_stderr
diff --git a/Lib/test/sha256.pem b/Lib/test/sha256.pem
index d488e6d..9475576 100644
--- a/Lib/test/sha256.pem
+++ b/Lib/test/sha256.pem
@@ -1,129 +1,128 @@
# Certificate chain for https://sha256.tbs-internet.com
- 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=sha-256 production/CN=sha256.tbs-internet.com
- i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC
+ 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=Certificats TBS X509/CN=ecom.tbs-x509.com
+ i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business
-----BEGIN CERTIFICATE-----
-MIIGXTCCBUWgAwIBAgIRAMmag+ygSAdxZsbyzYjhuW0wDQYJKoZIhvcNAQELBQAw
-gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl
-bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u
-ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv
-cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg
-Q0EgU0dDMB4XDTEwMDIxODAwMDAwMFoXDTEyMDIxOTIzNTk1OVowgcsxCzAJBgNV
-BAYTAkZSMQ4wDAYDVQQREwUxNDAwMDERMA8GA1UECBMIQ2FsdmFkb3MxDTALBgNV
-BAcTBENBRU4xGzAZBgNVBAkTEjIyIHJ1ZSBkZSBCcmV0YWduZTEVMBMGA1UEChMM
-VEJTIElOVEVSTkVUMRcwFQYDVQQLEw4wMDAyIDQ0MDQ0MzgxMDEbMBkGA1UECxMS
-c2hhLTI1NiBwcm9kdWN0aW9uMSAwHgYDVQQDExdzaGEyNTYudGJzLWludGVybmV0
-LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbuM8VT7f0nntwu
-N3F7v9KIBlhKNAxqCrziOXU5iqUt8HrQB3DtHbdmII+CpVUlwlmepsx6G+srEZ9a
-MIGAy0nxi5aLb7watkyIdPjJTMvTUBQ/+RPWzt5JtYbbY9BlJ+yci0dctP74f4NU
-ISLtlrEjUbf2gTohLrcE01TfmOF6PDEbB5PKDi38cB3NzKfizWfrOaJW6Q1C1qOJ
-y4/4jkUREX1UFUIxzx7v62VfjXSGlcjGpBX1fvtABQOSLeE0a6gciDZs1REqroFf
-5eXtqYphpTa14Z83ITXMfgg5Nze1VtMnzI9Qx4blYBw4dgQVEuIsYr7FDBOITDzc
-VEVXZx0CAwEAAaOCAj8wggI7MB8GA1UdIwQYMBaAFAdEdoWTKLx/bXjSCuv6TEvf
-2YIfMB0GA1UdDgQWBBSJKI/AYVI9RQNY0QPIqc8ej2QivTAOBgNVHQ8BAf8EBAMC
-BaAwDAYDVR0TAQH/BAIwADA0BgNVHSUELTArBggrBgEFBQcDAQYIKwYBBQUHAwIG
-CisGAQQBgjcKAwMGCWCGSAGG+EIEATBMBgNVHSAERTBDMEEGCysGAQQBgOU3AgQB
-MDIwMAYIKwYBBQUHAgEWJGh0dHBzOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0Ev
-Q1BTNDBtBgNVHR8EZjBkMDKgMKAuhixodHRwOi8vY3JsLnRicy1pbnRlcm5ldC5j
-b20vVEJTWDUwOUNBU0dDLmNybDAuoCygKoYoaHR0cDovL2NybC50YnMteDUwOS5j
-b20vVEJTWDUwOUNBU0dDLmNybDCBpgYIKwYBBQUHAQEEgZkwgZYwOAYIKwYBBQUH
-MAKGLGh0dHA6Ly9jcnQudGJzLWludGVybmV0LmNvbS9UQlNYNTA5Q0FTR0MuY3J0
-MDQGCCsGAQUFBzAChihodHRwOi8vY3J0LnRicy14NTA5LmNvbS9UQlNYNTA5Q0FT
-R0MuY3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wPwYD
-VR0RBDgwNoIXc2hhMjU2LnRicy1pbnRlcm5ldC5jb22CG3d3dy5zaGEyNTYudGJz
-LWludGVybmV0LmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAA5NL0D4QSqhErhlkdPmz
-XtiMvdGL+ZehM4coTRIpasM/Agt36Rc0NzCvnQwKE+wkngg1Gy2qe7Q0E/ziqBtB
-fZYzdVgu1zdiL4kTaf+wFKYAFGsFbyeEmXysy+CMwaNoF2vpSjCU1UD56bEnTX/W
-fxVZYxtBQUpnu2wOsm8cDZuZRv9XrYgAhGj9Tt6F0aVHSDGn59uwShG1+BVF/uju
-SCyPTTjL1oc7YElJUzR/x4mQJYvtQI8gDIDAGEOs7v3R/gKa5EMfbUQUI4C84UbI
-Yz09Jdnws/MkC/Hm1BZEqk89u7Hvfv+oHqEb0XaUo0TDfsxE0M1sMdnLb91QNQBm
-UQ==
+MIIGTjCCBTagAwIBAgIQOh3d9dNDPq1cSdJmEiMpqDANBgkqhkiG9w0BAQUFADCB
+yTELMAkGA1UEBhMCRlIxETAPBgNVBAgTCENhbHZhZG9zMQ0wCwYDVQQHEwRDYWVu
+MRUwEwYDVQQKEwxUQlMgSU5URVJORVQxSDBGBgNVBAsTP1Rlcm1zIGFuZCBDb25k
+aXRpb25zOiBodHRwOi8vd3d3LnRicy1pbnRlcm5ldC5jb20vQ0EvcmVwb3NpdG9y
+eTEYMBYGA1UECxMPVEJTIElOVEVSTkVUIENBMR0wGwYDVQQDExRUQlMgWDUwOSBD
+QSBidXNpbmVzczAeFw0xMTAxMjUwMDAwMDBaFw0xMzAyMDUyMzU5NTlaMIHHMQsw
+CQYDVQQGEwJGUjEOMAwGA1UEERMFMTQwMDAxETAPBgNVBAgTCENhbHZhZG9zMQ0w
+CwYDVQQHEwRDQUVOMRswGQYDVQQJExIyMiBydWUgZGUgQnJldGFnbmUxFTATBgNV
+BAoTDFRCUyBJTlRFUk5FVDEXMBUGA1UECxMOMDAwMiA0NDA0NDM4MTAxHTAbBgNV
+BAsTFENlcnRpZmljYXRzIFRCUyBYNTA5MRowGAYDVQQDExFlY29tLnRicy14NTA5
+LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKRrlHUnJ++1lpcg
+jtYco7cdmRe+EEfTmwPfCdfV3G1QfsTSvY6FfMpm/83pqHfT+4ANwr18wD9ZrAEN
+G16mf9VdCGK12+TP7DmqeZyGIqlFFoahQnmb8EarvE43/1UeQ2CV9XmzwZvpqeli
+LfXsFonawrY3H6ZnMwS64St61Z+9gdyuZ/RbsoZBbT5KUjDEG844QRU4OT1IGeEI
+eY5NM5RNIh6ZNhVtqeeCxMS7afONkHQrOco73RdSTRck/Hj96Ofl3MHNHryr+AMK
+DGFk1kLCZGpPdXtkxXvaDeQoiYDlil26CWc+YK6xyDPMdsWvoG14ZLyCpzMXA7/7
+4YAQRH0CAwEAAaOCAjAwggIsMB8GA1UdIwQYMBaAFBoJBMz5CY+7HqDO1KQUf0vV
+I1jNMB0GA1UdDgQWBBQgOU8HsWzbmD4WZP5Wtdw7jca2WDAOBgNVHQ8BAf8EBAMC
+BaAwDAYDVR0TAQH/BAIwADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIw
+TAYDVR0gBEUwQzBBBgsrBgEEAYDlNwIBATAyMDAGCCsGAQUFBwIBFiRodHRwczov
+L3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL0NQUzEwdwYDVR0fBHAwbjA3oDWgM4Yx
+aHR0cDovL2NybC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNy
+bDAzoDGgL4YtaHR0cDovL2NybC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5l
+c3MuY3JsMIGwBggrBgEFBQcBAQSBozCBoDA9BggrBgEFBQcwAoYxaHR0cDovL2Ny
+dC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQWJ1c2luZXNzLmNydDA5BggrBgEF
+BQcwAoYtaHR0cDovL2NydC50YnMteDUwOS5jb20vVEJTWDUwOUNBYnVzaW5lc3Mu
+Y3J0MCQGCCsGAQUFBzABhhhodHRwOi8vb2NzcC50YnMteDUwOS5jb20wMwYDVR0R
+BCwwKoIRZWNvbS50YnMteDUwOS5jb22CFXd3dy5lY29tLnRicy14NTA5LmNvbTAN
+BgkqhkiG9w0BAQUFAAOCAQEArT4NHfbY87bGAw8lPV4DmHlmuDuVp/y7ltO3Ynse
+3Rz8RxW2AzuO0Oy2F0Cu4yWKtMyEyMXyHqWtae7ElRbdTu5w5GwVBLJHClCzC8S9
+SpgMMQTx3Rgn8vjkHuU9VZQlulZyiPK7yunjc7c310S9FRZ7XxOwf8Nnx4WnB+No
+WrfApzhhQl31w+RyrNxZe58hCfDDHmevRvwLjQ785ZoQXJDj2j3qAD4aI2yB8lB5
+oaE1jlCJzC7Kmz/Y9jzfmv/zAs1LQTm9ktevv4BTUFaGjv9jxnQ1xnS862ZiouLW
+zZYIlYPf4F6JjXGiIQgQRglILUfq3ftJd9/ok9W9ZF8h8w==
-----END CERTIFICATE-----
- 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC
+ 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA business
i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root
-----BEGIN CERTIFICATE-----
-MIIFVjCCBD6gAwIBAgIQXpDZ0ETJMV02WTx3GTnhhTANBgkqhkiG9w0BAQUFADBv
+MIIFPzCCBCegAwIBAgIQDlBz/++iRSmLDeVRHT/hADANBgkqhkiG9w0BAQUFADBv
MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk
ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF
-eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDYyNDE5MDYzMFow
-gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl
+eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDcwOTE4MTkyMlow
+gckxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl
bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u
ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv
-cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg
-Q0EgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsgOkO3f7wzN6
-rOjg45tR5vjBfzK7qmV9IBxb/QW9EEXxG+E7FNhZqQLtwGBKoSsHTnQqV75wWMk0
-9tinWvftBkSpj5sTi/8cbzJfUvTSVYh3Qxv6AVVjMMH/ruLjE6y+4PoaPs8WoYAQ
-ts5R4Z1g8c/WnTepLst2x0/Wv7GmuoQi+gXvHU6YrBiu7XkeYhzc95QdviWSJRDk
-owhb5K43qhcvjRmBfO/paGlCliDGZp8mHwrI21mwobWpVjTxZRwYO3bd4+TGcI4G
-Ie5wmHwE8F7SK1tgSqbBacKjDa93j7txKkfz/Yd2n7TGqOXiHPsJpG655vrKtnXk
-9vs1zoDeJQIDAQABo4IBljCCAZIwHQYDVR0OBBYEFAdEdoWTKLx/bXjSCuv6TEvf
-2YIfMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMCAGA1UdJQQZ
-MBcGCisGAQQBgjcKAwMGCWCGSAGG+EIEATAYBgNVHSAEETAPMA0GCysGAQQBgOU3
-AgQBMHsGA1UdHwR0MHIwOKA2oDSGMmh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL0Fk
-ZFRydXN0RXh0ZXJuYWxDQVJvb3QuY3JsMDagNKAyhjBodHRwOi8vY3JsLmNvbW9k
-by5uZXQvQWRkVHJ1c3RFeHRlcm5hbENBUm9vdC5jcmwwgYAGCCsGAQUFBwEBBHQw
-cjA4BggrBgEFBQcwAoYsaHR0cDovL2NydC5jb21vZG9jYS5jb20vQWRkVHJ1c3RV
-VE5TR0NDQS5jcnQwNgYIKwYBBQUHMAKGKmh0dHA6Ly9jcnQuY29tb2RvLm5ldC9B
-ZGRUcnVzdFVUTlNHQ0NBLmNydDARBglghkgBhvhCAQEEBAMCAgQwDQYJKoZIhvcN
-AQEFBQADggEBAK2zEzs+jcIrVK9oDkdDZNvhuBYTdCfpxfFs+OAujW0bIfJAy232
-euVsnJm6u/+OrqKudD2tad2BbejLLXhMZViaCmK7D9nrXHx4te5EP8rL19SUVqLY
-1pTnv5dhNgEgvA7n5lIzDSYs7yRLsr7HJsYPr6SeYSuZizyX1SNz7ooJ32/F3X98
-RB0Mlc/E0OyOrkQ9/y5IrnpnaSora8CnUrV5XNOg+kyCz9edCyx4D5wXYcwZPVWz
-8aDqquESrezPyjtfi4WRO4s/VD3HLZvOxzMrWAVYCDG9FxaOhF0QGuuG1F7F3GKV
-v6prNyCl016kRl2j1UT+a7gLd8fA25A4C9E=
+cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEdMBsGA1UEAxMUVEJTIFg1MDkg
+Q0EgYnVzaW5lc3MwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDB1PAU
+qudCcz3tmyGcf+u6EkZqonKKHrV4gZYbvVkIRojmmlhfi/jwvpHvo8bqSt/9Rj5S
+jhCDW0pcbI+IPPtD1Jy+CHNSfnMqVDy6CKQ3p5maTzCMG6ZT+XjnvcND5v+FtaiB
+xk1iCX6uvt0jeUtdZvYbyytsSDE6c3Y5//wRxOF8tM1JxibwO3pyER26jbbN2gQz
+m/EkdGjLdJ4svPk23WDAvQ6G0/z2LcAaJB+XLfqRwfQpHQvfKa1uTi8PivC8qtip
+rmNQMMPMjxSK2azX8cKjjTDJiUKaCb4VHlJDWKEsCFRpgJAoAuX8f7Yfs1M4esGo
+sWb3PGspK3O22uIlAgMBAAGjggF6MIIBdjAdBgNVHQ4EFgQUGgkEzPkJj7seoM7U
+pBR/S9UjWM0wDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQAwGAYD
+VR0gBBEwDzANBgsrBgEEAYDlNwIBATB7BgNVHR8EdDByMDigNqA0hjJodHRwOi8v
+Y3JsLmNvbW9kb2NhLmNvbS9BZGRUcnVzdEV4dGVybmFsQ0FSb290LmNybDA2oDSg
+MoYwaHR0cDovL2NybC5jb21vZG8ubmV0L0FkZFRydXN0RXh0ZXJuYWxDQVJvb3Qu
+Y3JsMIGGBggrBgEFBQcBAQR6MHgwOwYIKwYBBQUHMAKGL2h0dHA6Ly9jcnQuY29t
+b2RvY2EuY29tL0FkZFRydXN0VVROU2VydmVyQ0EuY3J0MDkGCCsGAQUFBzAChi1o
+dHRwOi8vY3J0LmNvbW9kby5uZXQvQWRkVHJ1c3RVVE5TZXJ2ZXJDQS5jcnQwEQYJ
+YIZIAYb4QgEBBAQDAgIEMA0GCSqGSIb3DQEBBQUAA4IBAQA7mqrMgk/MrE6QnbNA
+h4nRCn2ti4bg4w2C3lB6bSvRPnYwuNw9Jb8vuKkNFzRDxNJXqVDZdfFW5CVQJuyd
+nfAx83+wk+spzvFaE1KhFYfN9G9pQfXUfvDRoIcJgPEKUXL1wRiOG+IjU3VVI8pg
+IgqHkr7ylln5i5zCiFAPuIJmYUSFg/gxH5xkCNcjJqqrHrHatJr6Qrrke93joupw
+oU1njfAcZtYp6fbiK6u2b1pJqwkVBE8RsfLnPhRj+SFbpvjv8Od7o/ieJhFIYQNU
+k2jX2u8qZnAiNw93LZW9lpYjtuvMXq8QQppENNja5b53q7UwI+lU7ZGjZ7quuESp
+J6/5
-----END CERTIFICATE-----
2 s:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root
- i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC
+ i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware
-----BEGIN CERTIFICATE-----
-MIIEZjCCA06gAwIBAgIQUSYKkxzif5zDpV954HKugjANBgkqhkiG9w0BAQUFADCB
-kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
+MIIETzCCAzegAwIBAgIQHM5EYpUZep1jUvnyI6m2mDANBgkqhkiG9w0BAQUFADCB
+lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
-dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw
-IFNHQzAeFw0wNTA2MDcwODA5MTBaFw0xOTA2MjQxOTA2MzBaMG8xCzAJBgNVBAYT
-AlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0
-ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB
-IFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC39xoz5vIABC05
-4E5b7R+8bA/Ntfojts7emxEzl6QpTH2Tn71KvJPtAxrjj8/lbVBa1pcplFqAsEl6
-2y6V/bjKvzc4LR4+kUGtcFbH8E8/6DKedMrIkFTpxl8PeJ2aQDwOrGGqXhSPnoeh
-alDc15pOrwWzpnGUnHGzUGAKxxOdOAeGAqjpqGkmGJCrTLBPI6s6T4TY386f4Wlv
-u9dC12tE5Met7m1BX3JacQg3s3llpFmglDf3AC8NwpJy2tA4ctsUqEXEXSp9t7TW
-xO6szRNEt8kr3UMAJfphuWlqWCMRt6czj1Z1WfXNKddGtworZbbTQm8Vsrh7++/p
-XVPVNFonAgMBAAGjgdgwgdUwHwYDVR0jBBgwFoAUUzLRs89/+uDxoF2FTpLSnkUd
-tE8wHQYDVR0OBBYEFK29mHo0tCb3+sQmVO8DveAky1QaMA4GA1UdDwEB/wQEAwIB
-BjAPBgNVHRMBAf8EBTADAQH/MBEGCWCGSAGG+EIBAQQEAwIBAjAgBgNVHSUEGTAX
-BgorBgEEAYI3CgMDBglghkgBhvhCBAEwPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDov
-L2NybC51c2VydHJ1c3QuY29tL1VUTi1EQVRBQ29ycFNHQy5jcmwwDQYJKoZIhvcN
-AQEFBQADggEBAMbuUxdoFLJRIh6QWA2U/b3xcOWGLcM2MY9USEbnLQg3vGwKYOEO
-rVE04BKT6b64q7gmtOmWPSiPrmQH/uAB7MXjkesYoPF1ftsK5p+R26+udd8jkWjd
-FwBaS/9kbHDrARrQkNnHptZt9hPk/7XJ0h4qy7ElQyZ42TCbTg0evmnv3+r+LbPM
-+bDdtRTKkdSytaX7ARmjR3mfnYyVhzT4HziS2jamEfpr62vp3EV4FTkG101B5CHI
-3C+H0be/SGB1pWLLJN47YaApIKa+xWycxOkKaSLvkTr6Jq/RW0GnOuL4OAdCq8Fb
-+M5tug8EPzI0rNwEKNdwMBQmBsTkm5jVz3g=
+dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt
+SGFyZHdhcmUwHhcNMDUwNjA3MDgwOTEwWhcNMTkwNzA5MTgxOTIyWjBvMQswCQYD
+VQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0
+IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5h
+bCBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt/caM+by
+AAQtOeBOW+0fvGwPzbX6I7bO3psRM5ekKUx9k5+9SryT7QMa44/P5W1QWtaXKZRa
+gLBJetsulf24yr83OC0ePpFBrXBWx/BPP+gynnTKyJBU6cZfD3idmkA8Dqxhql4U
+j56HoWpQ3NeaTq8Fs6ZxlJxxs1BgCscTnTgHhgKo6ahpJhiQq0ywTyOrOk+E2N/O
+n+Fpb7vXQtdrROTHre5tQV9yWnEIN7N5ZaRZoJQ39wAvDcKSctrQOHLbFKhFxF0q
+fbe01sTurM0TRLfJK91DACX6YblpalgjEbenM49WdVn1zSnXRrcKK2W200JvFbK4
+e/vv6V1T1TRaJwIDAQABo4G9MIG6MB8GA1UdIwQYMBaAFKFyXyYbKJhDlV0HN9WF
+lp1L0sNFMB0GA1UdDgQWBBStvZh6NLQm9/rEJlTvA73gJMtUGjAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zARBglghkgBhvhCAQEEBAMCAQIwRAYDVR0f
+BD0wOzA5oDegNYYzaHR0cDovL2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmly
+c3QtSGFyZHdhcmUuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQByQhANOs4kClrwF8BW
+onvUOGCSjRK52zYZgDXYNjDtmr5rJ6NyPFDNn+JxkLpjYetIFMTbSRe679Bt8m7a
+gIAoQYFQtxMuyLnJegB2aEbQiIxh/tC21UcFF7ktdnDoTlA6w3pLuvunaI84Of3o
+2YBrhzkTbCfaYk5JRlTpudW9DkUkHBsyx3nknPKnplkIGaK0jgn8E0n+SFabYaHk
+I9LroYT/+JtLefh9lgBdAgVv0UPbzoGfuDsrk/Zh+UrgbLFpHoVnElhzbkh64Z0X
+OGaJunQc68cCZu5HTn/aK7fBGMcVflRCXLVEQpU9PIAdGA8Ynvg684t8GMaKsRl1
+jIGZ
-----END CERTIFICATE-----
- 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC
- i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC
+ 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware
+ i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN-USERFirst-Hardware
-----BEGIN CERTIFICATE-----
-MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB
-kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
+MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB
+lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
-dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw
-IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG
-EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD
-VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu
-dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN
-BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6
-E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ
-D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK
-4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq
-lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW
-bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB
-o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT
-MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js
-LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr
-BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB
-AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft
-Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj
-j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH
-KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv
-2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3
-mfnGV/TJVTl4uix5yaaIK/QI
+dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt
+SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG
+A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe
+MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v
+d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh
+cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn
+0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ
+M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a
+MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd
+oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI
+DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy
+oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD
+VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0
+dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy
+bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF
+BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM
+//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli
+CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE
+CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t
+3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS
+KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA==
-----END CERTIFICATE-----
diff --git a/Lib/test/ssl_cert.pem b/Lib/test/ssl_cert.pem
deleted file mode 100644
index 9d7ac23..0000000
--- a/Lib/test/ssl_cert.pem
+++ /dev/null
@@ -1,14 +0,0 @@
------BEGIN CERTIFICATE-----
-MIICLDCCAdYCAQAwDQYJKoZIhvcNAQEEBQAwgaAxCzAJBgNVBAYTAlBUMRMwEQYD
-VQQIEwpRdWVlbnNsYW5kMQ8wDQYDVQQHEwZMaXNib2ExFzAVBgNVBAoTDk5ldXJv
-bmlvLCBMZGEuMRgwFgYDVQQLEw9EZXNlbnZvbHZpbWVudG8xGzAZBgNVBAMTEmJy
-dXR1cy5uZXVyb25pby5wdDEbMBkGCSqGSIb3DQEJARYMc2FtcG9AaWtpLmZpMB4X
-DTk2MDkwNTAzNDI0M1oXDTk2MTAwNTAzNDI0M1owgaAxCzAJBgNVBAYTAlBUMRMw
-EQYDVQQIEwpRdWVlbnNsYW5kMQ8wDQYDVQQHEwZMaXNib2ExFzAVBgNVBAoTDk5l
-dXJvbmlvLCBMZGEuMRgwFgYDVQQLEw9EZXNlbnZvbHZpbWVudG8xGzAZBgNVBAMT
-EmJydXR1cy5uZXVyb25pby5wdDEbMBkGCSqGSIb3DQEJARYMc2FtcG9AaWtpLmZp
-MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAL7+aty3S1iBA/+yxjxv4q1MUTd1kjNw
-L4lYKbpzzlmC5beaQXeQ2RmGMTXU+mDvuqItjVHOK3DvPK7lTcSGftUCAwEAATAN
-BgkqhkiG9w0BAQQFAANBAFqPEKFjk6T6CKTHvaQeEAsX0/8YHPHqH/9AnhSjrwuX
-9EBc0n6bVGhN7XaXd6sJ7dym9sbsWxb+pJdurnkxjx4=
------END CERTIFICATE-----
diff --git a/Lib/test/ssl_key.pem b/Lib/test/ssl_key.pem
deleted file mode 100644
index 239ad66..0000000
--- a/Lib/test/ssl_key.pem
+++ /dev/null
@@ -1,9 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIBPAIBAAJBAL7+aty3S1iBA/+yxjxv4q1MUTd1kjNwL4lYKbpzzlmC5beaQXeQ
-2RmGMTXU+mDvuqItjVHOK3DvPK7lTcSGftUCAwEAAQJBALjkK+jc2+iihI98riEF
-oudmkNziSRTYjnwjx8mCoAjPWviB3c742eO3FG4/soi1jD9A5alihEOXfUzloenr
-8IECIQD3B5+0l+68BA/6d76iUNqAAV8djGTzvxnCxycnxPQydQIhAMXt4trUI3nc
-a+U8YL2HPFA3gmhBsSICbq2OptOCnM7hAiEA6Xi3JIQECob8YwkRj29DU3/4WYD7
-WLPgsQpwo1GuSpECICGsnWH5oaeD9t9jbFoSfhJvv0IZmxdcLpRcpslpeWBBAiEA
-6/5B8J0GHdJq89FHwEG/H2eVVUYu5y/aD6sgcm+0Avg=
------END RSA PRIVATE KEY-----
diff --git a/Lib/test/string_tests.py b/Lib/test/string_tests.py
index 5931f3d..0479601 100644
--- a/Lib/test/string_tests.py
+++ b/Lib/test/string_tests.py
@@ -749,10 +749,10 @@ class CommonTest(unittest.TestCase):
self.checkraises(TypeError, 'hello', 'replace', 42, 'h')
self.checkraises(TypeError, 'hello', 'replace', 'h', 42)
+ @unittest.skipIf(sys.maxint > (1 << 32) or struct.calcsize('P') != 4,
+ 'only applies to 32-bit platforms')
def test_replace_overflow(self):
# Check for overflow checking on 32 bit machines
- if sys.maxint != 2147483647 or struct.calcsize("P") > 4:
- return
A2_16 = "A" * (2**16)
self.checkraises(OverflowError, A2_16, "replace", "", A2_16)
self.checkraises(OverflowError, A2_16, "replace", "A", A2_16)
@@ -1113,6 +1113,31 @@ class MixinStrUnicodeUserStringTest:
self.checkraises(TypeError, '%10.*f', '__mod__', ('foo', 42.))
self.checkraises(ValueError, '%10', '__mod__', (42,))
+ class X(object): pass
+ self.checkraises(TypeError, 'abc', '__mod__', X())
+ class X(Exception):
+ def __getitem__(self, k):
+ return k
+ self.checkequal('melon apple', '%(melon)s %(apple)s', '__mod__', X())
+
+ @test_support.cpython_only
+ def test_formatting_c_limits(self):
+ from _testcapi import PY_SSIZE_T_MAX, INT_MAX, UINT_MAX
+ SIZE_MAX = (1 << (PY_SSIZE_T_MAX.bit_length() + 1)) - 1
+ width = int(PY_SSIZE_T_MAX + 1)
+ if width <= sys.maxint:
+ self.checkraises(OverflowError, '%*s', '__mod__', (width, ''))
+ prec = int(INT_MAX + 1)
+ if prec <= sys.maxint:
+ self.checkraises(OverflowError, '%.*f', '__mod__', (prec, 1. / 7))
+ # Issue 15989
+ width = int(SIZE_MAX + 1)
+ if width <= sys.maxint:
+ self.checkraises(OverflowError, '%*s', '__mod__', (width, ''))
+ prec = int(UINT_MAX + 1)
+ if prec <= sys.maxint:
+ self.checkraises(OverflowError, '%.*f', '__mod__', (prec, 1. / 7))
+
def test_floatformatting(self):
# float formatting
for prec in xrange(100):
@@ -1264,27 +1289,27 @@ class MixinStrUserStringTest:
# Additional tests that only work with
# 8bit compatible object, i.e. str and UserString
- if test_support.have_unicode:
- def test_encoding_decoding(self):
- codecs = [('rot13', 'uryyb jbeyq'),
- ('base64', 'aGVsbG8gd29ybGQ=\n'),
- ('hex', '68656c6c6f20776f726c64'),
- ('uu', 'begin 666 <data>\n+:&5L;&\\@=V]R;&0 \n \nend\n')]
- for encoding, data in codecs:
- self.checkequal(data, 'hello world', 'encode', encoding)
- self.checkequal('hello world', data, 'decode', encoding)
- # zlib is optional, so we make the test optional too...
- try:
- import zlib
- except ImportError:
- pass
- else:
- data = 'x\x9c\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\x01\x00\x1a\x0b\x04]'
- self.checkequal(data, 'hello world', 'encode', 'zlib')
- self.checkequal('hello world', data, 'decode', 'zlib')
+ @unittest.skipUnless(test_support.have_unicode, 'no unicode support')
+ def test_encoding_decoding(self):
+ codecs = [('rot13', 'uryyb jbeyq'),
+ ('base64', 'aGVsbG8gd29ybGQ=\n'),
+ ('hex', '68656c6c6f20776f726c64'),
+ ('uu', 'begin 666 <data>\n+:&5L;&\\@=V]R;&0 \n \nend\n')]
+ for encoding, data in codecs:
+ self.checkequal(data, 'hello world', 'encode', encoding)
+ self.checkequal('hello world', data, 'decode', encoding)
+ # zlib is optional, so we make the test optional too...
+ try:
+ import zlib
+ except ImportError:
+ pass
+ else:
+ data = 'x\x9c\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\x01\x00\x1a\x0b\x04]'
+ self.checkequal(data, 'hello world', 'encode', 'zlib')
+ self.checkequal('hello world', data, 'decode', 'zlib')
- self.checkraises(TypeError, 'xyz', 'decode', 42)
- self.checkraises(TypeError, 'xyz', 'encode', 42)
+ self.checkraises(TypeError, 'xyz', 'decode', 42)
+ self.checkraises(TypeError, 'xyz', 'encode', 42)
class MixinStrUnicodeTest:
diff --git a/Lib/test/subprocessdata/sigchild_ignore.py b/Lib/test/subprocessdata/sigchild_ignore.py
index 6072aec..86320fb 100644
--- a/Lib/test/subprocessdata/sigchild_ignore.py
+++ b/Lib/test/subprocessdata/sigchild_ignore.py
@@ -1,6 +1,15 @@
-import signal, subprocess, sys
+import signal, subprocess, sys, time
# On Linux this causes os.waitpid to fail with OSError as the OS has already
# reaped our child process. The wait() passing the OSError on to the caller
# and causing us to exit with an error is what we are testing against.
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
subprocess.Popen([sys.executable, '-c', 'print("albatross")']).wait()
+# Also ensure poll() handles an errno.ECHILD appropriately.
+p = subprocess.Popen([sys.executable, '-c', 'print("albatross")'])
+num_polls = 0
+while p.poll() is None:
+ # Waiting for the process to finish.
+ time.sleep(0.01) # Avoid being a CPU busy loop.
+ num_polls += 1
+ if num_polls > 3000:
+ raise RuntimeError('poll should have returned 0 within 30 seconds')
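
For reference, a minimal sketch (not part of the patch) of the behaviour the
comments above describe; on Linux, ignoring SIGCHLD makes the kernel reap
children itself, so an explicit waitpid() typically fails with ECHILD:

    import errno, os, signal, subprocess, sys

    signal.signal(signal.SIGCHLD, signal.SIG_IGN)
    child = subprocess.Popen([sys.executable, '-c', 'pass'])
    try:
        os.waitpid(child.pid, 0)        # the OS may already have reaped the child
    except OSError as e:
        # This is the error that Popen.wait()/poll() have to tolerate.
        assert e.errno == errno.ECHILD
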
diff --git a/Lib/test/symlink_support.py b/Lib/test/symlink_support.py
new file mode 100644
index 0000000..301d0f9
--- /dev/null
+++ b/Lib/test/symlink_support.py
@@ -0,0 +1,100 @@
+import os
+import unittest
+import platform
+
+from test.test_support import TESTFN
+
+def can_symlink():
+ # cache the result in can_symlink.prev_val
+ prev_val = getattr(can_symlink, 'prev_val', None)
+ if prev_val is not None:
+ return prev_val
+ symlink_path = TESTFN + "can_symlink"
+ try:
+ symlink(TESTFN, symlink_path)
+ can = True
+ except (OSError, NotImplementedError, AttributeError):
+ can = False
+ else:
+ os.remove(symlink_path)
+ can_symlink.prev_val = can
+ return can
+
+def skip_unless_symlink(test):
+ """Skip decorator for tests that require functional symlink"""
+ ok = can_symlink()
+ msg = "Requires functional symlink implementation"
+ return test if ok else unittest.skip(msg)(test)
+
+def _symlink_win32(target, link, target_is_directory=False):
+ """
+ Ctypes symlink implementation since Python doesn't support
+ symlinks on Windows yet. Borrowed from the jaraco.windows project.
+ """
+ import ctypes.wintypes
+ CreateSymbolicLink = ctypes.windll.kernel32.CreateSymbolicLinkW
+ CreateSymbolicLink.argtypes = (
+ ctypes.wintypes.LPWSTR,
+ ctypes.wintypes.LPWSTR,
+ ctypes.wintypes.DWORD,
+ )
+ CreateSymbolicLink.restype = ctypes.wintypes.BOOLEAN
+
+ def format_system_message(errno):
+ """
+ Call FormatMessage with a system error number to retrieve
+ the descriptive error message.
+ """
+ # first some flags used by FormatMessageW
+ ALLOCATE_BUFFER = 0x100
+ ARGUMENT_ARRAY = 0x2000
+ FROM_HMODULE = 0x800
+ FROM_STRING = 0x400
+ FROM_SYSTEM = 0x1000
+ IGNORE_INSERTS = 0x200
+
+ # Let FormatMessageW allocate the buffer (we'll free it below)
+ # Also, let it know we want a system error message.
+ flags = ALLOCATE_BUFFER | FROM_SYSTEM
+ source = None
+ message_id = errno
+ language_id = 0
+ result_buffer = ctypes.wintypes.LPWSTR()
+ buffer_size = 0
+ arguments = None
+ bytes = ctypes.windll.kernel32.FormatMessageW(
+ flags,
+ source,
+ message_id,
+ language_id,
+ ctypes.byref(result_buffer),
+ buffer_size,
+ arguments,
+ )
+ # note the following will cause an infinite loop if GetLastError
+ # repeatedly returns an error that cannot be formatted, although
+ # this should not happen.
+ handle_nonzero_success(bytes)
+ message = result_buffer.value
+ ctypes.windll.kernel32.LocalFree(result_buffer)
+ return message
+
+ def handle_nonzero_success(result):
+ if result == 0:
+ value = ctypes.windll.kernel32.GetLastError()
+ strerror = format_system_message(value)
+ raise WindowsError(value, strerror)
+
+ target_is_directory = target_is_directory or os.path.isdir(target)
+ handle_nonzero_success(CreateSymbolicLink(link, target, target_is_directory))
+
+symlink = os.symlink if hasattr(os, 'symlink') else (
+ _symlink_win32 if platform.system() == 'Windows' else None
+)
+
+def remove_symlink(name):
+ # On Windows, to remove a directory symlink, one must use rmdir
+ try:
+ os.rmdir(name)
+ except OSError:
+ os.remove(name)
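
For reference, a hypothetical usage sketch (not part of the patch) of the
helpers above; the decorator skips the test on platforms without a working
symlink implementation (the class and test names are illustrative only):

    import os
    import unittest
    from test.symlink_support import skip_unless_symlink, symlink, remove_symlink
    from test.test_support import TESTFN

    class SymlinkExampleTest(unittest.TestCase):
        @skip_unless_symlink
        def test_follows_link(self):
            link = TESTFN + "_link"
            with open(TESTFN, "w") as f:
                f.write("data")
            self.addCleanup(os.remove, TESTFN)
            symlink(TESTFN, link)
            self.addCleanup(remove_symlink, link)
            self.assertTrue(os.path.exists(link))
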
diff --git a/Lib/test/test_StringIO.py b/Lib/test/test_StringIO.py
index bf0c733..42f307a 100644
--- a/Lib/test/test_StringIO.py
+++ b/Lib/test/test_StringIO.py
@@ -5,6 +5,7 @@ import StringIO
import cStringIO
import types
import array
+import sys
from test import test_support
@@ -19,7 +20,6 @@ class TestGenericStringIO(unittest.TestCase):
constructor = str
def setUp(self):
- self._line = self.constructor(self._line)
self._lines = self.constructor((self._line + '\n') * 5)
self._fp = self.MODULE.StringIO(self._lines)
@@ -27,6 +27,8 @@ class TestGenericStringIO(unittest.TestCase):
eq = self.assertEqual
self.assertRaises(TypeError, self._fp.seek)
eq(self._fp.read(10), self._line[:10])
+ eq(self._fp.read(0), '')
+ eq(self._fp.readline(0), '')
eq(self._fp.readline(), self._line[10:] + '\n')
eq(len(self._fp.readlines(60)), 2)
self._fp.seek(0)
@@ -105,6 +107,45 @@ class TestGenericStringIO(unittest.TestCase):
self._fp.close()
self.assertRaises(ValueError, self._fp.getvalue)
+ @test_support.bigmemtest(test_support._2G + 2**26, memuse=2.001)
+ def test_reads_from_large_stream(self, size):
+ linesize = 2**26 # 64 MiB
+ lines = ['x' * (linesize - 1) + '\n'] * (size // linesize) + \
+ ['y' * (size % linesize)]
+ f = self.MODULE.StringIO(''.join(lines))
+ for i, expected in enumerate(lines):
+ line = f.read(len(expected))
+ self.assertEqual(len(line), len(expected))
+ self.assertEqual(line, expected)
+ self.assertEqual(f.read(), '')
+ f.seek(0)
+ for i, expected in enumerate(lines):
+ line = f.readline()
+ self.assertEqual(len(line), len(expected))
+ self.assertEqual(line, expected)
+ self.assertEqual(f.readline(), '')
+ f.seek(0)
+ self.assertEqual(f.readlines(), lines)
+ self.assertEqual(f.readlines(), [])
+ f.seek(0)
+ self.assertEqual(f.readlines(size), lines)
+ self.assertEqual(f.readlines(), [])
+
+ # In the worst case cStringIO requires 2 + 1 + 1/2 + 1/2**2 + ... = 4
+ # bytes per input character.
+ @test_support.bigmemtest(test_support._2G, memuse=4)
+ def test_writes_to_large_stream(self, size):
+ s = 'x' * 2**26 # 64 MiB
+ f = self.MODULE.StringIO()
+ n = size
+ while n > len(s):
+ f.write(s)
+ n -= len(s)
+ s = None
+ f.write('x' * n)
+ self.assertEqual(len(f.getvalue()), size)
+
+
class TestStringIO(TestGenericStringIO):
MODULE = StringIO
@@ -168,12 +209,16 @@ class TestBufferStringIO(TestStringIO):
class TestBuffercStringIO(TestcStringIO):
constructor = buffer
+class TestMemoryviewcStringIO(TestcStringIO):
+ constructor = memoryview
+
def test_main():
test_support.run_unittest(TestStringIO, TestcStringIO)
with test_support.check_py3k_warnings(("buffer.. not supported",
DeprecationWarning)):
test_support.run_unittest(TestBufferStringIO, TestBuffercStringIO)
+ test_support.run_unittest(TestMemoryviewcStringIO)
if __name__ == '__main__':
test_main()
diff --git a/Lib/test/test___future__.py b/Lib/test/test___future__.py
index 1d8f8e6..b573382 100644
--- a/Lib/test/test___future__.py
+++ b/Lib/test/test___future__.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
import unittest
from test import test_support
import __future__
diff --git a/Lib/test/test__osx_support.py b/Lib/test/test__osx_support.py
new file mode 100644
index 0000000..71dd00e
--- /dev/null
+++ b/Lib/test/test__osx_support.py
@@ -0,0 +1,281 @@
+"""
+Test suite for _osx_support: shared OS X support functions.
+"""
+
+import os
+import platform
+import shutil
+import stat
+import sys
+import unittest
+
+import test.test_support
+
+import _osx_support
+
+@unittest.skipUnless(sys.platform.startswith("darwin"), "requires OS X")
+class Test_OSXSupport(unittest.TestCase):
+
+ def setUp(self):
+ self.maxDiff = None
+ self.prog_name = 'bogus_program_xxxx'
+ self.temp_path_dir = os.path.abspath(os.getcwd())
+ self.env = test.test_support.EnvironmentVarGuard()
+ self.addCleanup(self.env.__exit__)
+ for cv in ('CFLAGS', 'LDFLAGS', 'CPPFLAGS',
+ 'BASECFLAGS', 'BLDSHARED', 'LDSHARED', 'CC',
+ 'CXX', 'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
+ 'PY_CORE_CFLAGS'):
+ if cv in self.env:
+ self.env.unset(cv)
+
+ def add_expected_saved_initial_values(self, config_vars, expected_vars):
+ # Ensure that the initial values for all modified config vars
+ # are also saved with modified keys.
+ expected_vars.update(('_OSX_SUPPORT_INITIAL_'+ k,
+ config_vars[k]) for k in config_vars
+ if config_vars[k] != expected_vars[k])
+
+ def test__find_executable(self):
+ if self.env['PATH']:
+ self.env['PATH'] = self.env['PATH'] + ':'
+ self.env['PATH'] = self.env['PATH'] + os.path.abspath(self.temp_path_dir)
+ test.test_support.unlink(self.prog_name)
+ self.assertIsNone(_osx_support._find_executable(self.prog_name))
+ self.addCleanup(test.test_support.unlink, self.prog_name)
+ with open(self.prog_name, 'w') as f:
+ f.write("#!/bin/sh\n/bin/echo OK\n")
+ os.chmod(self.prog_name, stat.S_IRWXU)
+ self.assertEqual(self.prog_name,
+ _osx_support._find_executable(self.prog_name))
+
+ def test__read_output(self):
+ if self.env['PATH']:
+ self.env['PATH'] = self.env['PATH'] + ':'
+ self.env['PATH'] = self.env['PATH'] + os.path.abspath(self.temp_path_dir)
+ test.test_support.unlink(self.prog_name)
+ self.addCleanup(test.test_support.unlink, self.prog_name)
+ with open(self.prog_name, 'w') as f:
+ f.write("#!/bin/sh\n/bin/echo ExpectedOutput\n")
+ os.chmod(self.prog_name, stat.S_IRWXU)
+ self.assertEqual('ExpectedOutput',
+ _osx_support._read_output(self.prog_name))
+
+ def test__find_build_tool(self):
+ out = _osx_support._find_build_tool('cc')
+ self.assertTrue(os.path.isfile(out),
+ 'cc not found - check xcode-select')
+
+ def test__get_system_version(self):
+ self.assertTrue(platform.mac_ver()[0].startswith(
+ _osx_support._get_system_version()))
+
+ def test__remove_original_values(self):
+ config_vars = {
+ 'CC': 'gcc-test -pthreads',
+ }
+ expected_vars = {
+ 'CC': 'clang -pthreads',
+ }
+ cv = 'CC'
+ newvalue = 'clang -pthreads'
+ _osx_support._save_modified_value(config_vars, cv, newvalue)
+ self.assertNotEqual(expected_vars, config_vars)
+ _osx_support._remove_original_values(config_vars)
+ self.assertEqual(expected_vars, config_vars)
+
+ def test__save_modified_value(self):
+ config_vars = {
+ 'CC': 'gcc-test -pthreads',
+ }
+ expected_vars = {
+ 'CC': 'clang -pthreads',
+ }
+ self.add_expected_saved_initial_values(config_vars, expected_vars)
+ cv = 'CC'
+ newvalue = 'clang -pthreads'
+ _osx_support._save_modified_value(config_vars, cv, newvalue)
+ self.assertEqual(expected_vars, config_vars)
+
+ def test__save_modified_value_unchanged(self):
+ config_vars = {
+ 'CC': 'gcc-test -pthreads',
+ }
+ expected_vars = config_vars.copy()
+ cv = 'CC'
+ newvalue = 'gcc-test -pthreads'
+ _osx_support._save_modified_value(config_vars, cv, newvalue)
+ self.assertEqual(expected_vars, config_vars)
+
+ def test__supports_universal_builds(self):
+ import platform
+ mac_ver_tuple = tuple(int(i) for i in
+ platform.mac_ver()[0].split('.')[0:2])
+ self.assertEqual(mac_ver_tuple >= (10, 4),
+ _osx_support._supports_universal_builds())
+
+ def test__find_appropriate_compiler(self):
+ compilers = (
+ ('gcc-test', 'i686-apple-darwin11-llvm-gcc-4.2'),
+ ('clang', 'clang version 3.1'),
+ )
+ config_vars = {
+ 'CC': 'gcc-test -pthreads',
+ 'CXX': 'cc++-test',
+ 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
+ 'LDFLAGS': '-arch ppc -arch i386 -g',
+ 'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
+ 'BLDSHARED': 'gcc-test -bundle -arch ppc -arch i386 -g',
+ 'LDSHARED': 'gcc-test -bundle -arch ppc -arch i386 '
+ '-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
+ }
+ expected_vars = {
+ 'CC': 'clang -pthreads',
+ 'CXX': 'clang++',
+ 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
+ 'LDFLAGS': '-arch ppc -arch i386 -g',
+ 'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
+ 'BLDSHARED': 'clang -bundle -arch ppc -arch i386 -g',
+ 'LDSHARED': 'clang -bundle -arch ppc -arch i386 '
+ '-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
+ }
+ self.add_expected_saved_initial_values(config_vars, expected_vars)
+
+ suffix = (':' + self.env['PATH']) if self.env['PATH'] else ''
+ self.env['PATH'] = os.path.abspath(self.temp_path_dir) + suffix
+ for c_name, c_output in compilers:
+ test.test_support.unlink(c_name)
+ self.addCleanup(test.test_support.unlink, c_name)
+ with open(c_name, 'w') as f:
+ f.write("#!/bin/sh\n/bin/echo " + c_output)
+ os.chmod(c_name, stat.S_IRWXU)
+ self.assertEqual(expected_vars,
+ _osx_support._find_appropriate_compiler(
+ config_vars))
+
+ def test__remove_universal_flags(self):
+ config_vars = {
+ 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
+ 'LDFLAGS': '-arch ppc -arch i386 -g',
+ 'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
+ 'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
+ 'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
+ '-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
+ }
+ expected_vars = {
+ 'CFLAGS': '-fno-strict-aliasing -g -O3 ',
+ 'LDFLAGS': ' -g',
+ 'CPPFLAGS': '-I. ',
+ 'BLDSHARED': 'gcc-4.0 -bundle -g',
+ 'LDSHARED': 'gcc-4.0 -bundle -g',
+ }
+ self.add_expected_saved_initial_values(config_vars, expected_vars)
+
+ self.assertEqual(expected_vars,
+ _osx_support._remove_universal_flags(
+ config_vars))
+
+ def test__remove_unsupported_archs(self):
+ config_vars = {
+ 'CC': 'clang',
+ 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
+ 'LDFLAGS': '-arch ppc -arch i386 -g',
+ 'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
+ 'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
+ 'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
+ '-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
+ }
+ expected_vars = {
+ 'CC': 'clang',
+ 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch i386 ',
+ 'LDFLAGS': ' -arch i386 -g',
+ 'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
+ 'BLDSHARED': 'gcc-4.0 -bundle -arch i386 -g',
+ 'LDSHARED': 'gcc-4.0 -bundle -arch i386 '
+ '-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
+ }
+ self.add_expected_saved_initial_values(config_vars, expected_vars)
+
+ suffix = (':' + self.env['PATH']) if self.env['PATH'] else ''
+ self.env['PATH'] = os.path.abspath(self.temp_path_dir) + suffix
+ c_name = 'clang'
+ test.test_support.unlink(c_name)
+ self.addCleanup(test.test_support.unlink, c_name)
+ # exit status 255 means no PPC support in this compiler chain
+ with open(c_name, 'w') as f:
+ f.write("#!/bin/sh\nexit 255")
+ os.chmod(c_name, stat.S_IRWXU)
+ self.assertEqual(expected_vars,
+ _osx_support._remove_unsupported_archs(
+ config_vars))
+
+ def test__override_all_archs(self):
+ self.env['ARCHFLAGS'] = '-arch x86_64'
+ config_vars = {
+ 'CC': 'clang',
+ 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ',
+ 'LDFLAGS': '-arch ppc -arch i386 -g',
+ 'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
+ 'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
+ 'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
+ '-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g',
+ }
+ expected_vars = {
+ 'CC': 'clang',
+ 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch x86_64',
+ 'LDFLAGS': ' -g -arch x86_64',
+ 'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk',
+ 'BLDSHARED': 'gcc-4.0 -bundle -g -arch x86_64',
+ 'LDSHARED': 'gcc-4.0 -bundle -isysroot '
+ '/Developer/SDKs/MacOSX10.4u.sdk -g -arch x86_64',
+ }
+ self.add_expected_saved_initial_values(config_vars, expected_vars)
+
+ self.assertEqual(expected_vars,
+ _osx_support._override_all_archs(
+ config_vars))
+
+ def test__check_for_unavailable_sdk(self):
+ config_vars = {
+ 'CC': 'clang',
+ 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 '
+ '-isysroot /Developer/SDKs/MacOSX10.1.sdk',
+ 'LDFLAGS': '-arch ppc -arch i386 -g',
+ 'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.1.sdk',
+ 'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
+ 'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
+ '-isysroot /Developer/SDKs/MacOSX10.1.sdk -g',
+ }
+ expected_vars = {
+ 'CC': 'clang',
+ 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 '
+ ' ',
+ 'LDFLAGS': '-arch ppc -arch i386 -g',
+ 'CPPFLAGS': '-I. ',
+ 'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g',
+ 'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 '
+ ' -g',
+ }
+ self.add_expected_saved_initial_values(config_vars, expected_vars)
+
+ self.assertEqual(expected_vars,
+ _osx_support._check_for_unavailable_sdk(
+ config_vars))
+
+ def test_get_platform_osx(self):
+ # Note, get_platform_osx is currently tested more extensively
+ # indirectly by test_sysconfig and test_distutils
+ config_vars = {
+ 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 '
+ '-isysroot /Developer/SDKs/MacOSX10.1.sdk',
+ 'MACOSX_DEPLOYMENT_TARGET': '10.6',
+ }
+ result = _osx_support.get_platform_osx(config_vars, ' ', ' ', ' ')
+ self.assertEqual(('macosx', '10.6', 'fat'), result)
+
+def test_main():
+ if sys.platform == 'darwin':
+ test.test_support.run_unittest(Test_OSXSupport)
+
+if __name__ == "__main__":
+ test_main()
diff --git a/Lib/test/test_aepack.py b/Lib/test/test_aepack.py
index f4ea25b..84c8609 100644
--- a/Lib/test/test_aepack.py
+++ b/Lib/test/test_aepack.py
@@ -59,10 +59,10 @@ class TestAepack(unittest.TestCase):
try:
import Carbon.File
except:
- return
+ self.skipTest('Carbon.File not available')
if not hasattr(Carbon.File, "FSSpec"):
- return
+ self.skipTest('Carbon.File.FSSpec not available')
o = Carbon.File.FSSpec(os.curdir)
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
@@ -72,9 +72,9 @@ class TestAepack(unittest.TestCase):
try:
import Carbon.File
except:
- return
+ self.skipTest('Carbon.File not available')
if not hasattr(Carbon.File, "FSSpec"):
- return
+ self.skipTest('Carbon.File.FSSpec not available')
o = Carbon.File.FSSpec(os.curdir).NewAliasMinimal()
packed = aepack.pack(o)
unpacked = aepack.unpack(packed)
diff --git a/Lib/test/test_aifc.py b/Lib/test/test_aifc.py
index e492838..cfc491d 100644
--- a/Lib/test/test_aifc.py
+++ b/Lib/test/test_aifc.py
@@ -1,111 +1,156 @@
-from test.test_support import findfile, run_unittest, TESTFN
+from test.test_support import (findfile, TESTFN, unlink, captured_stdout,
+ run_unittest)
import unittest
+from test import audiotests
import os
import io
-
+import sys
+import struct
import aifc
-class AIFCTest(unittest.TestCase):
+class AifcTest(audiotests.AudioWriteTests,
+ audiotests.AudioTestsWithSourceFile):
+ module = aifc
+ close_fd = True
+ test_unseekable_read = None
+
+
+class AifcPCM8Test(AifcTest, unittest.TestCase):
+ sndfilename = 'pluck-pcm8.aiff'
+ sndfilenframes = 3307
+ nchannels = 2
+ sampwidth = 1
+ framerate = 11025
+ nframes = 48
+ comptype = 'NONE'
+ compname = 'not compressed'
+ frames = audiotests.fromhex("""\
+ 02FF 4B00 3104 8008 CB06 4803 BF01 03FE B8FA B4F3 29EB 1AE6 \
+ EDE4 C6E2 0EE0 EFE0 57E2 FBE8 13EF D8F7 97FB F5FC 08FB DFFB \
+ 11FA 3EFB BCFC 66FF CF04 4309 C10E 5112 EE17 8216 7F14 8012 \
+ 490E 520D EF0F CE0F E40C 630A 080A 2B0B 510E 8B11 B60E 440A \
+ """)
+
+
+class AifcPCM16Test(AifcTest, unittest.TestCase):
+ sndfilename = 'pluck-pcm16.aiff'
+ sndfilenframes = 3307
+ nchannels = 2
+ sampwidth = 2
+ framerate = 11025
+ nframes = 48
+ comptype = 'NONE'
+ compname = 'not compressed'
+ frames = audiotests.fromhex("""\
+ 022EFFEA 4B5D00F6 311804EA 80E10840 CBE106B1 48A903F5 BFE601B2 036CFE7B \
+ B858FA3E B4B1F34F 299AEBCA 1A5DE6DA EDFAE491 C628E275 0E09E0B5 EF2AE029 \
+ 5758E271 FB35E83F 1376EF86 D82BF727 9790FB76 F5FAFC0F 0867FB9C DF30FB43 \
+ 117EFA36 3EE5FB5B BC79FCB1 66D9FF5D CF150412 431D097C C1BA0EC8 512112A1 \
+ EEE21753 82071665 7FFF1443 8004128F 49A20EAF 52BB0DBA EFB40F60 CE3C0FBF \
+ E4B30CEC 63430A5C 08C80A20 2BBB0B08 514A0E43 8BCF1139 B6F60EEB 44120A5E \
+ """)
+
+
+class AifcPCM24Test(AifcTest, unittest.TestCase):
+ sndfilename = 'pluck-pcm24.aiff'
+ sndfilenframes = 3307
+ nchannels = 2
+ sampwidth = 3
+ framerate = 11025
+ nframes = 48
+ comptype = 'NONE'
+ compname = 'not compressed'
+ frames = audiotests.fromhex("""\
+ 022D65FFEB9D 4B5A0F00FA54 3113C304EE2B 80DCD6084303 \
+ CBDEC006B261 48A99803F2F8 BFE82401B07D 036BFBFE7B5D \
+ B85756FA3EC9 B4B055F3502B 299830EBCB62 1A5CA7E6D99A \
+ EDFA3EE491BD C625EBE27884 0E05A9E0B6CF EF2929E02922 \
+ 5758D8E27067 FB3557E83E16 1377BFEF8402 D82C5BF7272A \
+ 978F16FB7745 F5F865FC1013 086635FB9C4E DF30FCFB40EE \
+ 117FE0FA3438 3EE6B8FB5AC3 BC77A3FCB2F4 66D6DAFF5F32 \
+ CF13B9041275 431D69097A8C C1BB600EC74E 5120B912A2BA \
+ EEDF641754C0 8207001664B7 7FFFFF14453F 8000001294E6 \
+ 499C1B0EB3B2 52B73E0DBCA0 EFB2B20F5FD8 CE3CDB0FBE12 \
+ E4B49C0CEA2D 6344A80A5A7C 08C8FE0A1FFE 2BB9860B0A0E \
+ 51486F0E44E1 8BCC64113B05 B6F4EC0EEB36 4413170A5B48 \
+ """)
+
+
+class AifcPCM32Test(AifcTest, unittest.TestCase):
+ sndfilename = 'pluck-pcm32.aiff'
+ sndfilenframes = 3307
+ nchannels = 2
+ sampwidth = 4
+ framerate = 11025
+ nframes = 48
+ comptype = 'NONE'
+ compname = 'not compressed'
+ frames = audiotests.fromhex("""\
+ 022D65BCFFEB9D92 4B5A0F8000FA549C 3113C34004EE2BC0 80DCD680084303E0 \
+ CBDEC0C006B26140 48A9980003F2F8FC BFE8248001B07D92 036BFB60FE7B5D34 \
+ B8575600FA3EC920 B4B05500F3502BC0 29983000EBCB6240 1A5CA7A0E6D99A60 \
+ EDFA3E80E491BD40 C625EB80E27884A0 0E05A9A0E0B6CFE0 EF292940E0292280 \
+ 5758D800E2706700 FB3557D8E83E1640 1377BF00EF840280 D82C5B80F7272A80 \
+ 978F1600FB774560 F5F86510FC101364 086635A0FB9C4E20 DF30FC40FB40EE28 \
+ 117FE0A0FA3438B0 3EE6B840FB5AC3F0 BC77A380FCB2F454 66D6DA80FF5F32B4 \
+ CF13B980041275B0 431D6980097A8C00 C1BB60000EC74E00 5120B98012A2BAA0 \
+ EEDF64C01754C060 820700001664B780 7FFFFFFF14453F40 800000001294E6E0 \
+ 499C1B000EB3B270 52B73E000DBCA020 EFB2B2E00F5FD880 CE3CDB400FBE1270 \
+ E4B49CC00CEA2D90 6344A8800A5A7CA0 08C8FE800A1FFEE0 2BB986C00B0A0E00 \
+ 51486F800E44E190 8BCC6480113B0580 B6F4EC000EEB3630 441317800A5B48A0 \
+ """)
+
- def setUp(self):
- self.f = self.fout = None
- self.sndfilepath = findfile('Sine-1000Hz-300ms.aif')
+class AifcULAWTest(AifcTest, unittest.TestCase):
+ sndfilename = 'pluck-ulaw.aifc'
+ sndfilenframes = 3307
+ nchannels = 2
+ sampwidth = 2
+ framerate = 11025
+ nframes = 48
+ comptype = 'ulaw'
+ compname = ''
+ frames = audiotests.fromhex("""\
+ 022CFFE8 497C0104 307C04DC 8284083C CB84069C 497C03DC BE8401AC 036CFE74 \
+ B684FA24 B684F344 2A7CEC04 19FCE704 EE04E504 C584E204 0E3CE104 EF04DF84 \
+ 557CE204 FB24E804 12FCEF04 D784F744 9684FB64 F5C4FC24 083CFBA4 DF84FB24 \
+ 11FCFA24 3E7CFB64 BA84FCB4 657CFF5C CF84041C 417C093C C1840EBC 517C12FC \
+ EF0416FC 828415FC 7D7C13FC 828412FC 497C0EBC 517C0DBC F0040F3C CD840FFC \
+ E5040CBC 617C0A3C 08BC0A3C 2C7C0B3C 517C0E3C 8A8410FC B6840EBC 457C0A3C \
+ """)
+ if sys.byteorder != 'big':
+ frames = audiotests.byteswap2(frames)
- def tearDown(self):
- if self.f is not None:
- self.f.close()
- if self.fout is not None:
- try:
- self.fout.close()
- except (aifc.Error, AttributeError):
- pass
- try:
- os.remove(TESTFN)
- except OSError:
- pass
+class AifcMiscTest(audiotests.AudioTests, unittest.TestCase):
def test_skipunknown(self):
#Issue 2245
#This file contains chunk types aifc doesn't recognize.
- self.f = aifc.open(self.sndfilepath)
-
- def test_params(self):
- f = self.f = aifc.open(self.sndfilepath)
- self.assertEqual(f.getnchannels(), 2)
- self.assertEqual(f.getsampwidth(), 2)
- self.assertEqual(f.getframerate(), 48000)
- self.assertEqual(f.getnframes(), 14400)
- self.assertEqual(f.getcomptype(), 'NONE')
- self.assertEqual(f.getcompname(), 'not compressed')
- self.assertEqual(f.getparams(), (2, 2, 48000, 14400, 'NONE', 'not compressed'))
-
- def test_read(self):
- f = self.f = aifc.open(self.sndfilepath)
- self.assertEqual(f.tell(), 0)
- self.assertEqual(f.readframes(2), '\x00\x00\x00\x00\x0b\xd4\x0b\xd4')
- f.rewind()
- pos0 = f.tell()
- self.assertEqual(pos0, 0)
- self.assertEqual(f.readframes(2), '\x00\x00\x00\x00\x0b\xd4\x0b\xd4')
- pos2 = f.tell()
- self.assertEqual(pos2, 2)
- self.assertEqual(f.readframes(2), '\x17t\x17t"\xad"\xad')
- f.setpos(pos2)
- self.assertEqual(f.readframes(2), '\x17t\x17t"\xad"\xad')
- f.setpos(pos0)
- self.assertEqual(f.readframes(2), '\x00\x00\x00\x00\x0b\xd4\x0b\xd4')
-
- def test_write(self):
- f = self.f = aifc.open(self.sndfilepath)
- fout = self.fout = aifc.open(TESTFN, 'wb')
- fout.aifc()
- fout.setparams(f.getparams())
- for frame in range(f.getnframes()):
- fout.writeframes(f.readframes(1))
- fout.close()
- fout = self.fout = aifc.open(TESTFN, 'rb')
- f.rewind()
- self.assertEqual(f.getparams(), fout.getparams())
- self.assertEqual(f.readframes(5), fout.readframes(5))
+ self.f = aifc.open(findfile('Sine-1000Hz-300ms.aif'))
+
+ def test_write_markers_values(self):
+ fout = aifc.open(io.BytesIO(), 'wb')
+ self.assertEqual(fout.getmarkers(), None)
+ fout.setmark(1, 0, 'foo1')
+ fout.setmark(1, 1, 'foo2')
+ self.assertEqual(fout.getmark(1), (1, 1, 'foo2'))
+ self.assertEqual(fout.getmarkers(), [(1, 1, 'foo2')])
+ fout.initfp(None)
- def test_compress(self):
- f = self.f = aifc.open(self.sndfilepath)
+ def test_read_markers(self):
fout = self.fout = aifc.open(TESTFN, 'wb')
- fout.aifc()
- fout.setnchannels(f.getnchannels())
- fout.setsampwidth(f.getsampwidth())
- fout.setframerate(f.getframerate())
- fout.setcomptype('ULAW', 'foo')
- for frame in range(f.getnframes()):
- fout.writeframes(f.readframes(1))
+ fout.aiff()
+ fout.setparams((1, 1, 1, 1, 'NONE', ''))
+ fout.setmark(1, 0, 'odd')
+ fout.setmark(2, 0, 'even')
+ fout.writeframes('\x00')
fout.close()
- self.assertLess(
- os.stat(TESTFN).st_size,
- os.stat(self.sndfilepath).st_size*0.75,
- )
- fout = self.fout = aifc.open(TESTFN, 'rb')
- f.rewind()
- self.assertEqual(f.getparams()[0:3], fout.getparams()[0:3])
- self.assertEqual(fout.getcomptype(), 'ULAW')
- self.assertEqual(fout.getcompname(), 'foo')
- # XXX: this test fails, not sure if it should succeed or not
- # self.assertEqual(f.readframes(5), fout.readframes(5))
-
- def test_close(self):
- class Wrapfile(object):
- def __init__(self, file):
- self.file = open(file, 'rb')
- self.closed = False
- def close(self):
- self.file.close()
- self.closed = True
- def __getattr__(self, attr): return getattr(self.file, attr)
- testfile = Wrapfile(self.sndfilepath)
- f = self.f = aifc.open(testfile)
- self.assertEqual(testfile.closed, False)
- f.close()
- self.assertEqual(testfile.closed, True)
+ f = self.f = aifc.open(TESTFN, 'rb')
+ self.assertEqual(f.getmarkers(), [(1, 0, 'odd'), (2, 0, 'even')])
+ self.assertEqual(f.getmark(1), (1, 0, 'odd'))
+ self.assertEqual(f.getmark(2), (2, 0, 'even'))
+ self.assertRaises(aifc.Error, f.getmark, 3)
class AIFCLowLevelTest(unittest.TestCase):
@@ -120,7 +165,7 @@ class AIFCLowLevelTest(unittest.TestCase):
self.assertEqual(read_written(x, 'float'), x)
for x in (float('NaN'), float('Inf')):
self.assertEqual(read_written(x, 'float'), aifc._HUGE_VAL)
- for x in (b'', b'foo', b'a' * 255):
+ for x in ('', 'foo', 'a' * 255):
self.assertEqual(read_written(x, 'string'), x)
for x in (-0x7FFFFFFF, -1, 0, 1, 0x7FFFFFFF):
self.assertEqual(read_written(x, 'long'), x)
@@ -132,7 +177,7 @@ class AIFCLowLevelTest(unittest.TestCase):
self.assertEqual(read_written(x, 'ushort'), x)
def test_read_raises(self):
- f = io.BytesIO(b'\x00')
+ f = io.BytesIO('\x00')
self.assertRaises(EOFError, aifc._read_ulong, f)
self.assertRaises(EOFError, aifc._read_long, f)
self.assertRaises(EOFError, aifc._read_ushort, f)
@@ -141,13 +186,156 @@ class AIFCLowLevelTest(unittest.TestCase):
def test_write_long_string_raises(self):
f = io.BytesIO()
with self.assertRaises(ValueError):
- aifc._write_string(f, b'too long' * 255)
+ aifc._write_string(f, 'too long' * 255)
+ def test_wrong_open_mode(self):
+ with self.assertRaises(aifc.Error):
+ aifc.open(TESTFN, 'wrong_mode')
-def test_main():
- run_unittest(AIFCTest)
- run_unittest(AIFCLowLevelTest)
+ def test_read_wrong_form(self):
+ b1 = io.BytesIO('WRNG' + struct.pack('>L', 0))
+ b2 = io.BytesIO('FORM' + struct.pack('>L', 4) + 'WRNG')
+ self.assertRaises(aifc.Error, aifc.open, b1)
+ self.assertRaises(aifc.Error, aifc.open, b2)
+
+ def test_read_no_comm_chunk(self):
+ b = io.BytesIO('FORM' + struct.pack('>L', 4) + 'AIFF')
+ self.assertRaises(aifc.Error, aifc.open, b)
+ def test_read_wrong_compression_type(self):
+ b = 'FORM' + struct.pack('>L', 4) + 'AIFC'
+ b += 'COMM' + struct.pack('>LhlhhLL', 23, 0, 0, 0, 0, 0, 0)
+ b += 'WRNG' + struct.pack('B', 0)
+ self.assertRaises(aifc.Error, aifc.open, io.BytesIO(b))
+
+ def test_read_wrong_marks(self):
+ b = 'FORM' + struct.pack('>L', 4) + 'AIFF'
+ b += 'COMM' + struct.pack('>LhlhhLL', 18, 0, 0, 0, 0, 0, 0)
+ b += 'SSND' + struct.pack('>L', 8) + '\x00' * 8
+ b += 'MARK' + struct.pack('>LhB', 3, 1, 1)
+ with captured_stdout() as s:
+ f = aifc.open(io.BytesIO(b))
+ self.assertEqual(s.getvalue(), 'Warning: MARK chunk contains '
+ 'only 0 markers instead of 1\n')
+ self.assertEqual(f.getmarkers(), None)
+
+ def test_read_comm_kludge_compname_even(self):
+ b = 'FORM' + struct.pack('>L', 4) + 'AIFC'
+ b += 'COMM' + struct.pack('>LhlhhLL', 18, 0, 0, 0, 0, 0, 0)
+ b += 'NONE' + struct.pack('B', 4) + 'even' + '\x00'
+ b += 'SSND' + struct.pack('>L', 8) + '\x00' * 8
+ with captured_stdout() as s:
+ f = aifc.open(io.BytesIO(b))
+ self.assertEqual(s.getvalue(), 'Warning: bad COMM chunk size\n')
+ self.assertEqual(f.getcompname(), 'even')
+
+ def test_read_comm_kludge_compname_odd(self):
+ b = 'FORM' + struct.pack('>L', 4) + 'AIFC'
+ b += 'COMM' + struct.pack('>LhlhhLL', 18, 0, 0, 0, 0, 0, 0)
+ b += 'NONE' + struct.pack('B', 3) + 'odd'
+ b += 'SSND' + struct.pack('>L', 8) + '\x00' * 8
+ with captured_stdout() as s:
+ f = aifc.open(io.BytesIO(b))
+ self.assertEqual(s.getvalue(), 'Warning: bad COMM chunk size\n')
+ self.assertEqual(f.getcompname(), 'odd')
+
+ def test_write_params_raises(self):
+ fout = aifc.open(io.BytesIO(), 'wb')
+ wrong_params = (0, 0, 0, 0, 'WRNG', '')
+ self.assertRaises(aifc.Error, fout.setparams, wrong_params)
+ self.assertRaises(aifc.Error, fout.getparams)
+ self.assertRaises(aifc.Error, fout.setnchannels, 0)
+ self.assertRaises(aifc.Error, fout.getnchannels)
+ self.assertRaises(aifc.Error, fout.setsampwidth, 0)
+ self.assertRaises(aifc.Error, fout.getsampwidth)
+ self.assertRaises(aifc.Error, fout.setframerate, 0)
+ self.assertRaises(aifc.Error, fout.getframerate)
+ self.assertRaises(aifc.Error, fout.setcomptype, 'WRNG', '')
+ fout.aiff()
+ fout.setnchannels(1)
+ fout.setsampwidth(1)
+ fout.setframerate(1)
+ fout.setnframes(1)
+ fout.writeframes('\x00')
+ self.assertRaises(aifc.Error, fout.setparams, (1, 1, 1, 1, 1, 1))
+ self.assertRaises(aifc.Error, fout.setnchannels, 1)
+ self.assertRaises(aifc.Error, fout.setsampwidth, 1)
+ self.assertRaises(aifc.Error, fout.setframerate, 1)
+ self.assertRaises(aifc.Error, fout.setnframes, 1)
+ self.assertRaises(aifc.Error, fout.setcomptype, 'NONE', '')
+ self.assertRaises(aifc.Error, fout.aiff)
+ self.assertRaises(aifc.Error, fout.aifc)
+
+ def test_write_params_singles(self):
+ fout = aifc.open(io.BytesIO(), 'wb')
+ fout.aifc()
+ fout.setnchannels(1)
+ fout.setsampwidth(2)
+ fout.setframerate(3)
+ fout.setnframes(4)
+ fout.setcomptype('NONE', 'name')
+ self.assertEqual(fout.getnchannels(), 1)
+ self.assertEqual(fout.getsampwidth(), 2)
+ self.assertEqual(fout.getframerate(), 3)
+ self.assertEqual(fout.getnframes(), 0)
+ self.assertEqual(fout.tell(), 0)
+ self.assertEqual(fout.getcomptype(), 'NONE')
+ self.assertEqual(fout.getcompname(), 'name')
+ fout.writeframes('\x00' * 4 * fout.getsampwidth() * fout.getnchannels())
+ self.assertEqual(fout.getnframes(), 4)
+ self.assertEqual(fout.tell(), 4)
+
+ def test_write_params_bunch(self):
+ fout = aifc.open(io.BytesIO(), 'wb')
+ fout.aifc()
+ p = (1, 2, 3, 4, 'NONE', 'name')
+ fout.setparams(p)
+ self.assertEqual(fout.getparams(), p)
+ fout.initfp(None)
+
+ def test_write_header_raises(self):
+ fout = aifc.open(io.BytesIO(), 'wb')
+ self.assertRaises(aifc.Error, fout.close)
+ fout = aifc.open(io.BytesIO(), 'wb')
+ fout.setnchannels(1)
+ self.assertRaises(aifc.Error, fout.close)
+ fout = aifc.open(io.BytesIO(), 'wb')
+ fout.setnchannels(1)
+ fout.setsampwidth(1)
+ self.assertRaises(aifc.Error, fout.close)
+
+ def test_write_header_comptype_raises(self):
+ for comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'):
+ fout = aifc.open(io.BytesIO(), 'wb')
+ fout.setsampwidth(1)
+ fout.setcomptype(comptype, '')
+ self.assertRaises(aifc.Error, fout.close)
+ fout.initfp(None)
+
+ def test_write_markers_raises(self):
+ fout = aifc.open(io.BytesIO(), 'wb')
+ self.assertRaises(aifc.Error, fout.setmark, 0, 0, '')
+ self.assertRaises(aifc.Error, fout.setmark, 1, -1, '')
+ self.assertRaises(aifc.Error, fout.setmark, 1, 0, None)
+ self.assertRaises(aifc.Error, fout.getmark, 1)
+ fout.initfp(None)
+
+ def test_write_aiff_by_extension(self):
+ sampwidth = 2
+ fout = self.fout = aifc.open(TESTFN + '.aiff', 'wb')
+ fout.setparams((1, sampwidth, 1, 1, 'ULAW', ''))
+ frames = '\x00' * fout.getnchannels() * sampwidth
+ fout.writeframes(frames)
+ fout.close()
+ f = self.f = aifc.open(TESTFN + '.aiff', 'rb')
+ self.assertEqual(f.getcomptype(), 'NONE')
+ f.close()
+
+
+def test_main():
+ run_unittest(AifcPCM8Test, AifcPCM16Test, AifcPCM24Test,
+ AifcPCM32Test, AifcULAWTest,
+ AifcMiscTest, AIFCLowLevelTest)
if __name__ == "__main__":
- unittest.main()
+ test_main()
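For context, a minimal sketch of the aifc marker round-trip that the new AifcMiscTest.test_read_markers exercises; the output path and parameter values here are arbitrary, not taken from the patch:

import aifc

out = aifc.open('demo-markers.aiff', 'wb')   # any temporary path works
out.aiff()                                   # plain AIFF rather than AIFF-C
out.setparams((1, 1, 1, 1, 'NONE', ''))      # 1 channel, 1 byte/sample, 1 Hz, 1 frame
out.setmark(1, 0, 'odd')                     # setmark(id, position, name)
out.setmark(2, 0, 'even')
out.writeframes('\x00')                      # one mono 8-bit frame
out.close()

inp = aifc.open('demo-markers.aiff', 'rb')
assert inp.getmarkers() == [(1, 0, 'odd'), (2, 0, 'even')]
assert inp.getmark(1) == (1, 0, 'odd')
inp.close()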
diff --git a/Lib/test/test_al.py b/Lib/test/test_al.py
index 688576d..a00abfc 100755..100644
--- a/Lib/test/test_al.py
+++ b/Lib/test/test_al.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""Whimpy test script for the al module
Roger E. Masse
"""
diff --git a/Lib/test/test_anydbm.py b/Lib/test/test_anydbm.py
index a01dd0b..2412061 100644
--- a/Lib/test/test_anydbm.py
+++ b/Lib/test/test_anydbm.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""Test script for the anydbm module
based on testdumbdbm.py
"""
diff --git a/Lib/test/test_argparse.py b/Lib/test/test_argparse.py
index 1a5f05e..0df66ad 100644
--- a/Lib/test/test_argparse.py
+++ b/Lib/test/test_argparse.py
@@ -47,6 +47,9 @@ class TempDirMixin(object):
def tearDown(self):
os.chdir(self.old_dir)
+ for root, dirs, files in os.walk(self.temp_dir, topdown=False):
+ for name in files:
+ os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE)
shutil.rmtree(self.temp_dir, True)
def create_readonly_file(self, filename):
@@ -1374,6 +1377,7 @@ class TestArgumentsFromFile(TempDirMixin, ParserTestCase):
('X @hello', NS(a=None, x='X', y=['hello world!'])),
('-a B @recursive Y Z', NS(a='A', x='hello world!', y=['Y', 'Z'])),
('X @recursive Z -a B', NS(a='B', x='X', y=['hello world!', 'Z'])),
+ (["-a", "", "X", "Y"], NS(a='', x='X', y=['Y'])),
]
@@ -1466,6 +1470,22 @@ class TestFileTypeR(TempDirMixin, ParserTestCase):
('readonly', NS(x=None, spam=RFile('readonly'))),
]
+class TestFileTypeDefaults(TempDirMixin, ParserTestCase):
+ """Test that a file is not created unless the default is needed"""
+ def setUp(self):
+ super(TestFileTypeDefaults, self).setUp()
+ file = open(os.path.join(self.temp_dir, 'good'), 'w')
+ file.write('good')
+ file.close()
+
+ argument_signatures = [
+ Sig('-c', type=argparse.FileType('r'), default='no-file.txt'),
+ ]
+ # should provoke no such file error
+ failures = ['']
+ # should not provoke error because default file is created
+ successes = [('-c good', NS(c=RFile('good')))]
+
class TestFileTypeRB(TempDirMixin, ParserTestCase):
"""Test the FileType option/argument type for reading files"""
@@ -1763,6 +1783,14 @@ class TestAddSubparsers(TestCase):
parser2.add_argument('-y', choices='123', help='y help')
parser2.add_argument('z', type=complex, nargs='*', help='z help')
+ # add third sub-parser
+ parser3_kwargs = dict(description='3 description')
+ if subparser_help:
+ parser3_kwargs['help'] = '3 help'
+ parser3 = subparsers.add_parser('3', **parser3_kwargs)
+ parser3.add_argument('t', type=int, help='t help')
+ parser3.add_argument('u', nargs='...', help='u help')
+
# return the main parser
return parser
@@ -1792,6 +1820,10 @@ class TestAddSubparsers(TestCase):
self.parser.parse_args('--foo 0.125 1 c'.split()),
NS(foo=True, bar=0.125, w=None, x='c'),
)
+ self.assertEqual(
+ self.parser.parse_args('-1.5 3 11 -- a --foo 7 -- b'.split()),
+ NS(foo=False, bar=-1.5, t=11, u=['a', '--foo', '7', '--', 'b']),
+ )
def test_parse_known_args(self):
self.assertEqual(
@@ -1826,15 +1858,15 @@ class TestAddSubparsers(TestCase):
def test_help(self):
self.assertEqual(self.parser.format_usage(),
- 'usage: PROG [-h] [--foo] bar {1,2} ...\n')
+ 'usage: PROG [-h] [--foo] bar {1,2,3} ...\n')
self.assertEqual(self.parser.format_help(), textwrap.dedent('''\
- usage: PROG [-h] [--foo] bar {1,2} ...
+ usage: PROG [-h] [--foo] bar {1,2,3} ...
main description
positional arguments:
bar bar help
- {1,2} command help
+ {1,2,3} command help
optional arguments:
-h, --help show this help message and exit
@@ -1845,15 +1877,15 @@ class TestAddSubparsers(TestCase):
# Make sure - is still used for help if it is a non-first prefix char
parser = self._get_parser(prefix_chars='+:-')
self.assertEqual(parser.format_usage(),
- 'usage: PROG [-h] [++foo] bar {1,2} ...\n')
+ 'usage: PROG [-h] [++foo] bar {1,2,3} ...\n')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
- usage: PROG [-h] [++foo] bar {1,2} ...
+ usage: PROG [-h] [++foo] bar {1,2,3} ...
main description
positional arguments:
bar bar help
- {1,2} command help
+ {1,2,3} command help
optional arguments:
-h, --help show this help message and exit
@@ -1864,15 +1896,15 @@ class TestAddSubparsers(TestCase):
def test_help_alternate_prefix_chars(self):
parser = self._get_parser(prefix_chars='+:/')
self.assertEqual(parser.format_usage(),
- 'usage: PROG [+h] [++foo] bar {1,2} ...\n')
+ 'usage: PROG [+h] [++foo] bar {1,2,3} ...\n')
self.assertEqual(parser.format_help(), textwrap.dedent('''\
- usage: PROG [+h] [++foo] bar {1,2} ...
+ usage: PROG [+h] [++foo] bar {1,2,3} ...
main description
positional arguments:
bar bar help
- {1,2} command help
+ {1,2,3} command help
optional arguments:
+h, ++help show this help message and exit
@@ -1881,18 +1913,19 @@ class TestAddSubparsers(TestCase):
def test_parser_command_help(self):
self.assertEqual(self.command_help_parser.format_usage(),
- 'usage: PROG [-h] [--foo] bar {1,2} ...\n')
+ 'usage: PROG [-h] [--foo] bar {1,2,3} ...\n')
self.assertEqual(self.command_help_parser.format_help(),
textwrap.dedent('''\
- usage: PROG [-h] [--foo] bar {1,2} ...
+ usage: PROG [-h] [--foo] bar {1,2,3} ...
main description
positional arguments:
bar bar help
- {1,2} command help
+ {1,2,3} command help
1 1 help
2 2 help
+ 3 3 help
optional arguments:
-h, --help show this help message and exit
@@ -2906,6 +2939,60 @@ class TestHelpBiggerOptionals(HelpTestCase):
0.1
'''
+class TestShortColumns(HelpTestCase):
+ '''Test extremely small number of columns.
+
+ TestCase prevents "COLUMNS" from being too small in the tests themselves,
+ but we don't want any exceptions thrown in such case. Only ugly representation.
+ '''
+ def setUp(self):
+ env = test_support.EnvironmentVarGuard()
+ env.set("COLUMNS", '15')
+ self.addCleanup(env.__exit__)
+
+ parser_signature = TestHelpBiggerOptionals.parser_signature
+ argument_signatures = TestHelpBiggerOptionals.argument_signatures
+ argument_group_signatures = TestHelpBiggerOptionals.argument_group_signatures
+ usage = '''\
+ usage: PROG
+ [-h]
+ [-v]
+ [-x]
+ [--y Y]
+ foo
+ bar
+ '''
+ help = usage + '''\
+
+ DESCRIPTION
+
+ positional arguments:
+ foo
+ FOO HELP
+ bar
+ BAR HELP
+
+ optional arguments:
+ -h, --help
+ show this
+ help
+ message and
+ exit
+ -v, --version
+ show
+ program's
+ version
+ number and
+ exit
+ -x
+ X HELP
+ --y Y
+ Y HELP
+
+ EPILOG
+ '''
+ version = TestHelpBiggerOptionals.version
+
class TestHelpBiggerOptionalGroups(HelpTestCase):
"""Make sure that argument help aligns when options are longer"""
@@ -4366,6 +4453,12 @@ class TestNamespace(TestCase):
self.assertTrue(ns2 != ns3)
self.assertTrue(ns2 != ns4)
+ def test_equality_returns_notimplemeted(self):
+ # See issue 21481
+ ns = argparse.Namespace(a=1, b=2)
+ self.assertIs(ns.__eq__(None), NotImplemented)
+ self.assertIs(ns.__ne__(None), NotImplemented)
+
# ===================
# File encoding tests
@@ -4418,12 +4511,95 @@ class TestArgumentTypeError(TestCase):
else:
self.fail()
+# ================================================
+# Check that the type function is called only once
+# ================================================
+
+class TestTypeFunctionCallOnlyOnce(TestCase):
+
+ def test_type_function_call_only_once(self):
+ def spam(string_to_convert):
+ self.assertEqual(string_to_convert, 'spam!')
+ return 'foo_converted'
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--foo', type=spam, default='bar')
+ args = parser.parse_args('--foo spam!'.split())
+ self.assertEqual(NS(foo='foo_converted'), args)
+
+# ==================================================================
+# Check semantics regarding the default argument and type conversion
+# ==================================================================
+
+class TestTypeFunctionCalledOnDefault(TestCase):
+
+ def test_type_function_call_with_non_string_default(self):
+ def spam(int_to_convert):
+ self.assertEqual(int_to_convert, 0)
+ return 'foo_converted'
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--foo', type=spam, default=0)
+ args = parser.parse_args([])
+ # foo should *not* be converted because its default is not a string.
+ self.assertEqual(NS(foo=0), args)
+
+ def test_type_function_call_with_string_default(self):
+ def spam(int_to_convert):
+ return 'foo_converted'
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--foo', type=spam, default='0')
+ args = parser.parse_args([])
+ # foo is converted because its default is a string.
+ self.assertEqual(NS(foo='foo_converted'), args)
+
+ def test_no_double_type_conversion_of_default(self):
+ def extend(str_to_convert):
+ return str_to_convert + '*'
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--test', type=extend, default='*')
+ args = parser.parse_args([])
+ # The test argument will be two stars, one coming from the default
+ # value and one coming from the type conversion being called exactly
+ # once.
+ self.assertEqual(NS(test='**'), args)
+
+ def test_issue_15906(self):
+ # Issue #15906: When action='append', type=str, default=[] are
+ # providing, the dest value was the string representation "[]" when it
+ # should have been an empty list.
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--test', dest='test', type=str,
+ default=[], action='append')
+ args = parser.parse_args([])
+ self.assertEqual(args.test, [])
+
# ======================
# parse_known_args tests
# ======================
class TestParseKnownArgs(TestCase):
+ def test_arguments_tuple(self):
+ parser = argparse.ArgumentParser()
+ parser.parse_args(())
+
+ def test_arguments_list(self):
+ parser = argparse.ArgumentParser()
+ parser.parse_args([])
+
+ def test_arguments_tuple_positional(self):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('x')
+ parser.parse_args(('x',))
+
+ def test_arguments_list_positional(self):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('x')
+ parser.parse_args(['x'])
+
def test_optionals(self):
parser = argparse.ArgumentParser()
parser.add_argument('--foo')
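For context, a minimal sketch of the default/type-conversion semantics that TestTypeFunctionCalledOnDefault pins down above: a string default is passed through the type callable exactly once, while a non-string default is left untouched. Argument names here are illustrative only:

import argparse

def star(value):
    # toy converter: appends '*' so each call is visible in the result
    return value + '*'

parser = argparse.ArgumentParser()
parser.add_argument('--tag', type=star, default='x')   # string default: converted
parser.add_argument('--num', type=star, default=0)     # non-string default: not converted

args = parser.parse_args([])
assert args.tag == 'x*'   # star() ran exactly once on the string default
assert args.num == 0      # star() never ran on the non-string default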
diff --git a/Lib/test/test_array.py b/Lib/test/test_array.py
index acf5b8f..b933cbf 100755..100644
--- a/Lib/test/test_array.py
+++ b/Lib/test/test_array.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""Test the arraymodule.
Roger E. Masse
"""
@@ -9,6 +8,7 @@ from test import test_support
from weakref import proxy
import array, cStringIO
from cPickle import loads, dumps, HIGHEST_PROTOCOL
+import sys
class ArraySubclass(array.array):
pass
@@ -49,7 +49,7 @@ class BaseTest(unittest.TestCase):
def test_constructor(self):
a = array.array(self.typecode)
self.assertEqual(a.typecode, self.typecode)
- self.assertTrue(a.itemsize>=self.minitemsize)
+ self.assertGreaterEqual(a.itemsize, self.minitemsize)
self.assertRaises(TypeError, array.array, self.typecode, None)
def test_len(self):
@@ -253,39 +253,39 @@ class BaseTest(unittest.TestCase):
def test_cmp(self):
a = array.array(self.typecode, self.example)
- self.assertTrue((a == 42) is False)
- self.assertTrue((a != 42) is True)
+ self.assertIs(a == 42, False)
+ self.assertIs(a != 42, True)
- self.assertTrue((a == a) is True)
- self.assertTrue((a != a) is False)
- self.assertTrue((a < a) is False)
- self.assertTrue((a <= a) is True)
- self.assertTrue((a > a) is False)
- self.assertTrue((a >= a) is True)
+ self.assertIs(a == a, True)
+ self.assertIs(a != a, False)
+ self.assertIs(a < a, False)
+ self.assertIs(a <= a, True)
+ self.assertIs(a > a, False)
+ self.assertIs(a >= a, True)
al = array.array(self.typecode, self.smallerexample)
ab = array.array(self.typecode, self.biggerexample)
- self.assertTrue((a == 2*a) is False)
- self.assertTrue((a != 2*a) is True)
- self.assertTrue((a < 2*a) is True)
- self.assertTrue((a <= 2*a) is True)
- self.assertTrue((a > 2*a) is False)
- self.assertTrue((a >= 2*a) is False)
-
- self.assertTrue((a == al) is False)
- self.assertTrue((a != al) is True)
- self.assertTrue((a < al) is False)
- self.assertTrue((a <= al) is False)
- self.assertTrue((a > al) is True)
- self.assertTrue((a >= al) is True)
-
- self.assertTrue((a == ab) is False)
- self.assertTrue((a != ab) is True)
- self.assertTrue((a < ab) is True)
- self.assertTrue((a <= ab) is True)
- self.assertTrue((a > ab) is False)
- self.assertTrue((a >= ab) is False)
+ self.assertIs(a == 2*a, False)
+ self.assertIs(a != 2*a, True)
+ self.assertIs(a < 2*a, True)
+ self.assertIs(a <= 2*a, True)
+ self.assertIs(a > 2*a, False)
+ self.assertIs(a >= 2*a, False)
+
+ self.assertIs(a == al, False)
+ self.assertIs(a != al, True)
+ self.assertIs(a < al, False)
+ self.assertIs(a <= al, False)
+ self.assertIs(a > al, True)
+ self.assertIs(a >= al, True)
+
+ self.assertIs(a == ab, False)
+ self.assertIs(a != ab, True)
+ self.assertIs(a < ab, True)
+ self.assertIs(a <= ab, True)
+ self.assertIs(a > ab, False)
+ self.assertIs(a >= ab, False)
def test_add(self):
a = array.array(self.typecode, self.example) \
@@ -304,7 +304,7 @@ class BaseTest(unittest.TestCase):
a = array.array(self.typecode, self.example[::-1])
b = a
a += array.array(self.typecode, 2*self.example)
- self.assertTrue(a is b)
+ self.assertIs(a, b)
self.assertEqual(
a,
array.array(self.typecode, self.example[::-1]+2*self.example)
@@ -353,22 +353,22 @@ class BaseTest(unittest.TestCase):
b = a
a *= 5
- self.assertTrue(a is b)
+ self.assertIs(a, b)
self.assertEqual(
a,
array.array(self.typecode, 5*self.example)
)
a *= 0
- self.assertTrue(a is b)
+ self.assertIs(a, b)
self.assertEqual(a, array.array(self.typecode))
a *= 1000
- self.assertTrue(a is b)
+ self.assertIs(a, b)
self.assertEqual(a, array.array(self.typecode))
a *= -1
- self.assertTrue(a is b)
+ self.assertIs(a, b)
self.assertEqual(a, array.array(self.typecode))
a = array.array(self.typecode, self.example)
@@ -753,7 +753,7 @@ class BaseTest(unittest.TestCase):
try:
import gc
except ImportError:
- return
+ self.skipTest('gc module not available')
a = array.array(self.typecode)
l = [iter(a)]
l.append(l)
@@ -772,15 +772,15 @@ class BaseTest(unittest.TestCase):
s = None
self.assertRaises(ReferenceError, len, p)
+ @unittest.skipUnless(hasattr(sys, 'getrefcount'),
+ 'test needs sys.getrefcount()')
def test_bug_782369(self):
- import sys
- if hasattr(sys, "getrefcount"):
- for i in range(10):
- b = array.array('B', range(64))
- rc = sys.getrefcount(10)
- for i in range(10):
- b = array.array('B', range(64))
- self.assertEqual(rc, sys.getrefcount(10))
+ for i in range(10):
+ b = array.array('B', range(64))
+ rc = sys.getrefcount(10)
+ for i in range(10):
+ b = array.array('B', range(64))
+ self.assertEqual(rc, sys.getrefcount(10))
def test_subclass_with_kwargs(self):
# SF bug #1486663 -- this used to erroneously raise a TypeError
@@ -985,6 +985,19 @@ class UnsignedNumberTest(NumberTest):
upper = long(pow(2, a.itemsize * 8)) - 1L
self.check_overflow(lower, upper)
+ @test_support.cpython_only
+ def test_sizeof_with_buffer(self):
+ a = array.array(self.typecode, self.example)
+ basesize = test_support.calcvobjsize('4P')
+ buffer_size = a.buffer_info()[1] * a.itemsize
+ test_support.check_sizeof(self, a, basesize + buffer_size)
+
+ @test_support.cpython_only
+ def test_sizeof_without_buffer(self):
+ a = array.array(self.typecode)
+ basesize = test_support.calcvobjsize('4P')
+ test_support.check_sizeof(self, a, basesize)
+
class ByteTest(SignedNumberTest):
typecode = 'b'
diff --git a/Lib/test/test_ast.py b/Lib/test/test_ast.py
index e3aa5b1..0a1ca41 100644
--- a/Lib/test/test_ast.py
+++ b/Lib/test/test_ast.py
@@ -18,7 +18,7 @@ def to_tuple(t):
# These tests are compiled through "exec"
-# There should be atleast one test per statement
+# There should be at least one test per statement
exec_tests = [
# None
"None",
@@ -231,6 +231,12 @@ class AST_Tests(unittest.TestCase):
im = ast.parse("from . import y").body[0]
self.assertIsNone(im.module)
+ def test_non_interned_future_from_ast(self):
+ mod = ast.parse("from __future__ import division")
+ self.assertIsInstance(mod.body[0], ast.ImportFrom)
+ mod.body[0].module = " __future__ ".strip()
+ compile(mod, "<test>", "exec")
+
def test_base_classes(self):
self.assertTrue(issubclass(ast.For, ast.stmt))
self.assertTrue(issubclass(ast.Name, ast.expr))
diff --git a/Lib/test/test_asyncore.py b/Lib/test/test_asyncore.py
index 7d1a3cb..20eceb6 100644
--- a/Lib/test/test_asyncore.py
+++ b/Lib/test/test_asyncore.py
@@ -7,9 +7,10 @@ import sys
import time
import warnings
import errno
+import struct
from test import test_support
-from test.test_support import TESTFN, run_unittest, unlink
+from test.test_support import TESTFN, run_unittest, unlink, HOST
from StringIO import StringIO
try:
@@ -17,7 +18,6 @@ try:
except ImportError:
threading = None
-HOST = test_support.HOST
class dummysocket:
def __init__(self):
@@ -483,8 +483,9 @@ class TCPServer(asyncore.dispatcher):
return self.socket.getsockname()[:2]
def handle_accept(self):
- sock, addr = self.accept()
- self.handler(sock)
+ pair = self.accept()
+ if pair is not None:
+ self.handler(pair[0])
def handle_error(self):
raise
@@ -703,6 +704,27 @@ class BaseTestAPI(unittest.TestCase):
finally:
sock.close()
+ @unittest.skipUnless(threading, 'Threading required for this test.')
+ @test_support.reap_threads
+ def test_quick_connect(self):
+ # see: http://bugs.python.org/issue10340
+ server = TCPServer()
+ t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1, count=500))
+ t.start()
+ self.addCleanup(t.join)
+
+ for x in xrange(20):
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.settimeout(.2)
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
+ struct.pack('ii', 1, 0))
+ try:
+ s.connect(server.address)
+ except socket.error:
+ pass
+ finally:
+ s.close()
+
class TestAPI_UseSelect(BaseTestAPI):
use_poll = False
diff --git a/Lib/test/test_audioop.py b/Lib/test/test_audioop.py
index 5b8e0f1..e5a5159 100644
--- a/Lib/test/test_audioop.py
+++ b/Lib/test/test_audioop.py
@@ -1,25 +1,33 @@
import audioop
+import sys
import unittest
+import struct
from test.test_support import run_unittest
-endian = 'big' if audioop.getsample('\0\1', 2, 0) == 1 else 'little'
-def gendata1():
- return '\0\1\2'
+formats = {
+ 1: 'b',
+ 2: 'h',
+ 4: 'i',
+}
-def gendata2():
- if endian == 'big':
- return '\0\0\0\1\0\2'
- else:
- return '\0\0\1\0\2\0'
+def pack(width, data):
+ return struct.pack('=%d%s' % (len(data), formats[width]), *data)
-def gendata4():
- if endian == 'big':
- return '\0\0\0\0\0\0\0\1\0\0\0\2'
- else:
- return '\0\0\0\0\1\0\0\0\2\0\0\0'
+packs = {
+ 1: lambda *data: pack(1, data),
+ 2: lambda *data: pack(2, data),
+ 4: lambda *data: pack(4, data),
+}
+maxvalues = {w: (1 << (8 * w - 1)) - 1 for w in (1, 2, 4)}
+minvalues = {w: -1 << (8 * w - 1) for w in (1, 2, 4)}
-data = [gendata1(), gendata2(), gendata4()]
+datas = {
+ 1: b'\x00\x12\x45\xbb\x7f\x80\xff',
+ 2: packs[2](0, 0x1234, 0x4567, -0x4567, 0x7fff, -0x8000, -1),
+ 4: packs[4](0, 0x12345678, 0x456789ab, -0x456789ab,
+ 0x7fffffff, -0x80000000, -1),
+}
INVALID_DATA = [
(b'abc', 0),
@@ -31,164 +39,315 @@ INVALID_DATA = [
class TestAudioop(unittest.TestCase):
def test_max(self):
- self.assertEqual(audioop.max(data[0], 1), 2)
- self.assertEqual(audioop.max(data[1], 2), 2)
- self.assertEqual(audioop.max(data[2], 4), 2)
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.max(b'', w), 0)
+ p = packs[w]
+ self.assertEqual(audioop.max(p(5), w), 5)
+ self.assertEqual(audioop.max(p(5, -8, -1), w), 8)
+ self.assertEqual(audioop.max(p(maxvalues[w]), w), maxvalues[w])
+ self.assertEqual(audioop.max(p(minvalues[w]), w), -minvalues[w])
+ self.assertEqual(audioop.max(datas[w], w), -minvalues[w])
def test_minmax(self):
- self.assertEqual(audioop.minmax(data[0], 1), (0, 2))
- self.assertEqual(audioop.minmax(data[1], 2), (0, 2))
- self.assertEqual(audioop.minmax(data[2], 4), (0, 2))
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.minmax(b'', w),
+ (0x7fffffff, -0x80000000))
+ p = packs[w]
+ self.assertEqual(audioop.minmax(p(5), w), (5, 5))
+ self.assertEqual(audioop.minmax(p(5, -8, -1), w), (-8, 5))
+ self.assertEqual(audioop.minmax(p(maxvalues[w]), w),
+ (maxvalues[w], maxvalues[w]))
+ self.assertEqual(audioop.minmax(p(minvalues[w]), w),
+ (minvalues[w], minvalues[w]))
+ self.assertEqual(audioop.minmax(datas[w], w),
+ (minvalues[w], maxvalues[w]))
def test_maxpp(self):
- self.assertEqual(audioop.maxpp(data[0], 1), 0)
- self.assertEqual(audioop.maxpp(data[1], 2), 0)
- self.assertEqual(audioop.maxpp(data[2], 4), 0)
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.maxpp(b'', w), 0)
+ self.assertEqual(audioop.maxpp(packs[w](*range(100)), w), 0)
+ self.assertEqual(audioop.maxpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
+ self.assertEqual(audioop.maxpp(datas[w], w),
+ maxvalues[w] - minvalues[w])
def test_avg(self):
- self.assertEqual(audioop.avg(data[0], 1), 1)
- self.assertEqual(audioop.avg(data[1], 2), 1)
- self.assertEqual(audioop.avg(data[2], 4), 1)
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.avg(b'', w), 0)
+ p = packs[w]
+ self.assertEqual(audioop.avg(p(5), w), 5)
+ self.assertEqual(audioop.avg(p(5, 8), w), 6)
+ self.assertEqual(audioop.avg(p(5, -8), w), -2)
+ self.assertEqual(audioop.avg(p(maxvalues[w], maxvalues[w]), w),
+ maxvalues[w])
+ self.assertEqual(audioop.avg(p(minvalues[w], minvalues[w]), w),
+ minvalues[w])
+ self.assertEqual(audioop.avg(packs[4](0x50000000, 0x70000000), 4),
+ 0x60000000)
+ self.assertEqual(audioop.avg(packs[4](-0x50000000, -0x70000000), 4),
+ -0x60000000)
def test_avgpp(self):
- self.assertEqual(audioop.avgpp(data[0], 1), 0)
- self.assertEqual(audioop.avgpp(data[1], 2), 0)
- self.assertEqual(audioop.avgpp(data[2], 4), 0)
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.avgpp(b'', w), 0)
+ self.assertEqual(audioop.avgpp(packs[w](*range(100)), w), 0)
+ self.assertEqual(audioop.avgpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
+ self.assertEqual(audioop.avgpp(datas[1], 1), 196)
+ self.assertEqual(audioop.avgpp(datas[2], 2), 50534)
+ self.assertEqual(audioop.avgpp(datas[4], 4), 3311897002)
def test_rms(self):
- self.assertEqual(audioop.rms(data[0], 1), 1)
- self.assertEqual(audioop.rms(data[1], 2), 1)
- self.assertEqual(audioop.rms(data[2], 4), 1)
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.rms(b'', w), 0)
+ p = packs[w]
+ self.assertEqual(audioop.rms(p(*range(100)), w), 57)
+ self.assertAlmostEqual(audioop.rms(p(maxvalues[w]) * 5, w),
+ maxvalues[w], delta=1)
+ self.assertAlmostEqual(audioop.rms(p(minvalues[w]) * 5, w),
+ -minvalues[w], delta=1)
+ self.assertEqual(audioop.rms(datas[1], 1), 77)
+ self.assertEqual(audioop.rms(datas[2], 2), 20001)
+ self.assertEqual(audioop.rms(datas[4], 4), 1310854152)
def test_cross(self):
- self.assertEqual(audioop.cross(data[0], 1), 0)
- self.assertEqual(audioop.cross(data[1], 2), 0)
- self.assertEqual(audioop.cross(data[2], 4), 0)
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.cross(b'', w), -1)
+ p = packs[w]
+ self.assertEqual(audioop.cross(p(0, 1, 2), w), 0)
+ self.assertEqual(audioop.cross(p(1, 2, -3, -4), w), 1)
+ self.assertEqual(audioop.cross(p(-1, -2, 3, 4), w), 1)
+ self.assertEqual(audioop.cross(p(0, minvalues[w]), w), 1)
+ self.assertEqual(audioop.cross(p(minvalues[w], maxvalues[w]), w), 1)
def test_add(self):
- data2 = []
- for d in data:
- str = ''
- for s in d:
- str = str + chr(ord(s)*2)
- data2.append(str)
- self.assertEqual(audioop.add(data[0], data[0], 1), data2[0])
- self.assertEqual(audioop.add(data[1], data[1], 2), data2[1])
- self.assertEqual(audioop.add(data[2], data[2], 4), data2[2])
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.add(b'', b'', w), b'')
+ self.assertEqual(audioop.add(datas[w], b'\0' * len(datas[w]), w),
+ datas[w])
+ self.assertEqual(audioop.add(datas[1], datas[1], 1),
+ b'\x00\x24\x7f\x80\x7f\x80\xfe')
+ self.assertEqual(audioop.add(datas[2], datas[2], 2),
+ packs[2](0, 0x2468, 0x7fff, -0x8000, 0x7fff, -0x8000, -2))
+ self.assertEqual(audioop.add(datas[4], datas[4], 4),
+ packs[4](0, 0x2468acf0, 0x7fffffff, -0x80000000,
+ 0x7fffffff, -0x80000000, -2))
def test_bias(self):
- # Note: this test assumes that avg() works
- d1 = audioop.bias(data[0], 1, 100)
- d2 = audioop.bias(data[1], 2, 100)
- d4 = audioop.bias(data[2], 4, 100)
- self.assertEqual(audioop.avg(d1, 1), 101)
- self.assertEqual(audioop.avg(d2, 2), 101)
- self.assertEqual(audioop.avg(d4, 4), 101)
+ for w in 1, 2, 4:
+ for bias in 0, 1, -1, 127, -128, 0x7fffffff, -0x80000000:
+ self.assertEqual(audioop.bias(b'', w, bias), b'')
+ self.assertEqual(audioop.bias(datas[1], 1, 1),
+ b'\x01\x13\x46\xbc\x80\x81\x00')
+ self.assertEqual(audioop.bias(datas[1], 1, -1),
+ b'\xff\x11\x44\xba\x7e\x7f\xfe')
+ self.assertEqual(audioop.bias(datas[1], 1, 0x7fffffff),
+ b'\xff\x11\x44\xba\x7e\x7f\xfe')
+ self.assertEqual(audioop.bias(datas[1], 1, -0x80000000),
+ datas[1])
+ self.assertEqual(audioop.bias(datas[2], 2, 1),
+ packs[2](1, 0x1235, 0x4568, -0x4566, -0x8000, -0x7fff, 0))
+ self.assertEqual(audioop.bias(datas[2], 2, -1),
+ packs[2](-1, 0x1233, 0x4566, -0x4568, 0x7ffe, 0x7fff, -2))
+ self.assertEqual(audioop.bias(datas[2], 2, 0x7fffffff),
+ packs[2](-1, 0x1233, 0x4566, -0x4568, 0x7ffe, 0x7fff, -2))
+ self.assertEqual(audioop.bias(datas[2], 2, -0x80000000),
+ datas[2])
+ self.assertEqual(audioop.bias(datas[4], 4, 1),
+ packs[4](1, 0x12345679, 0x456789ac, -0x456789aa,
+ -0x80000000, -0x7fffffff, 0))
+ self.assertEqual(audioop.bias(datas[4], 4, -1),
+ packs[4](-1, 0x12345677, 0x456789aa, -0x456789ac,
+ 0x7ffffffe, 0x7fffffff, -2))
+ self.assertEqual(audioop.bias(datas[4], 4, 0x7fffffff),
+ packs[4](0x7fffffff, -0x6dcba989, -0x3a987656, 0x3a987654,
+ -2, -1, 0x7ffffffe))
+ self.assertEqual(audioop.bias(datas[4], 4, -0x80000000),
+ packs[4](-0x80000000, -0x6dcba988, -0x3a987655, 0x3a987655,
+ -1, 0, 0x7fffffff))
def test_lin2lin(self):
- # too simple: we test only the size
- for d1 in data:
- for d2 in data:
- got = len(d1)//3
- wtd = len(d2)//3
- self.assertEqual(len(audioop.lin2lin(d1, got, wtd)), len(d2))
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.lin2lin(datas[w], w, w), datas[w])
+
+ self.assertEqual(audioop.lin2lin(datas[1], 1, 2),
+ packs[2](0, 0x1200, 0x4500, -0x4500, 0x7f00, -0x8000, -0x100))
+ self.assertEqual(audioop.lin2lin(datas[1], 1, 4),
+ packs[4](0, 0x12000000, 0x45000000, -0x45000000,
+ 0x7f000000, -0x80000000, -0x1000000))
+ self.assertEqual(audioop.lin2lin(datas[2], 2, 1),
+ b'\x00\x12\x45\xba\x7f\x80\xff')
+ self.assertEqual(audioop.lin2lin(datas[2], 2, 4),
+ packs[4](0, 0x12340000, 0x45670000, -0x45670000,
+ 0x7fff0000, -0x80000000, -0x10000))
+ self.assertEqual(audioop.lin2lin(datas[4], 4, 1),
+ b'\x00\x12\x45\xba\x7f\x80\xff')
+ self.assertEqual(audioop.lin2lin(datas[4], 4, 2),
+ packs[2](0, 0x1234, 0x4567, -0x4568, 0x7fff, -0x8000, -1))
def test_adpcm2lin(self):
+ self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 1, None),
+ (b'\x00\x00\x00\xff\x00\xff', (-179, 40)))
+ self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 2, None),
+ (packs[2](0, 0xb, 0x29, -0x16, 0x72, -0xb3), (-179, 40)))
+ self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 4, None),
+ (packs[4](0, 0xb0000, 0x290000, -0x160000, 0x720000,
+ -0xb30000), (-179, 40)))
+
# Very cursory test
- self.assertEqual(audioop.adpcm2lin(b'\0\0', 1, None), (b'\0' * 4, (0,0)))
- self.assertEqual(audioop.adpcm2lin(b'\0\0', 2, None), (b'\0' * 8, (0,0)))
- self.assertEqual(audioop.adpcm2lin(b'\0\0', 4, None), (b'\0' * 16, (0,0)))
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.adpcm2lin(b'\0' * 5, w, None),
+ (b'\0' * w * 10, (0, 0)))
def test_lin2adpcm(self):
+ self.assertEqual(audioop.lin2adpcm(datas[1], 1, None),
+ (b'\x07\x7f\x7f', (-221, 39)))
+ self.assertEqual(audioop.lin2adpcm(datas[2], 2, None),
+ (b'\x07\x7f\x7f', (31, 39)))
+ self.assertEqual(audioop.lin2adpcm(datas[4], 4, None),
+ (b'\x07\x7f\x7f', (31, 39)))
+
# Very cursory test
- self.assertEqual(audioop.lin2adpcm('\0\0\0\0', 1, None), ('\0\0', (0,0)))
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.lin2adpcm(b'\0' * w * 10, w, None),
+ (b'\0' * 5, (0, 0)))
def test_lin2alaw(self):
- self.assertEqual(audioop.lin2alaw(data[0], 1), '\xd5\xc5\xf5')
- self.assertEqual(audioop.lin2alaw(data[1], 2), '\xd5\xd5\xd5')
- self.assertEqual(audioop.lin2alaw(data[2], 4), '\xd5\xd5\xd5')
+ self.assertEqual(audioop.lin2alaw(datas[1], 1),
+ b'\xd5\x87\xa4\x24\xaa\x2a\x5a')
+ self.assertEqual(audioop.lin2alaw(datas[2], 2),
+ b'\xd5\x87\xa4\x24\xaa\x2a\x55')
+ self.assertEqual(audioop.lin2alaw(datas[4], 4),
+ b'\xd5\x87\xa4\x24\xaa\x2a\x55')
def test_alaw2lin(self):
- # Cursory
- d = audioop.lin2alaw(data[0], 1)
- self.assertEqual(audioop.alaw2lin(d, 1), data[0])
- if endian == 'big':
- self.assertEqual(audioop.alaw2lin(d, 2),
- b'\x00\x08\x01\x08\x02\x10')
- self.assertEqual(audioop.alaw2lin(d, 4),
- b'\x00\x08\x00\x00\x01\x08\x00\x00\x02\x10\x00\x00')
- else:
- self.assertEqual(audioop.alaw2lin(d, 2),
- b'\x08\x00\x08\x01\x10\x02')
- self.assertEqual(audioop.alaw2lin(d, 4),
- b'\x00\x00\x08\x00\x00\x00\x08\x01\x00\x00\x10\x02')
+ encoded = b'\x00\x03\x24\x2a\x51\x54\x55\x58\x6b\x71\x7f'\
+ b'\x80\x83\xa4\xaa\xd1\xd4\xd5\xd8\xeb\xf1\xff'
+ src = [-688, -720, -2240, -4032, -9, -3, -1, -27, -244, -82, -106,
+ 688, 720, 2240, 4032, 9, 3, 1, 27, 244, 82, 106]
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.alaw2lin(encoded, w),
+ packs[w](*(x << (w * 8) >> 13 for x in src)))
+
+ encoded = ''.join(chr(x) for x in xrange(256))
+ for w in 2, 4:
+ decoded = audioop.alaw2lin(encoded, w)
+ self.assertEqual(audioop.lin2alaw(decoded, w), encoded)
def test_lin2ulaw(self):
- self.assertEqual(audioop.lin2ulaw(data[0], 1), '\xff\xe7\xdb')
- self.assertEqual(audioop.lin2ulaw(data[1], 2), '\xff\xff\xff')
- self.assertEqual(audioop.lin2ulaw(data[2], 4), '\xff\xff\xff')
+ self.assertEqual(audioop.lin2ulaw(datas[1], 1),
+ b'\xff\xad\x8e\x0e\x80\x00\x67')
+ self.assertEqual(audioop.lin2ulaw(datas[2], 2),
+ b'\xff\xad\x8e\x0e\x80\x00\x7e')
+ self.assertEqual(audioop.lin2ulaw(datas[4], 4),
+ b'\xff\xad\x8e\x0e\x80\x00\x7e')
def test_ulaw2lin(self):
- # Cursory
- d = audioop.lin2ulaw(data[0], 1)
- self.assertEqual(audioop.ulaw2lin(d, 1), data[0])
- if endian == 'big':
- self.assertEqual(audioop.ulaw2lin(d, 2),
- b'\x00\x00\x01\x04\x02\x0c')
- self.assertEqual(audioop.ulaw2lin(d, 4),
- b'\x00\x00\x00\x00\x01\x04\x00\x00\x02\x0c\x00\x00')
- else:
- self.assertEqual(audioop.ulaw2lin(d, 2),
- b'\x00\x00\x04\x01\x0c\x02')
- self.assertEqual(audioop.ulaw2lin(d, 4),
- b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x0c\x02')
+ encoded = b'\x00\x0e\x28\x3f\x57\x6a\x76\x7c\x7e\x7f'\
+ b'\x80\x8e\xa8\xbf\xd7\xea\xf6\xfc\xfe\xff'
+ src = [-8031, -4447, -1471, -495, -163, -53, -18, -6, -2, 0,
+ 8031, 4447, 1471, 495, 163, 53, 18, 6, 2, 0]
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.ulaw2lin(encoded, w),
+ packs[w](*(x << (w * 8) >> 14 for x in src)))
+
+ # Current u-law implementation has two codes for 0: 0x7f and 0xff.
+ encoded = ''.join(chr(x) for x in range(127) + range(128, 256))
+ for w in 2, 4:
+ decoded = audioop.ulaw2lin(encoded, w)
+ self.assertEqual(audioop.lin2ulaw(decoded, w), encoded)
def test_mul(self):
- data2 = []
- for d in data:
- str = ''
- for s in d:
- str = str + chr(ord(s)*2)
- data2.append(str)
- self.assertEqual(audioop.mul(data[0], 1, 2), data2[0])
- self.assertEqual(audioop.mul(data[1],2, 2), data2[1])
- self.assertEqual(audioop.mul(data[2], 4, 2), data2[2])
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.mul(b'', w, 2), b'')
+ self.assertEqual(audioop.mul(datas[w], w, 0),
+ b'\0' * len(datas[w]))
+ self.assertEqual(audioop.mul(datas[w], w, 1),
+ datas[w])
+ self.assertEqual(audioop.mul(datas[1], 1, 2),
+ b'\x00\x24\x7f\x80\x7f\x80\xfe')
+ self.assertEqual(audioop.mul(datas[2], 2, 2),
+ packs[2](0, 0x2468, 0x7fff, -0x8000, 0x7fff, -0x8000, -2))
+ self.assertEqual(audioop.mul(datas[4], 4, 2),
+ packs[4](0, 0x2468acf0, 0x7fffffff, -0x80000000,
+ 0x7fffffff, -0x80000000, -2))
def test_ratecv(self):
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.ratecv(b'', w, 1, 8000, 8000, None),
+ (b'', (-1, ((0, 0),))))
+ self.assertEqual(audioop.ratecv(b'', w, 5, 8000, 8000, None),
+ (b'', (-1, ((0, 0),) * 5)))
+ self.assertEqual(audioop.ratecv(b'', w, 1, 8000, 16000, None),
+ (b'', (-2, ((0, 0),))))
+ self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None)[0],
+ datas[w])
state = None
- d1, state = audioop.ratecv(data[0], 1, 1, 8000, 16000, state)
- d2, state = audioop.ratecv(data[0], 1, 1, 8000, 16000, state)
- self.assertEqual(d1 + d2, '\000\000\001\001\002\001\000\000\001\001\002')
+ d1, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
+ d2, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
+ self.assertEqual(d1 + d2, b'\000\000\001\001\002\001\000\000\001\001\002')
+
+ for w in 1, 2, 4:
+ d0, state0 = audioop.ratecv(datas[w], w, 1, 8000, 16000, None)
+ d, state = b'', None
+ for i in range(0, len(datas[w]), w):
+ d1, state = audioop.ratecv(datas[w][i:i + w], w, 1,
+ 8000, 16000, state)
+ d += d1
+ self.assertEqual(d, d0)
+ self.assertEqual(state, state0)
def test_reverse(self):
- self.assertEqual(audioop.reverse(data[0], 1), '\2\1\0')
+ for w in 1, 2, 4:
+ self.assertEqual(audioop.reverse(b'', w), b'')
+ self.assertEqual(audioop.reverse(packs[w](0, 1, 2), w),
+ packs[w](2, 1, 0))
def test_tomono(self):
- data2 = ''
- for d in data[0]:
- data2 = data2 + d + d
- self.assertEqual(audioop.tomono(data2, 1, 0.5, 0.5), data[0])
+ for w in 1, 2, 4:
+ data1 = datas[w]
+ data2 = bytearray(2 * len(data1))
+ for k in range(w):
+ data2[k::2*w] = data1[k::w]
+ self.assertEqual(audioop.tomono(str(data2), w, 1, 0), data1)
+ self.assertEqual(audioop.tomono(str(data2), w, 0, 1), b'\0' * len(data1))
+ for k in range(w):
+ data2[k+w::2*w] = data1[k::w]
+ self.assertEqual(audioop.tomono(str(data2), w, 0.5, 0.5), data1)
def test_tostereo(self):
- data2 = ''
- for d in data[0]:
- data2 = data2 + d + d
- self.assertEqual(audioop.tostereo(data[0], 1, 1, 1), data2)
+ for w in 1, 2, 4:
+ data1 = datas[w]
+ data2 = bytearray(2 * len(data1))
+ for k in range(w):
+ data2[k::2*w] = data1[k::w]
+ self.assertEqual(audioop.tostereo(data1, w, 1, 0), data2)
+ self.assertEqual(audioop.tostereo(data1, w, 0, 0), b'\0' * len(data2))
+ for k in range(w):
+ data2[k+w::2*w] = data1[k::w]
+ self.assertEqual(audioop.tostereo(data1, w, 1, 1), data2)
def test_findfactor(self):
- self.assertEqual(audioop.findfactor(data[1], data[1]), 1.0)
+ self.assertEqual(audioop.findfactor(datas[2], datas[2]), 1.0)
+ self.assertEqual(audioop.findfactor(b'\0' * len(datas[2]), datas[2]),
+ 0.0)
def test_findfit(self):
- self.assertEqual(audioop.findfit(data[1], data[1]), (0, 1.0))
+ self.assertEqual(audioop.findfit(datas[2], datas[2]), (0, 1.0))
+ self.assertEqual(audioop.findfit(datas[2], packs[2](1, 2, 0)),
+ (1, 8038.8))
+ self.assertEqual(audioop.findfit(datas[2][:-2] * 5 + datas[2], datas[2]),
+ (30, 1.0))
def test_findmax(self):
- self.assertEqual(audioop.findmax(data[1], 1), 2)
+ self.assertEqual(audioop.findmax(datas[2], 1), 5)
def test_getsample(self):
- for i in range(3):
- self.assertEqual(audioop.getsample(data[0], 1, i), i)
- self.assertEqual(audioop.getsample(data[1], 2, i), i)
- self.assertEqual(audioop.getsample(data[2], 4, i), i)
+ for w in 1, 2, 4:
+ data = packs[w](0, 1, -1, maxvalues[w], minvalues[w])
+ self.assertEqual(audioop.getsample(data, w, 0), 0)
+ self.assertEqual(audioop.getsample(data, w, 1), 1)
+ self.assertEqual(audioop.getsample(data, w, 2), -1)
+ self.assertEqual(audioop.getsample(data, w, 3), maxvalues[w])
+ self.assertEqual(audioop.getsample(data, w, 4), minvalues[w])
def test_negativelen(self):
# from issue 3306, previously it segfaulted
@@ -220,9 +379,9 @@ class TestAudioop(unittest.TestCase):
self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state)
def test_wrongsize(self):
- data = b'abc'
+ data = b'abcdefgh'
state = None
- for size in (-1, 3, 5):
+ for size in (-1, 0, 3, 5, 1024):
self.assertRaises(audioop.error, audioop.ulaw2lin, data, size)
self.assertRaises(audioop.error, audioop.alaw2lin, data, size)
self.assertRaises(audioop.error, audioop.adpcm2lin, data, size, state)
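For context, a minimal sketch of the struct-based packing idea that replaces the old endian-dependent literals in the rewritten audioop tests, assuming the same width-to-format mapping as the pack()/packs helpers above:

import struct

formats = {1: 'b', 2: 'h', 4: 'i'}   # sample width in bytes -> struct code

def pack(width, samples):
    # native-endian packing, one signed integer per sample
    return struct.pack('=%d%s' % (len(samples), formats[width]), *samples)

assert pack(1, (0, 1, -1)) == b'\x00\x01\xff'
assert pack(2, (0x1234,)) == struct.pack('=h', 0x1234)
assert len(pack(4, (0, 1, 2))) == 12   # 3 samples * 4 bytes each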
diff --git a/Lib/test/test_base64.py b/Lib/test/test_base64.py
index ff2c370..3f2cee4 100644
--- a/Lib/test/test_base64.py
+++ b/Lib/test/test_base64.py
@@ -18,6 +18,8 @@ class LegacyBase64TestCase(unittest.TestCase):
"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n")
+ # Non-bytes
+ eq(base64.encodestring(bytearray('abc')), 'YWJj\n')
def test_decodestring(self):
eq = self.assertEqual
@@ -32,6 +34,8 @@ class LegacyBase64TestCase(unittest.TestCase):
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#0^&*();:<>,. []{}")
eq(base64.decodestring(''), '')
+ # Non-bytes
+ eq(base64.decodestring(bytearray("YWJj\n")), "abc")
def test_encode(self):
eq = self.assertEqual
@@ -73,6 +77,10 @@ class BaseXYTestCase(unittest.TestCase):
"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
# Test with arbitrary alternative characters
eq(base64.b64encode('\xd3V\xbeo\xf7\x1d', altchars='*$'), '01a*b$cd')
+ # Non-bytes
+ eq(base64.b64encode(bytearray('abcd')), 'YWJjZA==')
+ self.assertRaises(TypeError, base64.b64encode,
+ '\xd3V\xbeo\xf7\x1d', altchars=bytearray('*$'))
# Test standard alphabet
eq(base64.standard_b64encode("www.python.org"), "d3d3LnB5dGhvbi5vcmc=")
eq(base64.standard_b64encode("a"), "YQ==")
@@ -85,8 +93,12 @@ class BaseXYTestCase(unittest.TestCase):
"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
+ # Non-bytes
+ eq(base64.standard_b64encode(bytearray('abcd')), 'YWJjZA==')
# Test with 'URL safe' alternative characters
eq(base64.urlsafe_b64encode('\xd3V\xbeo\xf7\x1d'), '01a-b_cd')
+ # Non-bytes
+ eq(base64.urlsafe_b64encode(bytearray('\xd3V\xbeo\xf7\x1d')), '01a-b_cd')
def test_b64decode(self):
eq = self.assertEqual
@@ -104,6 +116,8 @@ class BaseXYTestCase(unittest.TestCase):
eq(base64.b64decode(''), '')
# Test with arbitrary alternative characters
eq(base64.b64decode('01a*b$cd', altchars='*$'), '\xd3V\xbeo\xf7\x1d')
+ # Non-bytes
+ eq(base64.b64decode(bytearray("YWJj")), "abc")
# Test standard alphabet
eq(base64.standard_b64decode("d3d3LnB5dGhvbi5vcmc="), "www.python.org")
eq(base64.standard_b64decode("YQ=="), "a")
@@ -116,8 +130,12 @@ class BaseXYTestCase(unittest.TestCase):
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#0^&*();:<>,. []{}")
+ # Non-bytes
+ eq(base64.standard_b64decode(bytearray("YWJj")), "abc")
# Test with 'URL safe' alternative characters
eq(base64.urlsafe_b64decode('01a-b_cd'), '\xd3V\xbeo\xf7\x1d')
+ # Non-bytes
+ eq(base64.urlsafe_b64decode(bytearray('01a-b_cd')), '\xd3V\xbeo\xf7\x1d')
def test_b64decode_error(self):
self.assertRaises(TypeError, base64.b64decode, 'abc')
@@ -131,6 +149,8 @@ class BaseXYTestCase(unittest.TestCase):
eq(base64.b32encode('abc'), 'MFRGG===')
eq(base64.b32encode('abcd'), 'MFRGGZA=')
eq(base64.b32encode('abcde'), 'MFRGGZDF')
+ # Non-bytes
+ eq(base64.b32encode(bytearray('abcd')), 'MFRGGZA=')
def test_b32decode(self):
eq = self.assertEqual
@@ -141,6 +161,8 @@ class BaseXYTestCase(unittest.TestCase):
eq(base64.b32decode('MFRGG==='), 'abc')
eq(base64.b32decode('MFRGGZA='), 'abcd')
eq(base64.b32decode('MFRGGZDF'), 'abcde')
+ # Non-bytes
+ self.assertRaises(TypeError, base64.b32decode, bytearray('MFRGG==='))
def test_b32decode_casefold(self):
eq = self.assertEqual
@@ -171,6 +193,8 @@ class BaseXYTestCase(unittest.TestCase):
eq = self.assertEqual
eq(base64.b16encode('\x01\x02\xab\xcd\xef'), '0102ABCDEF')
eq(base64.b16encode('\x00'), '00')
+ # Non-bytes
+ eq(base64.b16encode(bytearray('\x01\x02\xab\xcd\xef')), '0102ABCDEF')
def test_b16decode(self):
eq = self.assertEqual
@@ -180,6 +204,8 @@ class BaseXYTestCase(unittest.TestCase):
self.assertRaises(TypeError, base64.b16decode, '0102abcdef')
# Case fold
eq(base64.b16decode('0102abcdef', True), '\x01\x02\xab\xcd\xef')
+ # Non-bytes
+ eq(base64.b16decode(bytearray("0102ABCDEF")), '\x01\x02\xab\xcd\xef')
diff --git a/Lib/test/test_bigmem.py b/Lib/test/test_bigmem.py
index c560167..c41c373 100644
--- a/Lib/test/test_bigmem.py
+++ b/Lib/test/test_bigmem.py
@@ -118,12 +118,13 @@ class StrTest(unittest.TestCase):
except MemoryError:
pass # acceptable on 32-bit
- @precisionbigmemtest(size=_2G-1, memuse=2)
+ @precisionbigmemtest(size=_2G-1, memuse=4)
def test_decodeascii(self, size):
return self.basic_encode_test(size, 'ascii', c='A')
@precisionbigmemtest(size=_4G // 5, memuse=6+2)
def test_unicode_repr_oflw(self, size):
+ self.skipTest("test crashes - see issue #14904")
try:
s = u"\uAAAA"*size
r = repr(s)
@@ -485,7 +486,7 @@ class StrTest(unittest.TestCase):
self.assertEqual(s.count('.'), 3)
self.assertEqual(s.count('-'), size * 2)
- @bigmemtest(minsize=_2G + 10, memuse=2)
+ @bigmemtest(minsize=_2G + 10, memuse=5)
def test_repr_small(self, size):
s = '-' * size
s = repr(s)
@@ -497,7 +498,6 @@ class StrTest(unittest.TestCase):
# repr() will create a string four times as large as this 'binary
# string', but we don't want to allocate much more than twice
# size in total. (We do extra testing in test_repr_large())
- size = size // 5 * 2
s = '\x00' * size
s = repr(s)
self.assertEqual(len(s), size * 4 + 2)
@@ -541,7 +541,7 @@ class StrTest(unittest.TestCase):
self.assertEqual(len(s), size * 2)
self.assertEqual(s.count('.'), size * 2)
- @bigmemtest(minsize=_2G + 20, memuse=1)
+ @bigmemtest(minsize=_2G + 20, memuse=2)
def test_slice_and_getitem(self, size):
SUBSTR = '0123456789'
sublen = len(SUBSTR)
diff --git a/Lib/test/test_binhex.py b/Lib/test/test_binhex.py
index 2f89703..1f6a0aa 100755..100644
--- a/Lib/test/test_binhex.py
+++ b/Lib/test/test_binhex.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""Test script for the binhex C module
Uses the mechanism of the python binhex module
diff --git a/Lib/test/test_bisect.py b/Lib/test/test_bisect.py
index 934ba8c..5c3330b 100644
--- a/Lib/test/test_bisect.py
+++ b/Lib/test/test_bisect.py
@@ -23,6 +23,28 @@ del sys.modules['bisect']
import bisect as c_bisect
+class Range(object):
+ """A trivial xrange()-like object without any integer width limitations."""
+ def __init__(self, start, stop):
+ self.start = start
+ self.stop = stop
+ self.last_insert = None
+
+ def __len__(self):
+ return self.stop - self.start
+
+ def __getitem__(self, idx):
+ n = self.stop - self.start
+ if idx < 0:
+ idx += n
+ if idx >= n:
+ raise IndexError(idx)
+ return self.start + idx
+
+ def insert(self, idx, item):
+ self.last_insert = idx, item
+
+
class TestBisect(unittest.TestCase):
module = None
@@ -122,6 +144,35 @@ class TestBisect(unittest.TestCase):
self.assertRaises(ValueError, mod.insort_left, [1, 2, 3], 5, -1, 3),
self.assertRaises(ValueError, mod.insort_right, [1, 2, 3], 5, -1, 3),
+ def test_large_range(self):
+ # Issue 13496
+ mod = self.module
+ n = sys.maxsize
+ try:
+ data = xrange(n-1)
+ except OverflowError:
+ self.skipTest("can't create a xrange() object of size `sys.maxsize`")
+ self.assertEqual(mod.bisect_left(data, n-3), n-3)
+ self.assertEqual(mod.bisect_right(data, n-3), n-2)
+ self.assertEqual(mod.bisect_left(data, n-3, n-10, n), n-3)
+ self.assertEqual(mod.bisect_right(data, n-3, n-10, n), n-2)
+
+ def test_large_pyrange(self):
+ # Same as above, but without C-imposed limits on range() parameters
+ mod = self.module
+ n = sys.maxsize
+ data = Range(0, n-1)
+ self.assertEqual(mod.bisect_left(data, n-3), n-3)
+ self.assertEqual(mod.bisect_right(data, n-3), n-2)
+ self.assertEqual(mod.bisect_left(data, n-3, n-10, n), n-3)
+ self.assertEqual(mod.bisect_right(data, n-3, n-10, n), n-2)
+ x = n - 100
+ mod.insort_left(data, x, x - 50, x + 50)
+ self.assertEqual(data.last_insert, (x, x))
+ x = n - 200
+ mod.insort_right(data, x, x - 50, x + 50)
+ self.assertEqual(data.last_insert, (x + 1, x))
+
def test_random(self, n=25):
from random import randrange
for i in xrange(n):
@@ -191,7 +242,7 @@ class TestInsort(unittest.TestCase):
else:
f = self.module.insort_right
f(insorted, digit)
- self.assertEqual(sorted(insorted), insorted)
+ self.assertEqual(sorted(insorted), insorted)
def test_backcompatibility(self):
self.assertEqual(self.module.insort, self.module.insort_right)
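For context, a minimal usage sketch of the Range idea introduced above: bisect only needs __len__ and __getitem__, so it can search a virtual sequence that is never materialised. The bound chosen here is arbitrary and kept small enough for 32-bit builds:

import bisect

class Range(object):
    """Sequence-like view of [start, stop) that never builds the values."""
    def __init__(self, start, stop):
        self.start, self.stop = start, stop
    def __len__(self):
        return self.stop - self.start
    def __getitem__(self, idx):
        if idx < 0:
            idx += len(self)
        if not 0 <= idx < len(self):
            raise IndexError(idx)
        return self.start + idx

data = Range(0, 10**9)                  # far larger than any list worth building
assert bisect.bisect_left(data, 12345) == 12345
assert bisect.bisect_right(data, 12345) == 12346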
diff --git a/Lib/test/test_bsddb.py b/Lib/test/test_bsddb.py
index d1ee0a1..3ff020a 100755..100644
--- a/Lib/test/test_bsddb.py
+++ b/Lib/test/test_bsddb.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""Test script for the bsddb C module by Roger E. Masse
Adapted to unittest format and expanded scope by Raymond Hettinger
"""
@@ -47,10 +46,7 @@ class TestBSDDB(unittest.TestCase):
self.assertIn('discovered', self.f.values())
def test_close_and_reopen(self):
- if self.fname is None:
- # if we're using an in-memory only db, we can't reopen it
- # so finish here.
- return
+ self.assertIsNotNone(self.fname)
self.f.close()
self.f = self.openmethod[0](self.fname, 'w')
for k, v in self.d.iteritems():
@@ -309,8 +305,7 @@ class TestBSDDB(unittest.TestCase):
self.assertEqual(self.f[k], v)
def test_keyordering(self):
- if self.openmethod[0] is not bsddb.btopen:
- return
+ self.assertIs(self.openmethod[0], bsddb.btopen)
keys = self.d.keys()
keys.sort()
self.assertEqual(self.f.first()[0], keys[0])
@@ -327,19 +322,34 @@ class TestBTree_InMemory(TestBSDDB):
fname = None
openmethod = [bsddb.btopen]
+ # if we're using an in-memory only db, we can't reopen it
+ test_close_and_reopen = None
+
class TestBTree_InMemory_Truncate(TestBSDDB):
fname = None
openflag = 'n'
openmethod = [bsddb.btopen]
+ # if we're using an in-memory only db, we can't reopen it
+ test_close_and_reopen = None
+
class TestHashTable(TestBSDDB):
fname = test_support.TESTFN
openmethod = [bsddb.hashopen]
+ # keyordering is specific to btopen method
+ test_keyordering = None
+
class TestHashTable_InMemory(TestBSDDB):
fname = None
openmethod = [bsddb.hashopen]
+ # if we're using an in-memory only db, we can't reopen it
+ test_close_and_reopen = None
+
+ # keyordering is specific to btopen method
+ test_keyordering = None
+
## # (bsddb.rnopen,'Record Numbers'), 'put' for RECNO for bsddb 1.85
## # appears broken... at least on
## # Solaris Intel - rmasse 1/97
diff --git a/Lib/test/test_buffer.py b/Lib/test/test_buffer.py
index 6bdc34d..a02c5f7 100644
--- a/Lib/test/test_buffer.py
+++ b/Lib/test/test_buffer.py
@@ -4,6 +4,7 @@ For now, tests just new or changed functionality.
"""
+import sys
import unittest
from test import test_support
@@ -21,6 +22,19 @@ class BufferTests(unittest.TestCase):
self.assertEqual(b[start:stop:step],
s[start:stop:step])
+ def test_newbuffer_interface(self):
+ # Test that the buffer object has the new buffer interface
+ # as used by the memoryview object
+ s = "".join(chr(c) for c in list(range(255, -1, -1)))
+ b = buffer(s)
+ m = memoryview(b) # Should not raise an exception
+ self.assertEqual(m.tobytes(), s)
+
+ def test_large_buffer_size_and_offset(self):
+ data = bytearray('hola mundo')
+ buf = buffer(data, sys.maxsize, sys.maxsize)
+ self.assertEqual(buf[:4096], "")
+
def test_main():
with test_support.check_py3k_warnings(("buffer.. not supported",
diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py
index 1d35a6a..15581d9 100644
--- a/Lib/test/test_builtin.py
+++ b/Lib/test/test_builtin.py
@@ -110,6 +110,7 @@ class BuiltinTest(unittest.TestCase):
self.assertRaises(TypeError, all) # No args
self.assertRaises(TypeError, all, [2, 4, 6], []) # Too many args
self.assertEqual(all([]), True) # Empty iterator
+ self.assertEqual(all([0, TestFailingBool()]), False)# Short-circuit
S = [50, 60]
self.assertEqual(all(x > 42 for x in S), True)
S = [50, 40, 60]
@@ -119,11 +120,12 @@ class BuiltinTest(unittest.TestCase):
self.assertEqual(any([None, None, None]), False)
self.assertEqual(any([None, 4, None]), True)
self.assertRaises(RuntimeError, any, [None, TestFailingBool(), 6])
- self.assertRaises(RuntimeError, all, TestFailingIter())
+ self.assertRaises(RuntimeError, any, TestFailingIter())
self.assertRaises(TypeError, any, 10) # Non-iterable
self.assertRaises(TypeError, any) # No args
self.assertRaises(TypeError, any, [2, 4, 6], []) # Too many args
self.assertEqual(any([]), False) # Empty iterator
+ self.assertEqual(any([1, TestFailingBool()]), True) # Short-circuit
S = [40, 60, 30]
self.assertEqual(any(x > 42 for x in S), True)
S = [10, 20, 30]
@@ -445,59 +447,6 @@ class BuiltinTest(unittest.TestCase):
return 'a'
self.assertRaises(TypeError, eval, 'dir()', globals(), C())
- # Done outside of the method test_z to get the correct scope
- z = 0
- f = open(TESTFN, 'w')
- f.write('z = z+1\n')
- f.write('z = z*2\n')
- f.close()
- with check_py3k_warnings(("execfile.. not supported in 3.x",
- DeprecationWarning)):
- execfile(TESTFN)
-
- def test_execfile(self):
- global numruns
- if numruns:
- return
- numruns += 1
-
- globals = {'a': 1, 'b': 2}
- locals = {'b': 200, 'c': 300}
-
- self.assertEqual(self.__class__.z, 2)
- globals['z'] = 0
- execfile(TESTFN, globals)
- self.assertEqual(globals['z'], 2)
- locals['z'] = 0
- execfile(TESTFN, globals, locals)
- self.assertEqual(locals['z'], 2)
-
- class M:
- "Test mapping interface versus possible calls from execfile()."
- def __init__(self):
- self.z = 10
- def __getitem__(self, key):
- if key == 'z':
- return self.z
- raise KeyError
- def __setitem__(self, key, value):
- if key == 'z':
- self.z = value
- return
- raise KeyError
-
- locals = M()
- locals['z'] = 0
- execfile(TESTFN, globals, locals)
- self.assertEqual(locals['z'], 2)
-
- unlink(TESTFN)
- self.assertRaises(TypeError, execfile)
- self.assertRaises(TypeError, execfile, TESTFN, {}, ())
- import os
- self.assertRaises(IOError, execfile, os.curdir)
- self.assertRaises(IOError, execfile, "I_dont_exist")
-
def test_filter(self):
self.assertEqual(filter(lambda c: 'a' <= c <= 'z', 'Hello World'), 'elloorld')
self.assertEqual(filter(None, [1, 'hello', [], [3], '', None, 9, 0]), [1, 'hello', [3], 9])
@@ -680,6 +629,8 @@ class BuiltinTest(unittest.TestCase):
# Test input() later, together with raw_input
+ # test_int(): see test_int.py for int() tests.
+
def test_intern(self):
self.assertRaises(TypeError, intern)
# This fails if the test is run twice with a constant string,
@@ -1642,6 +1593,56 @@ class BuiltinTest(unittest.TestCase):
self.assertRaises(ValueError, x.translate, "1", 1)
self.assertRaises(TypeError, x.translate, "1"*256, 1)
+class TestExecFile(unittest.TestCase):
+ # Done outside of the method test_z to get the correct scope
+ z = 0
+ f = open(TESTFN, 'w')
+ f.write('z = z+1\n')
+ f.write('z = z*2\n')
+ f.close()
+ with check_py3k_warnings(("execfile.. not supported in 3.x",
+ DeprecationWarning)):
+ execfile(TESTFN)
+
+ def test_execfile(self):
+ globals = {'a': 1, 'b': 2}
+ locals = {'b': 200, 'c': 300}
+
+ self.assertEqual(self.__class__.z, 2)
+ globals['z'] = 0
+ execfile(TESTFN, globals)
+ self.assertEqual(globals['z'], 2)
+ locals['z'] = 0
+ execfile(TESTFN, globals, locals)
+ self.assertEqual(locals['z'], 2)
+
+ class M:
+ "Test mapping interface versus possible calls from execfile()."
+ def __init__(self):
+ self.z = 10
+ def __getitem__(self, key):
+ if key == 'z':
+ return self.z
+ raise KeyError
+ def __setitem__(self, key, value):
+ if key == 'z':
+ self.z = value
+ return
+ raise KeyError
+
+ locals = M()
+ locals['z'] = 0
+ execfile(TESTFN, globals, locals)
+ self.assertEqual(locals['z'], 2)
+
+ unlink(TESTFN)
+ self.assertRaises(TypeError, execfile)
+ self.assertRaises(TypeError, execfile, TESTFN, {}, ())
+ import os
+ self.assertRaises(IOError, execfile, os.curdir)
+ self.assertRaises(IOError, execfile, "I_dont_exist")
+
+
class TestSorted(unittest.TestCase):
def test_basic(self):
@@ -1689,6 +1690,12 @@ def _run_unittest(*args):
run_unittest(*args)
def test_main(verbose=None):
+ global numruns
+ if not numruns:
+ with check_py3k_warnings(
+ (".+ not supported in 3.x", DeprecationWarning)):
+ run_unittest(TestExecFile)
+ numruns += 1
test_classes = (BuiltinTest, TestSorted)
_run_unittest(*test_classes)
diff --git a/Lib/test/test_bytes.py b/Lib/test/test_bytes.py
index 2ef1c88..988b931 100644
--- a/Lib/test/test_bytes.py
+++ b/Lib/test/test_bytes.py
@@ -635,6 +635,26 @@ class ByteArrayTest(BaseBytesTest):
b[3:0] = [42, 42, 42]
self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9]))
+ b[3:] = b'foo'
+ self.assertEqual(b, bytearray([0, 1, 2, 102, 111, 111]))
+
+ b[:3] = memoryview(b'foo')
+ self.assertEqual(b, bytearray([102, 111, 111, 102, 111, 111]))
+
+ b[3:4] = []
+ self.assertEqual(b, bytearray([102, 111, 111, 111, 111]))
+
+ b[1:] = list(b'uuuu') # this works only on Python2
+ self.assertEqual(b, bytearray([102, 117, 117, 117, 117]))
+
+ for elem in [5, -5, 0, long(10e20), u'str', 2.3, [u'a', u'b'], [[]]]:
+ with self.assertRaises(TypeError):
+ b[3:4] = elem
+
+ for elem in [[254, 255, 256], [-256, 9000]]:
+ with self.assertRaises(ValueError):
+ b[3:4] = elem
+
def test_extended_set_del_slice(self):
indices = (0, None, 1, 3, 19, 300, 1<<333, -1, -2, -31, -300)
for start in indices:
@@ -905,6 +925,7 @@ class AssortedBytesTest(unittest.TestCase):
self.assertEqual(bytes(b"abc") < b"ab", False)
self.assertEqual(bytes(b"abc") <= b"ab", False)
+ @test.test_support.requires_docstrings
def test_doc(self):
self.assertIsNotNone(bytearray.__doc__)
self.assertTrue(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__)
diff --git a/Lib/test/test_bz2.py b/Lib/test/test_bz2.py
index d9ccf0a..e2c222a 100644
--- a/Lib/test/test_bz2.py
+++ b/Lib/test/test_bz2.py
@@ -1,6 +1,5 @@
-#!/usr/bin/env python
from test import test_support
-from test.test_support import TESTFN, import_module
+from test.test_support import TESTFN, _4G, bigmemtest, import_module, findfile
import unittest
from cStringIO import StringIO
@@ -23,6 +22,7 @@ class BaseTest(unittest.TestCase):
TEXT = 'root:x:0:0:root:/root:/bin/bash\nbin:x:1:1:bin:/bin:\ndaemon:x:2:2:daemon:/sbin:\nadm:x:3:4:adm:/var/adm:\nlp:x:4:7:lp:/var/spool/lpd:\nsync:x:5:0:sync:/sbin:/bin/sync\nshutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\nhalt:x:7:0:halt:/sbin:/sbin/halt\nmail:x:8:12:mail:/var/spool/mail:\nnews:x:9:13:news:/var/spool/news:\nuucp:x:10:14:uucp:/var/spool/uucp:\noperator:x:11:0:operator:/root:\ngames:x:12:100:games:/usr/games:\ngopher:x:13:30:gopher:/usr/lib/gopher-data:\nftp:x:14:50:FTP User:/var/ftp:/bin/bash\nnobody:x:65534:65534:Nobody:/home:\npostfix:x:100:101:postfix:/var/spool/postfix:\nniemeyer:x:500:500::/home/niemeyer:/bin/bash\npostgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\nmysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\nwww:x:103:104::/var/www:/bin/false\n'
DATA = 'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
DATA_CRLF = 'BZh91AY&SY\xaez\xbbN\x00\x01H\xdf\x80\x00\x12@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe0@\x01\xbc\xc6`\x86*\x8d=M\xa9\x9a\x86\xd0L@\x0fI\xa6!\xa1\x13\xc8\x88jdi\x8d@\x03@\x1a\x1a\x0c\x0c\x83 \x00\xc4h2\x19\x01\x82D\x84e\t\xe8\x99\x89\x19\x1ah\x00\r\x1a\x11\xaf\x9b\x0fG\xf5(\x1b\x1f?\t\x12\xcf\xb5\xfc\x95E\x00ps\x89\x12^\xa4\xdd\xa2&\x05(\x87\x04\x98\x89u\xe40%\xb6\x19\'\x8c\xc4\x89\xca\x07\x0e\x1b!\x91UIFU%C\x994!DI\xd2\xfa\xf0\xf1N8W\xde\x13A\xf5\x9cr%?\x9f3;I45A\xd1\x8bT\xb1<l\xba\xcb_\xc00xY\x17r\x17\x88\x08\x08@\xa0\ry@\x10\x04$)`\xf2\xce\x89z\xb0s\xec\x9b.iW\x9d\x81\xb5-+t\x9f\x1a\'\x97dB\xf5x\xb5\xbe.[.\xd7\x0e\x81\xe7\x08\x1cN`\x88\x10\xca\x87\xc3!"\x80\x92R\xa1/\xd1\xc0\xe6mf\xac\xbd\x99\xcca\xb3\x8780>\xa4\xc7\x8d\x1a\\"\xad\xa1\xabyBg\x15\xb9l\x88\x88\x91k"\x94\xa4\xd4\x89\xae*\xa6\x0b\x10\x0c\xd6\xd4m\xe86\xec\xb5j\x8a\x86j\';\xca.\x01I\xf2\xaaJ\xe8\x88\x8cU+t3\xfb\x0c\n\xa33\x13r2\r\x16\xe0\xb3(\xbf\x1d\x83r\xe7M\xf0D\x1365\xd8\x88\xd3\xa4\x92\xcb2\x06\x04\\\xc1\xb0\xea//\xbek&\xd8\xe6+t\xe5\xa1\x13\xada\x16\xder5"w]\xa2i\xb7[\x97R \xe2IT\xcd;Z\x04dk4\xad\x8a\t\xd3\x81z\x10\xf1:^`\xab\x1f\xc5\xdc\x91N\x14$+\x9e\xae\xd3\x80'
+ EMPTY_DATA = 'BZh9\x17rE8P\x90\x00\x00\x00\x00'
if has_cmdline_bunzip2:
def decompress(self, data):
@@ -43,6 +43,7 @@ class BaseTest(unittest.TestCase):
def decompress(self, data):
return bz2.decompress(data)
+
class BZ2FileTest(BaseTest):
"Test BZ2File type miscellaneous methods."
@@ -323,6 +324,21 @@ class BZ2FileTest(BaseTest):
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
+ @unittest.skipIf(sys.platform == 'win32',
+ 'test depends on being able to delete a still-open file,'
+ ' which is not possible on Windows')
+ def testInitNonExistentFile(self):
+ # Issue #19878: Should not segfault when __init__ with non-existent
+ # file for the second time.
+ self.createTempFile()
+ # Test close():
+ with BZ2File(self.filename, "wb") as f:
+ self.assertRaises(IOError, f.__init__, "non-existent-file")
+ # Test object deallocation without call to close():
+ f = bz2.BZ2File(self.filename)
+ self.assertRaises(IOError, f.__init__, "non-existent-file")
+ del f
+
class BZ2CompressorTest(BaseTest):
def testCompress(self):
# "Test BZ2Compressor.compress()/flush()"
@@ -332,6 +348,13 @@ class BZ2CompressorTest(BaseTest):
data += bz2c.flush()
self.assertEqual(self.decompress(data), self.TEXT)
+ def testCompressEmptyString(self):
+ # "Test BZ2Compressor.compress()/flush() of empty string"
+ bz2c = BZ2Compressor()
+ data = bz2c.compress('')
+ data += bz2c.flush()
+ self.assertEqual(data, self.EMPTY_DATA)
+
def testCompressChunks10(self):
# "Test BZ2Compressor.compress()/flush() with chunks of 10 bytes"
bz2c = BZ2Compressor()
@@ -346,6 +369,17 @@ class BZ2CompressorTest(BaseTest):
data += bz2c.flush()
self.assertEqual(self.decompress(data), self.TEXT)
+ @bigmemtest(_4G, memuse=1.25)
+ def testBigmem(self, size):
+ text = "a" * size
+ bz2c = bz2.BZ2Compressor()
+ data = bz2c.compress(text) + bz2c.flush()
+ del text
+ text = self.decompress(data)
+ self.assertEqual(len(text), size)
+ self.assertEqual(text.strip("a"), "")
+
+
class BZ2DecompressorTest(BaseTest):
def test_Constructor(self):
self.assertRaises(TypeError, BZ2Decompressor, 42)
@@ -383,6 +417,17 @@ class BZ2DecompressorTest(BaseTest):
bz2d = BZ2Decompressor()
text = bz2d.decompress(self.DATA)
self.assertRaises(EOFError, bz2d.decompress, "anything")
+ self.assertRaises(EOFError, bz2d.decompress, "")
+
+ @bigmemtest(_4G, memuse=1.25)
+ def testBigmem(self, size):
+ # Issue #14398: decompression fails when output data is >=2GB.
+ if size < _4G:
+ self.skipTest("Test needs 5GB of memory to run.")
+ compressed = bz2.compress("a" * _4G)
+ text = bz2.BZ2Decompressor().decompress(compressed)
+ self.assertEqual(len(text), _4G)
+ self.assertEqual(text.strip("a"), "")
class FuncTest(BaseTest):
@@ -393,6 +438,11 @@ class FuncTest(BaseTest):
data = bz2.compress(self.TEXT)
self.assertEqual(self.decompress(data), self.TEXT)
+ def testCompressEmptyString(self):
+ # "Test compress() of empty string"
+ text = bz2.compress('')
+ self.assertEqual(text, self.EMPTY_DATA)
+
def testDecompress(self):
# "Test decompress() function"
text = bz2.decompress(self.DATA)
@@ -403,10 +453,34 @@ class FuncTest(BaseTest):
text = bz2.decompress("")
self.assertEqual(text, "")
+ def testDecompressToEmptyString(self):
+ # "Test decompress() of minimal bz2 data to empty string"
+ text = bz2.decompress(self.EMPTY_DATA)
+ self.assertEqual(text, '')
+
def testDecompressIncomplete(self):
# "Test decompress() function with incomplete data"
self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])
+ @bigmemtest(_4G, memuse=1.25)
+ def testCompressBigmem(self, size):
+ text = "a" * size
+ data = bz2.compress(text)
+ del text
+ text = self.decompress(data)
+ self.assertEqual(len(text), size)
+ self.assertEqual(text.strip("a"), "")
+
+ @bigmemtest(_4G, memuse=1.25)
+ def testDecompressBigmem(self, size):
+ # Issue #14398: decompression fails when output data is >=2GB.
+ if size < _4G:
+ self.skipTest("Test needs 5GB of memory to run.")
+ compressed = bz2.compress("a" * _4G)
+ text = bz2.decompress(compressed)
+ self.assertEqual(len(text), _4G)
+ self.assertEqual(text.strip("a"), "")
+
def test_main():
test_support.run_unittest(
BZ2FileTest,
diff --git a/Lib/test/test_calendar.py b/Lib/test/test_calendar.py
index 2a56268..40fb76d 100644
--- a/Lib/test/test_calendar.py
+++ b/Lib/test/test_calendar.py
@@ -3,6 +3,7 @@ import unittest
from test import test_support
import locale
+import datetime
result_2004_text = """
@@ -254,13 +255,30 @@ class CalendarTestCase(unittest.TestCase):
# (it is still not thread-safe though)
old_october = calendar.TextCalendar().formatmonthname(2010, 10, 10)
try:
- calendar.LocaleTextCalendar(locale='').formatmonthname(2010, 10, 10)
+ cal = calendar.LocaleTextCalendar(locale='')
+ local_weekday = cal.formatweekday(1, 10)
+ local_month = cal.formatmonthname(2010, 10, 10)
except locale.Error:
# cannot set the system default locale -- skip rest of test
- return
- calendar.LocaleHTMLCalendar(locale='').formatmonthname(2010, 10)
+ raise unittest.SkipTest('cannot set the system default locale')
+ # should be encodable
+ local_weekday.encode('utf-8')
+ local_month.encode('utf-8')
+ self.assertEqual(len(local_weekday), 10)
+ self.assertGreaterEqual(len(local_month), 10)
+ cal = calendar.LocaleHTMLCalendar(locale='')
+ local_weekday = cal.formatweekday(1)
+ local_month = cal.formatmonthname(2010, 10)
+ # should be encodable
+ local_weekday.encode('utf-8')
+ local_month.encode('utf-8')
new_october = calendar.TextCalendar().formatmonthname(2010, 10, 10)
- self.assertEquals(old_october, new_october)
+ self.assertEqual(old_october, new_october)
+
+ def test_itermonthdates(self):
+ # ensure itermonthdates doesn't overflow after datetime.MAXYEAR
+ # see #15421
+ list(calendar.Calendar().itermonthdates(datetime.MAXYEAR, 12))
class MonthCalendarTestCase(unittest.TestCase):
diff --git a/Lib/test/test_capi.py b/Lib/test/test_capi.py
index 7dec2de..a2cb5c7 100644
--- a/Lib/test/test_capi.py
+++ b/Lib/test/test_capi.py
@@ -8,10 +8,14 @@ import random
import unittest
from test import test_support
try:
+ import thread
import threading
except ImportError:
+ thread = None
threading = None
-import _testcapi
+# Skip this test if the _testcapi module isn't available.
+_testcapi = test_support.import_module('_testcapi')
+
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestPendingCalls(unittest.TestCase):
@@ -96,8 +100,32 @@ class TestPendingCalls(unittest.TestCase):
self.pendingcalls_wait(l, n)
-def test_main():
+@unittest.skipUnless(threading and thread, 'Threading required for this test.')
+class TestThreadState(unittest.TestCase):
+
+ @test_support.reap_threads
+ def test_thread_state(self):
+ # some extra thread-state tests driven via _testcapi
+ def target():
+ idents = []
+
+ def callback():
+ idents.append(thread.get_ident())
+
+ _testcapi._test_thread_state(callback)
+ a = b = callback
+ time.sleep(1)
+ # Check our main thread is in the list exactly 3 times.
+ self.assertEqual(idents.count(thread.get_ident()), 3,
+ "Couldn't find main thread correctly in the list")
+
+ target()
+ t = threading.Thread(target=target)
+ t.start()
+ t.join()
+
+def test_main():
for name in dir(_testcapi):
if name.startswith('test_'):
test = getattr(_testcapi, name)
@@ -108,33 +136,7 @@ def test_main():
except _testcapi.error:
raise test_support.TestFailed, sys.exc_info()[1]
- # some extra thread-state tests driven via _testcapi
- def TestThreadState():
- if test_support.verbose:
- print "auto-thread-state"
-
- idents = []
-
- def callback():
- idents.append(thread.get_ident())
-
- _testcapi._test_thread_state(callback)
- a = b = callback
- time.sleep(1)
- # Check our main thread is in the list exactly 3 times.
- if idents.count(thread.get_ident()) != 3:
- raise test_support.TestFailed, \
- "Couldn't find main thread correctly in the list"
-
- if threading:
- import thread
- import time
- TestThreadState()
- t=threading.Thread(target=TestThreadState)
- t.start()
- t.join()
-
- test_support.run_unittest(TestPendingCalls)
+ test_support.run_unittest(TestPendingCalls, TestThreadState)
if __name__ == "__main__":
test_main()
diff --git a/Lib/test/test_cd.py b/Lib/test/test_cd.py
index edaa82b..18ca586 100755..100644
--- a/Lib/test/test_cd.py
+++ b/Lib/test/test_cd.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""Whimpy test script for the cd module
Roger E. Masse
"""
diff --git a/Lib/test/test_cfgparser.py b/Lib/test/test_cfgparser.py
index f0073f0..df65f6e 100644
--- a/Lib/test/test_cfgparser.py
+++ b/Lib/test/test_cfgparser.py
@@ -284,13 +284,17 @@ class TestCaseBase(unittest.TestCase):
cf.set("sect", "option1", mystr("splat"))
cf.set("sect", "option2", "splat")
cf.set("sect", "option2", mystr("splat"))
+
+ def test_set_unicode(self):
try:
unicode
except NameError:
- pass
- else:
- cf.set("sect", "option1", unicode("splat"))
- cf.set("sect", "option2", unicode("splat"))
+ self.skipTest('no unicode support')
+
+ cf = self.fromstring("[sect]\n"
+ "option1=foo\n")
+ cf.set("sect", "option1", unicode("splat"))
+ cf.set("sect", "option2", unicode("splat"))
def test_read_returns_file_list(self):
file1 = test_support.findfile("cfgparser.1")
diff --git a/Lib/test/test_cgi.py b/Lib/test/test_cgi.py
index 63547b2..c9cf095 100644
--- a/Lib/test/test_cgi.py
+++ b/Lib/test/test_cgi.py
@@ -5,6 +5,8 @@ import sys
import tempfile
import unittest
+from collections import namedtuple
+
class HackedSysModule:
# The regression test will have real values in sys.argv, which
# will completely confuse the test of the cgi module
@@ -120,6 +122,11 @@ def gen_result(data, environ):
class CgiTests(unittest.TestCase):
+ def test_escape(self):
+ self.assertEqual("test &amp; string", cgi.escape("test & string"))
+ self.assertEqual("&lt;test string&gt;", cgi.escape("<test string>"))
+ self.assertEqual("&quot;test string&quot;", cgi.escape('"test string"', True))
+
def test_strict(self):
for orig, expect in parse_strict_test_cases:
# Test basic parsing
@@ -225,7 +232,15 @@ class CgiTests(unittest.TestCase):
# if we're not chunking properly, readline is only called twice
# (by read_binary); if we are chunking properly, it will be called 5 times
# as long as the chunksize is 1 << 16.
- self.assertTrue(f.numcalls > 2)
+ self.assertGreater(f.numcalls, 2)
+
+ def test_fieldstorage_invalid(self):
+ fs = cgi.FieldStorage()
+ self.assertFalse(fs)
+ self.assertRaises(TypeError, bool(fs))
+ self.assertEqual(list(fs), list(fs.keys()))
+ fs.list.append(namedtuple('MockFieldStorage', 'name')('fieldvalue'))
+ self.assertTrue(fs)
def test_fieldstorage_multipart(self):
#Test basic FieldStorage multipart parsing
@@ -261,6 +276,29 @@ Content-Disposition: form-data; name="submit"
got = getattr(fs.list[x], k)
self.assertEqual(got, exp)
+ def test_fieldstorage_multipart_maxline(self):
+ # Issue #18167
+ maxline = 1 << 16
+ self.maxDiff = None
+ def check(content):
+ data = """
+---123
+Content-Disposition: form-data; name="upload"; filename="fake.txt"
+Content-Type: text/plain
+
+%s
+---123--
+""".replace('\n', '\r\n') % content
+ environ = {
+ 'CONTENT_LENGTH': str(len(data)),
+ 'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
+ 'REQUEST_METHOD': 'POST',
+ }
+ self.assertEqual(gen_result(data, environ), {'upload': content})
+ check('x' * (maxline - 1))
+ check('x' * (maxline - 1) + '\r')
+ check('x' * (maxline - 1) + '\r' + 'y' * (maxline - 1))
+
_qs_result = {
'key1': 'value1',
'key2': ['value2x', 'value2y'],
diff --git a/Lib/test/test_cl.py b/Lib/test/test_cl.py
index 50102e9..9582282 100755..100644
--- a/Lib/test/test_cl.py
+++ b/Lib/test/test_cl.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""Whimpy test script for the cl module
Roger E. Masse
"""
diff --git a/Lib/test/test_class.py b/Lib/test/test_class.py
index db75b93..e5cdf08 100644
--- a/Lib/test/test_class.py
+++ b/Lib/test/test_class.py
@@ -628,6 +628,13 @@ class ClassTests(unittest.TestCase):
a = A(hash(A.f.im_func)^(-1))
hash(a.f)
+ def testAttrSlots(self):
+ class C:
+ pass
+ for c in C, C():
+ self.assertRaises(TypeError, type(c).__getattribute__, c, [])
+ self.assertRaises(TypeError, type(c).__setattr__, c, [], [])
+
def test_main():
with test_support.check_py3k_warnings(
(".+__(get|set|del)slice__ has been removed", DeprecationWarning),
diff --git a/Lib/test/test_cmath.py b/Lib/test/test_cmath.py
index 8b5c4bf..5d10261 100644
--- a/Lib/test/test_cmath.py
+++ b/Lib/test/test_cmath.py
@@ -282,7 +282,7 @@ class CMathTests(unittest.TestCase):
def test_specific_values(self):
if not float.__getformat__("double").startswith("IEEE"):
- return
+ self.skipTest('needs IEEE double')
def rect_complex(z):
"""Wrapped version of rect that accepts a complex number instead of
diff --git a/Lib/test/test_cmd.py b/Lib/test/test_cmd.py
index 8bb4e63..2cb2c88 100644
--- a/Lib/test/test_cmd.py
+++ b/Lib/test/test_cmd.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
"""
Test script for the 'cmd' module
Original by Michael Schneider
@@ -84,11 +83,11 @@ class samplecmdclass(cmd.Cmd):
<BLANKLINE>
Documented commands (type help <topic>):
========================================
- add
+ add help
<BLANKLINE>
Undocumented commands:
======================
- exit help shell
+ exit shell
<BLANKLINE>
Test for the function print_topics():
@@ -125,11 +124,11 @@ class samplecmdclass(cmd.Cmd):
<BLANKLINE>
Documented commands (type help <topic>):
========================================
- add
+ add help
<BLANKLINE>
Undocumented commands:
======================
- exit help shell
+ exit shell
<BLANKLINE>
help text for add
Hello from postloop
diff --git a/Lib/test/test_cmd_line.py b/Lib/test/test_cmd_line.py
index 05e29d4..12f26d9 100644
--- a/Lib/test/test_cmd_line.py
+++ b/Lib/test/test_cmd_line.py
@@ -2,9 +2,13 @@
# All tests are executed with environment variables ignored
# See test_cmd_line_script.py for testing of script execution
-import test.test_support, unittest
+import test.test_support
import sys
-from test.script_helper import spawn_python, kill_python, python_exit_code
+import unittest
+from test.script_helper import (
+ assert_python_ok, assert_python_failure, spawn_python, kill_python,
+ python_exit_code
+)
class CmdLineTest(unittest.TestCase):
@@ -101,6 +105,36 @@ class CmdLineTest(unittest.TestCase):
data = self.start_python('-R', '-c', code)
self.assertTrue('hash_randomization=1' in data)
+ def test_del___main__(self):
+ # Issue #15001: PyRun_SimpleFileExFlags() did crash because it kept a
+ # borrowed reference to the dict of __main__ module and later modify
+ # the dict whereas the module was destroyed
+ filename = test.test_support.TESTFN
+ self.addCleanup(test.test_support.unlink, filename)
+ with open(filename, "w") as script:
+ print >>script, "import sys"
+ print >>script, "del sys.modules['__main__']"
+ assert_python_ok(filename)
+
+ def test_unknown_options(self):
+ rc, out, err = assert_python_failure('-E', '-z')
+ self.assertIn(b'Unknown option: -z', err)
+ self.assertEqual(err.splitlines().count(b'Unknown option: -z'), 1)
+ self.assertEqual(b'', out)
+ # Add "without='-E'" to prevent _assert_python to append -E
+ # to env_vars and change the output of stderr
+ rc, out, err = assert_python_failure('-z', without='-E')
+ self.assertIn(b'Unknown option: -z', err)
+ self.assertEqual(err.splitlines().count(b'Unknown option: -z'), 1)
+ self.assertEqual(b'', out)
+ rc, out, err = assert_python_failure('-a', '-z', without='-E')
+ self.assertIn(b'Unknown option: -a', err)
+ # only the first unknown option is reported
+ self.assertNotIn(b'Unknown option: -z', err)
+ self.assertEqual(err.splitlines().count(b'Unknown option: -a'), 1)
+ self.assertEqual(b'', out)
+
+
def test_main():
test.test_support.run_unittest(CmdLineTest)
test.test_support.reap_children()
diff --git a/Lib/test/test_cmd_line_script.py b/Lib/test/test_cmd_line_script.py
index 099471f..8b05227 100644
--- a/Lib/test/test_cmd_line_script.py
+++ b/Lib/test/test_cmd_line_script.py
@@ -6,11 +6,14 @@ import os.path
import test.test_support
from test.script_helper import (run_python,
temp_dir, make_script, compile_script,
- make_pkg, make_zip_script, make_zip_pkg)
+ assert_python_failure, make_pkg,
+ make_zip_script, make_zip_pkg)
verbose = test.test_support.verbose
+example_args = ['test1', 'test2', 'test3']
+
test_source = """\
# Script may be run with optimisation enabled, so don't rely on assert
# statements being executed
@@ -204,6 +207,19 @@ class CmdLineTest(unittest.TestCase):
launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
self._check_import_error(launch_name, msg)
+ def test_dash_m_error_code_is_one(self):
+ # If a module is invoked with the -m command line flag
+ # and results in an error that the return code to the
+ # shell is '1'
+ with temp_dir() as script_dir:
+ pkg_dir = os.path.join(script_dir, 'test_pkg')
+ make_pkg(pkg_dir)
+ script_name = _make_test_script(pkg_dir, 'other', "if __name__ == '__main__': raise ValueError")
+ rc, out, err = assert_python_failure('-m', 'test_pkg.other', *example_args)
+ if verbose > 1:
+ print(out)
+ self.assertEqual(rc, 1)
+
def test_main():
test.test_support.run_unittest(CmdLineTest)
diff --git a/Lib/test/test_code.py b/Lib/test/test_code.py
index 97adf59..c588999 100644
--- a/Lib/test/test_code.py
+++ b/Lib/test/test_code.py
@@ -82,7 +82,7 @@ consts: ("'doc string'", 'None')
import unittest
import weakref
-import _testcapi
+from test.test_support import run_doctest, run_unittest, cpython_only
def consts(t):
@@ -104,7 +104,9 @@ def dump(co):
class CodeTest(unittest.TestCase):
+ @cpython_only
def test_newempty(self):
+ import _testcapi
co = _testcapi.code_newempty("filename", "funcname", 15)
self.assertEqual(co.co_filename, "filename")
self.assertEqual(co.co_name, "funcname")
@@ -137,7 +139,6 @@ class CodeWeakRefTest(unittest.TestCase):
def test_main(verbose=None):
- from test.test_support import run_doctest, run_unittest
from test import test_code
run_doctest(test_code, verbose)
run_unittest(CodeTest, CodeWeakRefTest)
diff --git a/Lib/test/test_codeccallbacks.py b/Lib/test/test_codeccallbacks.py
index 4825f43..dbdb4f4 100644
--- a/Lib/test/test_codeccallbacks.py
+++ b/Lib/test/test_codeccallbacks.py
@@ -66,15 +66,34 @@ class CodecCallbackTest(unittest.TestCase):
# replace unencodable characters which numeric character entities.
# For ascii, latin-1 and charmaps this is completely implemented
# in C and should be reasonably fast.
- s = u"\u30b9\u30d1\u30e2 \xe4nd eggs"
+ s = u"\u30b9\u30d1\u30e2 \xe4nd egg\u0161"
self.assertEqual(
s.encode("ascii", "xmlcharrefreplace"),
- "&#12473;&#12497;&#12514; &#228;nd eggs"
+ "&#12473;&#12497;&#12514; &#228;nd egg&#353;"
)
self.assertEqual(
s.encode("latin-1", "xmlcharrefreplace"),
- "&#12473;&#12497;&#12514; \xe4nd eggs"
+ "&#12473;&#12497;&#12514; \xe4nd egg&#353;"
)
+ self.assertEqual(
+ s.encode("iso-8859-15", "xmlcharrefreplace"),
+ "&#12473;&#12497;&#12514; \xe4nd egg\xa8"
+ )
+
+ def test_xmlcharrefreplace_with_surrogates(self):
+ tests = [(u'\U0001f49d', '&#128157;'),
+ (u'\ud83d', '&#55357;'),
+ (u'\udc9d', '&#56477;'),
+ ]
+ if u'\ud83d\udc9d' != u'\U0001f49d':
+ tests += [(u'\ud83d\udc9d', '&#55357;&#56477;')]
+ for encoding in ['ascii', 'latin1', 'iso-8859-15']:
+ for s, exp in tests:
+ self.assertEqual(s.encode(encoding, 'xmlcharrefreplace'),
+ exp, msg='%r.encode(%r)' % (s, encoding))
+ self.assertEqual((s+'X').encode(encoding, 'xmlcharrefreplace'),
+ exp+'X',
+ msg='%r.encode(%r)' % (s + 'X', encoding))
def test_xmlcharnamereplace(self):
# This time use a named character entity for unencodable
@@ -262,12 +281,12 @@ class CodecCallbackTest(unittest.TestCase):
self.assertEqual(
"\\u3042\u3xxx".decode("unicode-escape", "test.handler1"),
- u"\u3042[<92><117><51><120>]xx"
+ u"\u3042[<92><117><51>]xxx"
)
self.assertEqual(
"\\u3042\u3xx".decode("unicode-escape", "test.handler1"),
- u"\u3042[<92><117><51><120><120>]"
+ u"\u3042[<92><117><51>]xx"
)
self.assertEqual(
@@ -717,7 +736,7 @@ class CodecCallbackTest(unittest.TestCase):
raise ValueError
self.assertRaises(UnicodeError, codecs.charmap_decode, "\xff", "strict", {0xff: None})
self.assertRaises(ValueError, codecs.charmap_decode, "\xff", "strict", D())
- self.assertRaises(TypeError, codecs.charmap_decode, "\xff", "strict", {0xff: sys.maxunicode+1})
+ self.assertRaises(TypeError, codecs.charmap_decode, "\xff", "strict", {0xff: 0x110000})
def test_encodehelper(self):
# enhance coverage of:
diff --git a/Lib/test/test_codecencodings_cn.py b/Lib/test/test_codecencodings_cn.py
index 215e8de..cd102fd 100644
--- a/Lib/test/test_codecencodings_cn.py
+++ b/Lib/test/test_codecencodings_cn.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# test_codecencodings_cn.py
# Codec encoding tests for PRC encodings.
diff --git a/Lib/test/test_codecencodings_hk.py b/Lib/test/test_codecencodings_hk.py
index b1c2606..391c316 100644
--- a/Lib/test/test_codecencodings_hk.py
+++ b/Lib/test/test_codecencodings_hk.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# test_codecencodings_hk.py
# Codec encoding tests for HongKong encodings.
diff --git a/Lib/test/test_codecencodings_iso2022.py b/Lib/test/test_codecencodings_iso2022.py
index 9d1d2af..9f23628 100644
--- a/Lib/test/test_codecencodings_iso2022.py
+++ b/Lib/test/test_codecencodings_iso2022.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
# Codec encoding tests for ISO 2022 encodings.
from test import test_support
@@ -36,6 +34,7 @@ class Test_ISO2022_KR(test_multibytecodec_support.TestBase, unittest.TestCase):
# iso2022_kr.txt cannot be used to test "chunk coding": the escape
# sequence is only written on the first line
+ @unittest.skip('iso2022_kr.txt cannot be used to test "chunk coding"')
def test_chunkcoding(self):
pass
diff --git a/Lib/test/test_codecencodings_jp.py b/Lib/test/test_codecencodings_jp.py
index 5f81f41..f3cf923 100644
--- a/Lib/test/test_codecencodings_jp.py
+++ b/Lib/test/test_codecencodings_jp.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# test_codecencodings_jp.py
# Codec encoding tests for Japanese encodings.
diff --git a/Lib/test/test_codecencodings_kr.py b/Lib/test/test_codecencodings_kr.py
index 8c866a9..45ea62b 100644
--- a/Lib/test/test_codecencodings_kr.py
+++ b/Lib/test/test_codecencodings_kr.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# test_codecencodings_kr.py
# Codec encoding tests for ROK encodings.
diff --git a/Lib/test/test_codecencodings_tw.py b/Lib/test/test_codecencodings_tw.py
index 983d06f..c62d321 100644
--- a/Lib/test/test_codecencodings_tw.py
+++ b/Lib/test/test_codecencodings_tw.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# test_codecencodings_tw.py
# Codec encoding tests for ROC encodings.
diff --git a/Lib/test/test_codecmaps_cn.py b/Lib/test/test_codecmaps_cn.py
index 344fc56..9f42858 100644
--- a/Lib/test/test_codecmaps_cn.py
+++ b/Lib/test/test_codecmaps_cn.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# test_codecmaps_cn.py
# Codec mapping tests for PRC encodings
diff --git a/Lib/test/test_codecmaps_hk.py b/Lib/test/test_codecmaps_hk.py
index 362ab7f..3fd3eb8 100644
--- a/Lib/test/test_codecmaps_hk.py
+++ b/Lib/test/test_codecmaps_hk.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# test_codecmaps_hk.py
# Codec mapping tests for HongKong encodings
@@ -17,5 +16,4 @@ def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
- test_support.use_resources = ['urlfetch']
test_main()
diff --git a/Lib/test/test_codecmaps_jp.py b/Lib/test/test_codecmaps_jp.py
index 5466a98..0bf43b6 100644
--- a/Lib/test/test_codecmaps_jp.py
+++ b/Lib/test/test_codecmaps_jp.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# test_codecmaps_jp.py
# Codec mapping tests for Japanese encodings
diff --git a/Lib/test/test_codecmaps_kr.py b/Lib/test/test_codecmaps_kr.py
index 39c612e..b2513ea 100644
--- a/Lib/test/test_codecmaps_kr.py
+++ b/Lib/test/test_codecmaps_kr.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# test_codecmaps_kr.py
# Codec mapping tests for ROK encodings
diff --git a/Lib/test/test_codecmaps_tw.py b/Lib/test/test_codecmaps_tw.py
index 143ae23..ff6a356 100644
--- a/Lib/test/test_codecmaps_tw.py
+++ b/Lib/test/test_codecmaps_tw.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
#
# test_codecmaps_tw.py
# Codec mapping tests for ROC encodings
diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py
index d434f83..f2ec670 100644
--- a/Lib/test/test_codecs.py
+++ b/Lib/test/test_codecs.py
@@ -2,7 +2,12 @@ from test import test_support
import unittest
import codecs
import locale
-import sys, StringIO, _testcapi
+import sys, StringIO
+
+def coding_checker(self, coder):
+ def check(input, expect):
+ self.assertEqual(coder(input), (expect, len(input)))
+ return check
class Queue(object):
"""
@@ -92,19 +97,20 @@ class ReadTest(unittest.TestCase):
self.assertEqual(readalllines(s, True, 10), sexpected)
self.assertEqual(readalllines(s, False, 10), sexpectednoends)
+ lineends = ("\n", "\r\n", "\r", u"\u2028")
# Test long lines (multiple calls to read() in readline())
vw = []
vwo = []
- for (i, lineend) in enumerate(u"\n \r\n \r \u2028".split()):
- vw.append((i*200)*u"\3042" + lineend)
- vwo.append((i*200)*u"\3042")
- self.assertEqual(readalllines("".join(vw), True), "".join(vw))
- self.assertEqual(readalllines("".join(vw), False),"".join(vwo))
+ for (i, lineend) in enumerate(lineends):
+ vw.append((i*200+200)*u"\u3042" + lineend)
+ vwo.append((i*200+200)*u"\u3042")
+ self.assertEqual(readalllines("".join(vw), True), "|".join(vw))
+ self.assertEqual(readalllines("".join(vw), False), "|".join(vwo))
# Test lines where the first read might end with \r, so the
# reader has to look ahead whether this is a lone \r or a \r\n
for size in xrange(80):
- for lineend in u"\n \r\n \r \u2028".split():
+ for lineend in lineends:
s = 10*(size*u"a" + lineend + u"xxx\n")
reader = getreader(s)
for i in xrange(10):
@@ -112,12 +118,54 @@ class ReadTest(unittest.TestCase):
reader.readline(keepends=True),
size*u"a" + lineend,
)
+ self.assertEqual(
+ reader.readline(keepends=True),
+ "xxx\n",
+ )
reader = getreader(s)
for i in xrange(10):
self.assertEqual(
reader.readline(keepends=False),
size*u"a",
)
+ self.assertEqual(
+ reader.readline(keepends=False),
+ "xxx",
+ )
+
+ def test_mixed_readline_and_read(self):
+ lines = ["Humpty Dumpty sat on a wall,\n",
+ "Humpty Dumpty had a great fall.\r\n",
+ "All the king's horses and all the king's men\r",
+ "Couldn't put Humpty together again."]
+ data = ''.join(lines)
+ def getreader():
+ stream = StringIO.StringIO(data.encode(self.encoding))
+ return codecs.getreader(self.encoding)(stream)
+
+ # Issue #8260: Test readline() followed by read()
+ f = getreader()
+ self.assertEqual(f.readline(), lines[0])
+ self.assertEqual(f.read(), ''.join(lines[1:]))
+ self.assertEqual(f.read(), '')
+
+ # Issue #16636: Test readline() followed by readlines()
+ f = getreader()
+ self.assertEqual(f.readline(), lines[0])
+ self.assertEqual(f.readlines(), lines[1:])
+ self.assertEqual(f.read(), '')
+
+ # Test read() followed by read()
+ f = getreader()
+ self.assertEqual(f.read(size=40, chars=5), data[:5])
+ self.assertEqual(f.read(), data[5:])
+ self.assertEqual(f.read(), '')
+
+ # Issue #12446: Test read() followed by readlines()
+ f = getreader()
+ self.assertEqual(f.read(size=40, chars=5), data[:5])
+ self.assertEqual(f.readlines(), [lines[0][5:]] + lines[1:])
+ self.assertEqual(f.read(), '')
def test_bug1175396(self):
s = [
@@ -281,7 +329,7 @@ class UTF32Test(ReadTest):
def test_partial(self):
self.check_partial(
- u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff\U00010000",
[
u"", # first byte of BOM read
u"", # second byte of BOM read
@@ -303,6 +351,10 @@ class UTF32Test(ReadTest):
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff\U00010000",
]
)
@@ -331,7 +383,7 @@ class UTF32LETest(ReadTest):
def test_partial(self):
self.check_partial(
- u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff\U00010000",
[
u"",
u"",
@@ -349,6 +401,10 @@ class UTF32LETest(ReadTest):
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff\U00010000",
]
)
@@ -371,7 +427,7 @@ class UTF32BETest(ReadTest):
def test_partial(self):
self.check_partial(
- u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff\U00010000",
[
u"",
u"",
@@ -389,6 +445,10 @@ class UTF32BETest(ReadTest):
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff\U00010000",
]
)
@@ -439,7 +499,7 @@ class UTF16Test(ReadTest):
def test_partial(self):
self.check_partial(
- u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff\U00010000",
[
u"", # first byte of BOM read
u"", # second byte of BOM read => byteorder known
@@ -451,6 +511,10 @@ class UTF16Test(ReadTest):
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff\U00010000",
]
)
@@ -481,7 +545,7 @@ class UTF16LETest(ReadTest):
def test_partial(self):
self.check_partial(
- u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff\U00010000",
[
u"",
u"\x00",
@@ -491,18 +555,34 @@ class UTF16LETest(ReadTest):
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff\U00010000",
]
)
def test_errors(self):
- self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode, "\xff", "strict", True)
+ tests = [
+ (b'\xff', u'\ufffd'),
+ (b'A\x00Z', u'A\ufffd'),
+ (b'A\x00B\x00C\x00D\x00Z', u'ABCD\ufffd'),
+ (b'\x00\xd8', u'\ufffd'),
+ (b'\x00\xd8A', u'\ufffd'),
+ (b'\x00\xd8A\x00', u'\ufffdA'),
+ (b'\x00\xdcA\x00', u'\ufffdA'),
+ ]
+ for raw, expected in tests:
+ self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode,
+ raw, 'strict', True)
+ self.assertEqual(raw.decode('utf-16le', 'replace'), expected)
class UTF16BETest(ReadTest):
encoding = "utf-16-be"
def test_partial(self):
self.check_partial(
- u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff\U00010000",
[
u"",
u"\x00",
@@ -512,18 +592,34 @@ class UTF16BETest(ReadTest):
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff",
+ u"\x00\xff\u0100\uffff\U00010000",
]
)
def test_errors(self):
- self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode, "\xff", "strict", True)
+ tests = [
+ (b'\xff', u'\ufffd'),
+ (b'\x00A\xff', u'A\ufffd'),
+ (b'\x00A\x00B\x00C\x00DZ', u'ABCD\ufffd'),
+ (b'\xd8\x00', u'\ufffd'),
+ (b'\xd8\x00\xdc', u'\ufffd'),
+ (b'\xd8\x00\x00A', u'\ufffdA'),
+ (b'\xdc\x00\x00A', u'\ufffdA'),
+ ]
+ for raw, expected in tests:
+ self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode,
+ raw, 'strict', True)
+ self.assertEqual(raw.decode('utf-16be', 'replace'), expected)
class UTF8Test(ReadTest):
encoding = "utf-8"
def test_partial(self):
self.check_partial(
- u"\x00\xff\u07ff\u0800\uffff",
+ u"\x00\xff\u07ff\u0800\uffff\U00010000",
[
u"\x00",
u"\x00",
@@ -536,6 +632,10 @@ class UTF8Test(ReadTest):
u"\x00\xff\u07ff\u0800",
u"\x00\xff\u07ff\u0800",
u"\x00\xff\u07ff\u0800\uffff",
+ u"\x00\xff\u07ff\u0800\uffff",
+ u"\x00\xff\u07ff\u0800\uffff",
+ u"\x00\xff\u07ff\u0800\uffff",
+ u"\x00\xff\u07ff\u0800\uffff\U00010000",
]
)
@@ -554,6 +654,35 @@ class UTF7Test(ReadTest):
]
)
+ def test_errors(self):
+ tests = [
+ ('a\xffb', u'a\ufffdb'),
+ ('a+IK', u'a\ufffd'),
+ ('a+IK-b', u'a\ufffdb'),
+ ('a+IK,b', u'a\ufffdb'),
+ ('a+IKx', u'a\u20ac\ufffd'),
+ ('a+IKx-b', u'a\u20ac\ufffdb'),
+ ('a+IKwgr', u'a\u20ac\ufffd'),
+ ('a+IKwgr-b', u'a\u20ac\ufffdb'),
+ ('a+IKwgr,', u'a\u20ac\ufffd'),
+ ('a+IKwgr,-b', u'a\u20ac\ufffd-b'),
+ ('a+IKwgrB', u'a\u20ac\u20ac\ufffd'),
+ ('a+IKwgrB-b', u'a\u20ac\u20ac\ufffdb'),
+ ('a+/,+IKw-b', u'a\ufffd\u20acb'),
+ ('a+//,+IKw-b', u'a\ufffd\u20acb'),
+ ('a+///,+IKw-b', u'a\uffff\ufffd\u20acb'),
+ ('a+////,+IKw-b', u'a\uffff\ufffd\u20acb'),
+ ]
+ for raw, expected in tests:
+ self.assertRaises(UnicodeDecodeError, codecs.utf_7_decode,
+ raw, 'strict', True)
+ self.assertEqual(raw.decode('utf-7', 'replace'), expected)
+
+ def test_nonbmp(self):
+ self.assertEqual(u'\U000104A0'.encode(self.encoding), '+2AHcoA-')
+ self.assertEqual(u'\ud801\udca0'.encode(self.encoding), '+2AHcoA-')
+ self.assertEqual('+2AHcoA-'.decode(self.encoding), u'\U000104A0')
+
class UTF16ExTest(unittest.TestCase):
def test_errors(self):
@@ -595,7 +724,7 @@ class UTF8SigTest(ReadTest):
def test_partial(self):
self.check_partial(
- u"\ufeff\x00\xff\u07ff\u0800\uffff",
+ u"\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
[
u"",
u"",
@@ -614,6 +743,10 @@ class UTF8SigTest(ReadTest):
u"\ufeff\x00\xff\u07ff\u0800",
u"\ufeff\x00\xff\u07ff\u0800",
u"\ufeff\x00\xff\u07ff\u0800\uffff",
+ u"\ufeff\x00\xff\u07ff\u0800\uffff",
+ u"\ufeff\x00\xff\u07ff\u0800\uffff",
+ u"\ufeff\x00\xff\u07ff\u0800\uffff",
+ u"\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
]
)
@@ -674,6 +807,54 @@ class EscapeDecodeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.escape_decode(""), ("", 0))
+ def test_raw(self):
+ decode = codecs.escape_decode
+ for b in range(256):
+ b = chr(b)
+ if b != '\\':
+ self.assertEqual(decode(b + '0'), (b + '0', 2))
+
+ def test_escape(self):
+ decode = codecs.escape_decode
+ check = coding_checker(self, decode)
+ check(b"[\\\n]", b"[]")
+ check(br'[\"]', b'["]')
+ check(br"[\']", b"[']")
+ check(br"[\\]", br"[\]")
+ check(br"[\a]", b"[\x07]")
+ check(br"[\b]", b"[\x08]")
+ check(br"[\t]", b"[\x09]")
+ check(br"[\n]", b"[\x0a]")
+ check(br"[\v]", b"[\x0b]")
+ check(br"[\f]", b"[\x0c]")
+ check(br"[\r]", b"[\x0d]")
+ check(br"[\7]", b"[\x07]")
+ check(br"[\8]", br"[\8]")
+ check(br"[\78]", b"[\x078]")
+ check(br"[\41]", b"[!]")
+ check(br"[\418]", b"[!8]")
+ check(br"[\101]", b"[A]")
+ check(br"[\1010]", b"[A0]")
+ check(br"[\501]", b"[A]")
+ check(br"[\x41]", b"[A]")
+ check(br"[\X41]", br"[\X41]")
+ check(br"[\x410]", b"[A0]")
+ for b in range(256):
+ b = chr(b)
+ if b not in '\n"\'\\abtnvfr01234567x':
+ check('\\' + b, '\\' + b)
+
+ def test_errors(self):
+ decode = codecs.escape_decode
+ self.assertRaises(ValueError, decode, br"\x")
+ self.assertRaises(ValueError, decode, br"[\x]")
+ self.assertEqual(decode(br"[\x]\x", "ignore"), (b"[]", 6))
+ self.assertEqual(decode(br"[\x]\x", "replace"), (b"[?]?", 6))
+ self.assertRaises(ValueError, decode, br"\x0")
+ self.assertRaises(ValueError, decode, br"[\x0]")
+ self.assertEqual(decode(br"[\x0]\x0", "ignore"), (b"[]", 8))
+ self.assertEqual(decode(br"[\x0]\x0", "replace"), (b"[?]?", 8))
+
class RecodingTest(unittest.TestCase):
def test_recoding(self):
f = StringIO.StringIO()
@@ -1358,7 +1539,7 @@ else:
class BasicUnicodeTest(unittest.TestCase):
def test_basics(self):
- s = u"abc123" # all codecs should be able to encode these
+ s = u"abc123" # all codecs should be able to encode these
for encoding in all_unicode_encodings:
name = codecs.lookup(encoding).name
if encoding.endswith("_codec"):
@@ -1367,9 +1548,9 @@ class BasicUnicodeTest(unittest.TestCase):
name = "latin_1"
self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
(bytes, size) = codecs.getencoder(encoding)(s)
- self.assertEqual(size, len(s), "%r != %r (encoding=%r)" % (size, len(s), encoding))
+ self.assertEqual(size, len(s), "encoding=%r" % encoding)
(chars, size) = codecs.getdecoder(encoding)(bytes)
- self.assertEqual(chars, s, "%r != %r (encoding=%r)" % (chars, s, encoding))
+ self.assertEqual(chars, s, "encoding=%r" % encoding)
if encoding not in broken_unicode_with_streams:
# check stream reader/writer
@@ -1385,15 +1566,13 @@ class BasicUnicodeTest(unittest.TestCase):
for c in encodedresult:
q.write(c)
decodedresult += reader.read()
- self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
+ self.assertEqual(decodedresult, s, "encoding=%r" % encoding)
if encoding not in broken_incremental_coders:
- # check incremental decoder/encoder (fetched via the Python
- # and C API) and iterencode()/iterdecode()
+ # check incremental decoder/encoder and iterencode()/iterdecode()
try:
encoder = codecs.getincrementalencoder(encoding)()
- cencoder = _testcapi.codec_incrementalencoder(encoding)
- except LookupError: # no IncrementalEncoder
+ except LookupError: # no IncrementalEncoder
pass
else:
# check incremental decoder/encoder
@@ -1406,45 +1585,71 @@ class BasicUnicodeTest(unittest.TestCase):
for c in encodedresult:
decodedresult += decoder.decode(c)
decodedresult += decoder.decode("", True)
- self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
+ self.assertEqual(decodedresult, s,
+ "encoding=%r" % encoding)
+
+ # check iterencode()/iterdecode()
+ result = u"".join(codecs.iterdecode(
+ codecs.iterencode(s, encoding), encoding))
+ self.assertEqual(result, s, "encoding=%r" % encoding)
+ # check iterencode()/iterdecode() with empty string
+ result = u"".join(codecs.iterdecode(
+ codecs.iterencode(u"", encoding), encoding))
+ self.assertEqual(result, u"")
+
+ if encoding not in only_strict_mode:
+ # check incremental decoder/encoder with errors argument
+ try:
+ encoder = codecs.getincrementalencoder(encoding)("ignore")
+ except LookupError: # no IncrementalEncoder
+ pass
+ else:
+ encodedresult = "".join(encoder.encode(c) for c in s)
+ decoder = codecs.getincrementaldecoder(encoding)("ignore")
+ decodedresult = u"".join(decoder.decode(c)
+ for c in encodedresult)
+ self.assertEqual(decodedresult, s,
+ "encoding=%r" % encoding)
+
+ @test_support.cpython_only
+ def test_basics_capi(self):
+ from _testcapi import codec_incrementalencoder, codec_incrementaldecoder
+ s = u"abc123" # all codecs should be able to encode these
+ for encoding in all_unicode_encodings:
+ if encoding not in broken_incremental_coders:
+ # check incremental decoder/encoder and iterencode()/iterdecode()
+ try:
+ cencoder = codec_incrementalencoder(encoding)
+ except LookupError: # no IncrementalEncoder
+ pass
+ else:
# check C API
encodedresult = ""
for c in s:
encodedresult += cencoder.encode(c)
encodedresult += cencoder.encode(u"", True)
- cdecoder = _testcapi.codec_incrementaldecoder(encoding)
+ cdecoder = codec_incrementaldecoder(encoding)
decodedresult = u""
for c in encodedresult:
decodedresult += cdecoder.decode(c)
decodedresult += cdecoder.decode("", True)
- self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
-
- # check iterencode()/iterdecode()
- result = u"".join(codecs.iterdecode(codecs.iterencode(s, encoding), encoding))
- self.assertEqual(result, s, "%r != %r (encoding=%r)" % (result, s, encoding))
-
- # check iterencode()/iterdecode() with empty string
- result = u"".join(codecs.iterdecode(codecs.iterencode(u"", encoding), encoding))
- self.assertEqual(result, u"")
+ self.assertEqual(decodedresult, s,
+ "encoding=%r" % encoding)
if encoding not in only_strict_mode:
# check incremental decoder/encoder with errors argument
try:
- encoder = codecs.getincrementalencoder(encoding)("ignore")
- cencoder = _testcapi.codec_incrementalencoder(encoding, "ignore")
- except LookupError: # no IncrementalEncoder
+ cencoder = codec_incrementalencoder(encoding, "ignore")
+ except LookupError: # no IncrementalEncoder
pass
else:
- encodedresult = "".join(encoder.encode(c) for c in s)
- decoder = codecs.getincrementaldecoder(encoding)("ignore")
- decodedresult = u"".join(decoder.decode(c) for c in encodedresult)
- self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
-
encodedresult = "".join(cencoder.encode(c) for c in s)
- cdecoder = _testcapi.codec_incrementaldecoder(encoding, "ignore")
- decodedresult = u"".join(cdecoder.decode(c) for c in encodedresult)
- self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
+ cdecoder = codec_incrementaldecoder(encoding, "ignore")
+ decodedresult = u"".join(cdecoder.decode(c)
+ for c in encodedresult)
+ self.assertEqual(decodedresult, s,
+ "encoding=%r" % encoding)
def test_seek(self):
# all codecs should be able to encode these
@@ -1495,6 +1700,14 @@ class CharmapTest(unittest.TestCase):
(u"abc", 3)
)
+ self.assertRaises(UnicodeDecodeError,
+ codecs.charmap_decode, b"\x00\x01\x02", "strict", u"ab"
+ )
+
+ self.assertRaises(UnicodeDecodeError,
+ codecs.charmap_decode, "\x00\x01\x02", "strict", u"ab\ufffe"
+ )
+
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace", u"ab"),
(u"ab\ufffd", 3)
@@ -1521,6 +1734,149 @@ class CharmapTest(unittest.TestCase):
(u"", len(allbytes))
)
+ def test_decode_with_int2str_map(self):
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "strict",
+ {0: u'a', 1: u'b', 2: u'c'}),
+ (u"abc", 3)
+ )
+
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "strict",
+ {0: u'Aa', 1: u'Bb', 2: u'Cc'}),
+ (u"AaBbCc", 3)
+ )
+
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "strict",
+ {0: u'\U0010FFFF', 1: u'b', 2: u'c'}),
+ (u"\U0010FFFFbc", 3)
+ )
+
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "strict",
+ {0: u'a', 1: u'b', 2: u''}),
+ (u"ab", 3)
+ )
+
+ self.assertRaises(UnicodeDecodeError,
+ codecs.charmap_decode, "\x00\x01\x02", "strict",
+ {0: u'a', 1: u'b'}
+ )
+
+ self.assertRaises(UnicodeDecodeError,
+ codecs.charmap_decode, "\x00\x01\x02", "strict",
+ {0: u'a', 1: u'b', 2: None}
+ )
+
+ # Issue #14850
+ self.assertRaises(UnicodeDecodeError,
+ codecs.charmap_decode, "\x00\x01\x02", "strict",
+ {0: u'a', 1: u'b', 2: u'\ufffe'}
+ )
+
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "replace",
+ {0: u'a', 1: u'b'}),
+ (u"ab\ufffd", 3)
+ )
+
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "replace",
+ {0: u'a', 1: u'b', 2: None}),
+ (u"ab\ufffd", 3)
+ )
+
+ # Issue #14850
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "replace",
+ {0: u'a', 1: u'b', 2: u'\ufffe'}),
+ (u"ab\ufffd", 3)
+ )
+
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "ignore",
+ {0: u'a', 1: u'b'}),
+ (u"ab", 3)
+ )
+
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "ignore",
+ {0: u'a', 1: u'b', 2: None}),
+ (u"ab", 3)
+ )
+
+ # Issue #14850
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "ignore",
+ {0: u'a', 1: u'b', 2: u'\ufffe'}),
+ (u"ab", 3)
+ )
+
+ allbytes = "".join(chr(i) for i in xrange(256))
+ self.assertEqual(
+ codecs.charmap_decode(allbytes, "ignore", {}),
+ (u"", len(allbytes))
+ )
+
+ def test_decode_with_int2int_map(self):
+ a = ord(u'a')
+ b = ord(u'b')
+ c = ord(u'c')
+
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "strict",
+ {0: a, 1: b, 2: c}),
+ (u"abc", 3)
+ )
+
+ # Issue #15379
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "strict",
+ {0: 0x10FFFF, 1: b, 2: c}),
+ (u"\U0010FFFFbc", 3)
+ )
+
+ self.assertRaises(TypeError,
+ codecs.charmap_decode, "\x00\x01\x02", "strict",
+ {0: 0x110000, 1: b, 2: c}
+ )
+
+ self.assertRaises(UnicodeDecodeError,
+ codecs.charmap_decode, "\x00\x01\x02", "strict",
+ {0: a, 1: b},
+ )
+
+ self.assertRaises(UnicodeDecodeError,
+ codecs.charmap_decode, "\x00\x01\x02", "strict",
+ {0: a, 1: b, 2: 0xFFFE},
+ )
+
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "replace",
+ {0: a, 1: b}),
+ (u"ab\ufffd", 3)
+ )
+
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "replace",
+ {0: a, 1: b, 2: 0xFFFE}),
+ (u"ab\ufffd", 3)
+ )
+
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "ignore",
+ {0: a, 1: b}),
+ (u"ab", 3)
+ )
+
+ self.assertEqual(
+ codecs.charmap_decode("\x00\x01\x02", "ignore",
+ {0: a, 1: b, 2: 0xFFFE}),
+ (u"ab", 3)
+ )
+
+
class WithStmtTest(unittest.TestCase):
def test_encodedfile(self):
f = StringIO.StringIO("\xc3\xbc")
@@ -1535,6 +1891,134 @@ class WithStmtTest(unittest.TestCase):
self.assertEqual(srw.read(), u"\xfc")
+class UnicodeEscapeTest(unittest.TestCase):
+ def test_empty(self):
+ self.assertEqual(codecs.unicode_escape_encode(u""), ("", 0))
+ self.assertEqual(codecs.unicode_escape_decode(""), (u"", 0))
+
+ def test_raw_encode(self):
+ encode = codecs.unicode_escape_encode
+ for b in range(32, 127):
+ if b != ord('\\'):
+ self.assertEqual(encode(unichr(b)), (chr(b), 1))
+
+ def test_raw_decode(self):
+ decode = codecs.unicode_escape_decode
+ for b in range(256):
+ if b != ord('\\'):
+ self.assertEqual(decode(chr(b) + '0'), (unichr(b) + u'0', 2))
+
+ def test_escape_encode(self):
+ encode = codecs.unicode_escape_encode
+ check = coding_checker(self, encode)
+ check(u'\t', r'\t')
+ check(u'\n', r'\n')
+ check(u'\r', r'\r')
+ check(u'\\', r'\\')
+ for b in range(32):
+ if chr(b) not in '\t\n\r':
+ check(unichr(b), '\\x%02x' % b)
+ for b in range(127, 256):
+ check(unichr(b), '\\x%02x' % b)
+ check(u'\u20ac', r'\u20ac')
+ check(u'\U0001d120', r'\U0001d120')
+
+ def test_escape_decode(self):
+ decode = codecs.unicode_escape_decode
+ check = coding_checker(self, decode)
+ check("[\\\n]", u"[]")
+ check(r'[\"]', u'["]')
+ check(r"[\']", u"[']")
+ check(r"[\\]", ur"[\]")
+ check(r"[\a]", u"[\x07]")
+ check(r"[\b]", u"[\x08]")
+ check(r"[\t]", u"[\x09]")
+ check(r"[\n]", u"[\x0a]")
+ check(r"[\v]", u"[\x0b]")
+ check(r"[\f]", u"[\x0c]")
+ check(r"[\r]", u"[\x0d]")
+ check(r"[\7]", u"[\x07]")
+ check(r"[\8]", ur"[\8]")
+ check(r"[\78]", u"[\x078]")
+ check(r"[\41]", u"[!]")
+ check(r"[\418]", u"[!8]")
+ check(r"[\101]", u"[A]")
+ check(r"[\1010]", u"[A0]")
+ check(r"[\x41]", u"[A]")
+ check(r"[\x410]", u"[A0]")
+ check(r"\u20ac", u"\u20ac")
+ check(r"\U0001d120", u"\U0001d120")
+ for b in range(256):
+ if chr(b) not in '\n"\'\\abtnvfr01234567xuUN':
+ check('\\' + chr(b), u'\\' + unichr(b))
+
+ def test_decode_errors(self):
+ decode = codecs.unicode_escape_decode
+ for c, d in ('x', 2), ('u', 4), ('U', 4):
+ for i in range(d):
+ self.assertRaises(UnicodeDecodeError, decode,
+ "\\" + c + "0"*i)
+ self.assertRaises(UnicodeDecodeError, decode,
+ "[\\" + c + "0"*i + "]")
+ data = "[\\" + c + "0"*i + "]\\" + c + "0"*i
+ self.assertEqual(decode(data, "ignore"), (u"[]", len(data)))
+ self.assertEqual(decode(data, "replace"),
+ (u"[\ufffd]\ufffd", len(data)))
+ self.assertRaises(UnicodeDecodeError, decode, r"\U00110000")
+ self.assertEqual(decode(r"\U00110000", "ignore"), (u"", 10))
+ self.assertEqual(decode(r"\U00110000", "replace"), (u"\ufffd", 10))
+
+
+class RawUnicodeEscapeTest(unittest.TestCase):
+ def test_empty(self):
+ self.assertEqual(codecs.raw_unicode_escape_encode(u""), ("", 0))
+ self.assertEqual(codecs.raw_unicode_escape_decode(""), (u"", 0))
+
+ def test_raw_encode(self):
+ encode = codecs.raw_unicode_escape_encode
+ for b in range(256):
+ self.assertEqual(encode(unichr(b)), (chr(b), 1))
+
+ def test_raw_decode(self):
+ decode = codecs.raw_unicode_escape_decode
+ for b in range(256):
+ self.assertEqual(decode(chr(b) + '0'), (unichr(b) + u'0', 2))
+
+ def test_escape_encode(self):
+ encode = codecs.raw_unicode_escape_encode
+ check = coding_checker(self, encode)
+ for b in range(256):
+ if chr(b) not in 'uU':
+ check(u'\\' + unichr(b), '\\' + chr(b))
+ check(u'\u20ac', r'\u20ac')
+ check(u'\U0001d120', r'\U0001d120')
+
+ def test_escape_decode(self):
+ decode = codecs.raw_unicode_escape_decode
+ check = coding_checker(self, decode)
+ for b in range(256):
+ if chr(b) not in 'uU':
+ check('\\' + chr(b), u'\\' + unichr(b))
+ check(r"\u20ac", u"\u20ac")
+ check(r"\U0001d120", u"\U0001d120")
+
+ def test_decode_errors(self):
+ decode = codecs.raw_unicode_escape_decode
+ for c, d in ('u', 4), ('U', 4):
+ for i in range(d):
+ self.assertRaises(UnicodeDecodeError, decode,
+ "\\" + c + "0"*i)
+ self.assertRaises(UnicodeDecodeError, decode,
+ "[\\" + c + "0"*i + "]")
+ data = "[\\" + c + "0"*i + "]\\" + c + "0"*i
+ self.assertEqual(decode(data, "ignore"), (u"[]", len(data)))
+ self.assertEqual(decode(data, "replace"),
+ (u"[\ufffd]\ufffd", len(data)))
+ self.assertRaises(UnicodeDecodeError, decode, r"\U00110000")
+ self.assertEqual(decode(r"\U00110000", "ignore"), (u"", 10))
+ self.assertEqual(decode(r"\U00110000", "replace"), (u"\ufffd", 10))
+
+
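
A quick illustration of the two codecs exercised above (editorial sketch, not part of the patch; Python 2.7): unicode_escape interprets the full set of string escapes, while raw_unicode_escape only interprets \uXXXX and \UXXXXXXXX and leaves every other backslash alone:

    import codecs

    print codecs.unicode_escape_decode(r"\n\u20ac")      # (u'\n\u20ac', 8)
    print codecs.raw_unicode_escape_decode(r"\n\u20ac")  # (u'\\n\u20ac', 8)
    print codecs.unicode_escape_encode(u"\u20ac")        # ('\\u20ac', 1)
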
class BomTest(unittest.TestCase):
def test_seek0(self):
data = u"1234567890"
@@ -1620,6 +2104,8 @@ def test_main():
BasicStrTest,
CharmapTest,
WithStmtTest,
+ UnicodeEscapeTest,
+ RawUnicodeEscapeTest,
BomTest,
)
diff --git a/Lib/test/test_codeop.py b/Lib/test/test_codeop.py
index 99fe6f9..00cd3ce 100644
--- a/Lib/test/test_codeop.py
+++ b/Lib/test/test_codeop.py
@@ -50,7 +50,7 @@ class CodeopTests(unittest.TestCase):
'''succeed iff str is the start of an invalid piece of code'''
try:
compile_command(str,symbol=symbol)
- self.fail("No exception thrown for invalid code")
+ self.fail("No exception raised for invalid code")
except SyntaxError:
self.assertTrue(is_syntax)
except OverflowError:
diff --git a/Lib/test/test_collections.py b/Lib/test/test_collections.py
index 313f81f..de4ba86 100644
--- a/Lib/test/test_collections.py
+++ b/Lib/test/test_collections.py
@@ -8,6 +8,7 @@ import pickle, cPickle, copy
from random import randrange, shuffle
import keyword
import re
+import sets
import sys
from collections import Hashable, Iterable, Iterator
from collections import Sized, Container, Callable
@@ -17,6 +18,43 @@ from collections import Sequence, MutableSequence
TestNT = namedtuple('TestNT', 'x y z') # type used for pickle tests
+py273_named_tuple_pickle = '''\
+ccopy_reg
+_reconstructor
+p0
+(ctest.test_collections
+TestNT
+p1
+c__builtin__
+tuple
+p2
+(I10
+I20
+I30
+tp3
+tp4
+Rp5
+ccollections
+OrderedDict
+p6
+((lp7
+(lp8
+S'x'
+p9
+aI10
+aa(lp10
+S'y'
+p11
+aI20
+aa(lp12
+S'z'
+p13
+aI30
+aatp14
+Rp15
+b.
+'''
+
class TestNamedTuple(unittest.TestCase):
def test_factory(self):
@@ -215,6 +253,11 @@ class TestNamedTuple(unittest.TestCase):
# test __getnewargs__
self.assertEqual(t.__getnewargs__(), values)
+ def test_pickling_bug_18015(self):
+ # http://bugs.python.org/issue18015
+ pt = pickle.loads(py273_named_tuple_pickle)
+ self.assertEqual(pt.x, 10)
+
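
The pickle payload above was produced by Python 2.7.3, where a namedtuple's pickled state carried an OrderedDict (visible in the ccollections/OrderedDict opcodes); the new test only has to load it. A plain round trip on the current interpreter looks like this (editorial sketch, not part of the patch; the Point type is illustrative):

    import pickle
    from collections import namedtuple

    Point = namedtuple('Point', 'x y')        # illustrative type, not the TestNT above
    data = pickle.dumps(Point(10, 20))        # protocol 0 by default
    restored = pickle.loads(data)
    assert restored.x == 10 and restored.y == 20
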
class ABCTestCase(unittest.TestCase):
def validate_abstract_methods(self, abc, *names):
@@ -552,6 +595,198 @@ class TestCollectionABCs(ABCTestCase):
s |= s
self.assertEqual(s, full)
+ def test_issue16373(self):
+ # Recursion error comparing comparable and noncomparable
+ # Set instances
+ class MyComparableSet(Set):
+ def __contains__(self, x):
+ return False
+ def __len__(self):
+ return 0
+ def __iter__(self):
+ return iter([])
+ class MyNonComparableSet(Set):
+ def __contains__(self, x):
+ return False
+ def __len__(self):
+ return 0
+ def __iter__(self):
+ return iter([])
+ def __le__(self, x):
+ return NotImplemented
+ def __lt__(self, x):
+ return NotImplemented
+
+ cs = MyComparableSet()
+ ncs = MyNonComparableSet()
+
+ # Run all the variants to make sure they don't mutually recurse
+ ncs < cs
+ ncs <= cs
+ ncs > cs
+ ncs >= cs
+ cs < ncs
+ cs <= ncs
+ cs > ncs
+ cs >= ncs
+
+ def assertSameSet(self, s1, s2):
+ # coerce both to a real set then check equality
+ self.assertEqual(set(s1), set(s2))
+
+ def test_Set_interoperability_with_real_sets(self):
+ # Issue: 8743
+ class ListSet(Set):
+ def __init__(self, elements=()):
+ self.data = []
+ for elem in elements:
+ if elem not in self.data:
+ self.data.append(elem)
+ def __contains__(self, elem):
+ return elem in self.data
+ def __iter__(self):
+ return iter(self.data)
+ def __len__(self):
+ return len(self.data)
+ def __repr__(self):
+ return 'Set({!r})'.format(self.data)
+
+ r1 = set('abc')
+ r2 = set('bcd')
+ r3 = set('abcde')
+ f1 = ListSet('abc')
+ f2 = ListSet('bcd')
+ f3 = ListSet('abcde')
+ l1 = list('abccba')
+ l2 = list('bcddcb')
+ l3 = list('abcdeedcba')
+ p1 = sets.Set('abc')
+ p2 = sets.Set('bcd')
+ p3 = sets.Set('abcde')
+
+ target = r1 & r2
+ self.assertSameSet(f1 & f2, target)
+ self.assertSameSet(f1 & r2, target)
+ self.assertSameSet(r2 & f1, target)
+ self.assertSameSet(f1 & p2, target)
+ self.assertSameSet(p2 & f1, target)
+ self.assertSameSet(f1 & l2, target)
+
+ target = r1 | r2
+ self.assertSameSet(f1 | f2, target)
+ self.assertSameSet(f1 | r2, target)
+ self.assertSameSet(r2 | f1, target)
+ self.assertSameSet(f1 | p2, target)
+ self.assertSameSet(p2 | f1, target)
+ self.assertSameSet(f1 | l2, target)
+
+ fwd_target = r1 - r2
+ rev_target = r2 - r1
+ self.assertSameSet(f1 - f2, fwd_target)
+ self.assertSameSet(f2 - f1, rev_target)
+ self.assertSameSet(f1 - r2, fwd_target)
+ self.assertSameSet(f2 - r1, rev_target)
+ self.assertSameSet(r1 - f2, fwd_target)
+ self.assertSameSet(r2 - f1, rev_target)
+ self.assertSameSet(f1 - p2, fwd_target)
+ self.assertSameSet(f2 - p1, rev_target)
+ self.assertSameSet(p1 - f2, fwd_target)
+ self.assertSameSet(p2 - f1, rev_target)
+ self.assertSameSet(f1 - l2, fwd_target)
+ self.assertSameSet(f2 - l1, rev_target)
+
+ target = r1 ^ r2
+ self.assertSameSet(f1 ^ f2, target)
+ self.assertSameSet(f1 ^ r2, target)
+ self.assertSameSet(r2 ^ f1, target)
+ self.assertSameSet(f1 ^ p2, target)
+ self.assertSameSet(p2 ^ f1, target)
+ self.assertSameSet(f1 ^ l2, target)
+
+ # proper subset
+ self.assertTrue(f1 < f3)
+ self.assertFalse(f1 < f1)
+ self.assertFalse(f1 < f2)
+ self.assertTrue(r1 < f3)
+ self.assertFalse(r1 < f1)
+ self.assertFalse(r1 < f2)
+ self.assertTrue(r1 < r3)
+ self.assertFalse(r1 < r1)
+ self.assertFalse(r1 < r2)
+ # python 2 only, cross-type compares will succeed
+ f1 < l3
+ f1 < l1
+ f1 < l2
+
+ # any subset
+ self.assertTrue(f1 <= f3)
+ self.assertTrue(f1 <= f1)
+ self.assertFalse(f1 <= f2)
+ self.assertTrue(r1 <= f3)
+ self.assertTrue(r1 <= f1)
+ self.assertFalse(r1 <= f2)
+ self.assertTrue(r1 <= r3)
+ self.assertTrue(r1 <= r1)
+ self.assertFalse(r1 <= r2)
+ # python 2 only, cross-type compares will succeed
+ f1 <= l3
+ f1 <= l1
+ f1 <= l2
+
+ # proper superset
+ self.assertTrue(f3 > f1)
+ self.assertFalse(f1 > f1)
+ self.assertFalse(f2 > f1)
+ self.assertTrue(r3 > r1)
+ self.assertFalse(f1 > r1)
+ self.assertFalse(f2 > r1)
+ self.assertTrue(r3 > r1)
+ self.assertFalse(r1 > r1)
+ self.assertFalse(r2 > r1)
+ # python 2 only, cross-type compares will succeed
+ f1 > l3
+ f1 > l1
+ f1 > l2
+
+ # any superset
+ self.assertTrue(f3 >= f1)
+ self.assertTrue(f1 >= f1)
+ self.assertFalse(f2 >= f1)
+ self.assertTrue(r3 >= r1)
+ self.assertTrue(f1 >= r1)
+ self.assertFalse(f2 >= r1)
+ self.assertTrue(r3 >= r1)
+ self.assertTrue(r1 >= r1)
+ self.assertFalse(r2 >= r1)
+ # python 2 only, cross-type compares will succeed
+ f1 >= l3
+ f1 >= l1
+ f1 >= l2
+
+ # equality
+ self.assertTrue(f1 == f1)
+ self.assertTrue(r1 == f1)
+ self.assertTrue(f1 == r1)
+ self.assertFalse(f1 == f3)
+ self.assertFalse(r1 == f3)
+ self.assertFalse(f1 == r3)
+ # python 2 only, cross-type compares will succeed
+ f1 == l3
+ f1 == l1
+ f1 == l2
+
+ # inequality
+ self.assertFalse(f1 != f1)
+ self.assertFalse(r1 != f1)
+ self.assertFalse(f1 != r1)
+ self.assertTrue(f1 != f3)
+ self.assertTrue(r1 != f3)
+ self.assertTrue(f1 != r3)
+ # python 2 only, cross-type compares will succeed
+ f1 != l3
+ f1 != l1
+ f1 != l2
+
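
The interoperability tests above rely on collections.Set deriving all binary operations from just __contains__, __iter__ and __len__, so a minimal user class mixes freely with built-in sets, lists and the legacy sets.Set (editorial sketch, not part of the patch; same shape as the ListSet used in the test):

    from collections import Set

    class ListSet(Set):
        def __init__(self, elements=()):
            self.data = []
            for elem in elements:
                if elem not in self.data:
                    self.data.append(elem)
        def __contains__(self, elem):
            return elem in self.data
        def __iter__(self):
            return iter(self.data)
        def __len__(self):
            return len(self.data)

    print sorted(ListSet('abc') & set('bcd'))   # ['b', 'c']
    print sorted(ListSet('abc') | set('bcd'))   # ['a', 'b', 'c', 'd']
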
def test_Mapping(self):
for sample in [dict]:
self.assertIsInstance(sample(), Mapping)
diff --git a/Lib/test/test_compile.py b/Lib/test/test_compile.py
index 22d1708..ec2709d 100644
--- a/Lib/test/test_compile.py
+++ b/Lib/test/test_compile.py
@@ -1,3 +1,4 @@
+import math
import unittest
import sys
import _ast
@@ -61,6 +62,34 @@ class TestSpecifics(unittest.TestCase):
except SyntaxError:
pass
+ def test_exec_functional_style(self):
+ # Exec'ing a tuple of length 2 works.
+ g = {'b': 2}
+ exec("a = b + 1", g)
+ self.assertEqual(g['a'], 3)
+
+ # As does exec'ing a tuple of length 3.
+ l = {'b': 3}
+ g = {'b': 5, 'c': 7}
+ exec("a = b + c", g, l)
+ self.assertNotIn('a', g)
+ self.assertEqual(l['a'], 10)
+
+ # Tuples not of length 2 or 3 are invalid.
+ with self.assertRaises(TypeError):
+ exec("a = b + 1",)
+
+ with self.assertRaises(TypeError):
+ exec("a = b + 1", {}, {}, {})
+
+ # Can't mix and match the two calling forms.
+ g = {'a': 3, 'b': 4}
+ l = {}
+ with self.assertRaises(TypeError):
+ exec("a = b + 1", g) in g
+ with self.assertRaises(TypeError):
+ exec("a = b + 1", g, l) in g, l
+
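
In Python 2, exec is a statement, but the parenthesised calls above work because a 2- or 3-tuple after exec is unpacked into (code, globals[, locals]); that is what the new test calls the functional style. A standalone sketch (not part of the patch):

    g = {'b': 2}
    exec("a = b + 1", g)                       # (code, globals)
    print g['a']                               # 3

    l = {}
    exec("a = b + c", {'b': 5, 'c': 7}, l)     # (code, globals, locals)
    print l['a']                               # 12: the assignment lands in the locals dict
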
def test_exec_with_general_mapping_for_locals(self):
class M:
@@ -491,8 +520,46 @@ if 1:
self.assertRaises(TypeError, compile, ast, '<ast>', 'exec')
+class TestStackSize(unittest.TestCase):
+ # These tests check that the computed stack size for a code object
+ # stays within reasonable bounds (see issue #21523 for an example
+ # dysfunction).
+ N = 100
+
+ def check_stack_size(self, code):
+ # To assert that the alleged stack size is not O(N), we
+ # check that it is smaller than log(N).
+ if isinstance(code, str):
+ code = compile(code, "<foo>", "single")
+ max_size = math.ceil(math.log(len(code.co_code)))
+ self.assertLessEqual(code.co_stacksize, max_size)
+
+ def test_and(self):
+ self.check_stack_size("x and " * self.N + "x")
+
+ def test_or(self):
+ self.check_stack_size("x or " * self.N + "x")
+
+ def test_and_or(self):
+ self.check_stack_size("x and x or " * self.N + "x")
+
+ def test_chained_comparison(self):
+ self.check_stack_size("x < " * self.N + "x")
+
+ def test_if_else(self):
+ self.check_stack_size("x if x else " * self.N + "x")
+
+ def test_binop(self):
+ self.check_stack_size("x + " * self.N + "x")
+
+ def test_func_and(self):
+ code = "def f(x):\n"
+ code += " x and x\n" * self.N
+ self.check_stack_size(code)
+
+
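
The bound used by check_stack_size can be inspected directly: on an interpreter with the issue #21523 fix, a long chain of 'and' compiles to a code object whose co_stacksize stays far below the number of operands (editorial sketch, not part of the patch; the filename '<editor-demo>' is illustrative):

    import math

    code = compile("x and " * 100 + "x", "<editor-demo>", "single")
    print code.co_stacksize                        # expected to be small after the fix
    print math.ceil(math.log(len(code.co_code)))   # the ceiling the test enforces
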
def test_main():
- test_support.run_unittest(TestSpecifics)
+ test_support.run_unittest(__name__)
if __name__ == "__main__":
- test_main()
+ unittest.main()
diff --git a/Lib/test/test_compileall.py b/Lib/test/test_compileall.py
index bff6989..d3a26db 100644
--- a/Lib/test/test_compileall.py
+++ b/Lib/test/test_compileall.py
@@ -31,11 +31,10 @@ class CompileallTests(unittest.TestCase):
compare = struct.pack('<4sl', imp.get_magic(), mtime)
return data, compare
+ @unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()')
def recreation_check(self, metadata):
"""Check that compileall recreates bytecode when the new metadata is
used."""
- if not hasattr(os, 'stat'):
- return
py_compile.compile(self.source_path)
self.assertEqual(*self.data())
with open(self.bc_path, 'rb') as file:
diff --git a/Lib/test/test_cookie.py b/Lib/test/test_cookie.py
index d09398d..816133e 100644
--- a/Lib/test/test_cookie.py
+++ b/Lib/test/test_cookie.py
@@ -64,13 +64,13 @@ class CookieTests(unittest.TestCase):
# loading 'expires'
C = Cookie.SimpleCookie()
- C.load('Customer="W"; expires=Wed, 01-Jan-2010 00:00:00 GMT')
+ C.load('Customer="W"; expires=Wed, 01 Jan 2010 00:00:00 GMT')
self.assertEqual(C['Customer']['expires'],
- 'Wed, 01-Jan-2010 00:00:00 GMT')
+ 'Wed, 01 Jan 2010 00:00:00 GMT')
C = Cookie.SimpleCookie()
- C.load('Customer="W"; expires=Wed, 01-Jan-98 00:00:00 GMT')
+ C.load('Customer="W"; expires=Wed, 01 Jan 98 00:00:00 GMT')
self.assertEqual(C['Customer']['expires'],
- 'Wed, 01-Jan-98 00:00:00 GMT')
+ 'Wed, 01 Jan 98 00:00:00 GMT')
def test_extended_encode(self):
# Issue 9824: some browsers don't follow the standard; we now
@@ -90,9 +90,10 @@ class CookieTests(unittest.TestCase):
def test_main():
run_unittest(CookieTests)
- with check_warnings(('.+Cookie class is insecure; do not use it',
- DeprecationWarning)):
- run_doctest(Cookie)
+ if Cookie.__doc__ is not None:
+ with check_warnings(('.+Cookie class is insecure; do not use it',
+ DeprecationWarning)):
+ run_doctest(Cookie)
if __name__ == '__main__':
test_main()
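
The updated expectations use the space-separated date form ('01 Jan 2010') rather than the old dash-separated one. For reference, a round trip of the expires attribute (editorial sketch, not part of the patch; Python 2.7):

    import Cookie

    C = Cookie.SimpleCookie()
    C.load('Customer="W"; expires=Wed, 01 Jan 2010 00:00:00 GMT')
    print C['Customer'].value           # W
    print C['Customer']['expires']      # Wed, 01 Jan 2010 00:00:00 GMT
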
diff --git a/Lib/test/test_cookielib.py b/Lib/test/test_cookielib.py
index dd0ad32..d4b80fa 100644
--- a/Lib/test/test_cookielib.py
+++ b/Lib/test/test_cookielib.py
@@ -26,8 +26,9 @@ class DateTimeTests(TestCase):
az = time2isoz()
bz = time2isoz(500000)
for text in (az, bz):
- self.assertTrue(re.search(r"^\d{4}-\d\d-\d\d \d\d:\d\d:\d\dZ$", text),
- "bad time2isoz format: %s %s" % (az, bz))
+ self.assertRegexpMatches(text,
+ r"^\d{4}-\d\d-\d\d \d\d:\d\d:\d\dZ$",
+ "bad time2isoz format: %s %s" % (az, bz))
def test_http2time(self):
from cookielib import http2time
@@ -75,12 +76,9 @@ class DateTimeTests(TestCase):
"%s => '%s' (%s)" % (test_t, result, expected))
for s in tests:
- t = http2time(s)
- t2 = http2time(s.lower())
- t3 = http2time(s.upper())
-
- self.assertTrue(t == t2 == t3 == test_t,
- "'%s' => %s, %s, %s (%s)" % (s, t, t2, t3, test_t))
+ self.assertEqual(http2time(s), test_t, s)
+ self.assertEqual(http2time(s.lower()), test_t, s.lower())
+ self.assertEqual(http2time(s.upper()), test_t, s.upper())
def test_http2time_garbage(self):
from cookielib import http2time
@@ -329,7 +327,7 @@ class CookieTests(TestCase):
## commas and equals commonly appear in the cookie value). This also
## means that if you fold multiple Set-Cookie header fields into one,
## comma-separated list, it'll be a headache to parse (at least my head
-## starts hurting everytime I think of that code).
+## starts hurting every time I think of that code).
## - Expires: You'll get all sorts of date formats in the expires,
## including empty expires attributes ("expires="). Be as flexible as you
## can, and certainly don't expect the weekday to be there; if you can't
@@ -367,7 +365,7 @@ class CookieTests(TestCase):
request = urllib2.Request(url)
r = pol.domain_return_ok(domain, request)
if ok: self.assertTrue(r)
- else: self.assertTrue(not r)
+ else: self.assertFalse(r)
def test_missing_value(self):
from cookielib import MozillaCookieJar, lwp_cookie_str
@@ -379,10 +377,10 @@ class CookieTests(TestCase):
interact_netscape(c, "http://www.acme.com/", 'eggs')
interact_netscape(c, "http://www.acme.com/", '"spam"; path=/foo/')
cookie = c._cookies["www.acme.com"]["/"]["eggs"]
- self.assertTrue(cookie.value is None)
+ self.assertIsNone(cookie.value)
self.assertEqual(cookie.name, "eggs")
cookie = c._cookies["www.acme.com"]['/foo/']['"spam"']
- self.assertTrue(cookie.value is None)
+ self.assertIsNone(cookie.value)
self.assertEqual(cookie.name, '"spam"')
self.assertEqual(lwp_cookie_str(cookie), (
r'"spam"; path="/foo/"; domain="www.acme.com"; '
@@ -426,7 +424,7 @@ class CookieTests(TestCase):
try:
cookie = c._cookies["www.example.com"]["/"]["ni"]
except KeyError:
- self.assertTrue(version is None) # didn't expect a stored cookie
+ self.assertIsNone(version) # didn't expect a stored cookie
else:
self.assertEqual(cookie.version, version)
# 2965 cookies are unaffected
@@ -452,26 +450,26 @@ class CookieTests(TestCase):
self.assertEqual(cookie.domain, ".acme.com")
self.assertTrue(cookie.domain_specified)
self.assertEqual(cookie.port, DEFAULT_HTTP_PORT)
- self.assertTrue(not cookie.port_specified)
+ self.assertFalse(cookie.port_specified)
# case is preserved
- self.assertTrue(cookie.has_nonstandard_attr("blArgh") and
- not cookie.has_nonstandard_attr("blargh"))
+ self.assertTrue(cookie.has_nonstandard_attr("blArgh"))
+ self.assertFalse(cookie.has_nonstandard_attr("blargh"))
cookie = c._cookies["www.acme.com"]["/"]["ni"]
self.assertEqual(cookie.domain, "www.acme.com")
- self.assertTrue(not cookie.domain_specified)
+ self.assertFalse(cookie.domain_specified)
self.assertEqual(cookie.port, "80,8080")
self.assertTrue(cookie.port_specified)
cookie = c._cookies["www.acme.com"]["/"]["nini"]
- self.assertTrue(cookie.port is None)
- self.assertTrue(not cookie.port_specified)
+ self.assertIsNone(cookie.port)
+ self.assertFalse(cookie.port_specified)
# invalid expires should not cause cookie to be dropped
foo = c._cookies["www.acme.com"]["/"]["foo"]
spam = c._cookies["www.acme.com"]["/"]["foo"]
- self.assertTrue(foo.expires is None)
- self.assertTrue(spam.expires is None)
+ self.assertIsNone(foo.expires)
+ self.assertIsNone(spam.expires)
def test_ns_parser_special_names(self):
# names such as 'expires' are not special in first name=value pair
@@ -655,12 +653,12 @@ class CookieTests(TestCase):
from cookielib import is_HDN
self.assertTrue(is_HDN("foo.bar.com"))
self.assertTrue(is_HDN("1foo2.3bar4.5com"))
- self.assertTrue(not is_HDN("192.168.1.1"))
- self.assertTrue(not is_HDN(""))
- self.assertTrue(not is_HDN("."))
- self.assertTrue(not is_HDN(".foo.bar.com"))
- self.assertTrue(not is_HDN("..foo"))
- self.assertTrue(not is_HDN("foo."))
+ self.assertFalse(is_HDN("192.168.1.1"))
+ self.assertFalse(is_HDN(""))
+ self.assertFalse(is_HDN("."))
+ self.assertFalse(is_HDN(".foo.bar.com"))
+ self.assertFalse(is_HDN("..foo"))
+ self.assertFalse(is_HDN("foo."))
def test_reach(self):
from cookielib import reach
@@ -676,39 +674,39 @@ class CookieTests(TestCase):
def test_domain_match(self):
from cookielib import domain_match, user_domain_match
self.assertTrue(domain_match("192.168.1.1", "192.168.1.1"))
- self.assertTrue(not domain_match("192.168.1.1", ".168.1.1"))
+ self.assertFalse(domain_match("192.168.1.1", ".168.1.1"))
self.assertTrue(domain_match("x.y.com", "x.Y.com"))
self.assertTrue(domain_match("x.y.com", ".Y.com"))
- self.assertTrue(not domain_match("x.y.com", "Y.com"))
+ self.assertFalse(domain_match("x.y.com", "Y.com"))
self.assertTrue(domain_match("a.b.c.com", ".c.com"))
- self.assertTrue(not domain_match(".c.com", "a.b.c.com"))
+ self.assertFalse(domain_match(".c.com", "a.b.c.com"))
self.assertTrue(domain_match("example.local", ".local"))
- self.assertTrue(not domain_match("blah.blah", ""))
- self.assertTrue(not domain_match("", ".rhubarb.rhubarb"))
+ self.assertFalse(domain_match("blah.blah", ""))
+ self.assertFalse(domain_match("", ".rhubarb.rhubarb"))
self.assertTrue(domain_match("", ""))
self.assertTrue(user_domain_match("acme.com", "acme.com"))
- self.assertTrue(not user_domain_match("acme.com", ".acme.com"))
+ self.assertFalse(user_domain_match("acme.com", ".acme.com"))
self.assertTrue(user_domain_match("rhubarb.acme.com", ".acme.com"))
self.assertTrue(user_domain_match("www.rhubarb.acme.com", ".acme.com"))
self.assertTrue(user_domain_match("x.y.com", "x.Y.com"))
self.assertTrue(user_domain_match("x.y.com", ".Y.com"))
- self.assertTrue(not user_domain_match("x.y.com", "Y.com"))
+ self.assertFalse(user_domain_match("x.y.com", "Y.com"))
self.assertTrue(user_domain_match("y.com", "Y.com"))
- self.assertTrue(not user_domain_match(".y.com", "Y.com"))
+ self.assertFalse(user_domain_match(".y.com", "Y.com"))
self.assertTrue(user_domain_match(".y.com", ".Y.com"))
self.assertTrue(user_domain_match("x.y.com", ".com"))
- self.assertTrue(not user_domain_match("x.y.com", "com"))
- self.assertTrue(not user_domain_match("x.y.com", "m"))
- self.assertTrue(not user_domain_match("x.y.com", ".m"))
- self.assertTrue(not user_domain_match("x.y.com", ""))
- self.assertTrue(not user_domain_match("x.y.com", "."))
+ self.assertFalse(user_domain_match("x.y.com", "com"))
+ self.assertFalse(user_domain_match("x.y.com", "m"))
+ self.assertFalse(user_domain_match("x.y.com", ".m"))
+ self.assertFalse(user_domain_match("x.y.com", ""))
+ self.assertFalse(user_domain_match("x.y.com", "."))
self.assertTrue(user_domain_match("192.168.1.1", "192.168.1.1"))
# not both HDNs, so must string-compare equal to match
- self.assertTrue(not user_domain_match("192.168.1.1", ".168.1.1"))
- self.assertTrue(not user_domain_match("192.168.1.1", "."))
+ self.assertFalse(user_domain_match("192.168.1.1", ".168.1.1"))
+ self.assertFalse(user_domain_match("192.168.1.1", "."))
# empty string is a special case
- self.assertTrue(not user_domain_match("192.168.1.1", ""))
+ self.assertFalse(user_domain_match("192.168.1.1", ""))
def test_wrong_domain(self):
# Cookies whose effective request-host name does not domain-match the
@@ -865,7 +863,7 @@ class CookieTests(TestCase):
self.assertEqual(len(c), 2)
# ... and check it doesn't get returned
c.add_cookie_header(req)
- self.assertTrue(not req.has_header("Cookie"))
+ self.assertFalse(req.has_header("Cookie"))
def test_domain_block(self):
from cookielib import CookieJar, DefaultCookiePolicy
@@ -892,8 +890,8 @@ class CookieTests(TestCase):
self.assertEqual(len(c), 1)
req = Request("http://www.roadrunner.net/")
c.add_cookie_header(req)
- self.assertTrue((req.has_header("Cookie") and
- req.has_header("Cookie2")))
+ self.assertTrue(req.has_header("Cookie"))
+ self.assertTrue(req.has_header("Cookie2"))
c.clear()
pol.set_blocked_domains([".acme.com"])
@@ -908,7 +906,7 @@ class CookieTests(TestCase):
self.assertEqual(len(c), 2)
# ... and check it doesn't get returned
c.add_cookie_header(req)
- self.assertTrue(not req.has_header("Cookie"))
+ self.assertFalse(req.has_header("Cookie"))
def test_secure(self):
from cookielib import CookieJar, DefaultCookiePolicy
@@ -928,8 +926,8 @@ class CookieTests(TestCase):
url = "http://www.acme.com/"
int(c, url, "foo1=bar%s%s" % (vs, whitespace))
int(c, url, "foo2=bar%s; secure%s" % (vs, whitespace))
- self.assertTrue(
- not c._cookies["www.acme.com"]["/"]["foo1"].secure,
+ self.assertFalse(
+ c._cookies["www.acme.com"]["/"]["foo1"].secure,
"non-secure cookie registered secure")
self.assertTrue(
c._cookies["www.acme.com"]["/"]["foo2"].secure,
@@ -1011,8 +1009,8 @@ class CookieTests(TestCase):
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1; Port")
h = interact_2965(c, url)
- self.assertTrue(re.search("\$Port([^=]|$)", h),
- "port with no value not returned with no value")
+ self.assertRegexpMatches(h, "\$Port([^=]|$)",
+ "port with no value not returned with no value")
c = CookieJar(pol)
url = "http://foo.bar.com/"
@@ -1038,8 +1036,7 @@ class CookieTests(TestCase):
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
h = interact_2965(c, url)
- self.assertTrue(
- "Comment" not in h,
+ self.assertNotIn("Comment", h,
"Comment or CommentURL cookie-attributes returned to server")
def test_Cookie_iterator(self):
@@ -1128,7 +1125,7 @@ class CookieTests(TestCase):
headers = ["Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000"]
c = cookiejar_from_cookie_headers(headers)
cookie = c._cookies["www.example.com"]["/"]["c"]
- self.assertTrue(cookie.expires is None)
+ self.assertIsNone(cookie.expires)
class LWPCookieTests(TestCase):
@@ -1278,9 +1275,9 @@ class LWPCookieTests(TestCase):
req = Request("http://www.acme.com/ammo")
c.add_cookie_header(req)
- self.assertTrue(re.search(r"PART_NUMBER=RIDING_ROCKET_0023;\s*"
- "PART_NUMBER=ROCKET_LAUNCHER_0001",
- req.get_header("Cookie")))
+ self.assertRegexpMatches(req.get_header("Cookie"),
+ r"PART_NUMBER=RIDING_ROCKET_0023;\s*"
+ "PART_NUMBER=ROCKET_LAUNCHER_0001")
def test_ietf_example_1(self):
from cookielib import CookieJar, DefaultCookiePolicy
@@ -1314,7 +1311,7 @@ class LWPCookieTests(TestCase):
cookie = interact_2965(
c, 'http://www.acme.com/acme/login',
'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
- self.assertTrue(not cookie)
+ self.assertFalse(cookie)
#
# 3. User Agent -> Server
@@ -1336,9 +1333,8 @@ class LWPCookieTests(TestCase):
cookie = interact_2965(c, 'http://www.acme.com/acme/pickitem',
'Part_Number="Rocket_Launcher_0001"; '
'Version="1"; Path="/acme"');
- self.assertTrue(re.search(
- r'^\$Version="?1"?; Customer="?WILE_E_COYOTE"?; \$Path="/acme"$',
- cookie))
+ self.assertRegexpMatches(cookie,
+ r'^\$Version="?1"?; Customer="?WILE_E_COYOTE"?; \$Path="/acme"$')
#
# 5. User Agent -> Server
@@ -1361,11 +1357,11 @@ class LWPCookieTests(TestCase):
cookie = interact_2965(c, "http://www.acme.com/acme/shipping",
'Shipping="FedEx"; Version="1"; Path="/acme"')
- self.assertTrue(re.search(r'^\$Version="?1"?;', cookie))
- self.assertTrue(re.search(r'Part_Number="?Rocket_Launcher_0001"?;'
- '\s*\$Path="\/acme"', cookie))
- self.assertTrue(re.search(r'Customer="?WILE_E_COYOTE"?;\s*\$Path="\/acme"',
- cookie))
+ self.assertRegexpMatches(cookie, r'^\$Version="?1"?;')
+ self.assertRegexpMatches(cookie,
+ r'Part_Number="?Rocket_Launcher_0001"?;\s*\$Path="\/acme"')
+ self.assertRegexpMatches(cookie,
+ r'Customer="?WILE_E_COYOTE"?;\s*\$Path="\/acme"')
#
# 7. User Agent -> Server
@@ -1386,9 +1382,9 @@ class LWPCookieTests(TestCase):
# Transaction is complete.
cookie = interact_2965(c, "http://www.acme.com/acme/process")
- self.assertTrue(
- re.search(r'Shipping="?FedEx"?;\s*\$Path="\/acme"', cookie) and
- "WILE_E_COYOTE" in cookie)
+ self.assertRegexpMatches(cookie,
+ r'Shipping="?FedEx"?;\s*\$Path="\/acme"')
+ self.assertIn("WILE_E_COYOTE", cookie)
#
# The user agent makes a series of requests on the origin server, after
@@ -1437,8 +1433,8 @@ class LWPCookieTests(TestCase):
# than once.
cookie = interact_2965(c, "http://www.acme.com/acme/ammo/...")
- self.assertTrue(
- re.search(r"Riding_Rocket_0023.*Rocket_Launcher_0001", cookie))
+ self.assertRegexpMatches(cookie,
+ r"Riding_Rocket_0023.*Rocket_Launcher_0001")
# A subsequent request by the user agent to the (same) server for a URL of
# the form /acme/parts/ would include the following request header:
@@ -1466,7 +1462,7 @@ class LWPCookieTests(TestCase):
# illegal domain (no embedded dots)
cookie = interact_2965(c, "http://www.acme.com",
'foo=bar; domain=".com"; version=1')
- self.assertTrue(not c)
+ self.assertFalse(c)
# legal domain
cookie = interact_2965(c, "http://www.acme.com",
@@ -1559,11 +1555,12 @@ class LWPCookieTests(TestCase):
c, "http://www.acme.com/foo%2f%25/<<%0anewå/æøå",
'bar=baz; path="/foo/"; version=1');
version_re = re.compile(r'^\$version=\"?1\"?', re.I)
- self.assertTrue("foo=bar" in cookie and version_re.search(cookie))
+ self.assertIn("foo=bar", cookie)
+ self.assertRegexpMatches(cookie, version_re)
cookie = interact_2965(
c, "http://www.acme.com/foo/%25/<<%0anewå/æøå")
- self.assertTrue(not cookie)
+ self.assertFalse(cookie)
# unicode URL doesn't raise exception
cookie = interact_2965(c, u"http://www.acme.com/\xfc")
@@ -1740,13 +1737,12 @@ class LWPCookieTests(TestCase):
key = "%s_after" % cookie.value
counter[key] = counter[key] + 1
- self.assertTrue(not (
- # a permanent cookie got lost accidentally
- counter["perm_after"] != counter["perm_before"] or
+ # a permanent cookie got lost accidentally
+ self.assertEqual(counter["perm_after"], counter["perm_before"])
# a session cookie hasn't been cleared
- counter["session_after"] != 0 or
+ self.assertEqual(counter["session_after"], 0)
# we didn't have session cookies in the first place
- counter["session_before"] == 0))
+ self.assertNotEqual(counter["session_before"], 0)
def test_main(verbose=None):
diff --git a/Lib/test/test_cpickle.py b/Lib/test/test_cpickle.py
index c240669..3bc700b 100644
--- a/Lib/test/test_cpickle.py
+++ b/Lib/test/test_cpickle.py
@@ -1,9 +1,45 @@
-import cPickle, unittest
-from cStringIO import StringIO
-from test.pickletester import AbstractPickleTests, AbstractPickleModuleTests
-from test.pickletester import AbstractPicklerUnpicklerObjectTests
+import cPickle
+import cStringIO
+import io
+import unittest
+from test.pickletester import (AbstractPickleTests,
+ AbstractPickleModuleTests,
+ AbstractPicklerUnpicklerObjectTests,
+ BigmemPickleTests)
from test import test_support
+class cStringIOMixin:
+ output = input = cStringIO.StringIO
+
+ def close(self, f):
+ pass
+
+class BytesIOMixin:
+ output = input = io.BytesIO
+
+ def close(self, f):
+ pass
+
+class FileIOMixin:
+
+ def output(self):
+ return open(test_support.TESTFN, 'wb+')
+
+ def input(self, data):
+ f = open(test_support.TESTFN, 'wb+')
+ try:
+ f.write(data)
+ f.seek(0)
+ return f
+ except:
+ f.close()
+ raise
+
+ def close(self, f):
+ f.close()
+ test_support.unlink(test_support.TESTFN)
+
+
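
The three mixins let the same pickler tests run against a cStringIO buffer, an io.BytesIO buffer, and a real file on disk. The underlying point, that cPickle.Pickler and Unpickler only need a file-like object with write/seek/read, can be seen in isolation (editorial sketch, not part of the patch; the name 'factory' is illustrative):

    import cPickle
    import cStringIO
    import io

    for factory in (cStringIO.StringIO, io.BytesIO):
        f = factory()
        cPickle.Pickler(f, 2).dump({'spam': 1})
        f.seek(0)
        print cPickle.Unpickler(f).load()            # {'spam': 1} in both cases
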
class cPickleTests(AbstractPickleTests, AbstractPickleModuleTests):
def setUp(self):
@@ -16,19 +52,35 @@ class cPickleTests(AbstractPickleTests, AbstractPickleModuleTests):
class cPicklePicklerTests(AbstractPickleTests):
def dumps(self, arg, proto=0):
- f = StringIO()
- p = cPickle.Pickler(f, proto)
- p.dump(arg)
- f.seek(0)
- return f.read()
+ f = self.output()
+ try:
+ p = cPickle.Pickler(f, proto)
+ p.dump(arg)
+ f.seek(0)
+ return f.read()
+ finally:
+ self.close(f)
def loads(self, buf):
- f = StringIO(buf)
- p = cPickle.Unpickler(f)
- return p.load()
+ f = self.input(buf)
+ try:
+ p = cPickle.Unpickler(f)
+ return p.load()
+ finally:
+ self.close(f)
error = cPickle.BadPickleGet
+class cStringIOCPicklerTests(cStringIOMixin, cPicklePicklerTests):
+ pass
+
+class BytesIOCPicklerTests(BytesIOMixin, cPicklePicklerTests):
+ pass
+
+class FileIOCPicklerTests(FileIOMixin, cPicklePicklerTests):
+ pass
+
+
class cPickleListPicklerTests(AbstractPickleTests):
def dumps(self, arg, proto=0):
@@ -37,26 +89,45 @@ class cPickleListPicklerTests(AbstractPickleTests):
return p.getvalue()
def loads(self, *args):
- f = StringIO(args[0])
- p = cPickle.Unpickler(f)
- return p.load()
+ f = self.input(args[0])
+ try:
+ p = cPickle.Unpickler(f)
+ return p.load()
+ finally:
+ self.close(f)
error = cPickle.BadPickleGet
+class cStringIOCPicklerListTests(cStringIOMixin, cPickleListPicklerTests):
+ pass
+
+class BytesIOCPicklerListTests(BytesIOMixin, cPickleListPicklerTests):
+ pass
+
+class FileIOCPicklerListTests(FileIOMixin, cPickleListPicklerTests):
+ pass
+
+
class cPickleFastPicklerTests(AbstractPickleTests):
def dumps(self, arg, proto=0):
- f = StringIO()
- p = cPickle.Pickler(f, proto)
- p.fast = 1
- p.dump(arg)
- f.seek(0)
- return f.read()
+ f = self.output()
+ try:
+ p = cPickle.Pickler(f, proto)
+ p.fast = 1
+ p.dump(arg)
+ f.seek(0)
+ return f.read()
+ finally:
+ self.close(f)
def loads(self, *args):
- f = StringIO(args[0])
- p = cPickle.Unpickler(f)
- return p.load()
+ f = self.input(args[0])
+ try:
+ p = cPickle.Unpickler(f)
+ return p.load()
+ finally:
+ self.close(f)
error = cPickle.BadPickleGet
@@ -96,11 +167,31 @@ class cPickleFastPicklerTests(AbstractPickleTests):
b = self.loads(self.dumps(a))
self.assertEqual(a, b)
+class cStringIOCPicklerFastTests(cStringIOMixin, cPickleFastPicklerTests):
+ pass
+
+class BytesIOCPicklerFastTests(BytesIOMixin, cPickleFastPicklerTests):
+ pass
+
+class FileIOCPicklerFastTests(FileIOMixin, cPickleFastPicklerTests):
+ pass
+
+
class cPicklePicklerUnpicklerObjectTests(AbstractPicklerUnpicklerObjectTests):
pickler_class = cPickle.Pickler
unpickler_class = cPickle.Unpickler
+class cPickleBigmemPickleTests(BigmemPickleTests):
+
+ def dumps(self, arg, proto=0, fast=0):
+ # Ignore fast
+ return cPickle.dumps(arg, proto)
+
+ def loads(self, buf):
+ # Ignore fast
+ return cPickle.loads(buf)
+
class Node(object):
pass
@@ -128,11 +219,18 @@ class cPickleDeepRecursive(unittest.TestCase):
def test_main():
test_support.run_unittest(
cPickleTests,
- cPicklePicklerTests,
- cPickleListPicklerTests,
- cPickleFastPicklerTests,
+ cStringIOCPicklerTests,
+ BytesIOCPicklerTests,
+ FileIOCPicklerTests,
+ cStringIOCPicklerListTests,
+ BytesIOCPicklerListTests,
+ FileIOCPicklerListTests,
+ cStringIOCPicklerFastTests,
+ BytesIOCPicklerFastTests,
+ FileIOCPicklerFastTests,
cPickleDeepRecursive,
cPicklePicklerUnpicklerObjectTests,
+ cPickleBigmemPickleTests,
)
if __name__ == "__main__":
diff --git a/Lib/test/test_csv.py b/Lib/test/test_csv.py
index 681cfd8..e031170 100644
--- a/Lib/test/test_csv.py
+++ b/Lib/test/test_csv.py
@@ -243,6 +243,15 @@ class Test_Csv(unittest.TestCase):
self.assertRaises(csv.Error, self._read_test, ['a,b\nc,d'], [])
self.assertRaises(csv.Error, self._read_test, ['a,b\r\nc,d'], [])
+ def test_read_eof(self):
+ self._read_test(['a,"'], [['a', '']])
+ self._read_test(['"a'], [['a']])
+ self._read_test(['^'], [['\n']], escapechar='^')
+ self.assertRaises(csv.Error, self._read_test, ['a,"'], [], strict=True)
+ self.assertRaises(csv.Error, self._read_test, ['"a'], [], strict=True)
+ self.assertRaises(csv.Error, self._read_test,
+ ['^'], [], escapechar='^', strict=True)
+
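
test_read_eof covers input that ends inside a quoted field or right after an escape character: a lenient reader returns what it has, a strict one raises csv.Error. A standalone sketch (not part of the patch):

    import csv

    print list(csv.reader(['a,"']))                 # [['a', '']] with the default dialect
    try:
        list(csv.reader(['a,"'], strict=True))
    except csv.Error as e:
        print 'strict reader raised:', e
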
def test_read_escape(self):
self._read_test(['a,\\b,c'], [['a', 'b', 'c']], escapechar='\\')
self._read_test(['a,b\\,c'], [['a', 'b,c']], escapechar='\\')
@@ -621,6 +630,23 @@ class TestDictFields(unittest.TestCase):
fileobj = StringIO()
self.assertRaises(TypeError, csv.DictWriter, fileobj)
+ def test_write_fields_not_in_fieldnames(self):
+ fd, name = tempfile.mkstemp()
+ fileobj = os.fdopen(fd, "w+b")
+ try:
+ writer = csv.DictWriter(fileobj, fieldnames = ["f1", "f2", "f3"])
+ # Of special note is the non-string key (issue 19449)
+ with self.assertRaises(ValueError) as cx:
+ writer.writerow({"f4": 10, "f2": "spam", 1: "abc"})
+ exception = str(cx.exception)
+ self.assertIn("fieldnames", exception)
+ self.assertIn("'f4'", exception)
+ self.assertNotIn("'f2'", exception)
+ self.assertIn("1", exception)
+ finally:
+ fileobj.close()
+ os.unlink(name)
+
def test_read_dict_fields(self):
fd, name = tempfile.mkstemp()
fileobj = os.fdopen(fd, "w+b")
@@ -844,6 +870,7 @@ class TestDialectValidity(unittest.TestCase):
lineterminator = '\r\n'
quoting = csv.QUOTE_NONE
d = mydialect()
+ self.assertEqual(d.quoting, csv.QUOTE_NONE)
mydialect.quoting = None
self.assertRaises(csv.Error, mydialect)
@@ -852,12 +879,21 @@ class TestDialectValidity(unittest.TestCase):
mydialect.quoting = csv.QUOTE_ALL
mydialect.quotechar = '"'
d = mydialect()
+ self.assertEqual(d.quoting, csv.QUOTE_ALL)
+ self.assertEqual(d.quotechar, '"')
+ self.assertTrue(d.doublequote)
mydialect.quotechar = "''"
- self.assertRaises(csv.Error, mydialect)
+ with self.assertRaises(csv.Error) as cm:
+ mydialect()
+ self.assertEqual(str(cm.exception),
+ '"quotechar" must be an 1-character string')
mydialect.quotechar = 4
- self.assertRaises(csv.Error, mydialect)
+ with self.assertRaises(csv.Error) as cm:
+ mydialect()
+ self.assertEqual(str(cm.exception),
+ '"quotechar" must be string, not int')
def test_delimiter(self):
class mydialect(csv.Dialect):
@@ -868,12 +904,31 @@ class TestDialectValidity(unittest.TestCase):
lineterminator = '\r\n'
quoting = csv.QUOTE_NONE
d = mydialect()
+ self.assertEqual(d.delimiter, ";")
mydialect.delimiter = ":::"
- self.assertRaises(csv.Error, mydialect)
+ with self.assertRaises(csv.Error) as cm:
+ mydialect()
+ self.assertEqual(str(cm.exception),
+ '"delimiter" must be an 1-character string')
+
+ mydialect.delimiter = ""
+ with self.assertRaises(csv.Error) as cm:
+ mydialect()
+ self.assertEqual(str(cm.exception),
+ '"delimiter" must be an 1-character string')
+
+ mydialect.delimiter = u","
+ with self.assertRaises(csv.Error) as cm:
+ mydialect()
+ self.assertEqual(str(cm.exception),
+ '"delimiter" must be string, not unicode')
mydialect.delimiter = 4
- self.assertRaises(csv.Error, mydialect)
+ with self.assertRaises(csv.Error) as cm:
+ mydialect()
+ self.assertEqual(str(cm.exception),
+ '"delimiter" must be string, not int')
def test_lineterminator(self):
class mydialect(csv.Dialect):
@@ -884,12 +939,17 @@ class TestDialectValidity(unittest.TestCase):
lineterminator = '\r\n'
quoting = csv.QUOTE_NONE
d = mydialect()
+ self.assertEqual(d.lineterminator, '\r\n')
mydialect.lineterminator = ":::"
d = mydialect()
+ self.assertEqual(d.lineterminator, ":::")
mydialect.lineterminator = 4
- self.assertRaises(csv.Error, mydialect)
+ with self.assertRaises(csv.Error) as cm:
+ mydialect()
+ self.assertEqual(str(cm.exception),
+ '"lineterminator" must be a string')
class TestSniffer(unittest.TestCase):
@@ -905,7 +965,7 @@ Stonecutters Seafood and Chop House, Lemont, IL, 12/19/02, Week Back
'Tommy''s Place':'Blue Island':'IL':'12/28/02':'Blue Sunday/White Crow'
'Stonecutters ''Seafood'' and Chop House':'Lemont':'IL':'12/19/02':'Week Back'
"""
- header = '''\
+ header1 = '''\
"venue","city","state","date","performers"
'''
sample3 = '''\
@@ -924,10 +984,35 @@ Stonecutters Seafood and Chop House, Lemont, IL, 12/19/02, Week Back
sample6 = "a|b|c\r\nd|e|f\r\n"
sample7 = "'a'|'b'|'c'\r\n'd'|e|f\r\n"
+# Issue 18155: Use a delimiter that is a regex special character:
+
+ header2 = '''\
+"venue"+"city"+"state"+"date"+"performers"
+'''
+ sample8 = """\
+Harry's+ Arlington Heights+ IL+ 2/1/03+ Kimi Hayes
+Shark City+ Glendale Heights+ IL+ 12/28/02+ Prezence
+Tommy's Place+ Blue Island+ IL+ 12/28/02+ Blue Sunday/White Crow
+Stonecutters Seafood and Chop House+ Lemont+ IL+ 12/19/02+ Week Back
+"""
+ sample9 = """\
+'Harry''s'+ Arlington Heights'+ 'IL'+ '2/1/03'+ 'Kimi Hayes'
+'Shark City'+ Glendale Heights'+' IL'+ '12/28/02'+ 'Prezence'
+'Tommy''s Place'+ Blue Island'+ 'IL'+ '12/28/02'+ 'Blue Sunday/White Crow'
+'Stonecutters ''Seafood'' and Chop House'+ 'Lemont'+ 'IL'+ '12/19/02'+ 'Week Back'
+"""
+
def test_has_header(self):
sniffer = csv.Sniffer()
self.assertEqual(sniffer.has_header(self.sample1), False)
- self.assertEqual(sniffer.has_header(self.header+self.sample1), True)
+ self.assertEqual(sniffer.has_header(self.header1 + self.sample1),
+ True)
+
+ def test_has_header_regex_special_delimiter(self):
+ sniffer = csv.Sniffer()
+ self.assertEqual(sniffer.has_header(self.sample8), False)
+ self.assertEqual(sniffer.has_header(self.header2 + self.sample8),
+ True)
def test_sniff(self):
sniffer = csv.Sniffer()
@@ -961,86 +1046,96 @@ Stonecutters Seafood and Chop House, Lemont, IL, 12/19/02, Week Back
dialect = sniffer.sniff(self.sample7)
self.assertEqual(dialect.delimiter, "|")
self.assertEqual(dialect.quotechar, "'")
+ dialect = sniffer.sniff(self.sample8)
+ self.assertEqual(dialect.delimiter, '+')
+ dialect = sniffer.sniff(self.sample9)
+ self.assertEqual(dialect.delimiter, '+')
+ self.assertEqual(dialect.quotechar, "'")
def test_doublequote(self):
sniffer = csv.Sniffer()
- dialect = sniffer.sniff(self.header)
+ dialect = sniffer.sniff(self.header1)
+ self.assertFalse(dialect.doublequote)
+ dialect = sniffer.sniff(self.header2)
self.assertFalse(dialect.doublequote)
dialect = sniffer.sniff(self.sample2)
self.assertTrue(dialect.doublequote)
+ dialect = sniffer.sniff(self.sample8)
+ self.assertFalse(dialect.doublequote)
+ dialect = sniffer.sniff(self.sample9)
+ self.assertTrue(dialect.doublequote)
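
Issue 18155 concerns samples whose delimiter ('+') is also a regex metacharacter, which previously confused the Sniffer's heuristics. A standalone sketch using the same sample data (not part of the patch):

    import csv

    sample = ("Harry's+ Arlington Heights+ IL+ 2/1/03+ Kimi Hayes\n"
              "Shark City+ Glendale Heights+ IL+ 12/28/02+ Prezence\n"
              "Tommy's Place+ Blue Island+ IL+ 12/28/02+ Blue Sunday/White Crow\n"
              "Stonecutters Seafood and Chop House+ Lemont+ IL+ 12/19/02+ Week Back\n")
    print csv.Sniffer().sniff(sample).delimiter     # '+'
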
-if not hasattr(sys, "gettotalrefcount"):
- if test_support.verbose: print "*** skipping leakage tests ***"
-else:
- class NUL:
- def write(s, *args):
- pass
- writelines = write
-
- class TestLeaks(unittest.TestCase):
- def test_create_read(self):
- delta = 0
- lastrc = sys.gettotalrefcount()
- for i in xrange(20):
- gc.collect()
- self.assertEqual(gc.garbage, [])
- rc = sys.gettotalrefcount()
- csv.reader(["a,b,c\r\n"])
- csv.reader(["a,b,c\r\n"])
- csv.reader(["a,b,c\r\n"])
- delta = rc-lastrc
- lastrc = rc
- # if csv.reader() leaks, last delta should be 3 or more
- self.assertEqual(delta < 3, True)
-
- def test_create_write(self):
- delta = 0
- lastrc = sys.gettotalrefcount()
- s = NUL()
- for i in xrange(20):
- gc.collect()
- self.assertEqual(gc.garbage, [])
- rc = sys.gettotalrefcount()
- csv.writer(s)
- csv.writer(s)
- csv.writer(s)
- delta = rc-lastrc
- lastrc = rc
- # if csv.writer() leaks, last delta should be 3 or more
- self.assertEqual(delta < 3, True)
-
- def test_read(self):
- delta = 0
- rows = ["a,b,c\r\n"]*5
- lastrc = sys.gettotalrefcount()
- for i in xrange(20):
- gc.collect()
- self.assertEqual(gc.garbage, [])
- rc = sys.gettotalrefcount()
- rdr = csv.reader(rows)
- for row in rdr:
- pass
- delta = rc-lastrc
- lastrc = rc
- # if reader leaks during read, delta should be 5 or more
- self.assertEqual(delta < 5, True)
-
- def test_write(self):
- delta = 0
- rows = [[1,2,3]]*5
- s = NUL()
- lastrc = sys.gettotalrefcount()
- for i in xrange(20):
- gc.collect()
- self.assertEqual(gc.garbage, [])
- rc = sys.gettotalrefcount()
- writer = csv.writer(s)
- for row in rows:
- writer.writerow(row)
- delta = rc-lastrc
- lastrc = rc
- # if writer leaks during write, last delta should be 5 or more
- self.assertEqual(delta < 5, True)
+class NUL:
+ def write(s, *args):
+ pass
+ writelines = write
+
+@unittest.skipUnless(hasattr(sys, "gettotalrefcount"),
+ 'requires sys.gettotalrefcount()')
+class TestLeaks(unittest.TestCase):
+ def test_create_read(self):
+ delta = 0
+ lastrc = sys.gettotalrefcount()
+ for i in xrange(20):
+ gc.collect()
+ self.assertEqual(gc.garbage, [])
+ rc = sys.gettotalrefcount()
+ csv.reader(["a,b,c\r\n"])
+ csv.reader(["a,b,c\r\n"])
+ csv.reader(["a,b,c\r\n"])
+ delta = rc-lastrc
+ lastrc = rc
+ # if csv.reader() leaks, last delta should be 3 or more
+ self.assertEqual(delta < 3, True)
+
+ def test_create_write(self):
+ delta = 0
+ lastrc = sys.gettotalrefcount()
+ s = NUL()
+ for i in xrange(20):
+ gc.collect()
+ self.assertEqual(gc.garbage, [])
+ rc = sys.gettotalrefcount()
+ csv.writer(s)
+ csv.writer(s)
+ csv.writer(s)
+ delta = rc-lastrc
+ lastrc = rc
+ # if csv.writer() leaks, last delta should be 3 or more
+ self.assertEqual(delta < 3, True)
+
+ def test_read(self):
+ delta = 0
+ rows = ["a,b,c\r\n"]*5
+ lastrc = sys.gettotalrefcount()
+ for i in xrange(20):
+ gc.collect()
+ self.assertEqual(gc.garbage, [])
+ rc = sys.gettotalrefcount()
+ rdr = csv.reader(rows)
+ for row in rdr:
+ pass
+ delta = rc-lastrc
+ lastrc = rc
+ # if reader leaks during read, delta should be 5 or more
+ self.assertEqual(delta < 5, True)
+
+ def test_write(self):
+ delta = 0
+ rows = [[1,2,3]]*5
+ s = NUL()
+ lastrc = sys.gettotalrefcount()
+ for i in xrange(20):
+ gc.collect()
+ self.assertEqual(gc.garbage, [])
+ rc = sys.gettotalrefcount()
+ writer = csv.writer(s)
+ for row in rows:
+ writer.writerow(row)
+ delta = rc-lastrc
+ lastrc = rc
+ # if writer leaks during write, last delta should be 5 or more
+ self.assertEqual(delta < 5, True)
# commented out for now - csv module doesn't yet support Unicode
## class TestUnicode(unittest.TestCase):
diff --git a/Lib/test/test_curses.py b/Lib/test/test_curses.py
index fa0d469..fcf9618 100644
--- a/Lib/test/test_curses.py
+++ b/Lib/test/test_curses.py
@@ -250,6 +250,26 @@ def test_userptr_without_set(stdscr):
except curses.panel.error:
pass
+def test_userptr_memory_leak(stdscr):
+ w = curses.newwin(10, 10)
+ p = curses.panel.new_panel(w)
+ obj = object()
+ nrefs = sys.getrefcount(obj)
+ for i in range(100):
+ p.set_userptr(obj)
+
+ p.set_userptr(None)
+ if sys.getrefcount(obj) != nrefs:
+ raise RuntimeError, "set_userptr leaked references"
+
+def test_userptr_segfault(stdscr):
+ panel = curses.panel.new_panel(stdscr)
+ class A:
+ def __del__(self):
+ panel.set_userptr(None)
+ panel.set_userptr(A())
+ panel.set_userptr(None)
+
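
test_userptr_memory_leak uses the refcount-delta technique: record sys.getrefcount(obj) before, perform the operation repeatedly, and compare the count afterwards. The same idea in a curses-free sketch (editorial, not part of the patch):

    import sys

    obj = object()
    before = sys.getrefcount(obj)
    holder = None
    for i in range(100):
        holder = [obj]           # each iteration frees the previous list again
    after = sys.getrefcount(obj)
    print after - before         # 1 here; a delta that grows with the loop count means a leak
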
def test_resize_term(stdscr):
if hasattr(curses, 'resizeterm'):
lines, cols = curses.LINES, curses.COLS
@@ -268,6 +288,8 @@ def main(stdscr):
module_funcs(stdscr)
window_funcs(stdscr)
test_userptr_without_set(stdscr)
+ test_userptr_memory_leak(stdscr)
+ test_userptr_segfault(stdscr)
test_resize_term(stdscr)
test_issue6243(stdscr)
finally:
diff --git a/Lib/test/test_datetime.py b/Lib/test/test_datetime.py
index d246d60..7caa408 100644
--- a/Lib/test/test_datetime.py
+++ b/Lib/test/test_datetime.py
@@ -101,11 +101,11 @@ class TestTZInfo(unittest.TestCase):
# carry no data), but they need to be picklable anyway else
# concrete subclasses can't be pickled.
orig = tzinfo.__new__(tzinfo)
- self.assertTrue(type(orig) is tzinfo)
+ self.assertIs(type(orig), tzinfo)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
- self.assertTrue(type(derived) is tzinfo)
+ self.assertIs(type(derived), tzinfo)
def test_pickling_subclass(self):
# Make sure we can pickle/unpickle an instance of a subclass.
@@ -124,7 +124,7 @@ class TestTZInfo(unittest.TestCase):
self.assertEqual(derived.tzname(None), 'cookie')
#############################################################################
-# Base clase for testing a particular aspect of timedelta, time, date and
+# Base class for testing a particular aspect of timedelta, time, date and
# datetime comparisons.
class HarmlessMixedComparison:
@@ -328,9 +328,9 @@ class TestTimeDelta(HarmlessMixedComparison, unittest.TestCase):
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
- self.assertTrue(not t1 != t2)
- self.assertTrue(not t1 < t2)
- self.assertTrue(not t1 > t2)
+ self.assertFalse(t1 != t2)
+ self.assertFalse(t1 < t2)
+ self.assertFalse(t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
@@ -342,12 +342,12 @@ class TestTimeDelta(HarmlessMixedComparison, unittest.TestCase):
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
- self.assertTrue(not t1 == t2)
- self.assertTrue(not t2 == t1)
- self.assertTrue(not t1 > t2)
- self.assertTrue(not t2 < t1)
- self.assertTrue(not t1 >= t2)
- self.assertTrue(not t2 <= t1)
+ self.assertFalse(t1 == t2)
+ self.assertFalse(t2 == t1)
+ self.assertFalse(t1 > t2)
+ self.assertFalse(t2 < t1)
+ self.assertFalse(t1 >= t2)
+ self.assertFalse(t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
@@ -459,7 +459,7 @@ class TestTimeDelta(HarmlessMixedComparison, unittest.TestCase):
self.assertTrue(timedelta(0, 1))
self.assertTrue(timedelta(0, 0, 1))
self.assertTrue(timedelta(microseconds=1))
- self.assertTrue(not timedelta(0))
+ self.assertFalse(timedelta(0))
def test_subclass_timedelta(self):
@@ -475,17 +475,17 @@ class TestTimeDelta(HarmlessMixedComparison, unittest.TestCase):
return round(sum)
t1 = T(days=1)
- self.assertTrue(type(t1) is T)
+ self.assertIs(type(t1), T)
self.assertEqual(t1.as_hours(), 24)
t2 = T(days=-1, seconds=-3600)
- self.assertTrue(type(t2) is T)
+ self.assertIs(type(t2), T)
self.assertEqual(t2.as_hours(), -25)
t3 = t1 + t2
- self.assertTrue(type(t3) is timedelta)
+ self.assertIs(type(t3), timedelta)
t4 = T.from_td(t3)
- self.assertTrue(type(t4) is T)
+ self.assertIs(type(t4), T)
self.assertEqual(t3.days, t4.days)
self.assertEqual(t3.seconds, t4.seconds)
self.assertEqual(t3.microseconds, t4.microseconds)
@@ -783,8 +783,9 @@ class TestDate(HarmlessMixedComparison, unittest.TestCase):
# It worked or it didn't. If it didn't, assume it's reason #2, and
# let the test pass if they're within half a second of each other.
- self.assertTrue(today == todayagain or
- abs(todayagain - today) < timedelta(seconds=0.5))
+ if today != todayagain:
+ self.assertAlmostEqual(todayagain, today,
+ delta=timedelta(seconds=0.5))
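
assertAlmostEqual with the delta keyword (new in 2.7) only needs subtraction, abs() and comparison, so it works directly on datetime values, as the rewritten test_today shows. A standalone sketch (not part of the patch):

    import unittest
    from datetime import datetime, timedelta

    class Demo(unittest.TestCase):
        def test_close_timestamps(self):
            a = datetime.now()
            b = a + timedelta(microseconds=100)
            self.assertAlmostEqual(a, b, delta=timedelta(seconds=0.5))

    if __name__ == '__main__':
        unittest.main()
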
def test_weekday(self):
for i in range(7):
@@ -974,9 +975,9 @@ class TestDate(HarmlessMixedComparison, unittest.TestCase):
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
- self.assertTrue(not t1 != t2)
- self.assertTrue(not t1 < t2)
- self.assertTrue(not t1 > t2)
+ self.assertFalse(t1 != t2)
+ self.assertFalse(t1 < t2)
+ self.assertFalse(t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
@@ -988,12 +989,12 @@ class TestDate(HarmlessMixedComparison, unittest.TestCase):
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
- self.assertTrue(not t1 == t2)
- self.assertTrue(not t2 == t1)
- self.assertTrue(not t1 > t2)
- self.assertTrue(not t2 < t1)
- self.assertTrue(not t1 >= t2)
- self.assertTrue(not t2 <= t1)
+ self.assertFalse(t1 == t2)
+ self.assertFalse(t2 == t1)
+ self.assertFalse(t1 > t2)
+ self.assertFalse(t2 < t1)
+ self.assertFalse(t1 >= t2)
+ self.assertFalse(t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
@@ -1444,9 +1445,9 @@ class TestDateTime(TestDate):
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
- self.assertTrue(not t1 != t2)
- self.assertTrue(not t1 < t2)
- self.assertTrue(not t1 > t2)
+ self.assertFalse(t1 != t2)
+ self.assertFalse(t1 < t2)
+ self.assertFalse(t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
@@ -1460,12 +1461,12 @@ class TestDateTime(TestDate):
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
- self.assertTrue(not t1 == t2)
- self.assertTrue(not t2 == t1)
- self.assertTrue(not t1 > t2)
- self.assertTrue(not t2 < t1)
- self.assertTrue(not t1 >= t2)
- self.assertTrue(not t2 <= t1)
+ self.assertFalse(t1 == t2)
+ self.assertFalse(t2 == t1)
+ self.assertFalse(t1 > t2)
+ self.assertFalse(t2 < t1)
+ self.assertFalse(t1 >= t2)
+ self.assertFalse(t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
@@ -1541,7 +1542,7 @@ class TestDateTime(TestDate):
if abs(from_timestamp - from_now) <= tolerance:
break
# Else try again a few times.
- self.assertTrue(abs(from_timestamp - from_now) <= tolerance)
+ self.assertLessEqual(abs(from_timestamp - from_now), tolerance)
def test_strptime(self):
import _strptime
@@ -1727,9 +1728,9 @@ class TestTime(HarmlessMixedComparison, unittest.TestCase):
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
- self.assertTrue(not t1 != t2)
- self.assertTrue(not t1 < t2)
- self.assertTrue(not t1 > t2)
+ self.assertFalse(t1 != t2)
+ self.assertFalse(t1 < t2)
+ self.assertFalse(t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
@@ -1743,12 +1744,12 @@ class TestTime(HarmlessMixedComparison, unittest.TestCase):
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
- self.assertTrue(not t1 == t2)
- self.assertTrue(not t2 == t1)
- self.assertTrue(not t1 > t2)
- self.assertTrue(not t2 < t1)
- self.assertTrue(not t1 >= t2)
- self.assertTrue(not t2 <= t1)
+ self.assertFalse(t1 == t2)
+ self.assertFalse(t2 == t1)
+ self.assertFalse(t1 > t2)
+ self.assertFalse(t2 < t1)
+ self.assertFalse(t1 >= t2)
+ self.assertFalse(t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
@@ -1928,8 +1929,8 @@ class TestTime(HarmlessMixedComparison, unittest.TestCase):
self.assertTrue(cls(0, 1))
self.assertTrue(cls(0, 0, 1))
self.assertTrue(cls(0, 0, 0, 1))
- self.assertTrue(not cls(0))
- self.assertTrue(not cls())
+ self.assertFalse(cls(0))
+ self.assertFalse(cls())
def test_replace(self):
cls = self.theclass
@@ -2026,7 +2027,7 @@ class TZInfoBase:
def utcoffset(self, dt): pass
b = BetterTry()
t = cls(1, 1, 1, tzinfo=b)
- self.assertTrue(t.tzinfo is b)
+ self.assertIs(t.tzinfo, b)
def test_utc_offset_out_of_bounds(self):
class Edgy(tzinfo):
@@ -2065,9 +2066,9 @@ class TZInfoBase:
for t in (cls(1, 1, 1),
cls(1, 1, 1, tzinfo=None),
cls(1, 1, 1, tzinfo=C1())):
- self.assertTrue(t.utcoffset() is None)
- self.assertTrue(t.dst() is None)
- self.assertTrue(t.tzname() is None)
+ self.assertIsNone(t.utcoffset())
+ self.assertIsNone(t.dst())
+ self.assertIsNone(t.tzname())
class C3(tzinfo):
def utcoffset(self, dt): return timedelta(minutes=-1439)
@@ -2161,7 +2162,7 @@ class TestTimeTZ(TestTime, TZInfoBase, unittest.TestCase):
self.assertEqual(t.minute, 0)
self.assertEqual(t.second, 0)
self.assertEqual(t.microsecond, 0)
- self.assertTrue(t.tzinfo is None)
+ self.assertIsNone(t.tzinfo)
def test_zones(self):
est = FixedOffset(-300, "EST", 1)
@@ -2176,25 +2177,25 @@ class TestTimeTZ(TestTime, TZInfoBase, unittest.TestCase):
self.assertEqual(t1.tzinfo, est)
self.assertEqual(t2.tzinfo, utc)
self.assertEqual(t3.tzinfo, met)
- self.assertTrue(t4.tzinfo is None)
+ self.assertIsNone(t4.tzinfo)
self.assertEqual(t5.tzinfo, utc)
self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
- self.assertTrue(t4.utcoffset() is None)
+ self.assertIsNone(t4.utcoffset())
self.assertRaises(TypeError, t1.utcoffset, "no args")
self.assertEqual(t1.tzname(), "EST")
self.assertEqual(t2.tzname(), "UTC")
self.assertEqual(t3.tzname(), "MET")
- self.assertTrue(t4.tzname() is None)
+ self.assertIsNone(t4.tzname())
self.assertRaises(TypeError, t1.tzname, "no args")
self.assertEqual(t1.dst(), timedelta(minutes=1))
self.assertEqual(t2.dst(), timedelta(minutes=-2))
self.assertEqual(t3.dst(), timedelta(minutes=3))
- self.assertTrue(t4.dst() is None)
+ self.assertIsNone(t4.dst())
self.assertRaises(TypeError, t1.dst, "no args")
self.assertEqual(hash(t1), hash(t2))
@@ -2285,10 +2286,10 @@ class TestTimeTZ(TestTime, TZInfoBase, unittest.TestCase):
self.assertTrue(t)
t = cls(5, tzinfo=FixedOffset(300, ""))
- self.assertTrue(not t)
+ self.assertFalse(t)
t = cls(23, 59, tzinfo=FixedOffset(23*60 + 59, ""))
- self.assertTrue(not t)
+ self.assertFalse(t)
# Mostly ensuring this doesn't overflow internally.
t = cls(0, tzinfo=FixedOffset(23*60 + 59, ""))
@@ -2326,13 +2327,13 @@ class TestTimeTZ(TestTime, TZInfoBase, unittest.TestCase):
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
- self.assertTrue(base2.tzinfo is None)
- self.assertTrue(base2.tzname() is None)
+ self.assertIsNone(base2.tzinfo)
+ self.assertIsNone(base2.tzname())
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
- self.assertTrue(base.tzinfo is base3.tzinfo)
+ self.assertIs(base.tzinfo, base3.tzinfo)
# Out of bounds.
base = cls(1)
@@ -2567,7 +2568,7 @@ class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
tz55 = FixedOffset(-330, "west 5:30")
timeaware = now.time().replace(tzinfo=tz55)
nowaware = self.theclass.combine(now.date(), timeaware)
- self.assertTrue(nowaware.tzinfo is tz55)
+ self.assertIs(nowaware.tzinfo, tz55)
self.assertEqual(nowaware.timetz(), timeaware)
# Can't mix aware and non-aware.
@@ -2586,15 +2587,15 @@ class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
# Adding a delta should preserve tzinfo.
delta = timedelta(weeks=1, minutes=12, microseconds=5678)
nowawareplus = nowaware + delta
- self.assertTrue(nowaware.tzinfo is tz55)
+ self.assertIs(nowaware.tzinfo, tz55)
nowawareplus2 = delta + nowaware
- self.assertTrue(nowawareplus2.tzinfo is tz55)
+ self.assertIs(nowawareplus2.tzinfo, tz55)
self.assertEqual(nowawareplus, nowawareplus2)
# that - delta should be what we started with, and that - what we
# started with should be delta.
diff = nowawareplus - delta
- self.assertTrue(diff.tzinfo is tz55)
+ self.assertIs(diff.tzinfo, tz55)
self.assertEqual(nowaware, diff)
self.assertRaises(TypeError, lambda: delta - nowawareplus)
self.assertEqual(nowawareplus - nowaware, delta)
@@ -2603,7 +2604,7 @@ class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
tzr = FixedOffset(random.randrange(-1439, 1440), "randomtimezone")
# Attach it to nowawareplus.
nowawareplus = nowawareplus.replace(tzinfo=tzr)
- self.assertTrue(nowawareplus.tzinfo is tzr)
+ self.assertIs(nowawareplus.tzinfo, tzr)
# Make sure the difference takes the timezone adjustments into account.
got = nowaware - nowawareplus
# Expected: (nowaware base - nowaware offset) -
@@ -2630,7 +2631,7 @@ class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
off42 = FixedOffset(42, "42")
another = meth(off42)
again = meth(tz=off42)
- self.assertTrue(another.tzinfo is again.tzinfo)
+ self.assertIs(another.tzinfo, again.tzinfo)
self.assertEqual(another.utcoffset(), timedelta(minutes=42))
# Bad argument with and w/o naming the keyword.
self.assertRaises(TypeError, meth, 16)
@@ -2647,7 +2648,7 @@ class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
utc = FixedOffset(0, "utc", 0)
for dummy in range(3):
now = datetime.now(weirdtz)
- self.assertTrue(now.tzinfo is weirdtz)
+ self.assertIs(now.tzinfo, weirdtz)
utcnow = datetime.utcnow().replace(tzinfo=utc)
now2 = utcnow.astimezone(weirdtz)
if abs(now - now2) < timedelta(seconds=30):
@@ -2668,7 +2669,7 @@ class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
off42 = FixedOffset(42, "42")
another = meth(ts, off42)
again = meth(ts, tz=off42)
- self.assertTrue(another.tzinfo is again.tzinfo)
+ self.assertIs(another.tzinfo, again.tzinfo)
self.assertEqual(another.utcoffset(), timedelta(minutes=42))
# Bad argument with and w/o naming the keyword.
self.assertRaises(TypeError, meth, ts, 16)
@@ -2862,13 +2863,13 @@ class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
- self.assertTrue(base2.tzinfo is None)
- self.assertTrue(base2.tzname() is None)
+ self.assertIsNone(base2.tzinfo)
+ self.assertIsNone(base2.tzname())
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
- self.assertTrue(base.tzinfo is base3.tzinfo)
+ self.assertIs(base.tzinfo, base3.tzinfo)
# Out of bounds.
base = cls(2000, 2, 29)
@@ -2881,20 +2882,20 @@ class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
fm5h = FixedOffset(-timedelta(hours=5), "m300")
dt = self.theclass.now(tz=f44m)
- self.assertTrue(dt.tzinfo is f44m)
+ self.assertIs(dt.tzinfo, f44m)
# Replacing with degenerate tzinfo raises an exception.
self.assertRaises(ValueError, dt.astimezone, fnone)
# Ditto with None tz.
self.assertRaises(TypeError, dt.astimezone, None)
# Replacing with same tzinfo makes no change.
x = dt.astimezone(dt.tzinfo)
- self.assertTrue(x.tzinfo is f44m)
+ self.assertIs(x.tzinfo, f44m)
self.assertEqual(x.date(), dt.date())
self.assertEqual(x.time(), dt.time())
# Replacing with different tzinfo does adjust.
got = dt.astimezone(fm5h)
- self.assertTrue(got.tzinfo is fm5h)
+ self.assertIs(got.tzinfo, fm5h)
self.assertEqual(got.utcoffset(), timedelta(hours=-5))
expected = dt - dt.utcoffset() # in effect, convert to UTC
expected += fm5h.utcoffset(dt) # and from there to local time
@@ -2902,7 +2903,7 @@ class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
self.assertEqual(got.date(), expected.date())
self.assertEqual(got.time(), expected.time())
self.assertEqual(got.timetz(), expected.timetz())
- self.assertTrue(got.tzinfo is expected.tzinfo)
+ self.assertIs(got.tzinfo, expected.tzinfo)
self.assertEqual(got, expected)
def test_aware_subtract(self):
@@ -3330,8 +3331,8 @@ class Oddballs(unittest.TestCase):
as_datetime = datetime.combine(as_date, time())
self.assertTrue(as_date != as_datetime)
self.assertTrue(as_datetime != as_date)
- self.assertTrue(not as_date == as_datetime)
- self.assertTrue(not as_datetime == as_date)
+ self.assertFalse(as_date == as_datetime)
+ self.assertFalse(as_datetime == as_date)
self.assertRaises(TypeError, lambda: as_date < as_datetime)
self.assertRaises(TypeError, lambda: as_datetime < as_date)
self.assertRaises(TypeError, lambda: as_date <= as_datetime)
@@ -3345,8 +3346,7 @@ class Oddballs(unittest.TestCase):
# projection if use of a date method is forced.
self.assertTrue(as_date.__eq__(as_datetime))
different_day = (as_date.day + 1) % 20 + 1
- self.assertTrue(not as_date.__eq__(as_datetime.replace(day=
- different_day)))
+ self.assertFalse(as_date.__eq__(as_datetime.replace(day=different_day)))
# And date should compare with other subclasses of date. If a
# subclass wants to stop this, it's up to the subclass to do so.
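The datetime hunks above mostly swap bare assertTrue() wrappers for the dedicated assertions that unittest gained in 2.7 (assertIs, assertIsNone, assertFalse), which report the offending values on failure. A minimal standalone sketch of the style, not part of the patch (the class name is made up):

import unittest

class IdentityAssertSketch(unittest.TestCase):
    def test_identity_style(self):
        obj = object()
        alias = obj
        tz = None
        # These report both operands on failure, unlike
        # assertTrue(alias is obj) or assertTrue(tz is None).
        self.assertIs(alias, obj)
        self.assertIsNone(tz)
        self.assertFalse(not obj)

if __name__ == '__main__':
    unittest.main()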
diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py
index a011b2f..0902278 100644
--- a/Lib/test/test_decimal.py
+++ b/Lib/test/test_decimal.py
@@ -223,7 +223,6 @@ class DecimalTest(unittest.TestCase):
global skip_expected
if skip_expected:
raise unittest.SkipTest
- return
with open(file) as f:
for line in f:
line = line.replace('\r\n', '').replace('\n', '')
@@ -234,7 +233,6 @@ class DecimalTest(unittest.TestCase):
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
- return
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
@@ -391,7 +389,6 @@ class DecimalTest(unittest.TestCase):
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertItemsEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
- return
def getexceptions(self):
return [e for e in Signals if self.context.flags[e]]
@@ -834,7 +831,7 @@ class DecimalFormatTest(unittest.TestCase):
try:
from locale import CHAR_MAX
except ImportError:
- return
+ self.skipTest('locale.CHAR_MAX not available')
# Set up some localeconv-like dictionaries
en_US = {
@@ -1196,7 +1193,6 @@ def thfunc1(cls):
cls.assertEqual(test1, Decimal('0.3333333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.3333333333333333333333333333'))
- return
def thfunc2(cls):
d1 = Decimal(1)
@@ -1210,17 +1206,12 @@ def thfunc2(cls):
cls.assertEqual(test1, Decimal('0.3333333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
- return
+@unittest.skipUnless(threading, 'threading required')
class DecimalUseOfContextTest(unittest.TestCase):
'''Unit tests for Use of Context cases in Decimal.'''
- try:
- import threading
- except ImportError:
- threading = None
-
# Take care executing this test from IDLE, there's an issue in threading
# that hangs IDLE and I couldn't find it
@@ -1239,10 +1230,6 @@ class DecimalUseOfContextTest(unittest.TestCase):
self.finish1.wait()
self.finish2.wait()
- return
-
- if threading is None:
- del test_threading
class DecimalUsabilityTest(unittest.TestCase):
@@ -1448,6 +1435,18 @@ class DecimalUsabilityTest(unittest.TestCase):
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
+ def test_nan_to_float(self):
+ # Test conversions of decimal NANs to float.
+ # See http://bugs.python.org/issue15544
+ for s in ('nan', 'nan1234', '-nan', '-nan2468'):
+ f = float(Decimal(s))
+ self.assertTrue(math.isnan(f))
+
+ def test_snan_to_float(self):
+ for s in ('snan', '-snan', 'snan1357', '-snan1234'):
+ d = Decimal(s)
+ self.assertRaises(ValueError, float, d)
+
def test_eval_round_trip(self):
#with zero
@@ -1528,7 +1527,6 @@ class DecimalUsabilityTest(unittest.TestCase):
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
- return
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
@@ -2273,7 +2271,7 @@ class ContextFlags(unittest.TestCase):
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
-def test_main(arith=False, verbose=None, todo_tests=None, debug=None):
+def test_main(arith=None, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
@@ -2282,7 +2280,7 @@ def test_main(arith=False, verbose=None, todo_tests=None, debug=None):
init()
global TEST_ALL, DEBUG
- TEST_ALL = arith or is_resource_enabled('decimal')
+ TEST_ALL = arith if arith is not None else is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
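The test_decimal changes above replace silent early "return" statements with unittest skips (skipTest, skipUnless), so a missing optional feature shows up as a skipped test instead of a false pass. A small sketch of the pattern, outside the patch and with illustrative names:

import unittest

try:
    import threading
except ImportError:
    threading = None

@unittest.skipUnless(threading, 'threading required')
class SkipStyleSketch(unittest.TestCase):
    def test_optional_locale_constant(self):
        # Skipping records a reason in the test report; a bare
        # "return" here would silently count as a pass.
        try:
            from locale import CHAR_MAX
        except ImportError:
            self.skipTest('locale.CHAR_MAX not available')
        self.assertGreater(CHAR_MAX, 0)

if __name__ == '__main__':
    unittest.main()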
diff --git a/Lib/test/test_deque.py b/Lib/test/test_deque.py
index a0d30f1..595a0c4 100644
--- a/Lib/test/test_deque.py
+++ b/Lib/test/test_deque.py
@@ -6,6 +6,7 @@ import weakref
import copy
import cPickle as pickle
import random
+import struct
BIG = 100000
@@ -517,6 +518,21 @@ class TestBasic(unittest.TestCase):
gc.collect()
self.assertTrue(ref() is None, "Cycle was not collected")
+ check_sizeof = test_support.check_sizeof
+
+ @test_support.cpython_only
+ def test_sizeof(self):
+ BLOCKLEN = 62
+ basesize = test_support.calcobjsize('2P4PlP')
+ blocksize = struct.calcsize('2P%dP' % BLOCKLEN)
+ self.assertEqual(object.__sizeof__(deque()), basesize)
+ check = self.check_sizeof
+ check(deque(), basesize + blocksize)
+ check(deque('a'), basesize + blocksize)
+ check(deque('a' * (BLOCKLEN // 2)), basesize + blocksize)
+ check(deque('a' * (BLOCKLEN // 2 + 1)), basesize + 2 * blocksize)
+ check(deque('a' * (42 * BLOCKLEN)), basesize + 43 * blocksize)
+
class TestVariousIteratorArgs(unittest.TestCase):
def test_constructor(self):
diff --git a/Lib/test/test_descr.py b/Lib/test/test_descr.py
index 964cc5c..6f91842 100644
--- a/Lib/test/test_descr.py
+++ b/Lib/test/test_descr.py
@@ -1,7 +1,9 @@
import __builtin__
+import gc
import sys
import types
import unittest
+import weakref
from copy import deepcopy
from test import test_support
@@ -396,13 +398,21 @@ class OperatorsTest(unittest.TestCase):
class ClassPropertiesAndMethods(unittest.TestCase):
+ def assertHasAttr(self, obj, name):
+ self.assertTrue(hasattr(obj, name),
+ '%r has no attribute %r' % (obj, name))
+
+ def assertNotHasAttr(self, obj, name):
+ self.assertFalse(hasattr(obj, name),
+ '%r has unexpected attribute %r' % (obj, name))
+
def test_python_dicts(self):
# Testing Python subclass of dict...
self.assertTrue(issubclass(dict, dict))
self.assertIsInstance({}, dict)
d = dict()
self.assertEqual(d, {})
- self.assertTrue(d.__class__ is dict)
+ self.assertIs(d.__class__, dict)
self.assertIsInstance(d, dict)
class C(dict):
state = -1
@@ -583,7 +593,7 @@ class ClassPropertiesAndMethods(unittest.TestCase):
def _set_x(self, x):
self.__x = -x
a = A()
- self.assertTrue(not hasattr(a, "x"))
+ self.assertNotHasAttr(a, "x")
a.x = 12
self.assertEqual(a.x, 12)
self.assertEqual(a._A__x, -12)
@@ -932,14 +942,14 @@ order (MRO) for bases """
self.assertEqual(type(a), object)
b = object()
self.assertNotEqual(a, b)
- self.assertFalse(hasattr(a, "foo"))
+ self.assertNotHasAttr(a, "foo")
try:
a.foo = 12
except (AttributeError, TypeError):
pass
else:
self.fail("object() should not allow setting a foo attribute")
- self.assertFalse(hasattr(object(), "__dict__"))
+ self.assertNotHasAttr(object(), "__dict__")
class Cdict(object):
pass
@@ -954,28 +964,28 @@ order (MRO) for bases """
class C0(object):
__slots__ = []
x = C0()
- self.assertFalse(hasattr(x, "__dict__"))
- self.assertFalse(hasattr(x, "foo"))
+ self.assertNotHasAttr(x, "__dict__")
+ self.assertNotHasAttr(x, "foo")
class C1(object):
__slots__ = ['a']
x = C1()
- self.assertFalse(hasattr(x, "__dict__"))
- self.assertFalse(hasattr(x, "a"))
+ self.assertNotHasAttr(x, "__dict__")
+ self.assertNotHasAttr(x, "a")
x.a = 1
self.assertEqual(x.a, 1)
x.a = None
self.assertEqual(x.a, None)
del x.a
- self.assertFalse(hasattr(x, "a"))
+ self.assertNotHasAttr(x, "a")
class C3(object):
__slots__ = ['a', 'b', 'c']
x = C3()
- self.assertFalse(hasattr(x, "__dict__"))
- self.assertFalse(hasattr(x, 'a'))
- self.assertFalse(hasattr(x, 'b'))
- self.assertFalse(hasattr(x, 'c'))
+ self.assertNotHasAttr(x, "__dict__")
+ self.assertNotHasAttr(x, 'a')
+ self.assertNotHasAttr(x, 'b')
+ self.assertNotHasAttr(x, 'c')
x.a = 1
x.b = 2
x.c = 3
@@ -991,8 +1001,8 @@ order (MRO) for bases """
def get(self):
return self.__a
x = C4(5)
- self.assertFalse(hasattr(x, '__dict__'))
- self.assertFalse(hasattr(x, '__a'))
+ self.assertNotHasAttr(x, '__dict__')
+ self.assertNotHasAttr(x, '__a')
self.assertEqual(x.get(), 5)
try:
x.__a = 6
@@ -1049,11 +1059,12 @@ order (MRO) for bases """
c.abc = 5
self.assertEqual(c.abc, 5)
+ def test_unicode_slots(self):
# Test unicode slot names
try:
unicode
except NameError:
- pass
+ self.skipTest('no unicode support')
else:
# Test a single unicode string is not expanded as a sequence.
class C(object):
@@ -1127,7 +1138,6 @@ order (MRO) for bases """
self.assertEqual(Counted.counter, 0)
# Test lookup leaks [SF bug 572567]
- import gc
if hasattr(gc, 'get_objects'):
class G(object):
def __cmp__(self, other):
@@ -1163,16 +1173,16 @@ order (MRO) for bases """
class D(object):
__slots__ = ["__dict__"]
a = D()
- self.assertTrue(hasattr(a, "__dict__"))
- self.assertFalse(hasattr(a, "__weakref__"))
+ self.assertHasAttr(a, "__dict__")
+ self.assertNotHasAttr(a, "__weakref__")
a.foo = 42
self.assertEqual(a.__dict__, {"foo": 42})
class W(object):
__slots__ = ["__weakref__"]
a = W()
- self.assertTrue(hasattr(a, "__weakref__"))
- self.assertFalse(hasattr(a, "__dict__"))
+ self.assertHasAttr(a, "__weakref__")
+ self.assertNotHasAttr(a, "__dict__")
try:
a.foo = 42
except AttributeError:
@@ -1183,16 +1193,16 @@ order (MRO) for bases """
class C1(W, D):
__slots__ = []
a = C1()
- self.assertTrue(hasattr(a, "__dict__"))
- self.assertTrue(hasattr(a, "__weakref__"))
+ self.assertHasAttr(a, "__dict__")
+ self.assertHasAttr(a, "__weakref__")
a.foo = 42
self.assertEqual(a.__dict__, {"foo": 42})
class C2(D, W):
__slots__ = []
a = C2()
- self.assertTrue(hasattr(a, "__dict__"))
- self.assertTrue(hasattr(a, "__weakref__"))
+ self.assertHasAttr(a, "__dict__")
+ self.assertHasAttr(a, "__weakref__")
a.foo = 42
self.assertEqual(a.__dict__, {"foo": 42})
@@ -1240,7 +1250,7 @@ order (MRO) for bases """
class C(object):
pass
a = C()
- self.assertFalse(hasattr(a, "foobar"))
+ self.assertNotHasAttr(a, "foobar")
C.foobar = 2
self.assertEqual(a.foobar, 2)
C.method = lambda self: 42
@@ -1250,7 +1260,7 @@ order (MRO) for bases """
C.__int__ = lambda self: 100
self.assertEqual(int(a), 100)
self.assertEqual(a.foobar, 2)
- self.assertFalse(hasattr(a, "spam"))
+ self.assertNotHasAttr(a, "spam")
def mygetattr(self, name):
if name == "spam":
return "spam"
@@ -1418,6 +1428,22 @@ order (MRO) for bases """
self.assertEqual(x, spam.spamlist)
self.assertEqual(a, a1)
self.assertEqual(d, d1)
+ spam_cm = spam.spamlist.__dict__['classmeth']
+ x2, a2, d2 = spam_cm(spam.spamlist, *a, **d)
+ self.assertEqual(x2, spam.spamlist)
+ self.assertEqual(a2, a1)
+ self.assertEqual(d2, d1)
+ class SubSpam(spam.spamlist): pass
+ x2, a2, d2 = spam_cm(SubSpam, *a, **d)
+ self.assertEqual(x2, SubSpam)
+ self.assertEqual(a2, a1)
+ self.assertEqual(d2, d1)
+ with self.assertRaises(TypeError):
+ spam_cm()
+ with self.assertRaises(TypeError):
+ spam_cm(spam.spamlist())
+ with self.assertRaises(TypeError):
+ spam_cm(list)
def test_staticmethods(self):
# Testing static methods...
@@ -1504,7 +1530,7 @@ order (MRO) for bases """
self.assertEqual(a.x, 10)
self.assertEqual(a.x, 11)
del a.x
- self.assertEqual(hasattr(a, 'x'), 0)
+ self.assertNotHasAttr(a, 'x')
def test_newslots(self):
# Testing __new__ slot override...
@@ -1780,18 +1806,18 @@ order (MRO) for bases """
raise IndexError
c1 = C()
c2 = C()
- self.assertTrue(not not c1) # What?
+ self.assertFalse(not c1)
self.assertNotEqual(id(c1), id(c2))
hash(c1)
hash(c2)
self.assertEqual(cmp(c1, c2), cmp(id(c1), id(c2)))
self.assertEqual(c1, c1)
self.assertTrue(c1 != c2)
- self.assertTrue(not c1 != c1)
- self.assertTrue(not c1 == c2)
+ self.assertFalse(c1 != c1)
+ self.assertFalse(c1 == c2)
# Note that the module name appears in str/repr, and that varies
# depending on whether this test is run standalone or from a framework.
- self.assertTrue(str(c1).find('C object at ') >= 0)
+ self.assertGreaterEqual(str(c1).find('C object at '), 0)
self.assertEqual(str(c1), repr(c1))
self.assertNotIn(-1, c1)
for i in range(10):
@@ -1804,18 +1830,18 @@ order (MRO) for bases """
raise IndexError
d1 = D()
d2 = D()
- self.assertTrue(not not d1)
+ self.assertFalse(not d1)
self.assertNotEqual(id(d1), id(d2))
hash(d1)
hash(d2)
self.assertEqual(cmp(d1, d2), cmp(id(d1), id(d2)))
self.assertEqual(d1, d1)
self.assertNotEqual(d1, d2)
- self.assertTrue(not d1 != d1)
- self.assertTrue(not d1 == d2)
+ self.assertFalse(d1 != d1)
+ self.assertFalse(d1 == d2)
# Note that the module name appears in str/repr, and that varies
# depending on whether this test is run standalone or from a framework.
- self.assertTrue(str(d1).find('D object at ') >= 0)
+ self.assertGreaterEqual(str(d1).find('D object at '), 0)
self.assertEqual(str(d1), repr(d1))
self.assertNotIn(-1, d1)
for i in range(10):
@@ -1845,11 +1871,11 @@ order (MRO) for bases """
p1 = Proxy(1)
p_1 = Proxy(-1)
self.assertFalse(p0)
- self.assertTrue(not not p1)
+ self.assertFalse(not p1)
self.assertEqual(hash(p0), hash(0))
self.assertEqual(p0, p0)
self.assertNotEqual(p0, p1)
- self.assertTrue(not p0 != p0)
+ self.assertFalse(p0 != p0)
self.assertEqual(not p0, p1)
self.assertEqual(cmp(p0, p1), -1)
self.assertEqual(cmp(p0, p0), 0)
@@ -1885,7 +1911,7 @@ order (MRO) for bases """
p1 = DProxy(1)
p_1 = DProxy(-1)
self.assertFalse(p0)
- self.assertTrue(not not p1)
+ self.assertFalse(not p1)
self.assertEqual(hash(p0), hash(0))
self.assertEqual(p0, p0)
self.assertNotEqual(p0, p1)
@@ -1978,7 +2004,7 @@ order (MRO) for bases """
try:
weakref.ref(no)
except TypeError, msg:
- self.assertTrue(str(msg).find("weak reference") >= 0)
+ self.assertIn("weak reference", str(msg))
else:
self.fail("weakref.ref(no) should be illegal")
class Weak(object):
@@ -2002,17 +2028,17 @@ order (MRO) for bases """
del self.__x
x = property(getx, setx, delx, doc="I'm the x property.")
a = C()
- self.assertFalse(hasattr(a, "x"))
+ self.assertNotHasAttr(a, "x")
a.x = 42
self.assertEqual(a._C__x, 42)
self.assertEqual(a.x, 42)
del a.x
- self.assertFalse(hasattr(a, "x"))
- self.assertFalse(hasattr(a, "_C__x"))
+ self.assertNotHasAttr(a, "x")
+ self.assertNotHasAttr(a, "_C__x")
C.x.__set__(a, 100)
self.assertEqual(C.x.__get__(a), 100)
C.x.__delete__(a)
- self.assertFalse(hasattr(a, "x"))
+ self.assertNotHasAttr(a, "x")
raw = C.__dict__['x']
self.assertIsInstance(raw, property)
@@ -2024,9 +2050,9 @@ order (MRO) for bases """
self.assertIn("fdel", attrs)
self.assertEqual(raw.__doc__, "I'm the x property.")
- self.assertTrue(raw.fget is C.__dict__['getx'])
- self.assertTrue(raw.fset is C.__dict__['setx'])
- self.assertTrue(raw.fdel is C.__dict__['delx'])
+ self.assertIs(raw.fget, C.__dict__['getx'])
+ self.assertIs(raw.fset, C.__dict__['setx'])
+ self.assertIs(raw.fdel, C.__dict__['delx'])
for attr in "__doc__", "fget", "fset", "fdel":
try:
@@ -2066,6 +2092,7 @@ order (MRO) for bases """
prop2 = property(fset=setter)
self.assertEqual(prop2.__doc__, None)
+ @test_support.cpython_only
def test_testcapi_no_segfault(self):
# this segfaulted in 2.5b2
try:
@@ -2090,14 +2117,14 @@ order (MRO) for bases """
del self._foo
c = C()
self.assertEqual(C.foo.__doc__, "hello")
- self.assertFalse(hasattr(c, "foo"))
+ self.assertNotHasAttr(c, "foo")
c.foo = -42
- self.assertTrue(hasattr(c, '_foo'))
+ self.assertHasAttr(c, '_foo')
self.assertEqual(c._foo, 42)
self.assertEqual(c.foo, 42)
del c.foo
- self.assertFalse(hasattr(c, '_foo'))
- self.assertFalse(hasattr(c, "foo"))
+ self.assertNotHasAttr(c, '_foo')
+ self.assertNotHasAttr(c, "foo")
class D(C):
@C.foo.deleter
@@ -2483,13 +2510,13 @@ order (MRO) for bases """
a = hexint(12345)
self.assertEqual(a, 12345)
self.assertEqual(int(a), 12345)
- self.assertTrue(int(a).__class__ is int)
+ self.assertIs(int(a).__class__, int)
self.assertEqual(hash(a), hash(12345))
- self.assertTrue((+a).__class__ is int)
- self.assertTrue((a >> 0).__class__ is int)
- self.assertTrue((a << 0).__class__ is int)
- self.assertTrue((hexint(0) << 12).__class__ is int)
- self.assertTrue((hexint(0) >> 12).__class__ is int)
+ self.assertIs((+a).__class__, int)
+ self.assertIs((a >> 0).__class__, int)
+ self.assertIs((a << 0).__class__, int)
+ self.assertIs((hexint(0) << 12).__class__, int)
+ self.assertIs((hexint(0) >> 12).__class__, int)
class octlong(long):
__slots__ = []
@@ -2509,31 +2536,31 @@ order (MRO) for bases """
self.assertEqual(a, 12345L)
self.assertEqual(long(a), 12345L)
self.assertEqual(hash(a), hash(12345L))
- self.assertTrue(long(a).__class__ is long)
- self.assertTrue((+a).__class__ is long)
- self.assertTrue((-a).__class__ is long)
- self.assertTrue((-octlong(0)).__class__ is long)
- self.assertTrue((a >> 0).__class__ is long)
- self.assertTrue((a << 0).__class__ is long)
- self.assertTrue((a - 0).__class__ is long)
- self.assertTrue((a * 1).__class__ is long)
- self.assertTrue((a ** 1).__class__ is long)
- self.assertTrue((a // 1).__class__ is long)
- self.assertTrue((1 * a).__class__ is long)
- self.assertTrue((a | 0).__class__ is long)
- self.assertTrue((a ^ 0).__class__ is long)
- self.assertTrue((a & -1L).__class__ is long)
- self.assertTrue((octlong(0) << 12).__class__ is long)
- self.assertTrue((octlong(0) >> 12).__class__ is long)
- self.assertTrue(abs(octlong(0)).__class__ is long)
+ self.assertIs(long(a).__class__, long)
+ self.assertIs((+a).__class__, long)
+ self.assertIs((-a).__class__, long)
+ self.assertIs((-octlong(0)).__class__, long)
+ self.assertIs((a >> 0).__class__, long)
+ self.assertIs((a << 0).__class__, long)
+ self.assertIs((a - 0).__class__, long)
+ self.assertIs((a * 1).__class__, long)
+ self.assertIs((a ** 1).__class__, long)
+ self.assertIs((a // 1).__class__, long)
+ self.assertIs((1 * a).__class__, long)
+ self.assertIs((a | 0).__class__, long)
+ self.assertIs((a ^ 0).__class__, long)
+ self.assertIs((a & -1L).__class__, long)
+ self.assertIs((octlong(0) << 12).__class__, long)
+ self.assertIs((octlong(0) >> 12).__class__, long)
+ self.assertIs(abs(octlong(0)).__class__, long)
# Because octlong overrides __add__, we can't check the absence of +0
# optimizations using octlong.
class longclone(long):
pass
a = longclone(1)
- self.assertTrue((a + 0).__class__ is long)
- self.assertTrue((0 + a).__class__ is long)
+ self.assertIs((a + 0).__class__, long)
+ self.assertIs((0 + a).__class__, long)
# Check that negative clones don't segfault
a = longclone(-1)
@@ -2550,9 +2577,9 @@ order (MRO) for bases """
a = precfloat(12345)
self.assertEqual(a, 12345.0)
self.assertEqual(float(a), 12345.0)
- self.assertTrue(float(a).__class__ is float)
+ self.assertIs(float(a).__class__, float)
self.assertEqual(hash(a), hash(12345.0))
- self.assertTrue((+a).__class__ is float)
+ self.assertIs((+a).__class__, float)
class madcomplex(complex):
def __repr__(self):
@@ -2600,20 +2627,20 @@ order (MRO) for bases """
self.assertEqual(v, t)
a = madtuple((1,2,3,4,5))
self.assertEqual(tuple(a), (1,2,3,4,5))
- self.assertTrue(tuple(a).__class__ is tuple)
+ self.assertIs(tuple(a).__class__, tuple)
self.assertEqual(hash(a), hash((1,2,3,4,5)))
- self.assertTrue(a[:].__class__ is tuple)
- self.assertTrue((a * 1).__class__ is tuple)
- self.assertTrue((a * 0).__class__ is tuple)
- self.assertTrue((a + ()).__class__ is tuple)
+ self.assertIs(a[:].__class__, tuple)
+ self.assertIs((a * 1).__class__, tuple)
+ self.assertIs((a * 0).__class__, tuple)
+ self.assertIs((a + ()).__class__, tuple)
a = madtuple(())
self.assertEqual(tuple(a), ())
- self.assertTrue(tuple(a).__class__ is tuple)
- self.assertTrue((a + a).__class__ is tuple)
- self.assertTrue((a * 0).__class__ is tuple)
- self.assertTrue((a * 1).__class__ is tuple)
- self.assertTrue((a * 2).__class__ is tuple)
- self.assertTrue(a[:].__class__ is tuple)
+ self.assertIs(tuple(a).__class__, tuple)
+ self.assertIs((a + a).__class__, tuple)
+ self.assertIs((a * 0).__class__, tuple)
+ self.assertIs((a * 1).__class__, tuple)
+ self.assertIs((a * 2).__class__, tuple)
+ self.assertIs(a[:].__class__, tuple)
class madstring(str):
_rev = None
@@ -2635,51 +2662,51 @@ order (MRO) for bases """
self.assertEqual(u, s)
s = madstring("12345")
self.assertEqual(str(s), "12345")
- self.assertTrue(str(s).__class__ is str)
+ self.assertIs(str(s).__class__, str)
base = "\x00" * 5
s = madstring(base)
self.assertEqual(s, base)
self.assertEqual(str(s), base)
- self.assertTrue(str(s).__class__ is str)
+ self.assertIs(str(s).__class__, str)
self.assertEqual(hash(s), hash(base))
self.assertEqual({s: 1}[base], 1)
self.assertEqual({base: 1}[s], 1)
- self.assertTrue((s + "").__class__ is str)
+ self.assertIs((s + "").__class__, str)
self.assertEqual(s + "", base)
- self.assertTrue(("" + s).__class__ is str)
+ self.assertIs(("" + s).__class__, str)
self.assertEqual("" + s, base)
- self.assertTrue((s * 0).__class__ is str)
+ self.assertIs((s * 0).__class__, str)
self.assertEqual(s * 0, "")
- self.assertTrue((s * 1).__class__ is str)
+ self.assertIs((s * 1).__class__, str)
self.assertEqual(s * 1, base)
- self.assertTrue((s * 2).__class__ is str)
+ self.assertIs((s * 2).__class__, str)
self.assertEqual(s * 2, base + base)
- self.assertTrue(s[:].__class__ is str)
+ self.assertIs(s[:].__class__, str)
self.assertEqual(s[:], base)
- self.assertTrue(s[0:0].__class__ is str)
+ self.assertIs(s[0:0].__class__, str)
self.assertEqual(s[0:0], "")
- self.assertTrue(s.strip().__class__ is str)
+ self.assertIs(s.strip().__class__, str)
self.assertEqual(s.strip(), base)
- self.assertTrue(s.lstrip().__class__ is str)
+ self.assertIs(s.lstrip().__class__, str)
self.assertEqual(s.lstrip(), base)
- self.assertTrue(s.rstrip().__class__ is str)
+ self.assertIs(s.rstrip().__class__, str)
self.assertEqual(s.rstrip(), base)
identitytab = ''.join([chr(i) for i in range(256)])
- self.assertTrue(s.translate(identitytab).__class__ is str)
+ self.assertIs(s.translate(identitytab).__class__, str)
self.assertEqual(s.translate(identitytab), base)
- self.assertTrue(s.translate(identitytab, "x").__class__ is str)
+ self.assertIs(s.translate(identitytab, "x").__class__, str)
self.assertEqual(s.translate(identitytab, "x"), base)
self.assertEqual(s.translate(identitytab, "\x00"), "")
- self.assertTrue(s.replace("x", "x").__class__ is str)
+ self.assertIs(s.replace("x", "x").__class__, str)
self.assertEqual(s.replace("x", "x"), base)
- self.assertTrue(s.ljust(len(s)).__class__ is str)
+ self.assertIs(s.ljust(len(s)).__class__, str)
self.assertEqual(s.ljust(len(s)), base)
- self.assertTrue(s.rjust(len(s)).__class__ is str)
+ self.assertIs(s.rjust(len(s)).__class__, str)
self.assertEqual(s.rjust(len(s)), base)
- self.assertTrue(s.center(len(s)).__class__ is str)
+ self.assertIs(s.center(len(s)).__class__, str)
self.assertEqual(s.center(len(s)), base)
- self.assertTrue(s.lower().__class__ is str)
+ self.assertIs(s.lower().__class__, str)
self.assertEqual(s.lower(), base)
class madunicode(unicode):
@@ -2698,47 +2725,47 @@ order (MRO) for bases """
base = u"12345"
u = madunicode(base)
self.assertEqual(unicode(u), base)
- self.assertTrue(unicode(u).__class__ is unicode)
+ self.assertIs(unicode(u).__class__, unicode)
self.assertEqual(hash(u), hash(base))
self.assertEqual({u: 1}[base], 1)
self.assertEqual({base: 1}[u], 1)
- self.assertTrue(u.strip().__class__ is unicode)
+ self.assertIs(u.strip().__class__, unicode)
self.assertEqual(u.strip(), base)
- self.assertTrue(u.lstrip().__class__ is unicode)
+ self.assertIs(u.lstrip().__class__, unicode)
self.assertEqual(u.lstrip(), base)
- self.assertTrue(u.rstrip().__class__ is unicode)
+ self.assertIs(u.rstrip().__class__, unicode)
self.assertEqual(u.rstrip(), base)
- self.assertTrue(u.replace(u"x", u"x").__class__ is unicode)
+ self.assertIs(u.replace(u"x", u"x").__class__, unicode)
self.assertEqual(u.replace(u"x", u"x"), base)
- self.assertTrue(u.replace(u"xy", u"xy").__class__ is unicode)
+ self.assertIs(u.replace(u"xy", u"xy").__class__, unicode)
self.assertEqual(u.replace(u"xy", u"xy"), base)
- self.assertTrue(u.center(len(u)).__class__ is unicode)
+ self.assertIs(u.center(len(u)).__class__, unicode)
self.assertEqual(u.center(len(u)), base)
- self.assertTrue(u.ljust(len(u)).__class__ is unicode)
+ self.assertIs(u.ljust(len(u)).__class__, unicode)
self.assertEqual(u.ljust(len(u)), base)
- self.assertTrue(u.rjust(len(u)).__class__ is unicode)
+ self.assertIs(u.rjust(len(u)).__class__, unicode)
self.assertEqual(u.rjust(len(u)), base)
- self.assertTrue(u.lower().__class__ is unicode)
+ self.assertIs(u.lower().__class__, unicode)
self.assertEqual(u.lower(), base)
- self.assertTrue(u.upper().__class__ is unicode)
+ self.assertIs(u.upper().__class__, unicode)
self.assertEqual(u.upper(), base)
- self.assertTrue(u.capitalize().__class__ is unicode)
+ self.assertIs(u.capitalize().__class__, unicode)
self.assertEqual(u.capitalize(), base)
- self.assertTrue(u.title().__class__ is unicode)
+ self.assertIs(u.title().__class__, unicode)
self.assertEqual(u.title(), base)
- self.assertTrue((u + u"").__class__ is unicode)
+ self.assertIs((u + u"").__class__, unicode)
self.assertEqual(u + u"", base)
- self.assertTrue((u"" + u).__class__ is unicode)
+ self.assertIs((u"" + u).__class__, unicode)
self.assertEqual(u"" + u, base)
- self.assertTrue((u * 0).__class__ is unicode)
+ self.assertIs((u * 0).__class__, unicode)
self.assertEqual(u * 0, u"")
- self.assertTrue((u * 1).__class__ is unicode)
+ self.assertIs((u * 1).__class__, unicode)
self.assertEqual(u * 1, base)
- self.assertTrue((u * 2).__class__ is unicode)
+ self.assertIs((u * 2).__class__, unicode)
self.assertEqual(u * 2, base + base)
- self.assertTrue(u[:].__class__ is unicode)
+ self.assertIs(u[:].__class__, unicode)
self.assertEqual(u[:], base)
- self.assertTrue(u[0:0].__class__ is unicode)
+ self.assertIs(u[0:0].__class__, unicode)
self.assertEqual(u[0:0], u"")
class sublist(list):
@@ -2884,12 +2911,16 @@ order (MRO) for bases """
c = {1: c1, 2: c2, 3: c3}
for x in 1, 2, 3:
for y in 1, 2, 3:
- self.assertTrue(cmp(c[x], c[y]) == cmp(x, y), "x=%d, y=%d" % (x, y))
+ self.assertEqual(cmp(c[x], c[y]), cmp(x, y),
+ "x=%d, y=%d" % (x, y))
for op in "<", "<=", "==", "!=", ">", ">=":
- self.assertTrue(eval("c[x] %s c[y]" % op) == eval("x %s y" % op),
- "x=%d, y=%d" % (x, y))
- self.assertTrue(cmp(c[x], y) == cmp(x, y), "x=%d, y=%d" % (x, y))
- self.assertTrue(cmp(x, c[y]) == cmp(x, y), "x=%d, y=%d" % (x, y))
+ self.assertEqual(eval("c[x] %s c[y]" % op),
+ eval("x %s y" % op),
+ "x=%d, y=%d" % (x, y))
+ self.assertEqual(cmp(c[x], y), cmp(x, y),
+ "x=%d, y=%d" % (x, y))
+ self.assertEqual(cmp(x, c[y]), cmp(x, y),
+ "x=%d, y=%d" % (x, y))
def test_rich_comparisons(self):
# Testing rich comparisons...
@@ -2962,12 +2993,15 @@ order (MRO) for bases """
for x in 1, 2, 3:
for y in 1, 2, 3:
for op in "<", "<=", "==", "!=", ">", ">=":
- self.assertTrue(eval("c[x] %s c[y]" % op) == eval("x %s y" % op),
- "x=%d, y=%d" % (x, y))
- self.assertTrue(eval("c[x] %s y" % op) == eval("x %s y" % op),
- "x=%d, y=%d" % (x, y))
- self.assertTrue(eval("x %s c[y]" % op) == eval("x %s y" % op),
- "x=%d, y=%d" % (x, y))
+ self.assertEqual(eval("c[x] %s c[y]" % op),
+ eval("x %s y" % op),
+ "x=%d, y=%d" % (x, y))
+ self.assertEqual(eval("c[x] %s y" % op),
+ eval("x %s y" % op),
+ "x=%d, y=%d" % (x, y))
+ self.assertEqual(eval("x %s c[y]" % op),
+ eval("x %s y" % op),
+ "x=%d, y=%d" % (x, y))
def test_coercions(self):
# Testing coercions...
@@ -3032,9 +3066,9 @@ order (MRO) for bases """
for cls2 in C, D, E, F:
x = cls()
x.__class__ = cls2
- self.assertTrue(x.__class__ is cls2)
+ self.assertIs(x.__class__, cls2)
x.__class__ = cls
- self.assertTrue(x.__class__ is cls)
+ self.assertIs(x.__class__, cls)
def cant(x, C):
try:
x.__class__ = C
@@ -3096,11 +3130,11 @@ order (MRO) for bases """
x = cls()
x.a = 1
x.__class__ = cls2
- self.assertTrue(x.__class__ is cls2,
+ self.assertIs(x.__class__, cls2,
"assigning %r as __class__ for %r silently failed" % (cls2, x))
self.assertEqual(x.a, 1)
x.__class__ = cls
- self.assertTrue(x.__class__ is cls,
+ self.assertIs(x.__class__, cls,
"assigning %r as __class__ for %r silently failed" % (cls, x))
self.assertEqual(x.a, 1)
for cls in G, J, K, L, M, N, P, R, list, Int:
@@ -3270,7 +3304,7 @@ order (MRO) for bases """
for cls in C, C1, C2:
s = p.dumps(cls, bin)
cls2 = p.loads(s)
- self.assertTrue(cls2 is cls)
+ self.assertIs(cls2, cls)
a = C1(1, 2); a.append(42); a.append(24)
b = C2("hello", "world", 42)
@@ -3300,7 +3334,7 @@ order (MRO) for bases """
import copy
for cls in C, C1, C2:
cls2 = copy.deepcopy(cls)
- self.assertTrue(cls2 is cls)
+ self.assertIs(cls2, cls)
a = C1(1, 2); a.append(42); a.append(24)
b = C2("hello", "world", 42)
@@ -3371,9 +3405,9 @@ order (MRO) for bases """
# Now it should work
x = C()
y = pickle.loads(pickle.dumps(x))
- self.assertEqual(hasattr(y, 'a'), 0)
+ self.assertNotHasAttr(y, 'a')
y = cPickle.loads(cPickle.dumps(x))
- self.assertEqual(hasattr(y, 'a'), 0)
+ self.assertNotHasAttr(y, 'a')
x.a = 42
y = pickle.loads(pickle.dumps(x))
self.assertEqual(y.a, 42)
@@ -3689,9 +3723,9 @@ order (MRO) for bases """
from types import ModuleType as M
m = M.__new__(M)
str(m)
- self.assertEqual(hasattr(m, "__name__"), 0)
- self.assertEqual(hasattr(m, "__file__"), 0)
- self.assertEqual(hasattr(m, "foo"), 0)
+ self.assertNotHasAttr(m, "__name__")
+ self.assertNotHasAttr(m, "__file__")
+ self.assertNotHasAttr(m, "foo")
self.assertFalse(m.__dict__) # None or {} are both reasonable answers
m.foo = 1
self.assertEqual(m.__dict__, {"foo": 1})
@@ -3871,8 +3905,8 @@ order (MRO) for bases """
__slots__=()
if test_support.check_impl_detail():
self.assertEqual(C.__basicsize__, B.__basicsize__)
- self.assertTrue(hasattr(C, '__dict__'))
- self.assertTrue(hasattr(C, '__weakref__'))
+ self.assertHasAttr(C, '__dict__')
+ self.assertHasAttr(C, '__weakref__')
C().x = 2
def test_rmul(self):
@@ -4119,6 +4153,20 @@ order (MRO) for bases """
C.__name__ = 'D.E'
self.assertEqual((C.__module__, C.__name__), (mod, 'D.E'))
+ def test_evil_type_name(self):
+ # A badly placed Py_DECREF in type_set_name led to arbitrary code
+ # execution while the type structure was not in a sane state, and a
+ # possible segmentation fault as a result. See bug #16447.
+ class Nasty(str):
+ def __del__(self):
+ C.__name__ = "other"
+
+ class C(object):
+ pass
+
+ C.__name__ = Nasty("abc")
+ C.__name__ = "normal"
+
def test_subclass_right_op(self):
# Testing correct dispatch of subclass overloading __r<op>__...
@@ -4359,7 +4407,7 @@ order (MRO) for bases """
self.assertEqual(c.attr, 1)
# this makes a crash more likely:
test_support.gc_collect()
- self.assertEqual(hasattr(c, 'attr'), False)
+ self.assertNotHasAttr(c, 'attr')
def test_init(self):
# SF 1155938
@@ -4380,17 +4428,17 @@ order (MRO) for bases """
l = []
self.assertEqual(l.__add__, l.__add__)
self.assertEqual(l.__add__, [].__add__)
- self.assertTrue(l.__add__ != [5].__add__)
- self.assertTrue(l.__add__ != l.__mul__)
- self.assertTrue(l.__add__.__name__ == '__add__')
+ self.assertNotEqual(l.__add__, [5].__add__)
+ self.assertNotEqual(l.__add__, l.__mul__)
+ self.assertEqual(l.__add__.__name__, '__add__')
if hasattr(l.__add__, '__self__'):
# CPython
- self.assertTrue(l.__add__.__self__ is l)
- self.assertTrue(l.__add__.__objclass__ is list)
+ self.assertIs(l.__add__.__self__, l)
+ self.assertIs(l.__add__.__objclass__, list)
else:
# Python implementations where [].__add__ is a normal bound method
- self.assertTrue(l.__add__.im_self is l)
- self.assertTrue(l.__add__.im_class is list)
+ self.assertIs(l.__add__.im_self, l)
+ self.assertIs(l.__add__.im_class, list)
self.assertEqual(l.__add__.__doc__, list.__add__.__doc__)
try:
hash(l.__add__)
@@ -4541,7 +4589,6 @@ order (MRO) for bases """
self.assertRaises(AttributeError, getattr, C(), "attr")
self.assertEqual(descr.counter, 4)
- import gc
class EvilGetattribute(object):
# This used to segfault
def __getattr__(self, name):
@@ -4554,6 +4601,9 @@ order (MRO) for bases """
self.assertRaises(AttributeError, getattr, EvilGetattribute(), "attr")
+ def test_type___getattribute__(self):
+ self.assertRaises(TypeError, type.__getattribute__, list, type)
+
def test_abstractmethods(self):
# type pretends not to have __abstractmethods__.
self.assertRaises(AttributeError, getattr, type, "__abstractmethods__")
@@ -4571,7 +4621,7 @@ order (MRO) for bases """
fake_str = FakeStr()
# isinstance() reads __class__ on new style classes
- self.assertTrue(isinstance(fake_str, str))
+ self.assertIsInstance(fake_str, str)
# call a method descriptor
with self.assertRaises(TypeError):
@@ -4588,7 +4638,30 @@ order (MRO) for bases """
pass
Foo.__repr__ = Foo.__str__
foo = Foo()
- str(foo)
+ self.assertRaises(RuntimeError, str, foo)
+ self.assertRaises(RuntimeError, repr, foo)
+
+ def test_mixing_slot_wrappers(self):
+ class X(dict):
+ __setattr__ = dict.__setitem__
+ x = X()
+ x.y = 42
+ self.assertEqual(x["y"], 42)
+
+ def test_cycle_through_dict(self):
+ # See bug #1469629
+ class X(dict):
+ def __init__(self):
+ dict.__init__(self)
+ self.__dict__ = self
+ x = X()
+ x.attr = 42
+ wr = weakref.ref(x)
+ del x
+ test_support.gc_collect()
+ self.assertIsNone(wr())
+ for o in gc.get_objects():
+ self.assertIsNot(type(o), X)
class DictProxyTests(unittest.TestCase):
def setUp(self):
diff --git a/Lib/test/test_descrtut.py b/Lib/test/test_descrtut.py
index 157b9f4..33ff0c8 100644
--- a/Lib/test/test_descrtut.py
+++ b/Lib/test/test_descrtut.py
@@ -329,7 +329,7 @@ Attributes defined by get/set methods
... return self.__set(inst, value)
Now let's define a class with an attribute x defined by a pair of methods,
-getx() and and setx():
+getx() and setx():
>>> class C(object):
...
diff --git a/Lib/test/test_dict.py b/Lib/test/test_dict.py
index 29167d0..a5685b9 100644
--- a/Lib/test/test_dict.py
+++ b/Lib/test/test_dict.py
@@ -33,9 +33,11 @@ class DictTest(unittest.TestCase):
self.assertEqual(d.keys(), [])
d = {'a': 1, 'b': 2}
k = d.keys()
+ self.assertEqual(set(k), {'a', 'b'})
+ self.assertIn('a', k)
+ self.assertIn('b', k)
self.assertTrue(d.has_key('a'))
self.assertTrue(d.has_key('b'))
-
self.assertRaises(TypeError, d.keys, None)
def test_values(self):
@@ -254,6 +256,14 @@ class DictTest(unittest.TestCase):
d = dict(zip(range(6), range(6)))
self.assertEqual(dict.fromkeys(d, 0), dict(zip(range(6), [0]*6)))
+ class baddict3(dict):
+ def __new__(cls):
+ return d
+ d = {i : i for i in range(10)}
+ res = d.copy()
+ res.update(a=None, b=None, c=None)
+ self.assertEqual(baddict3.fromkeys({"a", "b", "c"}), res)
+
def test_copy(self):
d = {1:1, 2:2, 3:3}
self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
@@ -299,6 +309,26 @@ class DictTest(unittest.TestCase):
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
+ def test_setdefault_atomic(self):
+ # Issue #13521: setdefault() calls __hash__ and __eq__ only once.
+ class Hashed(object):
+ def __init__(self):
+ self.hash_count = 0
+ self.eq_count = 0
+ def __hash__(self):
+ self.hash_count += 1
+ return 42
+ def __eq__(self, other):
+ self.eq_count += 1
+ return id(self) == id(other)
+ hashed1 = Hashed()
+ y = {hashed1: 5}
+ hashed2 = Hashed()
+ y.setdefault(hashed2, [])
+ self.assertEqual(hashed1.hash_count, 1)
+ self.assertEqual(hashed2.hash_count, 1)
+ self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
+
def test_popitem(self):
# dict.popitem()
for copymode in -1, +1:
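The new test_setdefault_atomic above pins down the Issue #13521 fix: dict.setdefault() performs a single lookup, so the key's __hash__ runs once per call. A tiny illustration, not from the patch (CountingKey is a made-up name):

class CountingKey(object):
    # Counts how often the dict machinery hashes this key.
    def __init__(self):
        self.hash_count = 0
    def __hash__(self):
        self.hash_count += 1
        return 42

d = {}
key = CountingKey()
d.setdefault(key, [])
assert key.hash_count == 1   # one lookup, one hash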
diff --git a/Lib/test/test_dictcomps.py b/Lib/test/test_dictcomps.py
index 9af9e48..04cbb2c 100644
--- a/Lib/test/test_dictcomps.py
+++ b/Lib/test/test_dictcomps.py
@@ -1,54 +1,91 @@
+import unittest
-doctests = """
+from test import test_support as support
- >>> k = "old value"
- >>> { k: None for k in range(10) }
- {0: None, 1: None, 2: None, 3: None, 4: None, 5: None, 6: None, 7: None, 8: None, 9: None}
- >>> k
- 'old value'
+# For scope testing.
+g = "Global variable"
- >>> { k: k+10 for k in range(10) }
- {0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17, 8: 18, 9: 19}
- >>> g = "Global variable"
- >>> { k: g for k in range(10) }
- {0: 'Global variable', 1: 'Global variable', 2: 'Global variable', 3: 'Global variable', 4: 'Global variable', 5: 'Global variable', 6: 'Global variable', 7: 'Global variable', 8: 'Global variable', 9: 'Global variable'}
+class DictComprehensionTest(unittest.TestCase):
- >>> { k: v for k in range(10) for v in range(10) if k == v }
- {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
+ def test_basics(self):
+ expected = {0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17,
+ 8: 18, 9: 19}
+ actual = {k: k + 10 for k in range(10)}
+ self.assertEqual(actual, expected)
- >>> { k: v for v in range(10) for k in range(v*9, v*10) }
- {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4, 38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6, 55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7, 66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8, 76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9, 85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
+ expected = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
+ actual = {k: v for k in range(10) for v in range(10) if k == v}
+ self.assertEqual(actual, expected)
- >>> { x: y for y, x in ((1, 2), (3, 4)) } = 5 # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- SyntaxError: ...
+ def test_scope_isolation(self):
+ k = "Local Variable"
- >>> { x: y for y, x in ((1, 2), (3, 4)) } += 5 # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- SyntaxError: ...
+ expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
+ 6: None, 7: None, 8: None, 9: None}
+ actual = {k: None for k in range(10)}
+ self.assertEqual(actual, expected)
+ self.assertEqual(k, "Local Variable")
-"""
+ expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
+ 38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
+ 55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
+ 66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
+ 76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
+ 85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
+ actual = {k: v for v in range(10) for k in range(v * 9, v * 10)}
+ self.assertEqual(k, "Local Variable")
+ self.assertEqual(actual, expected)
-__test__ = {'doctests' : doctests}
+ def test_scope_isolation_from_global(self):
+ expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
+ 6: None, 7: None, 8: None, 9: None}
+ actual = {g: None for g in range(10)}
+ self.assertEqual(actual, expected)
+ self.assertEqual(g, "Global variable")
-def test_main(verbose=None):
- import sys
- from test import test_support
- from test import test_dictcomps
- test_support.run_doctest(test_dictcomps, verbose)
+ expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
+ 38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
+ 55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
+ 66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
+ 76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
+ 85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
+ actual = {g: v for v in range(10) for g in range(v * 9, v * 10)}
+ self.assertEqual(g, "Global variable")
+ self.assertEqual(actual, expected)
- # verify reference counting
- if verbose and hasattr(sys, "gettotalrefcount"):
- import gc
- counts = [None] * 5
- for i in range(len(counts)):
- test_support.run_doctest(test_dictcomps, verbose)
- gc.collect()
- counts[i] = sys.gettotalrefcount()
- print(counts)
+ def test_global_visibility(self):
+ expected = {0: 'Global variable', 1: 'Global variable',
+ 2: 'Global variable', 3: 'Global variable',
+ 4: 'Global variable', 5: 'Global variable',
+ 6: 'Global variable', 7: 'Global variable',
+ 8: 'Global variable', 9: 'Global variable'}
+ actual = {k: g for k in range(10)}
+ self.assertEqual(actual, expected)
+
+ def test_local_visibility(self):
+ v = "Local variable"
+ expected = {0: 'Local variable', 1: 'Local variable',
+ 2: 'Local variable', 3: 'Local variable',
+ 4: 'Local variable', 5: 'Local variable',
+ 6: 'Local variable', 7: 'Local variable',
+ 8: 'Local variable', 9: 'Local variable'}
+ actual = {k: v for k in range(10)}
+ self.assertEqual(actual, expected)
+ self.assertEqual(v, "Local variable")
+
+ def test_illegal_assignment(self):
+ with self.assertRaisesRegexp(SyntaxError, "can't assign"):
+ compile("{x: y for y, x in ((1, 2), (3, 4))} = 5", "<test>",
+ "exec")
+
+ with self.assertRaisesRegexp(SyntaxError, "can't assign"):
+ compile("{x: y for y, x in ((1, 2), (3, 4))} += 5", "<test>",
+ "exec")
+
+
+def test_main():
+ support.run_unittest(__name__)
if __name__ == "__main__":
- test_main(verbose=True)
+ test_main()
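The rewritten test_dictcomps above moves the doctests into unittest methods; its scope-isolation cases rest on the Python 2 rule that a dict comprehension has its own scope while a list comprehension does not. A quick sketch of that difference (Python 2 semantics, outside the patch):

k = 'old value'
{k: None for k in range(3)}
assert k == 'old value'   # the dict comprehension does not rebind k

[None for k in range(3)]
assert k == 2             # a Python 2 list comprehension leaks its loop variable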
diff --git a/Lib/test/test_dictviews.py b/Lib/test/test_dictviews.py
index f903676..30cfb93 100644
--- a/Lib/test/test_dictviews.py
+++ b/Lib/test/test_dictviews.py
@@ -112,6 +112,13 @@ class DictSetTest(unittest.TestCase):
self.assertEqual(d1.viewkeys() ^ set(d3.viewkeys()),
{'a', 'b', 'd', 'e'})
+ self.assertEqual(d1.viewkeys() - d1.viewkeys(), set())
+ self.assertEqual(d1.viewkeys() - d2.viewkeys(), {'a'})
+ self.assertEqual(d1.viewkeys() - d3.viewkeys(), {'a', 'b'})
+ self.assertEqual(d1.viewkeys() - set(d1.viewkeys()), set())
+ self.assertEqual(d1.viewkeys() - set(d2.viewkeys()), {'a'})
+ self.assertEqual(d1.viewkeys() - set(d3.viewkeys()), {'a', 'b'})
+
def test_items_set_operations(self):
d1 = {'a': 1, 'b': 2}
d2 = {'a': 2, 'b': 2}
@@ -144,6 +151,19 @@ class DictSetTest(unittest.TestCase):
self.assertEqual(d1.viewitems() ^ d3.viewitems(),
{('a', 1), ('b', 2), ('d', 4), ('e', 5)})
+ self.assertEqual(d1.viewitems() - d1.viewitems(), set())
+ self.assertEqual(d1.viewitems() - d2.viewitems(), {('a', 1)})
+ self.assertEqual(d1.viewitems() - d3.viewitems(), {('a', 1), ('b', 2)})
+ self.assertEqual(d1.viewitems() - set(d1.viewitems()), set())
+ self.assertEqual(d1.viewitems() - set(d2.viewitems()), {('a', 1)})
+ self.assertEqual(d1.viewitems() - set(d3.viewitems()),
+ {('a', 1), ('b', 2)})
+
+ def test_recursive_repr(self):
+ d = {}
+ d[42] = d.viewvalues()
+ self.assertRaises(RuntimeError, repr, d)
+
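The assertions added above extend coverage of dict view set operations to difference. For reference, a minimal sketch of how Python 2.7 viewkeys()/viewitems() behave with the - and & operators:

d1 = {'a': 1, 'b': 2}
d2 = {'b': 2, 'c': 3}
assert d1.viewkeys() - d2.viewkeys() == {'a'}
assert d1.viewitems() - d2.viewitems() == {('a', 1)}
assert d1.viewkeys() & d2.viewkeys() == {'b'}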
diff --git a/Lib/test/test_difflib.py b/Lib/test/test_difflib.py
index 310bf99..35f2c36 100644
--- a/Lib/test/test_difflib.py
+++ b/Lib/test/test_difflib.py
@@ -59,6 +59,15 @@ class TestSFbugs(unittest.TestCase):
diff_gen = difflib.unified_diff([], [])
self.assertRaises(StopIteration, diff_gen.next)
+ def test_matching_blocks_cache(self):
+ # Issue #21635
+ s = difflib.SequenceMatcher(None, "abxcd", "abcd")
+ first = s.get_matching_blocks()
+ second = s.get_matching_blocks()
+ self.assertEqual(second[0].size, 2)
+ self.assertEqual(second[1].size, 2)
+ self.assertEqual(second[2].size, 0)
+
def test_added_tab_hint(self):
# Check fix for bug #1488943
diff = list(difflib.Differ().compare(["\tI am a buggy"],["\t\tI am a bug"]))
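The new test_matching_blocks_cache above guards the Issue #21635 fix by checking that the cached result of get_matching_blocks() keeps its full contents on a second call. A short sketch of what that method returns for the same inputs, not taken from the patch:

import difflib

s = difflib.SequenceMatcher(None, 'abxcd', 'abcd')
blocks = s.get_matching_blocks()
# Match(a, b, size) triples: 'ab', 'cd', then the zero-length sentinel.
assert [b.size for b in blocks] == [2, 2, 0]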
diff --git a/Lib/test/test_dis.py b/Lib/test/test_dis.py
index 6ce4b79..da5ba4b 100644
--- a/Lib/test/test_dis.py
+++ b/Lib/test/test_dis.py
@@ -125,6 +125,8 @@ class DisTests(unittest.TestCase):
# so fails if the tests are run with -O. Skip this test then.
if __debug__:
self.do_disassembly_test(bug1333982, dis_bug1333982)
+ else:
+ self.skipTest('need asserts, run without -O')
def test_big_linenos(self):
def func(count):
diff --git a/Lib/test/test_dl.py b/Lib/test/test_dl.py
index 6069a77..da9730f 100755..100644
--- a/Lib/test/test_dl.py
+++ b/Lib/test/test_dl.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""Test dlmodule.c
Roger E. Masse revised strategy by Barry Warsaw
"""
diff --git a/Lib/test/test_doctest.py b/Lib/test/test_doctest.py
index 9b500fd..a58c4ae 100644
--- a/Lib/test/test_doctest.py
+++ b/Lib/test/test_doctest.py
@@ -1019,6 +1019,33 @@ But IGNORE_EXCEPTION_DETAIL does not allow a mismatch in the exception type:
ValueError: message
TestResults(failed=1, attempted=1)
+If the exception does not have a message, you can still use
+IGNORE_EXCEPTION_DETAIL to normalize the modules between Python 2 and 3:
+
+ >>> def f(x):
+ ... r'''
+ ... >>> from Queue import Empty
+ ... >>> raise Empty() #doctest: +IGNORE_EXCEPTION_DETAIL
+ ... Traceback (most recent call last):
+ ... foo.bar.Empty
+ ... '''
+ >>> test = doctest.DocTestFinder().find(f)[0]
+ >>> doctest.DocTestRunner(verbose=False).run(test)
+ TestResults(failed=0, attempted=2)
+
+Note that a trailing colon doesn't matter either:
+
+ >>> def f(x):
+ ... r'''
+ ... >>> from Queue import Empty
+ ... >>> raise Empty() #doctest: +IGNORE_EXCEPTION_DETAIL
+ ... Traceback (most recent call last):
+ ... foo.bar.Empty:
+ ... '''
+ >>> test = doctest.DocTestFinder().find(f)[0]
+ >>> doctest.DocTestRunner(verbose=False).run(test)
+ TestResults(failed=0, attempted=2)
+
If an exception is raised but not expected, then it is reported as an
unexpected exception:
@@ -2006,6 +2033,31 @@ def test_DocTestSuite():
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=4>
+ The module need not contain any doctest examples:
+
+ >>> suite = doctest.DocTestSuite('test.sample_doctest_no_doctests')
+ >>> suite.run(unittest.TestResult())
+ <unittest.result.TestResult run=0 errors=0 failures=0>
+
+ However, if DocTestSuite finds no docstrings, it raises an error:
+
+ >>> try:
+ ... doctest.DocTestSuite('test.sample_doctest_no_docstrings')
+ ... except ValueError as e:
+ ... error = e
+
+ >>> print(error.args[1])
+ has no docstrings
+
+ You can prevent this error by passing a DocTestFinder instance with
+ the `exclude_empty` keyword argument set to False:
+
+ >>> finder = doctest.DocTestFinder(exclude_empty=False)
+ >>> suite = doctest.DocTestSuite('test.sample_doctest_no_docstrings',
+ ... test_finder=finder)
+ >>> suite.run(unittest.TestResult())
+ <unittest.result.TestResult run=0 errors=0 failures=0>
+
We can use the current module:
>>> suite = test.sample_doctest.test_suite()
@@ -2648,7 +2700,9 @@ def test_main():
from test import test_doctest
# Ignore all warnings about the use of class Tester in this module.
- deprecations = [("class Tester is deprecated", DeprecationWarning)]
+ deprecations = []
+ if __debug__:
+ deprecations.append(("class Tester is deprecated", DeprecationWarning))
if sys.py3kwarning:
deprecations += [("backquote not supported", SyntaxWarning),
("execfile.. not supported", DeprecationWarning)]
diff --git a/Lib/test/test_docxmlrpc.py b/Lib/test/test_docxmlrpc.py
index 716333e..80d1803 100644
--- a/Lib/test/test_docxmlrpc.py
+++ b/Lib/test/test_docxmlrpc.py
@@ -100,7 +100,7 @@ class DocXMLRPCHTTPGETServer(unittest.TestCase):
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Content-type"), "text/html")
- # Server throws an exception if we don't start to read the data
+ # Server raises an exception if we don't start to read the data
response.read()
def test_invalid_get_response(self):
diff --git a/Lib/test/test_dumbdbm.py b/Lib/test/test_dumbdbm.py
index 041fac1..6f5324f 100644
--- a/Lib/test/test_dumbdbm.py
+++ b/Lib/test/test_dumbdbm.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""Test script for the dumbdbm module
Original by Roger E. Masse
"""
@@ -38,11 +37,9 @@ class DumbDBMTestCase(unittest.TestCase):
self.read_helper(f)
f.close()
+ @unittest.skipUnless(hasattr(os, 'chmod'), 'os.chmod not available')
+ @unittest.skipUnless(hasattr(os, 'umask'), 'os.umask not available')
def test_dumbdbm_creation_mode(self):
- # On platforms without chmod, don't do anything.
- if not (hasattr(os, 'chmod') and hasattr(os, 'umask')):
- return
-
try:
old_umask = os.umask(0002)
f = dumbdbm.open(_fname, 'c', 0637)
diff --git a/Lib/test/test_email.py b/Lib/test/test_email.py
index cb4ee60..ab6e0b0 100644
--- a/Lib/test/test_email.py
+++ b/Lib/test/test_email.py
@@ -3,10 +3,12 @@
# The specific tests now live in Lib/email/test
from email.test.test_email import suite
+from email.test.test_email_renamed import suite as suite2
from test import test_support
def test_main():
test_support.run_unittest(suite())
+ test_support.run_unittest(suite2())
if __name__ == '__main__':
test_main()
diff --git a/Lib/test/test_enumerate.py b/Lib/test/test_enumerate.py
index 6b9ff79..aac4da7 100644
--- a/Lib/test/test_enumerate.py
+++ b/Lib/test/test_enumerate.py
@@ -188,11 +188,10 @@ class TestReversed(unittest.TestCase):
self.assertRaises(TypeError, reversed)
self.assertRaises(TypeError, reversed, [], 'extra')
+ @unittest.skipUnless(hasattr(sys, 'getrefcount'), 'test needs sys.getrefcount()')
def test_bug1229429(self):
# this bug was never in reversed, it was in
# PyObject_CallMethod, and reversed_new calls that sometimes.
- if not hasattr(sys, "getrefcount"):
- return
def f():
pass
r = f.__reversed__ = object()
diff --git a/Lib/test/test_eof.py b/Lib/test/test_eof.py
index 763917f..7de4686 100644
--- a/Lib/test/test_eof.py
+++ b/Lib/test/test_eof.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""test script for a few new invalid token catches"""
import unittest
diff --git a/Lib/test/test_epoll.py b/Lib/test/test_epoll.py
index b66d9ea..cd5722f 100644
--- a/Lib/test/test_epoll.py
+++ b/Lib/test/test_epoll.py
@@ -164,11 +164,8 @@ class TestEPoll(unittest.TestCase):
expected.sort()
self.assertEqual(events, expected)
- self.assertFalse(then - now > 0.01, then - now)
- now = time.time()
events = ep.poll(timeout=2.1, maxevents=4)
- then = time.time()
self.assertFalse(events)
client.send("Hello!")
diff --git a/Lib/test/test_errno.py b/Lib/test/test_errno.py
index bb8918f..7a37d3a 100755..100644
--- a/Lib/test/test_errno.py
+++ b/Lib/test/test_errno.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""Test the errno module
Roger E. Masse
"""
diff --git a/Lib/test/test_exceptions.py b/Lib/test/test_exceptions.py
index 2160641..dc02cb3 100644
--- a/Lib/test/test_exceptions.py
+++ b/Lib/test/test_exceptions.py
@@ -431,6 +431,12 @@ class ExceptionTests(unittest.TestCase):
u.start = 1000
self.assertEqual(str(u), "can't translate characters in position 1000-4: 965230951443685724997")
+ def test_unicode_errors_no_object(self):
+ # See issue #21134.
+ klasses = UnicodeEncodeError, UnicodeDecodeError, UnicodeTranslateError
+ for klass in klasses:
+ self.assertEqual(str(klass.__new__(klass)), "")
+
def test_badisinstance(self):
# Bug #2542: if issubclass(e, MyException) raises an exception,
# it should be ignored
@@ -479,6 +485,18 @@ class ExceptionTests(unittest.TestCase):
except AssertionError as e:
self.assertEqual(str(e), "(3,)")
+ def test_bad_exception_clearing(self):
+ # See issue 16445: use of Py_XDECREF instead of Py_CLEAR in
+ # BaseException_set_message gave a possible way to segfault the
+ # interpreter.
+ class Nasty(str):
+ def __del__(message):
+ del e.message
+
+ e = ValueError(Nasty("msg"))
+ e.args = ()
+ del e.message
+
# Helper class used by TestSameStrAndUnicodeMsg
class ExcWithOverriddenStr(Exception):
diff --git a/Lib/test/test_fcntl.py b/Lib/test/test_fcntl.py
index df09391..3a18031 100644
--- a/Lib/test/test_fcntl.py
+++ b/Lib/test/test_fcntl.py
@@ -8,9 +8,9 @@ import struct
import sys
import unittest
from test.test_support import (verbose, TESTFN, unlink, run_unittest,
- import_module)
+ import_module, cpython_only)
-# Skip test if no fnctl module.
+# Skip test if no fcntl module.
fcntl = import_module('fcntl')
@@ -51,6 +51,12 @@ def get_lockdata():
lockdata = get_lockdata()
+class BadFile:
+ def __init__(self, fn):
+ self.fn = fn
+ def fileno(self):
+ return self.fn
+
class TestFcntl(unittest.TestCase):
def setUp(self):
@@ -81,6 +87,29 @@ class TestFcntl(unittest.TestCase):
rv = fcntl.fcntl(self.f, fcntl.F_SETLKW, lockdata)
self.f.close()
+ def test_fcntl_bad_file(self):
+ with self.assertRaises(ValueError):
+ fcntl.fcntl(-1, fcntl.F_SETFL, os.O_NONBLOCK)
+ with self.assertRaises(ValueError):
+ fcntl.fcntl(BadFile(-1), fcntl.F_SETFL, os.O_NONBLOCK)
+ with self.assertRaises(TypeError):
+ fcntl.fcntl('spam', fcntl.F_SETFL, os.O_NONBLOCK)
+ with self.assertRaises(TypeError):
+ fcntl.fcntl(BadFile('spam'), fcntl.F_SETFL, os.O_NONBLOCK)
+
+ @cpython_only
+ def test_fcntl_bad_file_overflow(self):
+ from _testcapi import INT_MAX, INT_MIN
+ # Issue 15989
+ with self.assertRaises(ValueError):
+ fcntl.fcntl(INT_MAX + 1, fcntl.F_SETFL, os.O_NONBLOCK)
+ with self.assertRaises(ValueError):
+ fcntl.fcntl(BadFile(INT_MAX + 1), fcntl.F_SETFL, os.O_NONBLOCK)
+ with self.assertRaises(ValueError):
+ fcntl.fcntl(INT_MIN - 1, fcntl.F_SETFL, os.O_NONBLOCK)
+ with self.assertRaises(ValueError):
+ fcntl.fcntl(BadFile(INT_MIN - 1), fcntl.F_SETFL, os.O_NONBLOCK)
+
def test_fcntl_64_bit(self):
# Issue #1309352: fcntl shouldn't fail when the third arg fits in a
# C 'long' but not in a C 'int'.
@@ -92,7 +121,10 @@ class TestFcntl(unittest.TestCase):
self.skipTest("F_NOTIFY or DN_MULTISHOT unavailable")
fd = os.open(os.path.dirname(os.path.abspath(TESTFN)), os.O_RDONLY)
try:
+ # This will raise OverflowError if issue1309352 is present.
fcntl.fcntl(fd, cmd, flags)
+ except IOError:
+ pass # Running on a system that doesn't support these flags.
finally:
os.close(fd)
diff --git a/Lib/test/test_file.py b/Lib/test/test_file.py
index dffa4b5..4f2c9ef 100644
--- a/Lib/test/test_file.py
+++ b/Lib/test/test_file.py
@@ -154,16 +154,6 @@ class OtherFileTests(unittest.TestCase):
f.close()
self.fail('%r is an invalid file mode' % mode)
- def testStdin(self):
- # This causes the interpreter to exit on OSF1 v5.1.
- if sys.platform != 'osf1V5':
- self.assertRaises((IOError, ValueError), sys.stdin.seek, -1)
- else:
- print((
- ' Skipping sys.stdin.seek(-1), it may crash the interpreter.'
- ' Test manually.'), file=sys.__stdout__)
- self.assertRaises((IOError, ValueError), sys.stdin.truncate)
-
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
@@ -310,6 +300,7 @@ class OtherFileTests(unittest.TestCase):
self.fail("readlines() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
# Reading after iteration hit EOF shouldn't hurt either
+ f.close()
f = self.open(TESTFN, 'rb')
try:
for line in f:
diff --git a/Lib/test/test_file2k.py b/Lib/test/test_file2k.py
index 399f119..fae1db6 100644
--- a/Lib/test/test_file2k.py
+++ b/Lib/test/test_file2k.py
@@ -2,6 +2,9 @@ import sys
import os
import unittest
import itertools
+import select
+import signal
+import subprocess
import time
from array import array
from weakref import proxy
@@ -86,9 +89,23 @@ class AutoFileTests(unittest.TestCase):
self.assertRaises(TypeError, self.f.writelines,
[NonString(), NonString()])
+ def testWritelinesBuffer(self):
+ self.f.writelines([array('c', 'abc')])
+ self.f.close()
+ self.f = open(TESTFN, 'rb')
+ buf = self.f.read()
+ self.assertEqual(buf, 'abc')
+
def testRepr(self):
# verify repr works
self.assertTrue(repr(self.f).startswith("<open file '" + TESTFN))
+ # see issue #14161
+ # Windows doesn't like \r\n\t" in the file name, but ' is ok
+ fname = 'xx\rxx\nxx\'xx"xx' if sys.platform != "win32" else "xx'xx"
+ with open(fname, 'w') as f:
+ self.addCleanup(os.remove, fname)
+ self.assertTrue(repr(f).startswith(
+ "<open file %r, mode 'w' at" % fname))
def testErrors(self):
self.f.close()
@@ -405,6 +422,20 @@ class OtherFileTests(unittest.TestCase):
finally:
os.unlink(TESTFN)
+ @unittest.skipUnless(os.name == 'posix', 'test requires a posix system.')
+ def test_write_full(self):
+ # Issue #17976
+ try:
+ f = open('/dev/full', 'w', 1)
+ except IOError:
+ self.skipTest("requires '/dev/full'")
+ try:
+ with self.assertRaises(IOError):
+ f.write('hello')
+ f.write('\n')
+ finally:
+ f.close()
+
class FileSubclassTests(unittest.TestCase):
def testExit(self):
@@ -595,6 +626,148 @@ class FileThreadingTests(unittest.TestCase):
self._test_close_open_io(io_func)
+@unittest.skipUnless(os.name == 'posix', 'test requires a posix system.')
+class TestFileSignalEINTR(unittest.TestCase):
+ def _test_reading(self, data_to_write, read_and_verify_code, method_name,
+ universal_newlines=False):
+ """Generic buffered read method test harness to verify EINTR behavior.
+
+ Also validates that Python signal handlers are run during the read.
+
+ Args:
+ data_to_write: String to write to the child process for reading
+ before sending it a signal, confirming the signal was handled,
+ writing a final newline char and closing the infile pipe.
+ read_and_verify_code: Single "line" of code to read from a file
+ object named 'infile' and validate the result. This will be
+ executed as part of a python subprocess fed data_to_write.
+ method_name: The name of the read method being tested, for use in
+ an error message on failure.
+ universal_newlines: If True, infile will be opened in universal
+ newline mode in the child process.
+ """
+ if universal_newlines:
+ # Test the \r\n -> \n conversion while we're at it.
+ data_to_write = data_to_write.replace('\n', '\r\n')
+ infile_setup_code = 'infile = os.fdopen(sys.stdin.fileno(), "rU")'
+ else:
+ infile_setup_code = 'infile = sys.stdin'
+ # Total pipe IO in this function is smaller than the minimum posix OS
+ # pipe buffer size of 512 bytes. No writer should block.
+ assert len(data_to_write) < 512, 'data_to_write must fit in pipe buf.'
+
+ child_code = (
+ 'import os, signal, sys ;'
+ 'signal.signal('
+ 'signal.SIGINT, lambda s, f: sys.stderr.write("$\\n")) ;'
+ + infile_setup_code + ' ;' +
+ 'assert isinstance(infile, file) ;'
+ 'sys.stderr.write("Go.\\n") ;'
+ + read_and_verify_code)
+ reader_process = subprocess.Popen(
+ [sys.executable, '-c', child_code],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ # Wait for the signal handler to be installed.
+ go = reader_process.stderr.read(4)
+ if go != 'Go.\n':
+ reader_process.kill()
+ self.fail('Error from %s process while awaiting "Go":\n%s' % (
+ method_name, go+reader_process.stderr.read()))
+ reader_process.stdin.write(data_to_write)
+ signals_sent = 0
+ rlist = []
+ # We don't know when the read_and_verify_code in our child is actually
+ # executing within the read system call we want to interrupt. This
+ # loop waits for a bit before sending the first signal to increase
+ # the likelihood of that. Implementations without correct EINTR
+ # and signal handling usually fail this test.
+ while not rlist:
+ rlist, _, _ = select.select([reader_process.stderr], (), (), 0.05)
+ reader_process.send_signal(signal.SIGINT)
+ # Give the subprocess time to handle it before we loop around and
+ # send another one. On OSX the second signal happening close to
+ # immediately after the first was causing the subprocess to crash
+ # via the OS's default SIGINT handler.
+ time.sleep(0.1)
+ signals_sent += 1
+ if signals_sent > 200:
+ reader_process.kill()
+ self.fail("failed to handle signal during %s." % method_name)
+ # This assumes anything unexpected that writes to stderr will also
+ # write a newline. That is true of the traceback printing code.
+ signal_line = reader_process.stderr.readline()
+ if signal_line != '$\n':
+ reader_process.kill()
+ self.fail('Error from %s process while awaiting signal:\n%s' % (
+ method_name, signal_line+reader_process.stderr.read()))
+ # We append a newline to our input so that a readline call can
+ # end on its own before the EOF is seen.
+ stdout, stderr = reader_process.communicate(input='\n')
+ if reader_process.returncode != 0:
+ self.fail('%s() process exited rc=%d.\nSTDOUT:\n%s\nSTDERR:\n%s' % (
+ method_name, reader_process.returncode, stdout, stderr))
+
+ def test_readline(self, universal_newlines=False):
+ """file.readline must handle signals and not lose data."""
+ self._test_reading(
+ data_to_write='hello, world!',
+ read_and_verify_code=(
+ 'line = infile.readline() ;'
+ 'expected_line = "hello, world!\\n" ;'
+ 'assert line == expected_line, ('
+ '"read %r expected %r" % (line, expected_line))'
+ ),
+ method_name='readline',
+ universal_newlines=universal_newlines)
+
+ def test_readline_with_universal_newlines(self):
+ self.test_readline(universal_newlines=True)
+
+ def test_readlines(self, universal_newlines=False):
+ """file.readlines must handle signals and not lose data."""
+ self._test_reading(
+ data_to_write='hello\nworld!',
+ read_and_verify_code=(
+ 'lines = infile.readlines() ;'
+ 'expected_lines = ["hello\\n", "world!\\n"] ;'
+ 'assert lines == expected_lines, ('
+ '"readlines returned wrong data.\\n" '
+ '"got lines %r\\nexpected %r" '
+ '% (lines, expected_lines))'
+ ),
+ method_name='readlines',
+ universal_newlines=universal_newlines)
+
+ def test_readlines_with_universal_newlines(self):
+ self.test_readlines(universal_newlines=True)
+
+ def test_readall(self):
+ """Unbounded file.read() must handle signals and not lose data."""
+ self._test_reading(
+ data_to_write='hello, world!abcdefghijklm',
+ read_and_verify_code=(
+ 'data = infile.read() ;'
+ 'expected_data = "hello, world!abcdefghijklm\\n";'
+ 'assert data == expected_data, ('
+ '"read %r expected %r" % (data, expected_data))'
+ ),
+ method_name='unbounded read')
+
+ def test_readinto(self):
+ """file.readinto must handle signals and not lose data."""
+ self._test_reading(
+ data_to_write='hello, world!',
+ read_and_verify_code=(
+ 'data = bytearray(50) ;'
+ 'num_read = infile.readinto(data) ;'
+ 'expected_data = "hello, world!\\n";'
+ 'assert data[:num_read] == expected_data, ('
+ '"read %r expected %r" % (data, expected_data))'
+ ),
+ method_name='readinto')
+
+
class StdoutTests(unittest.TestCase):
def test_move_stdout_on_write(self):
@@ -671,7 +844,7 @@ def test_main():
# So get rid of it no matter what.
try:
run_unittest(AutoFileTests, OtherFileTests, FileSubclassTests,
- FileThreadingTests, StdoutTests)
+ FileThreadingTests, TestFileSignalEINTR, StdoutTests)
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
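
The test_write_full addition above checks that a failed flush surfaces as IOError rather than being silently dropped. A rough sketch of the same situation outside the test harness, assuming a Linux-style /dev/full device is available:

    import errno

    try:
        f = open('/dev/full', 'w', 1)      # line-buffered writes
    except IOError:
        f = None                           # no /dev/full on this system
    if f is not None:
        try:
            f.write('hello')
            f.write('\n')                  # the flush hits ENOSPC
        except IOError as e:
            assert e.errno == errno.ENOSPC
        finally:
            try:
                f.close()
            except IOError:
                pass                       # close() may flush and fail again
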
diff --git a/Lib/test/test_file_eintr.py b/Lib/test/test_file_eintr.py
new file mode 100644
index 0000000..76bff3a
--- /dev/null
+++ b/Lib/test/test_file_eintr.py
@@ -0,0 +1,239 @@
+# Written to test interrupted system calls interfering with our many buffered
+# IO implementations. http://bugs.python.org/issue12268
+#
+# This tests the '_io' module. Similar tests for Python 2.x's older
+# default file I/O implementation exist within test_file2k.py.
+#
+# It was suggested that this code could be merged into test_io and the tests
+# made to work using the same method as the existing signal tests in test_io.
+# I was unable to get single process tests using alarm or setitimer that way
+# to reproduce the EINTR problems. This process based test suite reproduces
+# the problems prior to the issue12268 patch reliably on Linux and OSX.
+# - gregory.p.smith
+
+import os
+import select
+import signal
+import subprocess
+import sys
+from test.test_support import run_unittest
+import time
+import unittest
+
+# Test import all of the things we're about to try testing up front.
+from _io import FileIO
+
+
+@unittest.skipUnless(os.name == 'posix', 'tests require a posix system.')
+class TestFileIOSignalInterrupt(unittest.TestCase):
+ def setUp(self):
+ self._process = None
+
+ def tearDown(self):
+ if self._process and self._process.poll() is None:
+ try:
+ self._process.kill()
+ except OSError:
+ pass
+
+ def _generate_infile_setup_code(self):
+ """Returns the infile = ... line of code for the reader process.
+
+ subclasses should override this to test different IO objects.
+ """
+ return ('import _io ;'
+ 'infile = _io.FileIO(sys.stdin.fileno(), "rb")')
+
+ def fail_with_process_info(self, why, stdout=b'', stderr=b'',
+ communicate=True):
+ """A common way to cleanup and fail with useful debug output.
+
+ Kills the process if it is still running, collects remaining output
+ and fails the test with an error message including the output.
+
+ Args:
+ why: Text to go after "Error from IO process" in the message.
+ stdout, stderr: standard output and error from the process so
+ far to include in the error message.
+ communicate: bool, when True we call communicate() on the process
+ after killing it to gather additional output.
+ """
+ if self._process.poll() is None:
+ time.sleep(0.1) # give it time to finish printing the error.
+ try:
+ self._process.terminate() # Ensure it dies.
+ except OSError:
+ pass
+ if communicate:
+ stdout_end, stderr_end = self._process.communicate()
+ stdout += stdout_end
+ stderr += stderr_end
+ self.fail('Error from IO process %s:\nSTDOUT:\n%sSTDERR:\n%s\n' %
+ (why, stdout.decode(), stderr.decode()))
+
+ def _test_reading(self, data_to_write, read_and_verify_code):
+ """Generic buffered read method test harness to validate EINTR behavior.
+
+ Also validates that Python signal handlers are run during the read.
+
+ Args:
+ data_to_write: String to write to the child process for reading
+ before sending it a signal, confirming the signal was handled,
+ writing a final newline and closing the infile pipe.
+ read_and_verify_code: Single "line" of code to read from a file
+ object named 'infile' and validate the result. This will be
+ executed as part of a python subprocess fed data_to_write.
+ """
+ infile_setup_code = self._generate_infile_setup_code()
+ # Total pipe IO in this function is smaller than the minimum posix OS
+ # pipe buffer size of 512 bytes. No writer should block.
+ assert len(data_to_write) < 512, 'data_to_write must fit in pipe buf.'
+
+ # Start a subprocess to call our read method while handling a signal.
+ self._process = subprocess.Popen(
+ [sys.executable, '-u', '-c',
+ 'import io, signal, sys ;'
+ 'signal.signal(signal.SIGINT, '
+ 'lambda s, f: sys.stderr.write("$\\n")) ;'
+ + infile_setup_code + ' ;' +
+ 'sys.stderr.write("Worm Sign!\\n") ;'
+ + read_and_verify_code + ' ;' +
+ 'infile.close()'
+ ],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ # Wait for the signal handler to be installed.
+ worm_sign = self._process.stderr.read(len(b'Worm Sign!\n'))
+ if worm_sign != b'Worm Sign!\n': # See also, Dune by Frank Herbert.
+ self.fail_with_process_info('while awaiting a sign',
+ stderr=worm_sign)
+ self._process.stdin.write(data_to_write)
+
+ signals_sent = 0
+ rlist = []
+ # We don't know when the read_and_verify_code in our child is actually
+ # executing within the read system call we want to interrupt. This
+ # loop waits for a bit before sending the first signal to increase
+ # the likelihood of that. Implementations without correct EINTR
+ # and signal handling usually fail this test.
+ while not rlist:
+ rlist, _, _ = select.select([self._process.stderr], (), (), 0.05)
+ self._process.send_signal(signal.SIGINT)
+ signals_sent += 1
+ if signals_sent > 200:
+ self._process.kill()
+ self.fail('reader process failed to handle our signals.')
+ # This assumes anything unexpected that writes to stderr will also
+ # write a newline. That is true of the traceback printing code.
+ signal_line = self._process.stderr.readline()
+ if signal_line != b'$\n':
+ self.fail_with_process_info('while awaiting signal',
+ stderr=signal_line)
+
+ # We append a newline to our input so that a readline call can
+ # end on its own before the EOF is seen and so that we're testing
+ # the read call that was interrupted by a signal before the end of
+ # the data stream has been reached.
+ stdout, stderr = self._process.communicate(input=b'\n')
+ if self._process.returncode:
+ self.fail_with_process_info(
+ 'exited rc=%d' % self._process.returncode,
+ stdout, stderr, communicate=False)
+ # PASS!
+
+ # String format for the read_and_verify_code used by read methods.
+ _READING_CODE_TEMPLATE = (
+ 'got = infile.{read_method_name}() ;'
+ 'expected = {expected!r} ;'
+ 'assert got == expected, ('
+ '"{read_method_name} returned wrong data.\\n"'
+ '"got data %r\\nexpected %r" % (got, expected))'
+ )
+
+ def test_readline(self):
+ """readline() must handle signals and not lose data."""
+ self._test_reading(
+ data_to_write=b'hello, world!',
+ read_and_verify_code=self._READING_CODE_TEMPLATE.format(
+ read_method_name='readline',
+ expected=b'hello, world!\n'))
+
+ def test_readlines(self):
+ """readlines() must handle signals and not lose data."""
+ self._test_reading(
+ data_to_write=b'hello\nworld!',
+ read_and_verify_code=self._READING_CODE_TEMPLATE.format(
+ read_method_name='readlines',
+ expected=[b'hello\n', b'world!\n']))
+
+ def test_readall(self):
+ """readall() must handle signals and not lose data."""
+ self._test_reading(
+ data_to_write=b'hello\nworld!',
+ read_and_verify_code=self._READING_CODE_TEMPLATE.format(
+ read_method_name='readall',
+ expected=b'hello\nworld!\n'))
+ # read() is the same thing as readall().
+ self._test_reading(
+ data_to_write=b'hello\nworld!',
+ read_and_verify_code=self._READING_CODE_TEMPLATE.format(
+ read_method_name='read',
+ expected=b'hello\nworld!\n'))
+
+
+class TestBufferedIOSignalInterrupt(TestFileIOSignalInterrupt):
+ def _generate_infile_setup_code(self):
+ """Returns the infile = ... line of code to make a BufferedReader."""
+ return ('infile = io.open(sys.stdin.fileno(), "rb") ;'
+ 'import _io ;assert isinstance(infile, _io.BufferedReader)')
+
+ def test_readall(self):
+ """BufferedReader.read() must handle signals and not lose data."""
+ self._test_reading(
+ data_to_write=b'hello\nworld!',
+ read_and_verify_code=self._READING_CODE_TEMPLATE.format(
+ read_method_name='read',
+ expected=b'hello\nworld!\n'))
+
+
+class TestTextIOSignalInterrupt(TestFileIOSignalInterrupt):
+ def _generate_infile_setup_code(self):
+ """Returns the infile = ... line of code to make a TextIOWrapper."""
+ return ('infile = io.open(sys.stdin.fileno(), "rt", newline=None) ;'
+ 'import _io ;assert isinstance(infile, _io.TextIOWrapper)')
+
+ def test_readline(self):
+ """readline() must handle signals and not lose data."""
+ self._test_reading(
+ data_to_write=b'hello, world!',
+ read_and_verify_code=self._READING_CODE_TEMPLATE.format(
+ read_method_name='readline',
+ expected='hello, world!\n'))
+
+ def test_readlines(self):
+ """readlines() must handle signals and not lose data."""
+ self._test_reading(
+ data_to_write=b'hello\r\nworld!',
+ read_and_verify_code=self._READING_CODE_TEMPLATE.format(
+ read_method_name='readlines',
+ expected=['hello\n', 'world!\n']))
+
+ def test_readall(self):
+ """read() must handle signals and not lose data."""
+ self._test_reading(
+ data_to_write=b'hello\nworld!',
+ read_and_verify_code=self._READING_CODE_TEMPLATE.format(
+ read_method_name='read',
+ expected="hello\nworld!\n"))
+
+
+def test_main():
+ test_cases = [
+ tc for tc in globals().values()
+ if isinstance(tc, type) and issubclass(tc, unittest.TestCase)]
+ run_unittest(*test_cases)
+
+
+if __name__ == '__main__':
+ test_main()
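
Both EINTR suites follow the same pattern: park a child process in a blocking buffered read, interrupt it with SIGINT, and check that no data is dropped once the line is finally completed. A stripped-down, timing-based sketch of that idea (POSIX only, not deterministic, and assuming the 2.7.8 EINTR fixes are in place):

    import signal
    import subprocess
    import sys
    import time

    child_code = (
        "import signal, sys\n"
        "signal.signal(signal.SIGINT, lambda s, f: sys.stderr.write('handled\\n'))\n"
        "sys.stderr.write('ready\\n')\n"
        "sys.stdout.write(sys.stdin.readline())\n"
    )
    proc = subprocess.Popen([sys.executable, '-u', '-c', child_code],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    proc.stderr.readline()             # wait until the handler is installed
    proc.stdin.write('partial data ')  # no newline yet, readline() stays blocked
    time.sleep(0.2)
    proc.send_signal(signal.SIGINT)    # interrupts the blocked read (EINTR)
    time.sleep(0.2)
    out, _ = proc.communicate('rest\n')
    assert out == 'partial data rest\n'   # nothing lost across the signal
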
diff --git a/Lib/test/test_fileinput.py b/Lib/test/test_fileinput.py
index 84aed1a..c15ad84 100644
--- a/Lib/test/test_fileinput.py
+++ b/Lib/test/test_fileinput.py
@@ -218,8 +218,49 @@ class FileInputTests(unittest.TestCase):
finally:
remove_tempfiles(t1)
+ def test_readline(self):
+ with open(TESTFN, 'wb') as f:
+ f.write('A\nB\r\nC\r')
+ # Fill TextIOWrapper buffer.
+ f.write('123456789\n' * 1000)
+ # Issue #20501: readline() shouldn't read whole file.
+ f.write('\x80')
+ self.addCleanup(safe_unlink, TESTFN)
+
+ fi = FileInput(files=TESTFN, openhook=hook_encoded('ascii'), bufsize=8)
+ # The most likely failure is a UnicodeDecodeError due to the entire
+ # file being read when it shouldn't have been.
+ self.assertEqual(fi.readline(), u'A\n')
+ self.assertEqual(fi.readline(), u'B\r\n')
+ self.assertEqual(fi.readline(), u'C\r')
+ with self.assertRaises(UnicodeDecodeError):
+ # Read to the end of file.
+ list(fi)
+ fi.close()
+
+class Test_hook_encoded(unittest.TestCase):
+ """Unit tests for fileinput.hook_encoded()"""
+
+ def test_modes(self):
+ with open(TESTFN, 'wb') as f:
+ # UTF-7 is a convenient, seldom used encoding
+ f.write('A\nB\r\nC\rD+IKw-')
+ self.addCleanup(safe_unlink, TESTFN)
+
+ def check(mode, expected_lines):
+ fi = FileInput(files=TESTFN, mode=mode,
+ openhook=hook_encoded('utf-7'))
+ lines = list(fi)
+ fi.close()
+ self.assertEqual(lines, expected_lines)
+
+ check('r', [u'A\n', u'B\r\n', u'C\r', u'D\u20ac'])
+ check('rU', [u'A\n', u'B\r\n', u'C\r', u'D\u20ac'])
+ check('U', [u'A\n', u'B\r\n', u'C\r', u'D\u20ac'])
+ check('rb', [u'A\n', u'B\r\n', u'C\r', u'D\u20ac'])
+
def test_main():
- run_unittest(BufferSizesTests, FileInputTests)
+ run_unittest(BufferSizesTests, FileInputTests, Test_hook_encoded)
if __name__ == "__main__":
test_main()
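
The new Test_hook_encoded cases exercise fileinput's openhook support. In ordinary use the hook simply makes FileInput yield decoded unicode lines; a small sketch, using a hypothetical latin-1 file written on the spot:

    import fileinput
    import os

    fname = 'latin1_sample.txt'                     # hypothetical scratch file
    with open(fname, 'wb') as f:
        f.write(u'caf\xe9\nmen\xfc\n'.encode('latin-1'))

    fi = fileinput.FileInput(files=[fname],
                             openhook=fileinput.hook_encoded('latin-1'))
    lines = list(fi)
    fi.close()
    os.unlink(fname)
    assert lines == [u'caf\xe9\n', u'men\xfc\n']    # decoded lazily, line by line
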
diff --git a/Lib/test/test_fileio.py b/Lib/test/test_fileio.py
index 9688ffc..b45d79b 100644
--- a/Lib/test/test_fileio.py
+++ b/Lib/test/test_fileio.py
@@ -9,9 +9,10 @@ import unittest
from array import array
from weakref import proxy
from functools import wraps
+from UserList import UserList
from test.test_support import TESTFN, check_warnings, run_unittest, make_bad_fd
-from test.test_support import py3k_bytes as bytes
+from test.test_support import py3k_bytes as bytes, cpython_only
from test.script_helper import run_python
from _io import FileIO as _FileIO
@@ -71,6 +72,26 @@ class AutoFileTests(unittest.TestCase):
n = self.f.readinto(a)
self.assertEqual(array(b'b', [1, 2]), a[:n])
+ def testWritelinesList(self):
+ l = [b'123', b'456']
+ self.f.writelines(l)
+ self.f.close()
+ self.f = _FileIO(TESTFN, 'rb')
+ buf = self.f.read()
+ self.assertEqual(buf, b'123456')
+
+ def testWritelinesUserList(self):
+ l = UserList([b'123', b'456'])
+ self.f.writelines(l)
+ self.f.close()
+ self.f = _FileIO(TESTFN, 'rb')
+ buf = self.f.read()
+ self.assertEqual(buf, b'123456')
+
+ def testWritelinesError(self):
+ self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
+ self.assertRaises(TypeError, self.f.writelines, None)
+
def test_none_args(self):
self.f.write(b"hi\nbye\nabc")
self.f.close()
@@ -130,6 +151,14 @@ class AutoFileTests(unittest.TestCase):
else:
self.fail("Should have raised IOError")
+ @unittest.skipIf(os.name == 'nt', "test only works on a POSIX-like system")
+ def testOpenDirFD(self):
+ fd = os.open('.', os.O_RDONLY)
+ with self.assertRaises(IOError) as cm:
+ _FileIO(fd, 'r')
+ os.close(fd)
+ self.assertEqual(cm.exception.errno, errno.EISDIR)
+
#A set of functions testing that we get expected behaviour if someone has
#manually closed the internal file descriptor. First, a decorator:
def ClosedFD(func):
@@ -253,29 +282,30 @@ class OtherFileTests(unittest.TestCase):
self.assertEqual(f.seekable(), True)
self.assertEqual(f.isatty(), False)
f.close()
-
- if sys.platform != "win32":
- try:
- f = _FileIO("/dev/tty", "a")
- except EnvironmentError:
- # When run in a cron job there just aren't any
- # ttys, so skip the test. This also handles other
- # OS'es that don't support /dev/tty.
- pass
- else:
- self.assertEqual(f.readable(), False)
- self.assertEqual(f.writable(), True)
- if sys.platform != "darwin" and \
- 'bsd' not in sys.platform and \
- not sys.platform.startswith('sunos'):
- # Somehow /dev/tty appears seekable on some BSDs
- self.assertEqual(f.seekable(), False)
- self.assertEqual(f.isatty(), True)
- f.close()
finally:
os.unlink(TESTFN)
- def testModeStrings(self):
+ @unittest.skipIf(sys.platform == 'win32', 'no ttys on Windows')
+ def testAblesOnTTY(self):
+ try:
+ f = _FileIO("/dev/tty", "a")
+ except EnvironmentError:
+ # When run in a cron job there just aren't any
+ # ttys, so skip the test. This also handles other
+ # OS'es that don't support /dev/tty.
+ self.skipTest('need /dev/tty')
+ else:
+ self.assertEqual(f.readable(), False)
+ self.assertEqual(f.writable(), True)
+ if sys.platform != "darwin" and \
+ 'bsd' not in sys.platform and \
+ not sys.platform.startswith('sunos'):
+ # Somehow /dev/tty appears seekable on some BSDs
+ self.assertEqual(f.seekable(), False)
+ self.assertEqual(f.isatty(), True)
+ f.close()
+
+ def testInvalidModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+", "rw", "rt"):
try:
@@ -286,6 +316,21 @@ class OtherFileTests(unittest.TestCase):
f.close()
self.fail('%r is an invalid file mode' % mode)
+ def testModeStrings(self):
+ # test that the mode attribute is correct for various mode strings
+ # given as init args
+ try:
+ for modes in [('w', 'wb'), ('wb', 'wb'), ('wb+', 'rb+'),
+ ('w+b', 'rb+'), ('a', 'ab'), ('ab', 'ab'),
+ ('ab+', 'ab+'), ('a+b', 'ab+'), ('r', 'rb'),
+ ('rb', 'rb'), ('rb+', 'rb+'), ('r+b', 'rb+')]:
+ # read modes are last so that TESTFN will exist first
+ with _FileIO(TESTFN, modes[0]) as f:
+ self.assertEqual(f.mode, modes[1])
+ finally:
+ if os.path.exists(TESTFN):
+ os.unlink(TESTFN)
+
def testUnicodeOpen(self):
# verify repr works for unicode too
f = _FileIO(str(TESTFN), "w")
@@ -297,8 +342,7 @@ class OtherFileTests(unittest.TestCase):
try:
fn = TESTFN.encode("ascii")
except UnicodeEncodeError:
- # Skip test
- return
+ self.skipTest('could not encode %r to ascii' % TESTFN)
f = _FileIO(fn, "w")
try:
f.write(b"abc")
@@ -315,6 +359,13 @@ class OtherFileTests(unittest.TestCase):
import msvcrt
self.assertRaises(IOError, msvcrt.get_osfhandle, make_bad_fd())
+ @cpython_only
+ def testInvalidFd_overflow(self):
+ # Issue 15989
+ import _testcapi
+ self.assertRaises(TypeError, _FileIO, _testcapi.INT_MAX + 1)
+ self.assertRaises(TypeError, _FileIO, _testcapi.INT_MIN - 1)
+
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
@@ -417,10 +468,22 @@ class OtherFileTests(unittest.TestCase):
env = dict(os.environ)
env[b'LC_CTYPE'] = b'C'
_, out = run_python('-c', 'import _io; _io.FileIO(%r)' % filename, env=env)
- if ('UnicodeEncodeError' not in out and
- 'IOError: [Errno 2] No such file or directory' not in out):
+ if ('UnicodeEncodeError' not in out and not
+ ( ('IOError: [Errno 2] No such file or directory' in out) or
+ ('IOError: [Errno 22] Invalid argument' in out) ) ):
self.fail('Bad output: %r' % out)
+ def testUnclosedFDOnException(self):
+ class MyException(Exception): pass
+ class MyFileIO(_FileIO):
+ def __setattr__(self, name, value):
+ if name == "name":
+ raise MyException("blocked setting name")
+ return super(MyFileIO, self).__setattr__(name, value)
+ fd = os.open(__file__, os.O_RDONLY)
+ self.assertRaises(MyException, MyFileIO, fd)
+ os.close(fd) # should not raise OSError(EBADF)
+
def test_main():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
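
testModeStrings above pins down how FileIO normalises its mode strings, and testInvalidModeStrings the ones it refuses. A rough sketch of the same behaviour through the public io module (the scratch file name is made up):

    import io
    import os

    fn = 'fileio_mode.tmp'                 # hypothetical scratch file
    try:
        f = io.FileIO(fn, 'w+b')
        print f.mode                       # reported as 'rb+' (readable + writable)
        f.close()
        io.FileIO(fn, 'rw')                # not a valid FileIO mode string
    except ValueError:
        pass                               # 'rw' is rejected
    finally:
        if os.path.exists(fn):
            os.unlink(fn)
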
diff --git a/Lib/test/test_float.py b/Lib/test/test_float.py
index 4246371..e6779c4 100644
--- a/Lib/test/test_float.py
+++ b/Lib/test/test_float.py
@@ -101,7 +101,7 @@ class GeneralFloatCases(unittest.TestCase):
# it still has to accept the normal python syntax
import locale
if not locale.localeconv()['decimal_point'] == ',':
- return
+ self.skipTest('decimal_point is not ","')
self.assertEqual(float(" 3.14 "), 3.14)
self.assertEqual(float("+3.14 "), 3.14)
diff --git a/Lib/test/test_format.py b/Lib/test/test_format.py
index f39426b..ba3399e 100644
--- a/Lib/test/test_format.py
+++ b/Lib/test/test_format.py
@@ -234,6 +234,16 @@ class FormatTest(unittest.TestCase):
testformat('%g', 1.1, '1.1')
testformat('%#g', 1.1, '1.10000')
+ # Regression test for http://bugs.python.org/issue15516.
+ class IntFails(object):
+ def __int__(self):
+ raise TestFailed
+ def __long__(self):
+ return 0
+
+ fst = IntFails()
+ testformat("%x", fst, '0')
+
# Test exception for unknown format characters
if verbose:
print 'Testing exceptions'
@@ -292,6 +302,33 @@ class FormatTest(unittest.TestCase):
def test_main():
test_support.run_unittest(FormatTest)
+ def test_precision(self):
+ f = 1.2
+ self.assertEqual(format(f, ".0f"), "1")
+ self.assertEqual(format(f, ".3f"), "1.200")
+ with self.assertRaises(ValueError) as cm:
+ format(f, ".%sf" % (sys.maxsize + 1))
+ self.assertEqual(str(cm.exception), "precision too big")
+
+ c = complex(f)
+ self.assertEqual(format(c, ".0f"), "1+0j")
+ self.assertEqual(format(c, ".3f"), "1.200+0.000j")
+ with self.assertRaises(ValueError) as cm:
+ format(c, ".%sf" % (sys.maxsize + 1))
+ self.assertEqual(str(cm.exception), "precision too big")
+
+ @test_support.cpython_only
+ def test_precision_c_limits(self):
+ from _testcapi import INT_MAX
+
+ f = 1.2
+ with self.assertRaises(ValueError) as cm:
+ format(f, ".%sf" % (INT_MAX + 1))
+
+ c = complex(f)
+ with self.assertRaises(ValueError) as cm:
+ format(c, ".%sf" % (INT_MAX + 1))
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_fractions.py b/Lib/test/test_fractions.py
index a798477..de09bb0 100644
--- a/Lib/test/test_fractions.py
+++ b/Lib/test/test_fractions.py
@@ -6,6 +6,7 @@ import math
import numbers
import operator
import fractions
+import sys
import unittest
from copy import copy, deepcopy
from cPickle import dumps, loads
@@ -88,6 +89,9 @@ class DummyRational(object):
__hash__ = None
+class DummyFraction(fractions.Fraction):
+ """Dummy Fraction subclass for copy and deepcopy testing."""
+
class GcdTest(unittest.TestCase):
def testMisc(self):
@@ -301,11 +305,15 @@ class FractionTest(unittest.TestCase):
self.assertEqual(F(201, 200).limit_denominator(100), F(1))
self.assertEqual(F(201, 200).limit_denominator(101), F(102, 101))
self.assertEqual(F(0).limit_denominator(10000), F(0))
+ for i in (0, -1):
+ self.assertRaisesMessage(
+ ValueError, "max_denominator should be at least 1",
+ F(1).limit_denominator, i)
def testConversions(self):
self.assertTypedEquals(-1, math.trunc(F(-11, 10)))
self.assertTypedEquals(-1, int(F(-11, 10)))
-
+ self.assertTypedEquals(1, math.trunc(F(11, 10)))
self.assertEqual(False, bool(F(0, 1)))
self.assertEqual(True, bool(F(3, 2)))
self.assertTypedEquals(0.1, float(F(1, 10)))
@@ -330,6 +338,7 @@ class FractionTest(unittest.TestCase):
self.assertEqual(F(8, 27), F(2, 3) ** F(3))
self.assertEqual(F(27, 8), F(2, 3) ** F(-3))
self.assertTypedEquals(2.0, F(4) ** F(1, 2))
+ self.assertEqual(F(1, 1), +F(1, 1))
# Will return 1j in 3.0:
self.assertRaises(ValueError, pow, F(-1), F(1, 2))
@@ -394,6 +403,10 @@ class FractionTest(unittest.TestCase):
TypeError,
"unsupported operand type(s) for +: 'Fraction' and 'Decimal'",
operator.add, F(3,11), Decimal('3.1415926'))
+ self.assertRaisesMessage(
+ TypeError,
+ "unsupported operand type(s) for +: 'Decimal' and 'Fraction'",
+ operator.add, Decimal('3.1415926'), F(3,11))
self.assertNotEqual(F(5, 2), Decimal('2.5'))
def testComparisons(self):
@@ -571,9 +584,14 @@ class FractionTest(unittest.TestCase):
def test_copy_deepcopy_pickle(self):
r = F(13, 7)
+ dr = DummyFraction(13, 7)
self.assertEqual(r, loads(dumps(r)))
self.assertEqual(id(r), id(copy(r)))
self.assertEqual(id(r), id(deepcopy(r)))
+ self.assertNotEqual(id(dr), id(copy(dr)))
+ self.assertNotEqual(id(dr), id(deepcopy(dr)))
+ self.assertTypedEquals(dr, copy(dr))
+ self.assertTypedEquals(dr, deepcopy(dr))
def test_slots(self):
# Issue 4998
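
limit_denominator() returns the closest fraction whose denominator stays under the given bound; the new check simply rejects bounds below 1. A short sketch (the 355/113 value is the standard example from the fractions documentation):

    from fractions import Fraction

    print Fraction('3.1415926535897932').limit_denominator(1000)   # 355/113

    try:
        Fraction(1).limit_denominator(0)    # now rejected
    except ValueError as e:
        print e                             # max_denominator should be at least 1
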
diff --git a/Lib/test/test_ftplib.py b/Lib/test/test_ftplib.py
index c82e8a6..b3d8f7e 100644
--- a/Lib/test/test_ftplib.py
+++ b/Lib/test/test_ftplib.py
@@ -15,9 +15,9 @@ try:
except ImportError:
ssl = None
-from unittest import TestCase
+from unittest import TestCase, SkipTest, skipUnless
from test import test_support
-from test.test_support import HOST
+from test.test_support import HOST, HOSTv6
threading = test_support.import_module('threading')
@@ -65,6 +65,7 @@ class DummyFTPHandler(asynchat.async_chat):
self.last_received_data = ''
self.next_response = ''
self.rest = None
+ self.next_retr_data = RETR_DATA
self.push('220 welcome')
def collect_incoming_data(self, data):
@@ -189,7 +190,7 @@ class DummyFTPHandler(asynchat.async_chat):
offset = int(self.rest)
else:
offset = 0
- self.dtp.push(RETR_DATA[offset:])
+ self.dtp.push(self.next_retr_data[offset:])
self.dtp.close_when_done()
self.rest = None
@@ -203,6 +204,11 @@ class DummyFTPHandler(asynchat.async_chat):
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
+ def cmd_setlongretr(self, arg):
+ # For testing. Next RETR will return long line.
+ self.next_retr_data = 'x' * int(arg)
+ self.push('125 setlongretr ok')
+
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
@@ -474,6 +480,14 @@ class TestFTPClass(TestCase):
def test_rmd(self):
self.client.rmd('foo')
+ def test_cwd(self):
+ dir = self.client.cwd('/foo')
+ self.assertEqual(dir, '250 cwd ok')
+
+ def test_mkd(self):
+ dir = self.client.mkd('/foo')
+ self.assertEqual(dir, '/foo')
+
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
@@ -550,11 +564,33 @@ class TestFTPClass(TestCase):
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'pasv')
+ def test_line_too_long(self):
+ self.assertRaises(ftplib.Error, self.client.sendcmd,
+ 'x' * self.client.maxline * 2)
+
+ def test_retrlines_too_long(self):
+ self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
+ received = []
+ self.assertRaises(ftplib.Error,
+ self.client.retrlines, 'retr', received.append)
+
+ def test_storlines_too_long(self):
+ f = StringIO.StringIO('x' * self.client.maxline * 2)
+ self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
+
+@skipUnless(socket.has_ipv6, "IPv6 not enabled")
class TestIPv6Environment(TestCase):
+ @classmethod
+ def setUpClass(cls):
+ try:
+ DummyFTPServer((HOST, 0), af=socket.AF_INET6)
+ except socket.error:
+ raise SkipTest("IPv6 not enabled")
+
def setUp(self):
- self.server = DummyFTPServer((HOST, 0), af=socket.AF_INET6)
+ self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP()
self.client.connect(self.server.host, self.server.port)
@@ -587,6 +623,7 @@ class TestIPv6Environment(TestCase):
retr()
+@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
@@ -602,6 +639,7 @@ class TestTLS_FTPClassMixin(TestFTPClass):
self.client.prot_p()
+@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
@@ -702,10 +740,10 @@ class TestTimeouts(TestCase):
def testTimeoutDefault(self):
# default -- use global socket timeout
- self.assertTrue(socket.getdefaulttimeout() is None)
+ self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
- ftp = ftplib.FTP("localhost")
+ ftp = ftplib.FTP(HOST)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
@@ -714,13 +752,13 @@ class TestTimeouts(TestCase):
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
- self.assertTrue(socket.getdefaulttimeout() is None)
+ self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
- ftp = ftplib.FTP("localhost", timeout=None)
+ ftp = ftplib.FTP(HOST, timeout=None)
finally:
socket.setdefaulttimeout(None)
- self.assertTrue(ftp.sock.gettimeout() is None)
+ self.assertIsNone(ftp.sock.gettimeout())
self.evt.wait()
ftp.close()
@@ -755,17 +793,9 @@ class TestTimeouts(TestCase):
def test_main():
- tests = [TestFTPClass, TestTimeouts]
- if socket.has_ipv6:
- try:
- DummyFTPServer((HOST, 0), af=socket.AF_INET6)
- except socket.error:
- pass
- else:
- tests.append(TestIPv6Environment)
-
- if ssl is not None:
- tests.extend([TestTLS_FTPClassMixin, TestTLS_FTPClass])
+ tests = [TestFTPClass, TestTimeouts,
+ TestIPv6Environment,
+ TestTLS_FTPClassMixin, TestTLS_FTPClass]
thread_info = test_support.threading_setup()
try:
diff --git a/Lib/test/test_functools.py b/Lib/test/test_functools.py
index a713314..445ad9e 100644
--- a/Lib/test/test_functools.py
+++ b/Lib/test/test_functools.py
@@ -43,8 +43,6 @@ class TestPartial(unittest.TestCase):
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
# attributes should not be writable
- if not isinstance(self.thetype, type):
- return
self.assertRaises(TypeError, setattr, p, 'func', map)
self.assertRaises(TypeError, setattr, p, 'args', (1, 2))
self.assertRaises(TypeError, setattr, p, 'keywords', dict(a=1, b=2))
@@ -151,6 +149,23 @@ class TestPartial(unittest.TestCase):
f_copy = pickle.loads(pickle.dumps(f))
self.assertEqual(signature(f), signature(f_copy))
+ # Issue 6083: Reference counting bug
+ def test_setstate_refcount(self):
+ class BadSequence:
+ def __len__(self):
+ return 4
+ def __getitem__(self, key):
+ if key == 0:
+ return max
+ elif key == 1:
+ return tuple(range(1000000))
+ elif key in (2, 3):
+ return {}
+ raise IndexError
+
+ f = self.thetype(object)
+ self.assertRaises(SystemError, f.__setstate__, BadSequence())
+
class PartialSubclass(functools.partial):
pass
@@ -163,7 +178,10 @@ class TestPythonPartial(TestPartial):
thetype = PythonPartial
# the python version isn't picklable
- def test_pickle(self): pass
+ test_pickle = test_setstate_refcount = None
+
+ # the python version isn't a type
+ test_attributes = None
class TestUpdateWrapper(unittest.TestCase):
@@ -232,6 +250,7 @@ class TestUpdateWrapper(unittest.TestCase):
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
+ @test_support.requires_docstrings
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
@@ -258,7 +277,7 @@ class TestWraps(TestUpdateWrapper):
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.attr, 'This is also a test')
- @unittest.skipIf(not sys.flags.optimize <= 1,
+ @unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper = self._default_update()
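
The tightened attribute test relies on the C partial type exposing func, args and keywords as read-only attributes. A minimal sketch of those attributes in use:

    import functools
    import operator

    inc = functools.partial(operator.add, 1)
    print inc(41)                       # 42
    print inc.func is operator.add      # True
    print inc.args                      # (1,)

    try:
        inc.func = max                  # read-only on the C implementation
    except TypeError:
        pass
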
diff --git a/Lib/test/test_gc.py b/Lib/test/test_gc.py
index 5a35dec..fd874c3 100644
--- a/Lib/test/test_gc.py
+++ b/Lib/test/test_gc.py
@@ -1,9 +1,15 @@
import unittest
from test.test_support import verbose, run_unittest
import sys
+import time
import gc
import weakref
+try:
+ import threading
+except ImportError:
+ threading = None
+
### Support code
###############################################################################
@@ -299,6 +305,69 @@ class GCTests(unittest.TestCase):
v = {1: v, 2: Ouch()}
gc.disable()
+ @unittest.skipUnless(threading, "test meaningless on builds without threads")
+ def test_trashcan_threads(self):
+ # Issue #13992: trashcan mechanism should be thread-safe
+ NESTING = 60
+ N_THREADS = 2
+
+ def sleeper_gen():
+ """A generator that releases the GIL when closed or dealloc'ed."""
+ try:
+ yield
+ finally:
+ time.sleep(0.000001)
+
+ class C(list):
+ # Appending to a list is atomic, which avoids the use of a lock.
+ inits = []
+ dels = []
+ def __init__(self, alist):
+ self[:] = alist
+ C.inits.append(None)
+ def __del__(self):
+ # This __del__ is called by subtype_dealloc().
+ C.dels.append(None)
+ # `g` will release the GIL when garbage-collected. This
+ # helps assert subtype_dealloc's behaviour when threads
+ # switch in the middle of it.
+ g = sleeper_gen()
+ next(g)
+ # Now that __del__ is finished, subtype_dealloc will proceed
+ # to call list_dealloc, which also uses the trashcan mechanism.
+
+ def make_nested():
+ """Create a sufficiently nested container object so that the
+ trashcan mechanism is invoked when deallocating it."""
+ x = C([])
+ for i in range(NESTING):
+ x = [C([x])]
+ del x
+
+ def run_thread():
+ """Exercise make_nested() in a loop."""
+ while not exit:
+ make_nested()
+
+ old_checkinterval = sys.getcheckinterval()
+ sys.setcheckinterval(3)
+ try:
+ exit = False
+ threads = []
+ for i in range(N_THREADS):
+ t = threading.Thread(target=run_thread)
+ threads.append(t)
+ for t in threads:
+ t.start()
+ time.sleep(1.0)
+ exit = True
+ for t in threads:
+ t.join()
+ finally:
+ sys.setcheckinterval(old_checkinterval)
+ gc.collect()
+ self.assertEqual(len(C.inits), len(C.dels))
+
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
diff --git a/Lib/test/test_gdb.py b/Lib/test/test_gdb.py
index fe2c8e6..6bbe05c 100644
--- a/Lib/test/test_gdb.py
+++ b/Lib/test/test_gdb.py
@@ -19,19 +19,48 @@ except OSError:
# This is what "no gdb" looks like. There may, however, be other
# errors that manifest this way too.
raise unittest.SkipTest("Couldn't find gdb on the path")
-gdb_version_number = re.search(r"^GNU gdb [^\d]*(\d+)\.", gdb_version)
-if int(gdb_version_number.group(1)) < 7:
+gdb_version_number = re.search("^GNU gdb [^\d]*(\d+)\.(\d)", gdb_version)
+gdb_major_version = int(gdb_version_number.group(1))
+gdb_minor_version = int(gdb_version_number.group(2))
+if gdb_major_version < 7:
raise unittest.SkipTest("gdb versions before 7.0 didn't support python embedding"
" Saw:\n" + gdb_version)
+# Location of custom hooks file in a repository checkout.
+checkout_hook_path = os.path.join(os.path.dirname(sys.executable),
+ 'python-gdb.py')
+
+def run_gdb(*args, **env_vars):
+ """Runs gdb in --batch mode with the additional arguments given by *args.
+
+ Returns its (stdout, stderr)
+ """
+ if env_vars:
+ env = os.environ.copy()
+ env.update(env_vars)
+ else:
+ env = None
+ base_cmd = ('gdb', '--batch')
+ if (gdb_major_version, gdb_minor_version) >= (7, 4):
+ base_cmd += ('-iex', 'add-auto-load-safe-path ' + checkout_hook_path)
+ out, err = subprocess.Popen(base_cmd + args,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env,
+ ).communicate()
+ return out, err
+
# Verify that "gdb" was built with the embedded python support enabled:
-cmd = "--eval-command=python import sys; print sys.version_info"
-p = subprocess.Popen(["gdb", "--batch", cmd],
- stdout=subprocess.PIPE)
-gdbpy_version, _ = p.communicate()
-if gdbpy_version == '':
+gdbpy_version, _ = run_gdb("--eval-command=python import sys; print(sys.version_info)")
+if not gdbpy_version:
raise unittest.SkipTest("gdb not built with embedded python support")
+# Verify that "gdb" can load our custom hooks, as OS security settings may
+# disallow this without a customised .gdbinit.
+cmd = ['--args', sys.executable]
+_, gdbpy_errors = run_gdb('--args', sys.executable)
+if "auto-loading has been declined" in gdbpy_errors:
+ msg = "gdb security settings prevent use of custom hooks: "
+ raise unittest.SkipTest(msg + gdbpy_errors.rstrip())
+
def python_is_optimized():
cflags = sysconfig.get_config_vars()['PY_CFLAGS']
final_opt = ""
@@ -42,10 +71,7 @@ def python_is_optimized():
def gdb_has_frame_select():
# Does this build of gdb have gdb.Frame.select ?
- cmd = "--eval-command=python print(dir(gdb.Frame))"
- p = subprocess.Popen(["gdb", "--batch", cmd],
- stdout=subprocess.PIPE)
- stdout, _ = p.communicate()
+ stdout, _ = run_gdb("--eval-command=python print(dir(gdb.Frame))")
m = re.match(r'.*\[(.*)\].*', stdout)
if not m:
raise unittest.SkipTest("Unable to parse output from gdb.Frame.select test")
@@ -58,21 +84,6 @@ class DebuggerTests(unittest.TestCase):
"""Test that the debugger can debug Python."""
- def run_gdb(self, *args, **env_vars):
- """Runs gdb with the command line given by *args.
-
- Returns its stdout, stderr
- """
- if env_vars:
- env = os.environ.copy()
- env.update(env_vars)
- else:
- env = None
- out, err = subprocess.Popen(
- args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
- ).communicate()
- return out, err
-
def get_stack_trace(self, source=None, script=None,
breakpoint='PyObject_Print',
cmds_after_breakpoint=None,
@@ -129,22 +140,34 @@ class DebuggerTests(unittest.TestCase):
# print ' '.join(args)
# Use "args" to invoke gdb, capturing stdout, stderr:
- out, err = self.run_gdb(*args, PYTHONHASHSEED='0')
-
- # Ignore some noise on stderr due to the pending breakpoint:
- err = err.replace('Function "%s" not defined.\n' % breakpoint, '')
- # Ignore some other noise on stderr (http://bugs.python.org/issue8600)
- err = err.replace("warning: Unable to find libthread_db matching"
- " inferior's thread library, thread debugging will"
- " not be available.\n",
- '')
- err = err.replace("warning: Cannot initialize thread debugging"
- " library: Debugger service failed\n",
- '')
+ out, err = run_gdb(*args, PYTHONHASHSEED='0')
+
+ errlines = err.splitlines()
+ unexpected_errlines = []
+
+ # Ignore some benign messages on stderr.
+ ignore_patterns = (
+ 'Function "%s" not defined.' % breakpoint,
+ "warning: no loadable sections found in added symbol-file"
+ " system-supplied DSO",
+ "warning: Unable to find libthread_db matching"
+ " inferior's thread library, thread debugging will"
+ " not be available.",
+ "warning: Cannot initialize thread debugging"
+ " library: Debugger service failed",
+ 'warning: Could not load shared library symbols for '
+ 'linux-vdso.so',
+ 'warning: Could not load shared library symbols for '
+ 'linux-gate.so',
+ 'Do you need "set solib-search-path" or '
+ '"set sysroot"?',
+ )
+ for line in errlines:
+ if not line.startswith(ignore_patterns):
+ unexpected_errlines.append(line)
# Ensure no unexpected error messages:
- self.assertEqual(err, '')
-
+ self.assertEqual(unexpected_errlines, [])
return out
def get_gdb_repr(self, source,
@@ -191,7 +214,7 @@ class PrettyPrintTests(DebuggerTests):
# matches repr(value) in this process:
gdb_repr, gdb_output = self.get_gdb_repr('print ' + repr(val),
cmds_after_breakpoint)
- self.assertEqual(gdb_repr, repr(val), gdb_output)
+ self.assertEqual(gdb_repr, repr(val))
def test_int(self):
'Verify the pretty-printing of various "int" values'
diff --git a/Lib/test/test_generators.py b/Lib/test/test_generators.py
index 19bfe07..27399f7 100644
--- a/Lib/test/test_generators.py
+++ b/Lib/test/test_generators.py
@@ -383,7 +383,8 @@ From the Iterators list, about the types of these things.
<type 'generator'>
>>> [s for s in dir(i) if not s.startswith('_')]
['close', 'gi_code', 'gi_frame', 'gi_running', 'next', 'send', 'throw']
->>> print i.next.__doc__
+>>> from test.test_support import HAVE_DOCSTRINGS
+>>> print(i.next.__doc__ if HAVE_DOCSTRINGS else 'x.next() -> the next value, or raise StopIteration')
x.next() -> the next value, or raise StopIteration
>>> iter(i) is i
True
diff --git a/Lib/test/test_genericpath.py b/Lib/test/test_genericpath.py
index 3975b56..94380b1 100644
--- a/Lib/test/test_genericpath.py
+++ b/Lib/test/test_genericpath.py
@@ -199,13 +199,40 @@ class CommonTest(GenericTest):
self.assertEqual(expandvars("$[foo]bar"), "$[foo]bar")
self.assertEqual(expandvars("$bar bar"), "$bar bar")
self.assertEqual(expandvars("$?bar"), "$?bar")
- self.assertEqual(expandvars("${foo}bar"), "barbar")
self.assertEqual(expandvars("$foo}bar"), "bar}bar")
self.assertEqual(expandvars("${foo"), "${foo")
self.assertEqual(expandvars("${{foo}}"), "baz1}")
self.assertEqual(expandvars("$foo$foo"), "barbar")
self.assertEqual(expandvars("$bar$bar"), "$bar$bar")
+ @unittest.skipUnless(test_support.FS_NONASCII, 'need test_support.FS_NONASCII')
+ def test_expandvars_nonascii(self):
+ if self.pathmodule.__name__ == 'macpath':
+ self.skipTest('macpath.expandvars is a stub')
+ expandvars = self.pathmodule.expandvars
+ def check(value, expected):
+ self.assertEqual(expandvars(value), expected)
+ encoding = sys.getfilesystemencoding()
+ with test_support.EnvironmentVarGuard() as env:
+ env.clear()
+ unonascii = test_support.FS_NONASCII
+ snonascii = unonascii.encode(encoding)
+ env['spam'] = snonascii
+ env[snonascii] = 'ham' + snonascii
+ check(snonascii, snonascii)
+ check('$spam bar', '%s bar' % snonascii)
+ check('${spam}bar', '%sbar' % snonascii)
+ check('${%s}bar' % snonascii, 'ham%sbar' % snonascii)
+ check('$bar%s bar' % snonascii, '$bar%s bar' % snonascii)
+ check('$spam}bar', '%s}bar' % snonascii)
+
+ check(unonascii, unonascii)
+ check(u'$spam bar', u'%s bar' % unonascii)
+ check(u'${spam}bar', u'%sbar' % unonascii)
+ check(u'${%s}bar' % unonascii, u'ham%sbar' % unonascii)
+ check(u'$bar%s bar' % unonascii, u'$bar%s bar' % unonascii)
+ check(u'$spam}bar', u'%s}bar' % unonascii)
+
def test_abspath(self):
self.assertIn("foo", self.pathmodule.abspath("foo"))
diff --git a/Lib/test/test_genexps.py b/Lib/test/test_genexps.py
index 3d896a5..fc593a3 100644
--- a/Lib/test/test_genexps.py
+++ b/Lib/test/test_genexps.py
@@ -223,7 +223,8 @@ Check that generator attributes are present
>>> set(attr for attr in dir(g) if not attr.startswith('__')) >= expected
True
- >>> print g.next.__doc__
+ >>> from test.test_support import HAVE_DOCSTRINGS
+ >>> print(g.next.__doc__ if HAVE_DOCSTRINGS else 'x.next() -> the next value, or raise StopIteration')
x.next() -> the next value, or raise StopIteration
>>> import types
>>> isinstance(g, types.GeneratorType)
diff --git a/Lib/test/test_getargs2.py b/Lib/test/test_getargs2.py
index d06296b..aba304a 100644
--- a/Lib/test/test_getargs2.py
+++ b/Lib/test/test_getargs2.py
@@ -1,5 +1,7 @@
import unittest
from test import test_support
+# Skip this test if the _testcapi module isn't available.
+test_support.import_module('_testcapi')
from _testcapi import getargs_keywords
import warnings
@@ -42,6 +44,13 @@ from _testcapi import UCHAR_MAX, USHRT_MAX, UINT_MAX, ULONG_MAX, INT_MAX, \
INT_MIN, LONG_MIN, LONG_MAX, PY_SSIZE_T_MIN, PY_SSIZE_T_MAX, \
SHRT_MIN, SHRT_MAX
+try:
+ from _testcapi import getargs_L, getargs_K
+except ImportError:
+ _PY_LONG_LONG_available = False
+else:
+ _PY_LONG_LONG_available = True
+
# fake, they are not defined in Python's header files
LLONG_MAX = 2**63-1
LLONG_MIN = -2**63
@@ -208,6 +217,7 @@ class Signed_TestCase(unittest.TestCase):
self.assertRaises(OverflowError, getargs_n, VERY_LARGE)
+@unittest.skipUnless(_PY_LONG_LONG_available, 'PY_LONG_LONG not available')
class LongLong_TestCase(unittest.TestCase):
def test_L(self):
from _testcapi import getargs_L
@@ -322,13 +332,8 @@ class Keywords_TestCase(unittest.TestCase):
self.fail('TypeError should have been raised')
def test_main():
- tests = [Signed_TestCase, Unsigned_TestCase, Tuple_TestCase, Keywords_TestCase]
- try:
- from _testcapi import getargs_L, getargs_K
- except ImportError:
- pass # PY_LONG_LONG not available
- else:
- tests.append(LongLong_TestCase)
+ tests = [Signed_TestCase, Unsigned_TestCase, LongLong_TestCase,
+ Tuple_TestCase, Keywords_TestCase]
test_support.run_unittest(*tests)
if __name__ == "__main__":
diff --git a/Lib/test/test_gl.py b/Lib/test/test_gl.py
index c9264ee..92e1e8d 100755..100644
--- a/Lib/test/test_gl.py
+++ b/Lib/test/test_gl.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""Very simple test script for the SGI gl library extension module
taken mostly from the documentation.
Roger E. Masse
diff --git a/Lib/test/test_glob.py b/Lib/test/test_glob.py
index 692322d..b360d09 100644
--- a/Lib/test/test_glob.py
+++ b/Lib/test/test_glob.py
@@ -1,8 +1,15 @@
-import unittest
-from test.test_support import run_unittest, TESTFN
import glob
import os
import shutil
+import sys
+import unittest
+
+from test.test_support import run_unittest, TESTFN
+
+
+def fsdecode(s):
+ return unicode(s, sys.getfilesystemencoding())
+
class GlobTests(unittest.TestCase):
@@ -18,16 +25,19 @@ class GlobTests(unittest.TestCase):
f.close()
def setUp(self):
- self.tempdir = TESTFN+"_dir"
+ self.tempdir = TESTFN + "_dir"
self.mktemp('a', 'D')
self.mktemp('aab', 'F')
+ self.mktemp('.aa', 'G')
+ self.mktemp('.bb', 'H')
self.mktemp('aaa', 'zzzF')
self.mktemp('ZZZ')
self.mktemp('a', 'bcd', 'EF')
self.mktemp('a', 'bcd', 'efg', 'ha')
if hasattr(os, 'symlink'):
os.symlink(self.norm('broken'), self.norm('sym1'))
- os.symlink(self.norm('broken'), self.norm('sym2'))
+ os.symlink('broken', self.norm('sym2'))
+ os.symlink(os.path.join('a', 'bcd'), self.norm('sym3'))
def tearDown(self):
shutil.rmtree(self.tempdir)
@@ -40,10 +50,16 @@ class GlobTests(unittest.TestCase):
p = os.path.join(self.tempdir, pattern)
res = glob.glob(p)
self.assertEqual(list(glob.iglob(p)), res)
+ ures = [fsdecode(x) for x in res]
+ self.assertEqual(glob.glob(fsdecode(p)), ures)
+ self.assertEqual(list(glob.iglob(fsdecode(p))), ures)
return res
def assertSequencesEqual_noorder(self, l1, l2):
+ l1 = list(l1)
+ l2 = list(l2)
self.assertEqual(set(l1), set(l2))
+ self.assertEqual(sorted(l1), sorted(l2))
def test_glob_literal(self):
eq = self.assertSequencesEqual_noorder
@@ -52,20 +68,26 @@ class GlobTests(unittest.TestCase):
eq(self.glob('aab'), [self.norm('aab')])
eq(self.glob('zymurgy'), [])
+ res = glob.glob('*')
+ self.assertEqual({type(r) for r in res}, {str})
+ res = glob.glob(os.path.join(os.curdir, '*'))
+ self.assertEqual({type(r) for r in res}, {str})
+
# test return types are unicode, but only if os.listdir
# returns unicode filenames
- uniset = set([unicode])
- tmp = os.listdir(u'.')
- if set(type(x) for x in tmp) == uniset:
- u1 = glob.glob(u'*')
- u2 = glob.glob(u'./*')
- self.assertEqual(set(type(r) for r in u1), uniset)
- self.assertEqual(set(type(r) for r in u2), uniset)
+ tmp = os.listdir(fsdecode(os.curdir))
+ if {type(x) for x in tmp} == {unicode}:
+ res = glob.glob(u'*')
+ self.assertEqual({type(r) for r in res}, {unicode})
+ res = glob.glob(os.path.join(fsdecode(os.curdir), u'*'))
+ self.assertEqual({type(r) for r in res}, {unicode})
def test_glob_one_directory(self):
eq = self.assertSequencesEqual_noorder
eq(self.glob('a*'), map(self.norm, ['a', 'aab', 'aaa']))
eq(self.glob('*a'), map(self.norm, ['a', 'aaa']))
+ eq(self.glob('.*'), map(self.norm, ['.aa', '.bb']))
+ eq(self.glob('?aa'), map(self.norm, ['aaa']))
eq(self.glob('aa?'), map(self.norm, ['aaa', 'aab']))
eq(self.glob('aa[ab]'), map(self.norm, ['aaa', 'aab']))
eq(self.glob('*q'), [])
@@ -87,23 +109,68 @@ class GlobTests(unittest.TestCase):
eq(self.glob('*', '*a'), [])
eq(self.glob('a', '*', '*', '*a'),
[self.norm('a', 'bcd', 'efg', 'ha')])
- eq(self.glob('?a?', '*F'), map(self.norm, [os.path.join('aaa', 'zzzF'),
- os.path.join('aab', 'F')]))
+ eq(self.glob('?a?', '*F'), [self.norm('aaa', 'zzzF'),
+ self.norm('aab', 'F')])
def test_glob_directory_with_trailing_slash(self):
- # We are verifying that when there is wildcard pattern which
- # ends with os.sep doesn't blow up.
- res = glob.glob(self.tempdir + '*' + os.sep)
- self.assertEqual(len(res), 1)
- # either of these results are reasonable
- self.assertIn(res[0], [self.tempdir, self.tempdir + os.sep])
-
+ # Patterns ending with a slash shouldn't match non-dirs
+ res = glob.glob(self.norm('Z*Z') + os.sep)
+ self.assertEqual(res, [])
+ res = glob.glob(self.norm('ZZZ') + os.sep)
+ self.assertEqual(res, [])
+ # When there is a wildcard pattern which ends with os.sep, glob()
+ # doesn't blow up.
+ res = glob.glob(self.norm('aa*') + os.sep)
+ self.assertEqual(len(res), 2)
+ # either of these results is reasonable
+ self.assertIn(set(res), [
+ {self.norm('aaa'), self.norm('aab')},
+ {self.norm('aaa') + os.sep, self.norm('aab') + os.sep},
+ ])
+
+ def test_glob_unicode_directory_with_trailing_slash(self):
+ # Same as test_glob_directory_with_trailing_slash, but with an
+ # unicode argument.
+ res = glob.glob(fsdecode(self.norm('Z*Z') + os.sep))
+ self.assertEqual(res, [])
+ res = glob.glob(fsdecode(self.norm('ZZZ') + os.sep))
+ self.assertEqual(res, [])
+ res = glob.glob(fsdecode(self.norm('aa*') + os.sep))
+ self.assertEqual(len(res), 2)
+ # either of these results is reasonable
+ self.assertIn(set(res), [
+ {fsdecode(self.norm('aaa')), fsdecode(self.norm('aab'))},
+ {fsdecode(self.norm('aaa') + os.sep),
+ fsdecode(self.norm('aab') + os.sep)},
+ ])
+
+ @unittest.skipUnless(hasattr(os, 'symlink'), "Requires symlink support")
+ def test_glob_symlinks(self):
+ eq = self.assertSequencesEqual_noorder
+ eq(self.glob('sym3'), [self.norm('sym3')])
+ eq(self.glob('sym3', '*'), [self.norm('sym3', 'EF'),
+ self.norm('sym3', 'efg')])
+ self.assertIn(self.glob('sym3' + os.sep),
+ [[self.norm('sym3')], [self.norm('sym3') + os.sep]])
+ eq(self.glob('*', '*F'),
+ [self.norm('aaa', 'zzzF'), self.norm('aab', 'F'),
+ self.norm('sym3', 'EF')])
+
+ @unittest.skipUnless(hasattr(os, 'symlink'), "Requires symlink support")
def test_glob_broken_symlinks(self):
- if hasattr(os, 'symlink'):
- eq = self.assertSequencesEqual_noorder
- eq(self.glob('sym*'), [self.norm('sym1'), self.norm('sym2')])
- eq(self.glob('sym1'), [self.norm('sym1')])
- eq(self.glob('sym2'), [self.norm('sym2')])
+ eq = self.assertSequencesEqual_noorder
+ eq(self.glob('sym*'), [self.norm('sym1'), self.norm('sym2'),
+ self.norm('sym3')])
+ eq(self.glob('sym1'), [self.norm('sym1')])
+ eq(self.glob('sym2'), [self.norm('sym2')])
+
+ @unittest.skipUnless(sys.platform == "win32", "Win32 specific test")
+ def test_glob_magic_in_drive(self):
+ eq = self.assertSequencesEqual_noorder
+ eq(glob.glob('*:'), [])
+ eq(glob.glob(u'*:'), [])
+ eq(glob.glob('?:'), [])
+ eq(glob.glob(u'?:'), [])
def test_main():
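
The extra glob cases above cover hidden files, symlinked directories and trailing-slash patterns. Day-to-day the module is used like this (the paths are hypothetical and depend on the working directory):

    import glob
    import os

    print glob.glob('*.py')             # literal names plus * ? and [...] wildcards
    print glob.glob('.*')               # dot files must be matched explicitly
    print glob.glob('*' + os.sep)       # with this patch, only directories match
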
diff --git a/Lib/test/test_grammar.py b/Lib/test/test_grammar.py
index 86cc084..5f77c1d 100644
--- a/Lib/test/test_grammar.py
+++ b/Lib/test/test_grammar.py
@@ -75,6 +75,12 @@ class TokenTests(unittest.TestCase):
x = .3e14
x = 3.1e4
+ def test_float_exponent_tokenization(self):
+ # See issue 21642.
+ self.assertEqual(1 if 1else 0, 1)
+ self.assertEqual(1 if 0else 0, 0)
+ self.assertRaises(SyntaxError, eval, "0 if 1Else 0")
+
def testStringLiterals(self):
x = ''; y = ""; self.assertTrue(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assertTrue(len(x) == 1 and x == y and ord(x) == 39)
diff --git a/Lib/test/test_grp.py b/Lib/test/test_grp.py
index e9e1758..d31e39c 100644
--- a/Lib/test/test_grp.py
+++ b/Lib/test/test_grp.py
@@ -16,7 +16,7 @@ class GroupDatabaseTestCase(unittest.TestCase):
self.assertEqual(value[1], value.gr_passwd)
self.assertIsInstance(value.gr_passwd, basestring)
self.assertEqual(value[2], value.gr_gid)
- self.assertIsInstance(value.gr_gid, int)
+ self.assertIsInstance(value.gr_gid, (long, int))
self.assertEqual(value[3], value.gr_mem)
self.assertIsInstance(value.gr_mem, list)
@@ -26,8 +26,10 @@ class GroupDatabaseTestCase(unittest.TestCase):
for e in entries:
self.check_value(e)
+ def test_values_extended(self):
+ entries = grp.getgrall()
if len(entries) > 1000: # Huge group file (NIS?) -- skip the rest
- return
+ self.skipTest('huge group file, extended test skipped')
for e in entries:
e2 = grp.getgrgid(e.gr_gid)
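
The relaxed gr_gid check allows long on platforms with large group IDs. Typical use of the module, assuming a POSIX system where group 0 exists:

    import grp

    g = grp.getgrgid(0)                      # usually 'root' or 'wheel'
    print g.gr_name, g.gr_gid, g.gr_mem
    print isinstance(g.gr_gid, (int, long))  # True; may be long on some platforms
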
diff --git a/Lib/test/test_gzip.py b/Lib/test/test_gzip.py
index a28cd34..aa56ed3 100644
--- a/Lib/test/test_gzip.py
+++ b/Lib/test/test_gzip.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""Test script for the gzip module.
"""
@@ -53,6 +52,13 @@ class TestGzip(unittest.TestCase):
d = f.read()
self.assertEqual(d, data1*50)
+ def test_read_universal_newlines(self):
+ # Issue #5148: Reading breaks when mode contains 'U'.
+ self.test_write()
+ with gzip.GzipFile(self.filename, 'rU') as f:
+ d = f.read()
+ self.assertEqual(d, data1*50)
+
def test_io_on_closed_object(self):
# Test that I/O operations on closed GzipFile objects raise a
# ValueError, just like the corresponding functions on file objects.
@@ -282,6 +288,14 @@ class TestGzip(unittest.TestCase):
with gzip.GzipFile(fileobj=f, mode="w") as g:
self.assertEqual(g.name, "")
+ def test_read_with_extra(self):
+ # Gzip data with an extra field
+ gzdata = (b'\x1f\x8b\x08\x04\xb2\x17cQ\x02\xff'
+ b'\x05\x00Extra'
+ b'\x0bI-.\x01\x002\xd1Mx\x04\x00\x00\x00')
+ with gzip.GzipFile(fileobj=io.BytesIO(gzdata)) as f:
+ self.assertEqual(f.read(), b'Test')
+
def test_main(verbose=None):
test_support.run_unittest(TestGzip)
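
test_read_universal_newlines covers issue #5148, where a 'U' in the mode used to break reads. A quick round-trip sketch (the file name is made up):

    import gzip

    data = 'line one\nline two\n'
    with gzip.GzipFile('sample.gz', 'wb') as f:    # hypothetical scratch file
        f.write(data)
    with gzip.GzipFile('sample.gz', 'rU') as f:    # 'U' no longer breaks reading
        assert f.read() == data
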
diff --git a/Lib/test/test_hashlib.py b/Lib/test/test_hashlib.py
index 855ecbf..3fc172f 100644
--- a/Lib/test/test_hashlib.py
+++ b/Lib/test/test_hashlib.py
@@ -16,6 +16,8 @@ except ImportError:
threading = None
import unittest
import warnings
+from binascii import unhexlify
+
from test import test_support
from test.test_support import _4G, precisionbigmemtest
@@ -108,12 +110,8 @@ class HashLibTestCase(unittest.TestCase):
_algo.islower()]))
def test_unknown_hash(self):
- try:
- hashlib.new('spam spam spam spam spam')
- except ValueError:
- pass
- else:
- self.assertTrue(0 == "hashlib didn't reject bogus hash name")
+ self.assertRaises(ValueError, hashlib.new, 'spam spam spam spam spam')
+ self.assertRaises(TypeError, hashlib.new, 1)
def test_get_builtin_constructor(self):
get_builtin_constructor = hashlib.__dict__[
@@ -132,6 +130,7 @@ class HashLibTestCase(unittest.TestCase):
sys.modules['_md5'] = _md5
else:
del sys.modules['_md5']
+ self.assertRaises(TypeError, get_builtin_constructor, 3)
def test_hexdigest(self):
for name in self.supported_hash_names:
@@ -170,6 +169,21 @@ class HashLibTestCase(unittest.TestCase):
% (name, hash_object_constructor,
computed, len(data), digest))
+ def check_update(self, name, data, digest):
+ constructors = self.constructors_to_test[name]
+ # 2 is for hashlib.name(...) and hashlib.new(name, ...)
+ self.assertGreaterEqual(len(constructors), 2)
+ for hash_object_constructor in constructors:
+ h = hash_object_constructor()
+ h.update(data)
+ computed = h.hexdigest()
+ self.assertEqual(
+ computed, digest,
+ "Hash algorithm %s using %s when updated returned hexdigest"
+ " %r for %d byte input data that should have hashed to %r."
+ % (name, hash_object_constructor,
+ computed, len(data), digest))
+
def check_unicode(self, algorithm_name):
# Unicode objects are not allowed as input.
expected = hashlib.new(algorithm_name, str(u'spam')).hexdigest()
@@ -195,21 +209,20 @@ class HashLibTestCase(unittest.TestCase):
self.check('md5', 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
'd174ab98d277d9f5a5611c2c9f419d9f')
- @precisionbigmemtest(size=_4G + 5, memuse=1)
+ @unittest.skipIf(sys.maxsize < _4G + 5, 'test cannot run on 32-bit systems')
+ @precisionbigmemtest(size=_4G + 5, memuse=1, dry_run=False)
def test_case_md5_huge(self, size):
- if size == _4G + 5:
- try:
- self.check('md5', 'A'*size, 'c9af2dff37468ce5dfee8f2cfc0a9c6d')
- except OverflowError:
- pass # 32-bit arch
+ self.check('md5', 'A'*size, 'c9af2dff37468ce5dfee8f2cfc0a9c6d')
- @precisionbigmemtest(size=_4G - 1, memuse=1)
+ @unittest.skipIf(sys.maxsize < _4G + 5, 'test cannot run on 32-bit systems')
+ @precisionbigmemtest(size=_4G + 5, memuse=1, dry_run=False)
+ def test_case_md5_huge_update(self, size):
+ self.check_update('md5', 'A'*size, 'c9af2dff37468ce5dfee8f2cfc0a9c6d')
+
+ @unittest.skipIf(sys.maxsize < _4G - 1, 'test cannot run on 32-bit systems')
+ @precisionbigmemtest(size=_4G - 1, memuse=1, dry_run=False)
def test_case_md5_uintmax(self, size):
- if size == _4G - 1:
- try:
- self.check('md5', 'A'*size, '28138d306ff1b8281f1a9067e1a1a2b3')
- except OverflowError:
- pass # 32-bit arch
+ self.check('md5', 'A'*size, '28138d306ff1b8281f1a9067e1a1a2b3')
# use the three examples from Federal Information Processing Standards
# Publication 180-1, Secure Hash Standard, 1995 April 17
@@ -231,6 +244,23 @@ class HashLibTestCase(unittest.TestCase):
self.check('sha1', "a" * 1000000,
"34aa973cd4c4daa4f61eeb2bdbad27316534016f")
+ @precisionbigmemtest(size=_4G + 5, memuse=1)
+ def test_case_sha1_huge(self, size):
+ if size == _4G + 5:
+ try:
+ self.check('sha1', 'A'*size,
+ '87d745c50e6b2879ffa0fb2c930e9fbfe0dc9a5b')
+ except OverflowError:
+ pass # 32-bit arch
+
+ @precisionbigmemtest(size=_4G + 5, memuse=1)
+ def test_case_sha1_huge_update(self, size):
+ if size == _4G + 5:
+ try:
+ self.check_update('sha1', 'A'*size,
+ '87d745c50e6b2879ffa0fb2c930e9fbfe0dc9a5b')
+ except OverflowError:
+ pass # 32-bit arch
# use the examples from Federal Information Processing Standards
# Publication 180-2, Secure Hash Standard, 2002 August 1
@@ -354,8 +384,72 @@ class HashLibTestCase(unittest.TestCase):
self.assertEqual(expected_hash, hasher.hexdigest())
+
+class KDFTests(unittest.TestCase):
+ pbkdf2_test_vectors = [
+ (b'password', b'salt', 1, None),
+ (b'password', b'salt', 2, None),
+ (b'password', b'salt', 4096, None),
+ # too slow, it takes over a minute on a fast CPU.
+ #(b'password', b'salt', 16777216, None),
+ (b'passwordPASSWORDpassword', b'saltSALTsaltSALTsaltSALTsaltSALTsalt',
+ 4096, -1),
+ (b'pass\0word', b'sa\0lt', 4096, 16),
+ ]
+
+ pbkdf2_results = {
+ "sha1": [
+ # official test vectors from RFC 6070
+ (unhexlify('0c60c80f961f0e71f3a9b524af6012062fe037a6'), None),
+ (unhexlify('ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957'), None),
+ (unhexlify('4b007901b765489abead49d926f721d065a429c1'), None),
+ #(unhexlify('eefe3d61cd4da4e4e9945b3d6ba2158c2634e984'), None),
+ (unhexlify('3d2eec4fe41c849b80c8d83662c0e44a8b291a964c'
+ 'f2f07038'), 25),
+ (unhexlify('56fa6aa75548099dcc37d7f03425e0c3'), None),],
+ "sha256": [
+ (unhexlify('120fb6cffcf8b32c43e7225256c4f837'
+ 'a86548c92ccc35480805987cb70be17b'), None),
+ (unhexlify('ae4d0c95af6b46d32d0adff928f06dd0'
+ '2a303f8ef3c251dfd6e2d85a95474c43'), None),
+ (unhexlify('c5e478d59288c841aa530db6845c4c8d'
+ '962893a001ce4e11a4963873aa98134a'), None),
+ #(unhexlify('cf81c66fe8cfc04d1f31ecb65dab4089'
+ # 'f7f179e89b3b0bcb17ad10e3ac6eba46'), None),
+ (unhexlify('348c89dbcbd32b2f32d814b8116e84cf2b17'
+ '347ebc1800181c4e2a1fb8dd53e1c635518c7dac47e9'), 40),
+ (unhexlify('89b69d0516f829893c696226650a8687'), None),],
+ "sha512": [
+ (unhexlify('867f70cf1ade02cff3752599a3a53dc4af34c7a669815ae5'
+ 'd513554e1c8cf252c02d470a285a0501bad999bfe943c08f'
+ '050235d7d68b1da55e63f73b60a57fce'), None),
+ (unhexlify('e1d9c16aa681708a45f5c7c4e215ceb66e011a2e9f004071'
+ '3f18aefdb866d53cf76cab2868a39b9f7840edce4fef5a82'
+ 'be67335c77a6068e04112754f27ccf4e'), None),
+ (unhexlify('d197b1b33db0143e018b12f3d1d1479e6cdebdcc97c5c0f8'
+ '7f6902e072f457b5143f30602641b3d55cd335988cb36b84'
+ '376060ecd532e039b742a239434af2d5'), None),
+ (unhexlify('8c0511f4c6e597c6ac6315d8f0362e225f3c501495ba23b8'
+ '68c005174dc4ee71115b59f9e60cd9532fa33e0f75aefe30'
+ '225c583a186cd82bd4daea9724a3d3b8'), 64),
+ (unhexlify('9d9e9c4cd21fe4be24d5b8244c759665'), None),],
+ }
+
+ def test_pbkdf2_hmac(self):
+ for digest_name, results in self.pbkdf2_results.items():
+ for i, vector in enumerate(self.pbkdf2_test_vectors):
+ password, salt, rounds, dklen = vector
+ expected, overwrite_dklen = results[i]
+ if overwrite_dklen:
+ dklen = overwrite_dklen
+ out = hashlib.pbkdf2_hmac(
+ digest_name, password, salt, rounds, dklen)
+ self.assertEqual(out, expected,
+ (digest_name, password, salt, rounds, dklen))
+
+
def test_main():
- test_support.run_unittest(HashLibTestCase)
+ test_support.run_unittest(HashLibTestCase, KDFTests)
if __name__ == "__main__":
test_main()
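hashlib.pbkdf2_hmac, new in 2.7.8, is the entry point KDFTests drives. A one-line usage sketch matching the first RFC 6070 vector listed above:

    import hashlib
    from binascii import hexlify

    dk = hashlib.pbkdf2_hmac('sha1', b'password', b'salt', 1)
    print(hexlify(dk))  # 0c60c80f961f0e71f3a9b524af6012062fe037a6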
diff --git a/Lib/test/test_heapq.py b/Lib/test/test_heapq.py
index 5932a40..c4de593 100644
--- a/Lib/test/test_heapq.py
+++ b/Lib/test/test_heapq.py
@@ -158,6 +158,15 @@ class TestHeap(TestCase):
self.assertEqual(sorted(chain(*inputs)), list(self.module.merge(*inputs)))
self.assertEqual(list(self.module.merge()), [])
+ def test_merge_does_not_suppress_index_error(self):
+ # Issue 19018: Heapq.merge suppresses IndexError from user generator
+ def iterable():
+ s = list(range(10))
+ for i in range(20):
+ yield s[i] # IndexError when i > 10
+ with self.assertRaises(IndexError):
+ list(self.module.merge(iterable(), iterable()))
+
def test_merge_stability(self):
class Int(int):
pass
@@ -315,6 +324,16 @@ def L(seqn):
'Test multiple tiers of iterators'
return chain(imap(lambda x:x, R(Ig(G(seqn)))))
+class SideEffectLT:
+ def __init__(self, value, heap):
+ self.value = value
+ self.heap = heap
+
+ def __lt__(self, other):
+ self.heap[:] = []
+ return self.value < other.value
+
+
class TestErrorHandling(TestCase):
module = None
@@ -361,6 +380,22 @@ class TestErrorHandling(TestCase):
self.assertRaises(TypeError, f, 2, N(s))
self.assertRaises(ZeroDivisionError, f, 2, E(s))
+ # Issue #17278: the heap may change size while it's being walked.
+
+ def test_heappush_mutating_heap(self):
+ heap = []
+ heap.extend(SideEffectLT(i, heap) for i in range(200))
+ # Python version raises IndexError, C version RuntimeError
+ with self.assertRaises((IndexError, RuntimeError)):
+ self.module.heappush(heap, SideEffectLT(5, heap))
+
+ def test_heappop_mutating_heap(self):
+ heap = []
+ heap.extend(SideEffectLT(i, heap) for i in range(200))
+ # Python version raises IndexError, C version RuntimeError
+ with self.assertRaises((IndexError, RuntimeError)):
+ self.module.heappop(heap)
+
class TestErrorHandlingPython(TestErrorHandling):
module = py_heapq
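The SideEffectLT helper above captures the issue #17278 failure mode. A standalone sketch of the same trap, an object whose __lt__ empties the heap while heappop is still indexing into it:

    import heapq

    class EvilLT(object):
        def __init__(self, value, heap):
            self.value, self.heap = value, heap
        def __lt__(self, other):
            del self.heap[:]               # mutate the heap mid-comparison
            return self.value < other.value

    heap = []
    heap.extend(EvilLT(i, heap) for i in range(200))
    try:
        heapq.heappop(heap)
    except (IndexError, RuntimeError) as exc:  # Python vs. C implementation
        print('rejected: %r' % exc)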
diff --git a/Lib/test/test_hmac.py b/Lib/test/test_hmac.py
index cd148e9..839d019 100644
--- a/Lib/test/test_hmac.py
+++ b/Lib/test/test_hmac.py
@@ -1,3 +1,5 @@
+# coding: utf-8
+
import hmac
import hashlib
import unittest
@@ -302,12 +304,122 @@ class CopyTestCase(unittest.TestCase):
self.assertTrue(h1.hexdigest() == h2.hexdigest(),
"Hexdigest of copy doesn't match original hexdigest.")
+
+class CompareDigestTestCase(unittest.TestCase):
+
+ def test_compare_digest(self):
+ # Testing input type exception handling
+ a, b = 100, 200
+ self.assertRaises(TypeError, hmac.compare_digest, a, b)
+ a, b = 100, b"foobar"
+ self.assertRaises(TypeError, hmac.compare_digest, a, b)
+ a, b = b"foobar", 200
+ self.assertRaises(TypeError, hmac.compare_digest, a, b)
+ a, b = u"foobar", b"foobar"
+ self.assertRaises(TypeError, hmac.compare_digest, a, b)
+ a, b = b"foobar", u"foobar"
+ self.assertRaises(TypeError, hmac.compare_digest, a, b)
+
+ # Testing bytes of different lengths
+ a, b = b"foobar", b"foo"
+ self.assertFalse(hmac.compare_digest(a, b))
+ a, b = b"\xde\xad\xbe\xef", b"\xde\xad"
+ self.assertFalse(hmac.compare_digest(a, b))
+
+ # Testing bytes of same lengths, different values
+ a, b = b"foobar", b"foobaz"
+ self.assertFalse(hmac.compare_digest(a, b))
+ a, b = b"\xde\xad\xbe\xef", b"\xab\xad\x1d\xea"
+ self.assertFalse(hmac.compare_digest(a, b))
+
+ # Testing bytes of same lengths, same values
+ a, b = b"foobar", b"foobar"
+ self.assertTrue(hmac.compare_digest(a, b))
+ a, b = b"\xde\xad\xbe\xef", b"\xde\xad\xbe\xef"
+ self.assertTrue(hmac.compare_digest(a, b))
+
+ # Testing bytearrays of same lengths, same values
+ a, b = bytearray(b"foobar"), bytearray(b"foobar")
+ self.assertTrue(hmac.compare_digest(a, b))
+
+ # Testing bytearrays of different lengths
+ a, b = bytearray(b"foobar"), bytearray(b"foo")
+ self.assertFalse(hmac.compare_digest(a, b))
+
+ # Testing bytearrays of same lengths, different values
+ a, b = bytearray(b"foobar"), bytearray(b"foobaz")
+ self.assertFalse(hmac.compare_digest(a, b))
+
+ # Testing byte and bytearray of same lengths, same values
+ a, b = bytearray(b"foobar"), b"foobar"
+ self.assertTrue(hmac.compare_digest(a, b))
+ self.assertTrue(hmac.compare_digest(b, a))
+
+ # Testing bytes and bytearray of different lengths
+ a, b = bytearray(b"foobar"), b"foo"
+ self.assertFalse(hmac.compare_digest(a, b))
+ self.assertFalse(hmac.compare_digest(b, a))
+
+ # Testing byte and bytearray of same lengths, different values
+ a, b = bytearray(b"foobar"), b"foobaz"
+ self.assertFalse(hmac.compare_digest(a, b))
+ self.assertFalse(hmac.compare_digest(b, a))
+
+ # Testing str of same lengths
+ a, b = "foobar", "foobar"
+ self.assertTrue(hmac.compare_digest(a, b))
+
+ # Testing str of different lengths
+ a, b = "foo", "foobar"
+ self.assertFalse(hmac.compare_digest(a, b))
+
+ # Testing bytes of same lengths, different values
+ a, b = "foobar", "foobaz"
+ self.assertFalse(hmac.compare_digest(a, b))
+
+ # Testing error cases
+ a, b = u"foobar", b"foobar"
+ self.assertRaises(TypeError, hmac.compare_digest, a, b)
+ a, b = b"foobar", u"foobar"
+ self.assertRaises(TypeError, hmac.compare_digest, a, b)
+ a, b = b"foobar", 1
+ self.assertRaises(TypeError, hmac.compare_digest, a, b)
+ a, b = 100, 200
+ self.assertRaises(TypeError, hmac.compare_digest, a, b)
+ a, b = "fooä", "fooä"
+ self.assertTrue(hmac.compare_digest(a, b))
+
+ # subclasses are supported; their __eq__ is ignored
+ class mystr(str):
+ def __eq__(self, other):
+ return False
+
+ a, b = mystr("foobar"), mystr("foobar")
+ self.assertTrue(hmac.compare_digest(a, b))
+ a, b = mystr("foobar"), "foobar"
+ self.assertTrue(hmac.compare_digest(a, b))
+ a, b = mystr("foobar"), mystr("foobaz")
+ self.assertFalse(hmac.compare_digest(a, b))
+
+ class mybytes(bytes):
+ def __eq__(self, other):
+ return False
+
+ a, b = mybytes(b"foobar"), mybytes(b"foobar")
+ self.assertTrue(hmac.compare_digest(a, b))
+ a, b = mybytes(b"foobar"), b"foobar"
+ self.assertTrue(hmac.compare_digest(a, b))
+ a, b = mybytes(b"foobar"), mybytes(b"foobaz")
+ self.assertFalse(hmac.compare_digest(a, b))
+
+
def test_main():
test_support.run_unittest(
TestVectorsTestCase,
ConstructorTestCase,
SanityTestCase,
- CopyTestCase
+ CopyTestCase,
+ CompareDigestTestCase,
)
if __name__ == "__main__":
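For reference, a minimal use of the hmac.compare_digest API that CompareDigestTestCase covers: it compares like-typed digests in constant time, and mixing unicode with bytes raises TypeError:

    import hmac
    import hashlib

    expected = hmac.new(b'key', b'message', hashlib.sha256).hexdigest()
    provided = hmac.new(b'key', b'message', hashlib.sha256).hexdigest()
    print(hmac.compare_digest(expected, provided))    # True
    print(hmac.compare_digest(b'foobar', b'foobaz'))  # False
    # hmac.compare_digest(u'foobar', b'foobar') raises TypeError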
diff --git a/Lib/test/test_htmlparser.py b/Lib/test/test_htmlparser.py
index 41f4340..cde2bd2 100644
--- a/Lib/test/test_htmlparser.py
+++ b/Lib/test/test_htmlparser.py
@@ -206,8 +206,7 @@ text
self._run_check("</$>", [('comment', '$')])
self._run_check("</", [('data', '</')])
self._run_check("</a", [('data', '</a')])
- # XXX this might be wrong
- self._run_check("<a<a>", [('data', '<a'), ('starttag', 'a', [])])
+ self._run_check("<a<a>", [('starttag', 'a<a', [])])
self._run_check("</a<a>", [('endtag', 'a<a')])
self._run_check("<!", [('data', '<!')])
self._run_check("<a", [('data', '<a')])
@@ -215,6 +214,11 @@ text
self._run_check("<a foo='bar", [('data', "<a foo='bar")])
self._run_check("<a foo='>'", [('data', "<a foo='>'")])
self._run_check("<a foo='>", [('data', "<a foo='>")])
+ self._run_check("<a$>", [('starttag', 'a$', [])])
+ self._run_check("<a$b>", [('starttag', 'a$b', [])])
+ self._run_check("<a$b/>", [('startendtag', 'a$b', [])])
+ self._run_check("<a$b >", [('starttag', 'a$b', [])])
+ self._run_check("<a$b />", [('startendtag', 'a$b', [])])
def test_valid_doctypes(self):
# from http://www.w3.org/QA/2002/04/valid-dtd-list.html
@@ -260,6 +264,16 @@ text
('starttag', 'a', [('foo', None), ('=', None), ('bar', None)])
]
self._run_check(html, expected)
+ # see issue #14538
+ html = ('<meta><meta / ><meta // ><meta / / >'
+ '<meta/><meta /><meta //><meta//>')
+ expected = [
+ ('starttag', 'meta', []), ('starttag', 'meta', []),
+ ('starttag', 'meta', []), ('starttag', 'meta', []),
+ ('startendtag', 'meta', []), ('startendtag', 'meta', []),
+ ('startendtag', 'meta', []), ('startendtag', 'meta', []),
+ ]
+ self._run_check(html, expected)
def test_declaration_junk_chars(self):
self._run_check("<!DOCTYPE foo $ >", [('decl', 'DOCTYPE foo $ ')])
@@ -380,6 +394,12 @@ text
("data", "&#bad;"),
("endtag", "p"),
])
+ # add the [] as a workaround to avoid buffering (see #20288)
+ self._run_check(["<div>&#bad;</div>"], [
+ ("starttag", "div", []),
+ ("data", "&#bad;"),
+ ("endtag", "div"),
+ ])
def test_unescape_function(self):
parser = HTMLParser.HTMLParser()
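A sketch of the kind of event collector the _run_check helper is built on, used here to show the more tolerant handling of junk characters in a tag name asserted above (EventCollector is only an illustration, not the test's actual helper):

    from HTMLParser import HTMLParser

    class EventCollector(HTMLParser):
        def __init__(self):
            HTMLParser.__init__(self)
            self.events = []
        def handle_starttag(self, tag, attrs):
            self.events.append(('starttag', tag, attrs))
        def handle_data(self, data):
            self.events.append(('data', data))

    p = EventCollector()
    p.feed("<a$b>junk in the tag name is kept")
    p.close()
    print(p.events)   # [('starttag', 'a$b', []), ('data', '...')]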
diff --git a/Lib/test/test_httplib.py b/Lib/test/test_httplib.py
index 38c0d2d..72800e5 100644
--- a/Lib/test/test_httplib.py
+++ b/Lib/test/test_httplib.py
@@ -13,10 +13,12 @@ from test import test_support
HOST = test_support.HOST
class FakeSocket:
- def __init__(self, text, fileclass=StringIO.StringIO):
+ def __init__(self, text, fileclass=StringIO.StringIO, host=None, port=None):
self.text = text
self.fileclass = fileclass
self.data = ''
+ self.host = host
+ self.port = port
def sendall(self, data):
self.data += ''.join(data)
@@ -26,6 +28,9 @@ class FakeSocket:
raise httplib.UnimplementedFileMode()
return self.fileclass(self.text)
+ def close(self):
+ pass
+
class EPipeSocket(FakeSocket):
def __init__(self, text, pipe_trigger):
@@ -90,12 +95,40 @@ class HeaderTests(TestCase):
conn.request('POST', '/', body, headers)
self.assertEqual(conn._buffer.count[header.lower()], 1)
+ def test_content_length_0(self):
+
+ class ContentLengthChecker(list):
+ def __init__(self):
+ list.__init__(self)
+ self.content_length = None
+ def append(self, item):
+ kv = item.split(':', 1)
+ if len(kv) > 1 and kv[0].lower() == 'content-length':
+ self.content_length = kv[1].strip()
+ list.append(self, item)
+
+ # POST with empty body
+ conn = httplib.HTTPConnection('example.com')
+ conn.sock = FakeSocket(None)
+ conn._buffer = ContentLengthChecker()
+ conn.request('POST', '/', '')
+ self.assertEqual(conn._buffer.content_length, '0',
+ 'Header Content-Length not set')
+
+ # PUT request with empty body
+ conn = httplib.HTTPConnection('example.com')
+ conn.sock = FakeSocket(None)
+ conn._buffer = ContentLengthChecker()
+ conn.request('PUT', '/', '')
+ self.assertEqual(conn._buffer.content_length, '0',
+ 'Header Content-Length not set')
+
def test_putheader(self):
conn = httplib.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.putrequest('GET','/')
conn.putheader('Content-length',42)
- self.assertTrue('Content-length: 42' in conn._buffer)
+ self.assertIn('Content-length: 42', conn._buffer)
def test_ipv6host_header(self):
# Default host header on IPv6 transaction should wrapped by [] if
@@ -125,6 +158,8 @@ class BasicTest(TestCase):
sock = FakeSocket(body)
resp = httplib.HTTPResponse(sock)
resp.begin()
+ self.assertEqual(resp.read(0), '') # Issue #20007
+ self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(), 'Text')
self.assertTrue(resp.isclosed())
@@ -138,7 +173,7 @@ class BasicTest(TestCase):
self.assertEqual(repr(exc), '''BadStatusLine("\'\'",)''')
def test_partial_reads(self):
- # if we have a lenght, the system knows when to close itself
+ # if we have a length, the system knows when to close itself
# same behaviour than when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
@@ -149,6 +184,32 @@ class BasicTest(TestCase):
self.assertEqual(resp.read(2), 'xt')
self.assertTrue(resp.isclosed())
+ def test_partial_reads_no_content_length(self):
+ # when no length is present, the socket should be gracefully closed when
+ # all data was read
+ body = "HTTP/1.1 200 Ok\r\n\r\nText"
+ sock = FakeSocket(body)
+ resp = httplib.HTTPResponse(sock)
+ resp.begin()
+ self.assertEqual(resp.read(2), 'Te')
+ self.assertFalse(resp.isclosed())
+ self.assertEqual(resp.read(2), 'xt')
+ self.assertEqual(resp.read(1), '')
+ self.assertTrue(resp.isclosed())
+
+ def test_partial_reads_incomplete_body(self):
+ # if the server shuts down the connection before the whole
+ # content-length is delivered, the socket is gracefully closed
+ body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
+ sock = FakeSocket(body)
+ resp = httplib.HTTPResponse(sock)
+ resp.begin()
+ self.assertEqual(resp.read(2), 'Te')
+ self.assertFalse(resp.isclosed())
+ self.assertEqual(resp.read(2), 'xt')
+ self.assertEqual(resp.read(1), '')
+ self.assertTrue(resp.isclosed())
+
def test_host_port(self):
# Check invalid host_port
@@ -279,7 +340,7 @@ class BasicTest(TestCase):
resp = httplib.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), 'Hello\r\n')
- resp.close()
+ self.assertTrue(resp.isclosed())
def test_incomplete_read(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
@@ -293,10 +354,9 @@ class BasicTest(TestCase):
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertEqual(str(i),
"IncompleteRead(7 bytes read, 3 more expected)")
+ self.assertTrue(resp.isclosed())
else:
self.fail('IncompleteRead expected')
- finally:
- resp.close()
def test_epipe(self):
sock = EPipeSocket(
@@ -349,6 +409,14 @@ class BasicTest(TestCase):
resp.begin()
self.assertRaises(httplib.LineTooLong, resp.read)
+ def test_early_eof(self):
+ # Test HTTPResponse with no \r\n termination.
+ body = "HTTP/1.1 200 Ok"
+ sock = FakeSocket(body)
+ resp = httplib.HTTPResponse(sock)
+ resp.begin()
+ self.assertEqual(resp.read(), '')
+ self.assertTrue(resp.isclosed())
class OfflineTest(TestCase):
def test_responses(self):
@@ -403,7 +471,7 @@ class TimeoutTest(TestCase):
HTTPConnection and into the socket.
'''
# default -- use global socket timeout
- self.assertTrue(socket.getdefaulttimeout() is None)
+ self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT)
@@ -414,7 +482,7 @@ class TimeoutTest(TestCase):
httpConn.close()
# no timeout -- do not use global socket default
- self.assertTrue(socket.getdefaulttimeout() is None)
+ self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT,
@@ -463,9 +531,48 @@ class HTTPSTimeoutTest(TestCase):
self.fail("Port incorrectly parsed: %s != %s" % (p, c.host))
+class TunnelTests(TestCase):
+ def test_connect(self):
+ response_text = (
+ 'HTTP/1.0 200 OK\r\n\r\n' # Reply to CONNECT
+ 'HTTP/1.1 200 OK\r\n' # Reply to HEAD
+ 'Content-Length: 42\r\n\r\n'
+ )
+
+ def create_connection(address, timeout=None, source_address=None):
+ return FakeSocket(response_text, host=address[0], port=address[1])
+
+ conn = httplib.HTTPConnection('proxy.com')
+ conn._create_connection = create_connection
+
+ # Once connected, we should not be able to tunnel anymore
+ conn.connect()
+ self.assertRaises(RuntimeError, conn.set_tunnel, 'destination.com')
+
+ # But if we close the connection, we are good.
+ conn.close()
+ conn.set_tunnel('destination.com')
+ conn.request('HEAD', '/', '')
+
+ self.assertEqual(conn.sock.host, 'proxy.com')
+ self.assertEqual(conn.sock.port, 80)
+ self.assertTrue('CONNECT destination.com' in conn.sock.data)
+ self.assertTrue('Host: destination.com' in conn.sock.data)
+
+ self.assertTrue('Host: proxy.com' not in conn.sock.data)
+
+ conn.close()
+
+ conn.request('PUT', '/', '')
+ self.assertEqual(conn.sock.host, 'proxy.com')
+ self.assertEqual(conn.sock.port, 80)
+ self.assertTrue('CONNECT destination.com' in conn.sock.data)
+ self.assertTrue('Host: destination.com' in conn.sock.data)
+
+
def test_main(verbose=None):
test_support.run_unittest(HeaderTests, OfflineTest, BasicTest, TimeoutTest,
- HTTPSTimeoutTest, SourceAddressTest)
+ HTTPSTimeoutTest, SourceAddressTest, TunnelTests)
if __name__ == '__main__':
test_main()
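TunnelTests mimics the usual CONNECT-through-a-proxy flow. A hedged sketch of that flow with placeholder host names (no request is actually sent here):

    import httplib

    conn = httplib.HTTPConnection('proxy.example.com', 8080)
    conn.set_tunnel('destination.example.com', 443)
    # conn.request('HEAD', '/')   # would emit CONNECT destination.example.com:443
    #                             # to the proxy before sending the HEAD request
    # resp = conn.getresponse()
    conn.close()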
diff --git a/Lib/test/test_httpservers.py b/Lib/test/test_httpservers.py
index 97abdf5..c1c1a2d 100644
--- a/Lib/test/test_httpservers.py
+++ b/Lib/test/test_httpservers.py
@@ -4,11 +4,6 @@ Written by Cody A.W. Somerville <cody-somerville@ubuntu.com>,
Josip Dzolonga, and Michael Otteneder for the 2007/08 GHOP contest.
"""
-from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
-from SimpleHTTPServer import SimpleHTTPRequestHandler
-from CGIHTTPServer import CGIHTTPRequestHandler
-import CGIHTTPServer
-
import os
import sys
import re
@@ -17,12 +12,17 @@ import shutil
import urllib
import httplib
import tempfile
-
import unittest
+import CGIHTTPServer
-from StringIO import StringIO
+from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
+from SimpleHTTPServer import SimpleHTTPRequestHandler
+from CGIHTTPServer import CGIHTTPRequestHandler
+from StringIO import StringIO
from test import test_support
+
+
threading = test_support.import_module('threading')
@@ -43,7 +43,7 @@ class SocketlessRequestHandler(SimpleHTTPRequestHandler):
self.end_headers()
self.wfile.write(b'<html><body>Data</body></html>\r\n')
- def log_message(self, format, *args):
+ def log_message(self, fmt, *args):
pass
@@ -97,9 +97,9 @@ class BaseHTTPRequestHandlerTestCase(unittest.TestCase):
self.handler = SocketlessRequestHandler()
def send_typical_request(self, message):
- input = StringIO(message)
+ input_msg = StringIO(message)
output = StringIO()
- self.handler.rfile = input
+ self.handler.rfile = input_msg
self.handler.wfile = output
self.handler.handle_one_request()
output.seek(0)
@@ -114,7 +114,7 @@ class BaseHTTPRequestHandlerTestCase(unittest.TestCase):
def verify_http_server_response(self, response):
match = self.HTTPResponseMatch.search(response)
- self.assertTrue(match is not None)
+ self.assertIsNotNone(match)
def test_http_1_1(self):
result = self.send_typical_request('GET / HTTP/1.1\r\n\r\n')
@@ -189,7 +189,7 @@ class BaseHTTPServerTestCase(BaseTestCase):
def test_request_line_trimming(self):
self.con._http_vsn_str = 'HTTP/1.1\n'
- self.con.putrequest('GET', '/')
+ self.con.putrequest('XYZBOGUS', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 501)
@@ -216,8 +216,9 @@ class BaseHTTPServerTestCase(BaseTestCase):
self.assertEqual(res.status, 501)
def test_version_none(self):
+ # Test that a valid method is rejected when not HTTP/1.x
self.con._http_vsn_str = ''
- self.con.putrequest('PUT', '/')
+ self.con.putrequest('CUSTOM', '/')
self.con.endheaders()
res = self.con.getresponse()
self.assertEqual(res.status, 400)
@@ -296,7 +297,7 @@ class SimpleHTTPServerTestCase(BaseTestCase):
os.chdir(self.cwd)
try:
shutil.rmtree(self.tempdir)
- except:
+ except OSError:
pass
finally:
BaseTestCase.tearDown(self)
@@ -313,6 +314,9 @@ class SimpleHTTPServerTestCase(BaseTestCase):
#constructs the path relative to the root directory of the HTTPServer
response = self.request(self.tempdir_name + '/test')
self.check_status_and_reason(response, 200, data=self.data)
+ # check for trailing "/" which should return 404. See issue #17324
+ response = self.request(self.tempdir_name + '/test/')
+ self.check_status_and_reason(response, 404)
response = self.request(self.tempdir_name + '/')
self.check_status_and_reason(response, 200)
response = self.request(self.tempdir_name)
@@ -321,17 +325,16 @@ class SimpleHTTPServerTestCase(BaseTestCase):
self.check_status_and_reason(response, 404)
response = self.request('/' + 'ThisDoesNotExist' + '/')
self.check_status_and_reason(response, 404)
- f = open(os.path.join(self.tempdir_name, 'index.html'), 'w')
- response = self.request('/' + self.tempdir_name + '/')
- self.check_status_and_reason(response, 200)
-
- # chmod() doesn't work as expected on Windows, and filesystem
- # permissions are ignored by root on Unix.
- if os.name == 'posix' and os.geteuid() != 0:
- os.chmod(self.tempdir, 0)
- response = self.request(self.tempdir_name + '/')
- self.check_status_and_reason(response, 404)
- os.chmod(self.tempdir, 0755)
+ with open(os.path.join(self.tempdir_name, 'index.html'), 'w') as fp:
+ response = self.request('/' + self.tempdir_name + '/')
+ self.check_status_and_reason(response, 200)
+ # chmod() doesn't work as expected on Windows, and filesystem
+ # permissions are ignored by root on Unix.
+ if os.name == 'posix' and os.geteuid() != 0:
+ os.chmod(self.tempdir, 0)
+ response = self.request(self.tempdir_name + '/')
+ self.check_status_and_reason(response, 404)
+ os.chmod(self.tempdir, 0755)
def test_head(self):
response = self.request(
@@ -393,6 +396,11 @@ class CGIHTTPServerTestCase(BaseTestCase):
else:
self.pythonexe = sys.executable
+ self.nocgi_path = os.path.join(self.parent_dir, 'nocgi.py')
+ with open(self.nocgi_path, 'w') as fp:
+ fp.write(cgi_file1 % self.pythonexe)
+ os.chmod(self.nocgi_path, 0777)
+
self.file1_path = os.path.join(self.cgi_dir, 'file1.py')
with open(self.file1_path, 'w') as file1:
file1.write(cgi_file1 % self.pythonexe)
@@ -411,6 +419,7 @@ class CGIHTTPServerTestCase(BaseTestCase):
os.chdir(self.cwd)
if self.pythonexe != sys.executable:
os.remove(self.pythonexe)
+ os.remove(self.nocgi_path)
os.remove(self.file1_path)
os.remove(self.file2_path)
os.rmdir(self.cgi_dir)
@@ -418,41 +427,44 @@ class CGIHTTPServerTestCase(BaseTestCase):
finally:
BaseTestCase.tearDown(self)
- def test_url_collapse_path_split(self):
+ def test_url_collapse_path(self):
+ # verify tail is the last portion and head is the rest on proper urls
test_vectors = {
- '': ('/', ''),
+ '': '//',
'..': IndexError,
'/.//..': IndexError,
- '/': ('/', ''),
- '//': ('/', ''),
- '/\\': ('/', '\\'),
- '/.//': ('/', ''),
- 'cgi-bin/file1.py': ('/cgi-bin', 'file1.py'),
- '/cgi-bin/file1.py': ('/cgi-bin', 'file1.py'),
- 'a': ('/', 'a'),
- '/a': ('/', 'a'),
- '//a': ('/', 'a'),
- './a': ('/', 'a'),
- './C:/': ('/C:', ''),
- '/a/b': ('/a', 'b'),
- '/a/b/': ('/a/b', ''),
- '/a/b/c/..': ('/a/b', ''),
- '/a/b/c/../d': ('/a/b', 'd'),
- '/a/b/c/../d/e/../f': ('/a/b/d', 'f'),
- '/a/b/c/../d/e/../../f': ('/a/b', 'f'),
- '/a/b/c/../d/e/.././././..//f': ('/a/b', 'f'),
+ '/': '//',
+ '//': '//',
+ '/\\': '//\\',
+ '/.//': '//',
+ 'cgi-bin/file1.py': '/cgi-bin/file1.py',
+ '/cgi-bin/file1.py': '/cgi-bin/file1.py',
+ 'a': '//a',
+ '/a': '//a',
+ '//a': '//a',
+ './a': '//a',
+ './C:/': '/C:/',
+ '/a/b': '/a/b',
+ '/a/b/': '/a/b/',
+ '/a/b/.': '/a/b/',
+ '/a/b/c/..': '/a/b/',
+ '/a/b/c/../d': '/a/b/d',
+ '/a/b/c/../d/e/../f': '/a/b/d/f',
+ '/a/b/c/../d/e/../../f': '/a/b/f',
+ '/a/b/c/../d/e/.././././..//f': '/a/b/f',
'../a/b/c/../d/e/.././././..//f': IndexError,
- '/a/b/c/../d/e/../../../f': ('/a', 'f'),
- '/a/b/c/../d/e/../../../../f': ('/', 'f'),
+ '/a/b/c/../d/e/../../../f': '/a/f',
+ '/a/b/c/../d/e/../../../../f': '//f',
'/a/b/c/../d/e/../../../../../f': IndexError,
- '/a/b/c/../d/e/../../../../f/..': ('/', ''),
+ '/a/b/c/../d/e/../../../../f/..': '//',
+ '/a/b/c/../d/e/../../../../f/../.': '//',
}
for path, expected in test_vectors.iteritems():
if isinstance(expected, type) and issubclass(expected, Exception):
self.assertRaises(expected,
- CGIHTTPServer._url_collapse_path_split, path)
+ CGIHTTPServer._url_collapse_path, path)
else:
- actual = CGIHTTPServer._url_collapse_path_split(path)
+ actual = CGIHTTPServer._url_collapse_path(path)
self.assertEqual(expected, actual,
msg='path = %r\nGot: %r\nWanted: %r' %
(path, actual, expected))
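The renamed helper now returns a single collapsed path instead of a (head, tail) pair, exactly as the vectors above show. A direct illustration (note that _url_collapse_path is an internal CGIHTTPServer helper, not public API):

    import CGIHTTPServer

    print(CGIHTTPServer._url_collapse_path('/a/b/c/../d'))       # '/a/b/d'
    print(CGIHTTPServer._url_collapse_path('cgi-bin/file1.py'))  # '/cgi-bin/file1.py'
    # '..' escaping the root raises IndexError:
    # CGIHTTPServer._url_collapse_path('..')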
@@ -462,6 +474,10 @@ class CGIHTTPServerTestCase(BaseTestCase):
self.assertEqual(('Hello World\n', 'text/html', 200),
(res.read(), res.getheader('Content-type'), res.status))
+ def test_issue19435(self):
+ res = self.request('///////////nocgi.py/../cgi-bin/nothere.sh')
+ self.assertEqual(res.status, 404)
+
def test_post(self):
params = urllib.urlencode({'spam' : 1, 'eggs' : 'python', 'bacon' : 123456})
headers = {'Content-type' : 'application/x-www-form-urlencoded'}
@@ -495,6 +511,11 @@ class CGIHTTPServerTestCase(BaseTestCase):
(res.read(), res.getheader('Content-type'), res.status))
self.assertEqual(os.environ['SERVER_SOFTWARE'], signature)
+ def test_urlquote_decoding_in_cgi_check(self):
+ res = self.request('/cgi-bin%2ffile1.py')
+ self.assertEqual((b'Hello World\n', 'text/html', 200),
+ (res.read(), res.getheader('Content-type'), res.status))
+
class SimpleHTTPRequestHandlerTestCase(unittest.TestCase):
""" Test url parsing """
diff --git a/Lib/test/test_idle.py b/Lib/test/test_idle.py
new file mode 100644
index 0000000..9bd38fe
--- /dev/null
+++ b/Lib/test/test_idle.py
@@ -0,0 +1,20 @@
+import unittest
+from test import test_support as support
+from test.test_support import import_module
+
+# Skip test if _thread or _tkinter wasn't built or idlelib was deleted.
+import_module('threading') # imported by idlelib.PyShell, imports _thread
+tk = import_module('Tkinter') # imports _tkinter
+idletest = import_module('idlelib.idle_test')
+
+# Without test_main present, regrtest.runtest_inner (line 1219) calls
+# unittest.TestLoader().loadTestsFromModule(this_module) which calls
+# load_tests() if it finds it. (Unittest.main does the same.)
+load_tests = idletest.load_tests
+
+# pre-3.3 regrtest does not support the load_tests protocol. use test_main
+def test_main():
+ support.run_unittest(unittest.TestLoader().loadTestsFromModule(idletest))
+
+if __name__ == '__main__':
+ unittest.main(verbosity=2, exit=False)
diff --git a/Lib/test/test_imageop.py b/Lib/test/test_imageop.py
index 56320fb..31edbd1 100755..100644
--- a/Lib/test/test_imageop.py
+++ b/Lib/test/test_imageop.py
@@ -1,5 +1,3 @@
-#! /usr/bin/env python
-
"""Test script for the imageop module. This has the side
effect of partially testing the imgfile module as well.
Roger E. Masse
diff --git a/Lib/test/test_imaplib.py b/Lib/test/test_imaplib.py
index 15b0ea0..405b7ea 100644
--- a/Lib/test/test_imaplib.py
+++ b/Lib/test/test_imaplib.py
@@ -79,7 +79,7 @@ class SimpleIMAPHandler(SocketServer.StreamRequestHandler):
return
line += part
except IOError:
- # ..but SSLSockets throw exceptions.
+ # ..but SSLSockets raise exceptions.
return
if line.endswith('\r\n'):
break
@@ -165,6 +165,16 @@ class BaseThreadedNetworkedTests(unittest.TestCase):
self.imap_class, *server.server_address)
+ def test_linetoolong(self):
+ class TooLongHandler(SimpleIMAPHandler):
+ def handle(self):
+ # Send a very long response line
+ self.wfile.write('* OK ' + imaplib._MAXLINE*'x' + '\r\n')
+
+ with self.reaped_server(TooLongHandler) as server:
+ self.assertRaises(imaplib.IMAP4.error,
+ self.imap_class, *server.server_address)
+
class ThreadedNetworkedTests(BaseThreadedNetworkedTests):
server_class = SocketServer.TCPServer
@@ -177,6 +187,9 @@ class ThreadedNetworkedTestsSSL(BaseThreadedNetworkedTests):
server_class = SecureTCPServer
imap_class = IMAP4_SSL
+ def test_linetoolong(self):
+ raise unittest.SkipTest("test is not reliable on 2.7; see issue 20118")
+
class RemoteIMAPTest(unittest.TestCase):
host = 'cyrus.andrew.cmu.edu'
@@ -236,5 +249,4 @@ def test_main():
if __name__ == "__main__":
- support.use_resources = ['network']
test_main()
diff --git a/Lib/test/test_imgfile.py b/Lib/test/test_imgfile.py
index 39e8917..67a2315 100755..100644
--- a/Lib/test/test_imgfile.py
+++ b/Lib/test/test_imgfile.py
@@ -1,5 +1,3 @@
-#! /usr/bin/env python
-
"""Simple test script for imgfile.c
Roger E. Masse
"""
diff --git a/Lib/test/test_imghdr.py b/Lib/test/test_imghdr.py
new file mode 100644
index 0000000..9306d9b
--- /dev/null
+++ b/Lib/test/test_imghdr.py
@@ -0,0 +1,120 @@
+import imghdr
+import io
+import sys
+import unittest
+from test.test_support import findfile, TESTFN, unlink, run_unittest
+
+TEST_FILES = (
+ ('python.png', 'png'),
+ ('python.gif', 'gif'),
+ ('python.bmp', 'bmp'),
+ ('python.ppm', 'ppm'),
+ ('python.pgm', 'pgm'),
+ ('python.pbm', 'pbm'),
+ ('python.jpg', 'jpeg'),
+ ('python.ras', 'rast'),
+ ('python.sgi', 'rgb'),
+ ('python.tiff', 'tiff'),
+ ('python.xbm', 'xbm')
+)
+
+class UnseekableIO(io.FileIO):
+ def tell(self):
+ raise io.UnsupportedOperation
+
+ def seek(self, *args, **kwargs):
+ raise io.UnsupportedOperation
+
+class TestImghdr(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.testfile = findfile('python.png', subdir='imghdrdata')
+ with open(cls.testfile, 'rb') as stream:
+ cls.testdata = stream.read()
+
+ def tearDown(self):
+ unlink(TESTFN)
+
+ def test_data(self):
+ for filename, expected in TEST_FILES:
+ filename = findfile(filename, subdir='imghdrdata')
+ self.assertEqual(imghdr.what(filename), expected)
+ ufilename = filename.decode(sys.getfilesystemencoding())
+ self.assertEqual(imghdr.what(ufilename), expected)
+ with open(filename, 'rb') as stream:
+ self.assertEqual(imghdr.what(stream), expected)
+ with open(filename, 'rb') as stream:
+ data = stream.read()
+ self.assertEqual(imghdr.what(None, data), expected)
+
+ def test_register_test(self):
+ def test_jumbo(h, file):
+ if h.startswith(b'eggs'):
+ return 'ham'
+ imghdr.tests.append(test_jumbo)
+ self.addCleanup(imghdr.tests.pop)
+ self.assertEqual(imghdr.what(None, b'eggs'), 'ham')
+
+ def test_file_pos(self):
+ with open(TESTFN, 'wb') as stream:
+ stream.write(b'ababagalamaga')
+ pos = stream.tell()
+ stream.write(self.testdata)
+ with open(TESTFN, 'rb') as stream:
+ stream.seek(pos)
+ self.assertEqual(imghdr.what(stream), 'png')
+ self.assertEqual(stream.tell(), pos)
+
+ def test_bad_args(self):
+ with self.assertRaises(TypeError):
+ imghdr.what()
+ with self.assertRaises(AttributeError):
+ imghdr.what(None)
+ with self.assertRaises(TypeError):
+ imghdr.what(self.testfile, 1)
+ with open(self.testfile, 'rb') as f:
+ with self.assertRaises(AttributeError):
+ imghdr.what(f.fileno())
+
+ def test_invalid_headers(self):
+ for header in (b'\211PN\r\n',
+ b'\001\331',
+ b'\x59\xA6',
+ b'cutecat',
+ b'000000JFI',
+ b'GIF80'):
+ self.assertIsNone(imghdr.what(None, header))
+
+ def test_missing_file(self):
+ with self.assertRaises(IOError):
+ imghdr.what('missing')
+
+ def test_closed_file(self):
+ stream = open(self.testfile, 'rb')
+ stream.close()
+ with self.assertRaises(ValueError) as cm:
+ imghdr.what(stream)
+ stream = io.BytesIO(self.testdata)
+ stream.close()
+ with self.assertRaises(ValueError) as cm:
+ imghdr.what(stream)
+
+ def test_unseekable(self):
+ with open(TESTFN, 'wb') as stream:
+ stream.write(self.testdata)
+ with UnseekableIO(TESTFN, 'rb') as stream:
+ with self.assertRaises(io.UnsupportedOperation):
+ imghdr.what(stream)
+
+ def test_output_stream(self):
+ with open(TESTFN, 'wb') as stream:
+ stream.write(self.testdata)
+ stream.seek(0)
+ with self.assertRaises(IOError) as cm:
+ imghdr.what(stream)
+
+def test_main():
+ run_unittest(TestImghdr)
+
+if __name__ == '__main__':
+ test_main()
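imghdr.what, which the new test module exercises, accepts a filename, an open binary stream, or raw header bytes via its second argument. A tiny bytes-only sketch:

    import imghdr

    print(imghdr.what(None, b'\x89PNG\r\n\x1a\n' + b'\x00' * 8))  # 'png'
    print(imghdr.what(None, b'GIF89a' + b'\x00' * 8))             # 'gif'
    print(imghdr.what(None, b'not an image header'))              # None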
diff --git a/Lib/test/test_imp.py b/Lib/test/test_imp.py
index 77e44be..1bdc47a 100644
--- a/Lib/test/test_imp.py
+++ b/Lib/test/test_imp.py
@@ -2,7 +2,12 @@ import imp
import unittest
from test import test_support
+try:
+ import thread
+except ImportError:
+ thread = None
+@unittest.skipUnless(thread, 'threading not available')
class LockTests(unittest.TestCase):
"""Very basic test of import lock functions."""
@@ -68,13 +73,8 @@ class ReloadTests(unittest.TestCase):
def test_main():
tests = [
ReloadTests,
+ LockTests,
]
- try:
- import thread
- except ImportError:
- pass
- else:
- tests.append(LockTests)
test_support.run_unittest(*tests)
if __name__ == "__main__":
diff --git a/Lib/test/test_import.py b/Lib/test/test_import.py
index e426290..c17da10 100644
--- a/Lib/test/test_import.py
+++ b/Lib/test/test_import.py
@@ -5,19 +5,34 @@ import os
import py_compile
import random
import stat
+import struct
import sys
import unittest
+import textwrap
+import shutil
+
from test.test_support import (unlink, TESTFN, unload, run_unittest, rmtree,
is_jython, check_warnings, EnvironmentVarGuard)
-import textwrap
+from test import symlink_support
from test import script_helper
+def _files(name):
+ return (name + os.extsep + "py",
+ name + os.extsep + "pyc",
+ name + os.extsep + "pyo",
+ name + os.extsep + "pyw",
+ name + "$py.class")
+
+def chmod_files(name):
+ for f in _files(name):
+ try:
+ os.chmod(f, 0600)
+ except OSError as exc:
+ if exc.errno != errno.ENOENT:
+ raise
+
def remove_files(name):
- for f in (name + os.extsep + "py",
- name + os.extsep + "pyc",
- name + os.extsep + "pyo",
- name + os.extsep + "pyw",
- name + "$py.class"):
+ for f in _files(name):
unlink(f)
@@ -73,7 +88,8 @@ class ImportTests(unittest.TestCase):
unlink(source)
try:
- imp.reload(mod)
+ if not sys.dont_write_bytecode:
+ imp.reload(mod)
except ImportError, err:
self.fail("import from .pyc/.pyo failed: %s" % err)
finally:
@@ -90,7 +106,10 @@ class ImportTests(unittest.TestCase):
finally:
del sys.path[0]
- @unittest.skipUnless(os.name == 'posix', "test meaningful only on posix systems")
+ @unittest.skipUnless(os.name == 'posix',
+ "test meaningful only on posix systems")
+ @unittest.skipIf(sys.dont_write_bytecode,
+ "test meaningful only when writing bytecode")
def test_execute_bit_not_copied(self):
# Issue 6070: under posix .pyc files got their execute bit set if
# the .py file had the execute bit set, but they aren't executable.
@@ -117,6 +136,42 @@ class ImportTests(unittest.TestCase):
unload(TESTFN)
del sys.path[0]
+ @unittest.skipIf(sys.dont_write_bytecode,
+ "test meaningful only when writing bytecode")
+ def test_rewrite_pyc_with_read_only_source(self):
+ # Issue 6074: a long time ago on posix, and more recently on Windows,
+ # a read only source file resulted in a read only pyc file, which
+ # led to problems with updating it later
+ sys.path.insert(0, os.curdir)
+ fname = TESTFN + os.extsep + "py"
+ try:
+ # Write a Python file, make it read-only and import it
+ with open(fname, 'w') as f:
+ f.write("x = 'original'\n")
+ # Tweak the mtime of the source to ensure pyc gets updated later
+ s = os.stat(fname)
+ os.utime(fname, (s.st_atime, s.st_mtime-100000000))
+ os.chmod(fname, 0400)
+ m1 = __import__(TESTFN)
+ self.assertEqual(m1.x, 'original')
+ # Change the file and then reimport it
+ os.chmod(fname, 0600)
+ with open(fname, 'w') as f:
+ f.write("x = 'rewritten'\n")
+ unload(TESTFN)
+ m2 = __import__(TESTFN)
+ self.assertEqual(m2.x, 'rewritten')
+ # Now delete the source file and check the pyc was rewritten
+ unlink(fname)
+ unload(TESTFN)
+ m3 = __import__(TESTFN)
+ self.assertEqual(m3.x, 'rewritten')
+ finally:
+ chmod_files(TESTFN)
+ remove_files(TESTFN)
+ unload(TESTFN)
+ del sys.path[0]
+
def test_imp_module(self):
# Verify that the imp module can correctly load and find .py files
@@ -302,6 +357,46 @@ class ImportTests(unittest.TestCase):
del sys.path[0]
remove_files(TESTFN)
+ def test_pyc_mtime(self):
+ # Test for issue #13863: .pyc timestamp sometimes incorrect on Windows.
+ sys.path.insert(0, os.curdir)
+ try:
+ # Jan 1, 2012; Jul 1, 2012.
+ mtimes = 1325376000, 1341100800
+
+ # Different names to avoid running into import caching.
+ tails = "spam", "eggs"
+ for mtime, tail in zip(mtimes, tails):
+ module = TESTFN + tail
+ source = module + ".py"
+ compiled = source + ('c' if __debug__ else 'o')
+
+ # Create a new Python file with the given mtime.
+ with open(source, 'w') as f:
+ f.write("# Just testing\nx=1, 2, 3\n")
+ os.utime(source, (mtime, mtime))
+
+ # Generate the .pyc/o file; if it couldn't be created
+ # for some reason, skip the test.
+ m = __import__(module)
+ if not os.path.exists(compiled):
+ unlink(source)
+ self.skipTest("Couldn't create .pyc/.pyo file.")
+
+ # Actual modification time of .py file.
+ mtime1 = int(os.stat(source).st_mtime) & 0xffffffff
+
+ # mtime that was encoded in the .pyc file.
+ with open(compiled, 'rb') as f:
+ mtime2 = struct.unpack('<L', f.read(8)[4:])[0]
+
+ unlink(compiled)
+ unlink(source)
+
+ self.assertEqual(mtime1, mtime2)
+ finally:
+ sys.path.pop(0)
+
class PycRewritingTests(unittest.TestCase):
# Test that the `co_filename` attribute on code objects always points
@@ -352,7 +447,8 @@ func_filename = func.func_code.co_filename
self.assertEqual(mod.func_filename, self.file_name)
del sys.modules[self.module_name]
mod = self.import_module()
- self.assertEqual(mod.module_filename, self.compiled_name)
+ if not sys.dont_write_bytecode:
+ self.assertEqual(mod.module_filename, self.compiled_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
@@ -424,6 +520,13 @@ class PathsTests(unittest.TestCase):
drive = path[0]
unc = "\\\\%s\\%s$"%(hn, drive)
unc += path[2:]
+ try:
+ os.listdir(unc)
+ except OSError as e:
+ if e.errno in (errno.EPERM, errno.EACCES):
+ # See issue #15338
+ self.skipTest("cannot access administrative share %r" % (unc,))
+ raise
sys.path.append(path)
mod = __import__("test_trailing_slash")
self.assertEqual(mod.testdata, 'test_trailing_slash')
@@ -488,8 +591,58 @@ class RelativeImportTests(unittest.TestCase):
"implicit absolute import")
+class TestSymbolicallyLinkedPackage(unittest.TestCase):
+ package_name = 'sample'
+
+ def setUp(self):
+ if os.path.exists(self.tagged):
+ shutil.rmtree(self.tagged)
+ if os.path.exists(self.package_name):
+ symlink_support.remove_symlink(self.package_name)
+ self.orig_sys_path = sys.path[:]
+
+ # create a sample package; imagine you have a package with a tag and
+ # you want to symbolically link it from its untagged name.
+ os.mkdir(self.tagged)
+ init_file = os.path.join(self.tagged, '__init__.py')
+ open(init_file, 'w').close()
+ assert os.path.exists(init_file)
+
+ # now create a symlink to the tagged package
+ # sample -> sample-tagged
+ symlink_support.symlink(self.tagged, self.package_name)
+
+ assert os.path.isdir(self.package_name)
+ assert os.path.isfile(os.path.join(self.package_name, '__init__.py'))
+
+ @property
+ def tagged(self):
+ return self.package_name + '-tagged'
+
+ # regression test for issue6727
+ @unittest.skipUnless(
+ not hasattr(sys, 'getwindowsversion')
+ or sys.getwindowsversion() >= (6, 0),
+ "Windows Vista or later required")
+ @symlink_support.skip_unless_symlink
+ def test_symlinked_dir_importable(self):
+ # make sure sample can only be imported from the current directory.
+ sys.path[:] = ['.']
+
+ # and try to import the package
+ __import__(self.package_name)
+
+ def tearDown(self):
+ # now cleanup
+ if os.path.exists(self.package_name):
+ symlink_support.remove_symlink(self.package_name)
+ if os.path.exists(self.tagged):
+ shutil.rmtree(self.tagged)
+ sys.path[:] = self.orig_sys_path
+
def test_main(verbose=None):
- run_unittest(ImportTests, PycRewritingTests, PathsTests, RelativeImportTests)
+ run_unittest(ImportTests, PycRewritingTests, PathsTests,
+ RelativeImportTests, TestSymbolicallyLinkedPackage)
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
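test_pyc_mtime above checks the timestamp field of the bytecode header. A small sketch of that layout (4-byte magic number, then the source mtime as a little-endian unsigned long), using a throwaway module name:

    import imp
    import os
    import py_compile
    import struct

    with open('pyc_demo.py', 'w') as f:
        f.write('x = 1\n')
    py_compile.compile('pyc_demo.py')
    compiled = 'pyc_demo.py' + ('c' if __debug__ else 'o')

    with open(compiled, 'rb') as f:
        header = f.read(8)
    magic, mtime = header[:4], struct.unpack('<L', header[4:])[0]
    print(magic == imp.get_magic())                                   # True
    print(mtime == int(os.stat('pyc_demo.py').st_mtime) & 0xffffffff) # True

    for name in ('pyc_demo.py', compiled):
        os.remove(name)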
diff --git a/Lib/test/test_index.py b/Lib/test/test_index.py
index a92b442..4c29993 100644
--- a/Lib/test/test_index.py
+++ b/Lib/test/test_index.py
@@ -91,7 +91,7 @@ class SeqTestCase(unittest.TestCase):
self.assertEqual(self.seq[self.o:self.o2], self.seq[1:3])
self.assertEqual(self.seq[self.n:self.n2], self.seq[2:4])
- def test_slice_bug7532(self):
+ def test_slice_bug7532a(self):
seqlen = len(self.seq)
self.o.ind = int(seqlen * 1.5)
self.n.ind = seqlen + 2
@@ -99,9 +99,12 @@ class SeqTestCase(unittest.TestCase):
self.assertEqual(self.seq[:self.o], self.seq)
self.assertEqual(self.seq[self.n:], self.seq[0:0])
self.assertEqual(self.seq[:self.n], self.seq)
+
+ def test_slice_bug7532b(self):
if isinstance(self.seq, ClassicSeq):
- return
+ self.skipTest('test fails for ClassicSeq')
# These tests fail for ClassicSeq (see bug #7532)
+ seqlen = len(self.seq)
self.o2.ind = -seqlen - 2
self.n2.ind = -int(seqlen * 1.5)
self.assertEqual(self.seq[self.o2:], self.seq)
diff --git a/Lib/test/test_inspect.py b/Lib/test/test_inspect.py
index 30b1556..4130cd0 100644
--- a/Lib/test/test_inspect.py
+++ b/Lib/test/test_inspect.py
@@ -220,8 +220,23 @@ class TestRetrievingSourceCode(GetSourceBase):
[('FesteringGob', mod.FesteringGob),
('MalodorousPervert', mod.MalodorousPervert),
('ParrotDroppings', mod.ParrotDroppings),
- ('StupidGit', mod.StupidGit)])
- tree = inspect.getclasstree([cls[1] for cls in classes], 1)
+ ('StupidGit', mod.StupidGit),
+ ('Tit', mod.MalodorousPervert),
+ ])
+ tree = inspect.getclasstree([cls[1] for cls in classes])
+ self.assertEqual(tree,
+ [(mod.ParrotDroppings, ()),
+ [(mod.FesteringGob, (mod.MalodorousPervert,
+ mod.ParrotDroppings))
+ ],
+ (mod.StupidGit, ()),
+ [(mod.MalodorousPervert, (mod.StupidGit,)),
+ [(mod.FesteringGob, (mod.MalodorousPervert,
+ mod.ParrotDroppings))
+ ]
+ ]
+ ])
+ tree = inspect.getclasstree([cls[1] for cls in classes], True)
self.assertEqual(tree,
[(mod.ParrotDroppings, ()),
(mod.StupidGit, ()),
@@ -404,6 +419,12 @@ class TestBuggyCases(GetSourceBase):
self.assertEqual(inspect.findsource(co), (lines,0))
self.assertEqual(inspect.getsource(co), lines[0])
+ def test_findsource_without_filename(self):
+ for fname in ['', '<string>']:
+ co = compile('x=1', fname, "exec")
+ self.assertRaises(IOError, inspect.findsource, co)
+ self.assertRaises(IOError, inspect.getsource, co)
+
class _BrokenDataDescriptor(object):
"""
diff --git a/Lib/test/test_int.py b/Lib/test/test_int.py
index fa46212..365f9a2 100644
--- a/Lib/test/test_int.py
+++ b/Lib/test/test_int.py
@@ -1,6 +1,7 @@
import sys
import unittest
+from test import test_support
from test.test_support import run_unittest, have_unicode
import math
@@ -44,7 +45,27 @@ if have_unicode:
(unichr(0x200), ValueError),
]
-class IntTestCases(unittest.TestCase):
+class IntLongCommonTests(object):
+
+ """Mixin of test cases to share between both test_int and test_long."""
+
+ # Change to int or long in the TestCase subclass.
+ ntype = None
+
+ def test_no_args(self):
+ self.assertEqual(self.ntype(), 0)
+
+ def test_keyword_args(self):
+ # Test invoking constructor using keyword arguments.
+ self.assertEqual(self.ntype(x=1.2), 1)
+ self.assertEqual(self.ntype('100', base=2), 4)
+ self.assertEqual(self.ntype(x='100', base=2), 4)
+ self.assertRaises(TypeError, self.ntype, base=10)
+ self.assertRaises(TypeError, self.ntype, base=0)
+
+class IntTestCases(IntLongCommonTests, unittest.TestCase):
+
+ ntype = int
def test_basic(self):
self.assertEqual(int(314), 314)
@@ -315,6 +336,46 @@ class IntTestCases(unittest.TestCase):
self.assertEqual(int(float(2**54+10)), 2**54+8)
self.assertEqual(int(float(2**54+11)), 2**54+12)
+ def test_valid_non_numeric_input_types_for_x(self):
+ # Test possible valid non-numeric types for x, including subclasses
+ # of the allowed built-in types.
+ class CustomStr(str): pass
+ values = ['100', CustomStr('100')]
+
+ if have_unicode:
+ class CustomUnicode(unicode): pass
+ values += [unicode('100'), CustomUnicode(unicode('100'))]
+
+ for x in values:
+ msg = 'x has value %s and type %s' % (x, type(x).__name__)
+ try:
+ self.assertEqual(int(x), 100, msg=msg)
+ self.assertEqual(int(x, 2), 4, msg=msg)
+ except TypeError, err:
+ raise AssertionError('For %s got TypeError: %s' %
+ (type(x).__name__, err))
+
+ def test_error_on_string_float_for_x(self):
+ self.assertRaises(ValueError, int, '1.2')
+
+ def test_error_on_bytearray_for_x(self):
+ self.assertRaises(TypeError, int, bytearray('100'), 2)
+
+ def test_error_on_invalid_int_bases(self):
+ for base in [-1, 1, 1000]:
+ self.assertRaises(ValueError, int, '100', base)
+
+ def test_error_on_string_base(self):
+ self.assertRaises(TypeError, int, 100, base='foo')
+
+ @test_support.cpython_only
+ def test_small_ints(self):
+ self.assertIs(int('10'), 10)
+ self.assertIs(int('-1'), -1)
+ if have_unicode:
+ self.assertIs(int(u'10'), 10)
+ self.assertIs(int(u'-1'), -1)
+
def test_intconversion(self):
# Test __int__()
class ClassicMissingMethods:
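The shared mixin above pins down the keyword forms of the numeric constructors. A quick sketch of what it asserts, identical for int and long in 2.7:

    print(int())                  # 0
    print(int(x=1.2))             # 1
    print(int('100', base=2))     # 4
    print(long(x='100', base=2))  # 4
    # int(base=10) raises TypeError: the value argument is required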
diff --git a/Lib/test/test_io.py b/Lib/test/test_io.py
index b9972f3..af3b90a 100644
--- a/Lib/test/test_io.py
+++ b/Lib/test/test_io.py
@@ -29,12 +29,15 @@ import array
import random
import unittest
import weakref
+import warnings
import abc
import signal
import errno
from itertools import cycle, count
from collections import deque
+from UserList import UserList
from test import test_support as support
+import contextlib
import codecs
import io # C implementation of io
@@ -391,14 +394,9 @@ class IOTest(unittest.TestCase):
# a long time to build the >2GB file and takes >2GB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
- if not support.is_resource_enabled("largefile"):
- print("\nTesting large file ops skipped on %s." % sys.platform,
- file=sys.stderr)
- print("It requires %d bytes and a long time." % self.LARGE,
- file=sys.stderr)
- print("Use 'regrtest.py -u largefile test_io' to run it.",
- file=sys.stderr)
- return
+ support.requires(
+ 'largefile',
+ 'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
@@ -572,6 +570,7 @@ class IOTest(unittest.TestCase):
raise IOError()
f.flush = bad_flush
self.assertRaises(IOError, f.close) # exception not swallowed
+ self.assertTrue(f.closed)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
@@ -593,6 +592,33 @@ class IOTest(unittest.TestCase):
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
+ def test_fileio_closefd(self):
+ # Issue #4841
+ with self.open(__file__, 'rb') as f1, \
+ self.open(__file__, 'rb') as f2:
+ fileio = self.FileIO(f1.fileno(), closefd=False)
+ # .__init__() must not close f1
+ fileio.__init__(f2.fileno(), closefd=False)
+ f1.readline()
+ # .close() must not close f2
+ fileio.close()
+ f2.readline()
+
+ def test_nonbuffered_textio(self):
+ with warnings.catch_warnings(record=True) as recorded:
+ with self.assertRaises(ValueError):
+ self.open(support.TESTFN, 'w', buffering=0)
+ support.gc_collect()
+ self.assertEqual(recorded, [])
+
+ def test_invalid_newline(self):
+ with warnings.catch_warnings(record=True) as recorded:
+ with self.assertRaises(ValueError):
+ self.open(support.TESTFN, 'w', newline='invalid')
+ support.gc_collect()
+ self.assertEqual(recorded, [])
+
+
class CIOTest(IOTest):
def test_IOBase_finalize(self):
@@ -634,6 +660,7 @@ class CommonBufferedTests:
self.assertEqual(42, bufio.fileno())
+ @unittest.skip('test having existential crisis')
def test_no_fileno(self):
# XXX will we always have fileno() function? If so, kill
# this test. Else, write it.
@@ -718,6 +745,21 @@ class CommonBufferedTests:
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(IOError, b.close) # exception not swallowed
+ self.assertTrue(b.closed)
+
+ def test_close_error_on_close(self):
+ raw = self.MockRawIO()
+ def bad_flush():
+ raise IOError('flush')
+ def bad_close():
+ raise IOError('close')
+ raw.close = bad_close
+ b = self.tp(raw)
+ b.flush = bad_flush
+ with self.assertRaises(IOError) as err: # exception not swallowed
+ b.close()
+ self.assertEqual(err.exception.args, ('close',))
+ self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
@@ -735,6 +777,20 @@ class CommonBufferedTests:
buf.raw = x
+class SizeofTest:
+
+ @support.cpython_only
+ def test_sizeof(self):
+ bufsize1 = 4096
+ bufsize2 = 8192
+ rawio = self.MockRawIO()
+ bufio = self.tp(rawio, buffer_size=bufsize1)
+ size = sys.getsizeof(bufio) - bufsize1
+ rawio = self.MockRawIO()
+ bufio = self.tp(rawio, buffer_size=bufsize2)
+ self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
+
+
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
@@ -752,6 +808,16 @@ class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
+ def test_uninitialized(self):
+ bufio = self.tp.__new__(self.tp)
+ del bufio
+ bufio = self.tp.__new__(self.tp)
+ self.assertRaisesRegexp((ValueError, AttributeError),
+ 'uninitialized|has no attribute',
+ bufio.read, 0)
+ bufio.__init__(self.MockRawIO())
+ self.assertEqual(bufio.read(0), b'')
+
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
@@ -918,7 +984,7 @@ class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
-class CBufferedReaderTest(BufferedReaderTest):
+class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
@@ -959,6 +1025,12 @@ class CBufferedReaderTest(BufferedReaderTest):
support.gc_collect()
self.assertTrue(wr() is None, wr)
+ def test_args_error(self):
+ # Issue #17275
+ with self.assertRaisesRegexp(TypeError, "BufferedReader"):
+ self.tp(io.BytesIO(), 1024, 1024, 1024)
+
+
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
@@ -982,6 +1054,16 @@ class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
+ def test_uninitialized(self):
+ bufio = self.tp.__new__(self.tp)
+ del bufio
+ bufio = self.tp.__new__(self.tp)
+ self.assertRaisesRegexp((ValueError, AttributeError),
+ 'uninitialized|has no attribute',
+ bufio.write, b'')
+ bufio.__init__(self.MockRawIO())
+ self.assertEqual(bufio.write(b''), 0)
+
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
@@ -1099,6 +1181,28 @@ class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
+ def test_writelines(self):
+ l = [b'ab', b'cd', b'ef']
+ writer = self.MockRawIO()
+ bufio = self.tp(writer, 8)
+ bufio.writelines(l)
+ bufio.flush()
+ self.assertEqual(b''.join(writer._write_stack), b'abcdef')
+
+ def test_writelines_userlist(self):
+ l = UserList([b'ab', b'cd', b'ef'])
+ writer = self.MockRawIO()
+ bufio = self.tp(writer, 8)
+ bufio.writelines(l)
+ bufio.flush()
+ self.assertEqual(b''.join(writer._write_stack), b'abcdef')
+
+ def test_writelines_error(self):
+ writer = self.MockRawIO()
+ bufio = self.tp(writer, 8)
+ self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
+ self.assertRaises(TypeError, bufio.writelines, None)
+
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
@@ -1180,8 +1284,18 @@ class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
DeprecationWarning)):
self.tp(self.MockRawIO(), 8, 12)
+ def test_write_error_on_close(self):
+ raw = self.MockRawIO()
+ def bad_write(b):
+ raise IOError()
+ raw.write = bad_write
+ b = self.tp(raw)
+ b.write(b'spam')
+ self.assertRaises(IOError, b.close) # exception not swallowed
+ self.assertTrue(b.closed)
+
-class CBufferedWriterTest(BufferedWriterTest):
+class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
@@ -1219,6 +1333,11 @@ class CBufferedWriterTest(BufferedWriterTest):
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
+ def test_args_error(self):
+ # Issue #17275
+ with self.assertRaisesRegexp(TypeError, "BufferedWriter"):
+ self.tp(io.BytesIO(), 1024, 1024, 1024)
+
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
@@ -1229,6 +1348,20 @@ class BufferedRWPairTest(unittest.TestCase):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
+ def test_uninitialized(self):
+ pair = self.tp.__new__(self.tp)
+ del pair
+ pair = self.tp.__new__(self.tp)
+ self.assertRaisesRegexp((ValueError, AttributeError),
+ 'uninitialized|has no attribute',
+ pair.read, 0)
+ self.assertRaisesRegexp((ValueError, AttributeError),
+ 'uninitialized|has no attribute',
+ pair.write, b'')
+ pair.__init__(self.MockRawIO(), self.MockRawIO())
+ self.assertEqual(pair.read(0), b'')
+ self.assertEqual(pair.write(b''), 0)
+
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
@@ -1356,6 +1489,10 @@ class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
+ def test_uninitialized(self):
+ BufferedReaderTest.test_uninitialized(self)
+ BufferedWriterTest.test_uninitialized(self)
+
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
@@ -1570,7 +1707,8 @@ class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
-class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest, BufferedRandomTest):
+class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest,
+ BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
@@ -1587,6 +1725,12 @@ class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest, BufferedRand
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
+ def test_args_error(self):
+ # Issue #17275
+ with self.assertRaisesRegexp(TypeError, "BufferedRandom"):
+ self.tp(io.BytesIO(), 1024, 1024, 1024)
+
+
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
@@ -2183,6 +2327,28 @@ class TextIOWrapperTest(unittest.TestCase):
reads += c
self.assertEqual(reads, "A"*127+"\nB")
+ def test_writelines(self):
+ l = ['ab', 'cd', 'ef']
+ buf = self.BytesIO()
+ txt = self.TextIOWrapper(buf)
+ txt.writelines(l)
+ txt.flush()
+ self.assertEqual(buf.getvalue(), b'abcdef')
+
+ def test_writelines_userlist(self):
+ l = UserList(['ab', 'cd', 'ef'])
+ buf = self.BytesIO()
+ txt = self.TextIOWrapper(buf)
+ txt.writelines(l)
+ txt.flush()
+ self.assertEqual(buf.getvalue(), b'abcdef')
+
+ def test_writelines_error(self):
+ txt = self.TextIOWrapper(self.BytesIO())
+ self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
+ self.assertRaises(TypeError, txt.writelines, None)
+ self.assertRaises(TypeError, txt.writelines, b'abc')
+
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
@@ -2306,6 +2472,7 @@ class TextIOWrapperTest(unittest.TestCase):
raise IOError()
txt.flush = bad_flush
self.assertRaises(IOError, txt.close) # exception not swallowed
+ self.assertTrue(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
@@ -2320,6 +2487,39 @@ class TextIOWrapperTest(unittest.TestCase):
with self.assertRaises((AttributeError, TypeError)):
txt.buffer = buf
+ def test_read_nonbytes(self):
+ # Issue #17106
+ # Crash when underlying read() returns non-bytes
+ class NonbytesStream(self.StringIO):
+ read1 = self.StringIO.read

+ t = self.TextIOWrapper(NonbytesStream('a'))
+ with self.maybeRaises(TypeError):
+ t.read(1)
+ t = self.TextIOWrapper(NonbytesStream('a'))
+ with self.maybeRaises(TypeError):
+ t.readline()
+ t = self.TextIOWrapper(NonbytesStream('a'))
+ self.assertEqual(t.read(), u'a')
+
+ def test_illegal_decoder(self):
+ # Issue #17106
+ # Crash when decoder returns non-string
+ t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
+ encoding='quopri_codec')
+ with self.maybeRaises(TypeError):
+ t.read(1)
+ t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
+ encoding='quopri_codec')
+ with self.maybeRaises(TypeError):
+ t.readline()
+ t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
+ encoding='quopri_codec')
+ with self.maybeRaises(TypeError):
+ t.read()
+
+
class CTextIOWrapperTest(TextIOWrapperTest):
def test_initialization(self):
@@ -2361,9 +2561,13 @@ class CTextIOWrapperTest(TextIOWrapperTest):
t2.buddy = t1
support.gc_collect()
+ maybeRaises = unittest.TestCase.assertRaises
+
class PyTextIOWrapperTest(TextIOWrapperTest):
- pass
+ @contextlib.contextmanager
+ def maybeRaises(self, *args, **kwds):
+ yield
class IncrementalNewlineDecoderTest(unittest.TestCase):
@@ -2725,7 +2929,7 @@ class SignalsTest(unittest.TestCase):
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
self.assertRaises(ZeroDivisionError,
- wio.write, item * (1024 * 1024))
+ wio.write, item * (support.PIPE_MAX_SIZE // len(item) + 1))
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
@@ -2823,7 +3027,7 @@ class SignalsTest(unittest.TestCase):
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
- N = 1024 * 1024
+ N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
diff --git a/Lib/test/test_iter.py b/Lib/test/test_iter.py
index 64984cc..bd1b32d 100644
--- a/Lib/test/test_iter.py
+++ b/Lib/test/test_iter.py
@@ -526,7 +526,7 @@ class TestCase(unittest.TestCase):
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(reduce(add, d), "".join(d.keys()))
- # This test case will be removed if we don't have Unicode
+ @unittest.skipUnless(have_unicode, 'needs unicode support')
def test_unicode_join_endcase(self):
# This class inserts a Unicode object into its argument's natural
@@ -567,8 +567,6 @@ class TestCase(unittest.TestCase):
unlink(TESTFN)
except OSError:
pass
- if not have_unicode:
- def test_unicode_join_endcase(self): pass
# Test iterators with 'x in y' and 'x not in y'.
def test_in_and_not_in(self):
@@ -908,6 +906,21 @@ class TestCase(unittest.TestCase):
except TypeError:
pass
+ def test_extending_list_with_iterator_does_not_segfault(self):
+ # The code to extend a list with an iterator has a fair
+ # amount of nontrivial logic in terms of guessing how
+ # much memory to allocate in advance, "stealing" refs,
+ # and then shrinking at the end. This is a basic smoke
+ # test for that scenario.
+ def gen():
+ for i in range(500):
+ yield i
+ lst = [0] * 500
+ for i in range(240):
+ lst.pop(0)
+ lst.extend(gen())
+ self.assertEqual(len(lst), 760)
+
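# Small usage sketch of the code path smoke-tested above: extending a list
# from an iterator of unknown length forces list.extend() to guess a size,
# grow the allocation, and shrink it again once the iterator is exhausted.
lst = [0] * 500
del lst[:240]                          # same effect as popping 240 items
lst.extend(x for x in range(500))      # length unknown until exhaustion
assert len(lst) == 760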
def test_main():
run_unittest(TestCase)
diff --git a/Lib/test/test_itertools.py b/Lib/test/test_itertools.py
index ccdbf8a..1bf6c53 100644
--- a/Lib/test/test_itertools.py
+++ b/Lib/test/test_itertools.py
@@ -1,7 +1,7 @@
import unittest
from test import test_support
from itertools import *
-from weakref import proxy
+import weakref
from decimal import Decimal
from fractions import Fraction
import sys
@@ -274,7 +274,7 @@ class TestBasicOps(unittest.TestCase):
self.assertEqual(result, list(permutations(values, None))) # test r as None
self.assertEqual(result, list(permutations(values))) # test default r
- @test_support.impl_detail("tuple resuse is CPython specific")
+ @test_support.impl_detail("tuple reuse is specific to CPython")
def test_permutations_tuple_reuse(self):
self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
@@ -536,7 +536,7 @@ class TestBasicOps(unittest.TestCase):
zip('abc', 'def'))
@test_support.impl_detail("tuple reuse is specific to CPython")
- def test_izip_tuple_resuse(self):
+ def test_izip_tuple_reuse(self):
ids = map(id, izip('abc', 'def'))
self.assertEqual(min(ids), max(ids))
ids = map(id, list(izip('abc', 'def')))
@@ -698,6 +698,9 @@ class TestBasicOps(unittest.TestCase):
def test_repeat(self):
self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
+ self.assertEqual(list(repeat(object='a', times=0)), [])
+ self.assertEqual(list(repeat(object='a', times=-1)), [])
+ self.assertEqual(list(repeat(object='a', times=-2)), [])
self.assertEqual(zip(xrange(3),repeat('a')),
[(0, 'a'), (1, 'a'), (2, 'a')])
self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
@@ -714,6 +717,12 @@ class TestBasicOps(unittest.TestCase):
list(r)
self.assertEqual(repr(r), 'repeat((1+0j), 0)')
+ def test_repeat_with_negative_times(self):
+ self.assertEqual(repr(repeat('a', -1)), "repeat('a', 0)")
+ self.assertEqual(repr(repeat('a', -2)), "repeat('a', 0)")
+ self.assertEqual(repr(repeat('a', times=-1)), "repeat('a', 0)")
+ self.assertEqual(repr(repeat('a', times=-2)), "repeat('a', 0)")
+
def test_imap(self):
self.assertEqual(list(imap(operator.pow, range(3), range(1,7))),
[0**1, 1**2, 2**3])
@@ -792,6 +801,15 @@ class TestBasicOps(unittest.TestCase):
self.assertEqual(list(islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
+ # Issue #21321: check source iterator is not referenced
+ # from islice() after the latter has been exhausted
+ it = (x for x in (1, 2))
+ wr = weakref.ref(it)
+ it = islice(it, 1)
+ self.assertIsNotNone(wr())
+ list(it) # exhaust the iterator
+ self.assertIsNone(wr())
+
def test_takewhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
underten = lambda x: x<10
@@ -901,11 +919,17 @@ class TestBasicOps(unittest.TestCase):
# test that tee objects are weak referencable
a, b = tee(xrange(10))
- p = proxy(a)
+ p = weakref.proxy(a)
self.assertEqual(getattr(p, '__class__'), type(b))
del a
self.assertRaises(ReferenceError, getattr, p, '__class__')
+ # Issue 13454: Crash when deleting backward iterator from tee()
+ def test_tee_del_backward(self):
+ forward, backward = tee(repeat(None, 20000000))
+ any(forward) # exhaust the iterator
+ del backward
+
def test_StopIteration(self):
self.assertRaises(StopIteration, izip().next)
diff --git a/Lib/test/test_kqueue.py b/Lib/test/test_kqueue.py
index e5eecfa..3dffc73 100644
--- a/Lib/test/test_kqueue.py
+++ b/Lib/test/test_kqueue.py
@@ -70,13 +70,13 @@ class TestKQueue(unittest.TestCase):
self.assertEqual(ev, ev)
self.assertNotEqual(ev, other)
- bignum = sys.maxsize * 2 + 1
- ev = select.kevent(bignum, 1, 2, 3, sys.maxsize, bignum)
+ bignum = 0x7fff
+ ev = select.kevent(bignum, 1, 2, 3, bignum - 1, bignum)
self.assertEqual(ev.ident, bignum)
self.assertEqual(ev.filter, 1)
self.assertEqual(ev.flags, 2)
self.assertEqual(ev.fflags, 3)
- self.assertEqual(ev.data, sys.maxsize)
+ self.assertEqual(ev.data, bignum - 1)
self.assertEqual(ev.udata, bignum)
self.assertEqual(ev, ev)
self.assertNotEqual(ev, other)
@@ -96,11 +96,6 @@ class TestKQueue(unittest.TestCase):
pass # FreeBSD doesn't raise an exception here
server, addr = serverSocket.accept()
- if sys.platform.startswith("darwin"):
- flags = select.KQ_EV_ADD | select.KQ_EV_ENABLE
- else:
- flags = 0
-
kq = select.kqueue()
kq2 = select.kqueue.fromfd(kq.fileno())
@@ -122,11 +117,10 @@ class TestKQueue(unittest.TestCase):
kq2.control([ev], 0)
events = kq.control(None, 4, 1)
- events = [(e.ident, e.filter, e.flags) for e in events]
- events.sort()
- self.assertEqual(events, [
- (client.fileno(), select.KQ_FILTER_WRITE, flags),
- (server.fileno(), select.KQ_FILTER_WRITE, flags)])
+ events = set((e.ident, e.filter) for e in events)
+ self.assertEqual(events, set([
+ (client.fileno(), select.KQ_FILTER_WRITE),
+ (server.fileno(), select.KQ_FILTER_WRITE)]))
client.send("Hello!")
server.send("world!!!")
@@ -140,14 +134,12 @@ class TestKQueue(unittest.TestCase):
else:
self.fail('timeout waiting for event notifications')
- events = [(e.ident, e.filter, e.flags) for e in events]
- events.sort()
-
- self.assertEqual(events, [
- (client.fileno(), select.KQ_FILTER_WRITE, flags),
- (client.fileno(), select.KQ_FILTER_READ, flags),
- (server.fileno(), select.KQ_FILTER_WRITE, flags),
- (server.fileno(), select.KQ_FILTER_READ, flags)])
+ events = set((e.ident, e.filter) for e in events)
+ self.assertEqual(events, set([
+ (client.fileno(), select.KQ_FILTER_WRITE),
+ (client.fileno(), select.KQ_FILTER_READ),
+ (server.fileno(), select.KQ_FILTER_WRITE),
+ (server.fileno(), select.KQ_FILTER_READ)]))
# Remove completely client, and server read part
ev = select.kevent(client.fileno(),
@@ -164,10 +156,9 @@ class TestKQueue(unittest.TestCase):
kq.control([ev], 0, 0)
events = kq.control([], 4, 0.99)
- events = [(e.ident, e.filter, e.flags) for e in events]
- events.sort()
- self.assertEqual(events, [
- (server.fileno(), select.KQ_FILTER_WRITE, flags)])
+ events = set((e.ident, e.filter) for e in events)
+ self.assertEqual(events, set([
+ (server.fileno(), select.KQ_FILTER_WRITE)]))
client.close()
server.close()
diff --git a/Lib/test/test_locale.py b/Lib/test/test_locale.py
index 4575592..cd4d46d 100644
--- a/Lib/test/test_locale.py
+++ b/Lib/test/test_locale.py
@@ -372,6 +372,77 @@ class TestStringMethods(BaseLocalizedTest):
self.assertEqual('\xec\xa0\xbc'.split(), ['\xec\xa0\xbc'])
+class NormalizeTest(unittest.TestCase):
+ def check(self, localename, expected):
+ self.assertEqual(locale.normalize(localename), expected, msg=localename)
+
+ def test_locale_alias(self):
+ for localename, alias in locale.locale_alias.items():
+ self.check(localename, alias)
+
+ def test_empty(self):
+ self.check('', '')
+
+ def test_c(self):
+ self.check('c', 'C')
+ self.check('posix', 'C')
+
+ def test_english(self):
+ self.check('en', 'en_US.ISO8859-1')
+ self.check('EN', 'en_US.ISO8859-1')
+ self.check('en_US', 'en_US.ISO8859-1')
+ self.check('en_us', 'en_US.ISO8859-1')
+ self.check('en_GB', 'en_GB.ISO8859-1')
+ self.check('en_US.UTF-8', 'en_US.UTF-8')
+ self.check('en_US.utf8', 'en_US.UTF-8')
+ self.check('en_US:UTF-8', 'en_US.UTF-8')
+ self.check('en_US.ISO8859-1', 'en_US.ISO8859-1')
+ self.check('en_US.US-ASCII', 'en_US.ISO8859-1')
+ self.check('english', 'en_EN.ISO8859-1')
+
+ def test_hyphenated_encoding(self):
+ self.check('az_AZ.iso88599e', 'az_AZ.ISO8859-9E')
+ self.check('az_AZ.ISO8859-9E', 'az_AZ.ISO8859-9E')
+ self.check('tt_RU.koi8c', 'tt_RU.KOI8-C')
+ self.check('tt_RU.KOI8-C', 'tt_RU.KOI8-C')
+ self.check('lo_LA.cp1133', 'lo_LA.IBM-CP1133')
+ self.check('lo_LA.ibmcp1133', 'lo_LA.IBM-CP1133')
+ self.check('lo_LA.IBM-CP1133', 'lo_LA.IBM-CP1133')
+ self.check('uk_ua.microsoftcp1251', 'uk_UA.CP1251')
+ self.check('uk_ua.microsoft-cp1251', 'uk_UA.CP1251')
+ self.check('ka_ge.georgianacademy', 'ka_GE.GEORGIAN-ACADEMY')
+ self.check('ka_GE.GEORGIAN-ACADEMY', 'ka_GE.GEORGIAN-ACADEMY')
+ self.check('cs_CZ.iso88592', 'cs_CZ.ISO8859-2')
+ self.check('cs_CZ.ISO8859-2', 'cs_CZ.ISO8859-2')
+
+ def test_euro_modifier(self):
+ self.check('de_DE@euro', 'de_DE.ISO8859-15')
+ self.check('en_US.ISO8859-15@euro', 'en_US.ISO8859-15')
+
+ def test_latin_modifier(self):
+ self.check('be_BY.UTF-8@latin', 'be_BY.UTF-8@latin')
+ self.check('sr_RS.UTF-8@latin', 'sr_RS.UTF-8@latin')
+
+ def test_valencia_modifier(self):
+ self.check('ca_ES.UTF-8@valencia', 'ca_ES.UTF-8@valencia')
+ self.check('ca_ES@valencia', 'ca_ES.ISO8859-1@valencia')
+ self.check('ca@valencia', 'ca_ES.ISO8859-1@valencia')
+
+ def test_devanagari_modifier(self):
+ self.check('ks_IN.UTF-8@devanagari', 'ks_IN.UTF-8@devanagari')
+ self.check('ks_IN@devanagari', 'ks_IN.UTF-8@devanagari')
+ self.check('ks@devanagari', 'ks_IN.UTF-8@devanagari')
+ self.check('ks_IN.UTF-8', 'ks_IN.UTF-8')
+ self.check('ks_IN', 'ks_IN.UTF-8')
+ self.check('ks', 'ks_IN.UTF-8')
+ self.check('sd_IN.UTF-8@devanagari', 'sd_IN.UTF-8@devanagari')
+ self.check('sd_IN@devanagari', 'sd_IN.UTF-8@devanagari')
+ self.check('sd@devanagari', 'sd_IN.UTF-8@devanagari')
+ self.check('sd_IN.UTF-8', 'sd_IN.UTF-8')
+ self.check('sd_IN', 'sd_IN.UTF-8')
+ self.check('sd', 'sd_IN.UTF-8')
+
+
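# Usage sketch of locale.normalize(), which the checks above are written
# against: it expands loose spellings into the canonical
# "language_COUNTRY.ENCODING" form.
import locale
assert locale.normalize('c') == 'C'
assert locale.normalize('en_US.utf8') == 'en_US.UTF-8'
assert locale.normalize('de_DE@euro') == 'de_DE.ISO8859-15'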
class TestMiscellaneous(unittest.TestCase):
def test_getpreferredencoding(self):
# Invoke getpreferredencoding to make sure it does not cause exceptions.
@@ -400,13 +471,21 @@ class TestMiscellaneous(unittest.TestCase):
# Issue #1813: setting and getting the locale under a Turkish locale
oldlocale = locale.getlocale()
self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
- try:
- locale.setlocale(locale.LC_CTYPE, 'tr_TR')
- except locale.Error:
+ for loc in ('tr_TR', 'tr_TR.UTF-8', 'tr_TR.ISO8859-9'):
+ try:
+ locale.setlocale(locale.LC_CTYPE, loc)
+ break
+ except locale.Error:
+ continue
+ else:
# Unsupported locale on this system
self.skipTest('test needs Turkish locale')
loc = locale.getlocale()
- locale.setlocale(locale.LC_CTYPE, loc)
+ try:
+ locale.setlocale(locale.LC_CTYPE, loc)
+ except Exception as e:
+ self.fail("Failed to set locale %r (default locale is %r): %r" %
+ (loc, oldlocale, e))
self.assertEqual(loc, locale.getlocale())
def test_normalize_issue12752(self):
diff --git a/Lib/test/test_logging.py b/Lib/test/test_logging.py
index 2aac360..677f013 100644
--- a/Lib/test/test_logging.py
+++ b/Lib/test/test_logging.py
@@ -1,6 +1,4 @@
-#!/usr/bin/env python
-#
-# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
+# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
@@ -18,7 +16,7 @@
"""Test harness for the logging module. Run all tests.
-Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
+Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
"""
import logging
@@ -31,6 +29,7 @@ import cStringIO
import gc
import json
import os
+import random
import re
import select
import socket
@@ -40,6 +39,7 @@ import sys
import tempfile
from test.test_support import captured_stdout, run_with_locale, run_unittest
import textwrap
+import time
import unittest
import warnings
import weakref
@@ -712,9 +712,30 @@ class ConfigFileTest(BaseTest):
datefmt=
"""
- def apply_config(self, conf):
+ disable_test = """
+ [loggers]
+ keys=root
+
+ [handlers]
+ keys=screen
+
+ [formatters]
+ keys=
+
+ [logger_root]
+ level=DEBUG
+ handlers=screen
+
+ [handler_screen]
+ level=DEBUG
+ class=StreamHandler
+ args=(sys.stdout,)
+ formatter=
+ """
+
+ def apply_config(self, conf, **kwargs):
file = cStringIO.StringIO(textwrap.dedent(conf))
- logging.config.fileConfig(file)
+ logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
@@ -818,6 +839,15 @@ class ConfigFileTest(BaseTest):
# Original logger output is empty.
self.assert_log_lines([])
+ def test_logger_disabling(self):
+ self.apply_config(self.disable_test)
+ logger = logging.getLogger('foo')
+ self.assertFalse(logger.disabled)
+ self.apply_config(self.disable_test)
+ self.assertTrue(logger.disabled)
+ self.apply_config(self.disable_test, disable_existing_loggers=False)
+ self.assertFalse(logger.disabled)
+
class LogRecordStreamHandler(StreamRequestHandler):
"""Handler for a streaming logging request. It saves the log message in the
@@ -1028,6 +1058,24 @@ class EncodingTest(BaseTest):
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, '\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
+ def test_encoding_utf16_unicode(self):
+ # Issue #19267
+ log = logging.getLogger("test")
+ message = u'b\u0142\u0105d'
+ writer_class = codecs.getwriter('utf-16-le')
+ writer_class.encoding = 'utf-16-le'
+ stream = cStringIO.StringIO()
+ writer = writer_class(stream, 'strict')
+ handler = logging.StreamHandler(writer)
+ log.addHandler(handler)
+ try:
+ log.warning(message)
+ finally:
+ log.removeHandler(handler)
+ handler.close()
+ s = stream.getvalue()
+ self.assertEqual(s, 'b\x00B\x01\x05\x01d\x00\n\x00')
+
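# Worked check of the expected byte string above: the logged message plus the
# trailing newline, encoded as UTF-16-LE (each character becomes two bytes).
assert u'b\u0142\u0105d\n'.encode('utf-16-le') == 'b\x00B\x01\x05\x01d\x00\n\x00'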
class WarningsTest(BaseTest):
@@ -1583,6 +1631,36 @@ class ConfigDictTest(BaseTest):
},
}
+ out_of_order = {
+ "version": 1,
+ "formatters": {
+ "mySimpleFormatter": {
+ "format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s"
+ }
+ },
+ "handlers": {
+ "fileGlobal": {
+ "class": "logging.StreamHandler",
+ "level": "DEBUG",
+ "formatter": "mySimpleFormatter"
+ },
+ "bufferGlobal": {
+ "class": "logging.handlers.MemoryHandler",
+ "capacity": 5,
+ "formatter": "mySimpleFormatter",
+ "target": "fileGlobal",
+ "level": "DEBUG"
+ }
+ },
+ "loggers": {
+ "mymodule": {
+ "level": "DEBUG",
+ "handlers": ["bufferGlobal"],
+ "propagate": "true"
+ }
+ }
+ }
+
def apply_config(self, conf):
logging.config.dictConfig(conf)
@@ -1837,6 +1915,10 @@ class ConfigDictTest(BaseTest):
# Original logger output is empty.
self.assert_log_lines([])
+ def test_out_of_order(self):
+ self.apply_config(self.out_of_order)
+ handler = logging.getLogger('mymodule').handlers[0]
+ self.assertIsInstance(handler.target, logging.Handler)
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
@@ -1873,6 +1955,47 @@ class ChildLoggerTest(BaseTest):
self.assertTrue(c2 is c3)
+class HandlerTest(BaseTest):
+
+ @unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
+ @unittest.skipUnless(threading, 'Threading required for this test.')
+ def test_race(self):
+ # Issue #14632 refers.
+ def remove_loop(fname, tries):
+ for _ in range(tries):
+ try:
+ os.unlink(fname)
+ except OSError:
+ pass
+ time.sleep(0.004 * random.randint(0, 4))
+
+ del_count = 500
+ log_count = 500
+
+ for delay in (False, True):
+ fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
+ os.close(fd)
+ remover = threading.Thread(target=remove_loop, args=(fn, del_count))
+ remover.daemon = True
+ remover.start()
+ h = logging.handlers.WatchedFileHandler(fn, delay=delay)
+ f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
+ h.setFormatter(f)
+ try:
+ for _ in range(log_count):
+ time.sleep(0.005)
+ r = logging.makeLogRecord({'msg': 'testing' })
+ h.handle(r)
+ finally:
+ remover.join()
+ try:
+ h.close()
+ except ValueError:
+ pass
+ if os.path.exists(fn):
+ os.unlink(fn)
+
+
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@@ -1882,7 +2005,7 @@ def test_main():
CustomLevelsAndFiltersTest, MemoryHandlerTest,
ConfigFileTest, SocketHandlerTest, MemoryTest,
EncodingTest, WarningsTest, ConfigDictTest, ManagerTest,
- ChildLoggerTest)
+ ChildLoggerTest, HandlerTest)
if __name__ == "__main__":
test_main()
diff --git a/Lib/test/test_long.py b/Lib/test/test_long.py
index 58cfc3a..8d16bb0 100644
--- a/Lib/test/test_long.py
+++ b/Lib/test/test_long.py
@@ -1,10 +1,11 @@
import unittest
-from test import test_support
import sys
import random
import math
+from test import test_int, test_support
+
# Used for lazy formatting of failure messages
class Frm(object):
def __init__(self, format, *args):
@@ -78,8 +79,9 @@ if test_support.have_unicode:
(unichr(0x200), ValueError),
]
-class LongTest(unittest.TestCase):
+class LongTest(test_int.IntLongCommonTests, unittest.TestCase):
+ ntype = long
# Get quasi-random long consisting of ndigits digits (in base BASE).
# quasi == the most-significant digit will not be 0, and the number
@@ -88,7 +90,7 @@ class LongTest(unittest.TestCase):
# The sign of the number is also random.
def getran(self, ndigits):
- self.assertTrue(ndigits > 0)
+ self.assertGreater(ndigits, 0)
nbits_hi = ndigits * SHIFT
nbits_lo = nbits_hi - SHIFT + 1
answer = 0L
@@ -586,7 +588,7 @@ class LongTest(unittest.TestCase):
pass
x = long2(1L<<100)
y = int(x)
- self.assertTrue(type(y) is long,
+ self.assertIs(type(y), long,
"overflowing int conversion must return long not long subtype")
# long -> Py_ssize_t conversion
diff --git a/Lib/test/test_macos.py b/Lib/test/test_macos.py
index 5af5105..0956d1d 100644
--- a/Lib/test/test_macos.py
+++ b/Lib/test/test_macos.py
@@ -8,11 +8,9 @@ MacOS = test_support.import_module('MacOS')
TESTFN2 = test_support.TESTFN + '2'
class TestMacOS(unittest.TestCase):
-
+ @unittest.skipUnless(os.path.exists('/Developer/Tools/SetFile'),
+ '/Developer/Tools/SetFile does not exist')
def testGetCreatorAndType(self):
- if not os.path.exists('/Developer/Tools/SetFile'):
- return
-
try:
fp = open(test_support.TESTFN, 'w')
fp.write('\n')
@@ -29,10 +27,9 @@ class TestMacOS(unittest.TestCase):
finally:
os.unlink(test_support.TESTFN)
+ @unittest.skipUnless(os.path.exists('/Developer/Tools/GetFileInfo'),
+ '/Developer/Tools/GetFileInfo does not exist')
def testSetCreatorAndType(self):
- if not os.path.exists('/Developer/Tools/GetFileInfo'):
- return
-
try:
fp = open(test_support.TESTFN, 'w')
fp.write('\n')
diff --git a/Lib/test/test_macostools.py b/Lib/test/test_macostools.py
index 4f15982..ed80c1d 100644
--- a/Lib/test/test_macostools.py
+++ b/Lib/test/test_macostools.py
@@ -12,6 +12,8 @@ import macostools
TESTFN2 = test_support.TESTFN + '2'
+requires_32bit = unittest.skipUnless(sys.maxint < 2**32, '32-bit only test')
+
class TestMacostools(unittest.TestCase):
def setUp(self):
@@ -51,30 +53,32 @@ class TestMacostools(unittest.TestCase):
DeprecationWarning), quiet=True):
macostools.touched(test_support.TESTFN)
- if sys.maxint < 2**32:
- def test_copy(self):
- test_support.unlink(TESTFN2)
- macostools.copy(test_support.TESTFN, TESTFN2)
- self.assertEqual(self.compareData(), '')
-
- if sys.maxint < 2**32:
- def test_mkalias(self):
- test_support.unlink(TESTFN2)
- macostools.mkalias(test_support.TESTFN, TESTFN2)
- fss, _, _ = Carbon.File.ResolveAliasFile(TESTFN2, 0)
- self.assertEqual(fss.as_pathname(), os.path.realpath(test_support.TESTFN))
-
- def test_mkalias_relative(self):
- test_support.unlink(TESTFN2)
- # If the directory doesn't exist, then chances are this is a new
- # install of Python so don't create it since the user might end up
- # running ``sudo make install`` and creating the directory here won't
- # leave it with the proper permissions.
- if not os.path.exists(sys.prefix):
- return
- macostools.mkalias(test_support.TESTFN, TESTFN2, sys.prefix)
- fss, _, _ = Carbon.File.ResolveAliasFile(TESTFN2, 0)
- self.assertEqual(fss.as_pathname(), os.path.realpath(test_support.TESTFN))
+ @requires_32bit
+ def test_copy(self):
+ test_support.unlink(TESTFN2)
+ macostools.copy(test_support.TESTFN, TESTFN2)
+ self.assertEqual(self.compareData(), '')
+
+ @requires_32bit
+ def test_mkalias(self):
+ test_support.unlink(TESTFN2)
+ macostools.mkalias(test_support.TESTFN, TESTFN2)
+ fss, _, _ = Carbon.File.ResolveAliasFile(TESTFN2, 0)
+ self.assertEqual(fss.as_pathname(), os.path.realpath(test_support.TESTFN))
+
+ @requires_32bit
+ # If the directory doesn't exist, then chances are this is a new
+ # install of Python so don't create it since the user might end up
+ # running ``sudo make install`` and creating the directory here won't
+ # leave it with the proper permissions.
+ @unittest.skipUnless(os.path.exists(sys.prefix),
+ "%r doesn't exist" % sys.prefix)
+ def test_mkalias_relative(self):
+ test_support.unlink(TESTFN2)
+
+ macostools.mkalias(test_support.TESTFN, TESTFN2, sys.prefix)
+ fss, _, _ = Carbon.File.ResolveAliasFile(TESTFN2, 0)
+ self.assertEqual(fss.as_pathname(), os.path.realpath(test_support.TESTFN))
def test_main():
diff --git a/Lib/test/test_macurl2path.py b/Lib/test/test_macurl2path.py
new file mode 100644
index 0000000..3490d6d
--- /dev/null
+++ b/Lib/test/test_macurl2path.py
@@ -0,0 +1,31 @@
+import macurl2path
+import unittest
+
+class MacUrl2PathTestCase(unittest.TestCase):
+ def test_url2pathname(self):
+ self.assertEqual(":index.html", macurl2path.url2pathname("index.html"))
+ self.assertEqual(":bar:index.html", macurl2path.url2pathname("bar/index.html"))
+ self.assertEqual("foo:bar:index.html", macurl2path.url2pathname("/foo/bar/index.html"))
+ self.assertEqual("foo:bar", macurl2path.url2pathname("/foo/bar/"))
+ self.assertEqual("", macurl2path.url2pathname("/"))
+ self.assertRaises(RuntimeError, macurl2path.url2pathname, "http://foo.com")
+ self.assertEqual("index.html", macurl2path.url2pathname("///index.html"))
+ self.assertRaises(RuntimeError, macurl2path.url2pathname, "//index.html")
+ self.assertEqual(":index.html", macurl2path.url2pathname("./index.html"))
+ self.assertEqual(":index.html", macurl2path.url2pathname("foo/../index.html"))
+ self.assertEqual("::index.html", macurl2path.url2pathname("../index.html"))
+
+ def test_pathname2url(self):
+ self.assertEqual("drive", macurl2path.pathname2url("drive:"))
+ self.assertEqual("drive/dir", macurl2path.pathname2url("drive:dir:"))
+ self.assertEqual("drive/dir/file", macurl2path.pathname2url("drive:dir:file"))
+ self.assertEqual("drive/file", macurl2path.pathname2url("drive:file"))
+ self.assertEqual("file", macurl2path.pathname2url("file"))
+ self.assertEqual("file", macurl2path.pathname2url(":file"))
+ self.assertEqual("dir", macurl2path.pathname2url(":dir:"))
+ self.assertEqual("dir/file", macurl2path.pathname2url(":dir:file"))
+ self.assertRaises(RuntimeError, macurl2path.pathname2url, "/")
+ self.assertEqual("dir/../file", macurl2path.pathname2url("dir::file"))
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/Lib/test/test_mailbox.py b/Lib/test/test_mailbox.py
index a68686e..2261bb8 100644
--- a/Lib/test/test_mailbox.py
+++ b/Lib/test/test_mailbox.py
@@ -6,7 +6,9 @@ import socket
import email
import email.message
import re
+import shutil
import StringIO
+import tempfile
from test import test_support
import unittest
import mailbox
@@ -19,7 +21,7 @@ except ImportError:
# Silence Py3k warning
rfc822 = test_support.import_module('rfc822', deprecated=True)
-class TestBase(unittest.TestCase):
+class TestBase:
def _check_sample(self, msg):
# Inspect a mailbox.Message representation of the sample message
@@ -38,20 +40,15 @@ class TestBase(unittest.TestCase):
def _delete_recursively(self, target):
# Delete a file or delete a directory recursively
if os.path.isdir(target):
- for path, dirs, files in os.walk(target, topdown=False):
- for name in files:
- os.remove(os.path.join(path, name))
- for name in dirs:
- os.rmdir(os.path.join(path, name))
- os.rmdir(target)
+ test_support.rmtree(target)
elif os.path.exists(target):
- os.remove(target)
+ test_support.unlink(target)
class TestMailbox(TestBase):
_factory = None # Overridden by subclasses to reuse tests
- _template = 'From: foo\n\n%s'
+ _template = 'From: foo\n\n%s\n'
def setUp(self):
self._path = test_support.TESTFN
@@ -79,6 +76,18 @@ class TestMailbox(TestBase):
for i in (1, 2, 3, 4):
self._check_sample(self._box[keys[i]])
+ def test_add_file(self):
+ with tempfile.TemporaryFile('w+') as f:
+ f.write(_sample_message)
+ f.seek(0)
+ key = self._box.add(f)
+ self.assertEqual(self._box.get_string(key).split('\n'),
+ _sample_message.split('\n'))
+
+ def test_add_StringIO(self):
+ key = self._box.add(StringIO.StringIO(self._template % "0"))
+ self.assertEqual(self._box.get_string(key), self._template % "0")
+
def test_remove(self):
# Remove messages using remove()
self._test_remove_or_delitem(self._box.remove)
@@ -128,22 +137,23 @@ class TestMailbox(TestBase):
key0 = self._box.add(self._template % 0)
msg = self._box.get(key0)
self.assertEqual(msg['from'], 'foo')
- self.assertEqual(msg.get_payload(), '0')
- self.assertIs(self._box.get('foo'), None)
+ self.assertEqual(msg.get_payload(), '0\n')
+ self.assertIsNone(self._box.get('foo'))
self.assertFalse(self._box.get('foo', False))
self._box.close()
self._box = self._factory(self._path, factory=rfc822.Message)
key1 = self._box.add(self._template % 1)
msg = self._box.get(key1)
self.assertEqual(msg['from'], 'foo')
- self.assertEqual(msg.fp.read(), '1')
+ self.assertEqual(msg.fp.read(), '1' + os.linesep)
+ msg.fp.close()
def test_getitem(self):
# Retrieve message using __getitem__()
key0 = self._box.add(self._template % 0)
msg = self._box[key0]
self.assertEqual(msg['from'], 'foo')
- self.assertEqual(msg.get_payload(), '0')
+ self.assertEqual(msg.get_payload(), '0\n')
self.assertRaises(KeyError, lambda: self._box['foo'])
self._box.discard(key0)
self.assertRaises(KeyError, lambda: self._box[key0])
@@ -155,7 +165,7 @@ class TestMailbox(TestBase):
msg0 = self._box.get_message(key0)
self.assertIsInstance(msg0, mailbox.Message)
self.assertEqual(msg0['from'], 'foo')
- self.assertEqual(msg0.get_payload(), '0')
+ self.assertEqual(msg0.get_payload(), '0\n')
self._check_sample(self._box.get_message(key1))
def test_get_string(self):
@@ -169,10 +179,14 @@ class TestMailbox(TestBase):
# Get file representations of messages
key0 = self._box.add(self._template % 0)
key1 = self._box.add(_sample_message)
- self.assertEqual(self._box.get_file(key0).read().replace(os.linesep, '\n'),
+ msg0 = self._box.get_file(key0)
+ self.assertEqual(msg0.read().replace(os.linesep, '\n'),
self._template % 0)
- self.assertEqual(self._box.get_file(key1).read().replace(os.linesep, '\n'),
+ msg1 = self._box.get_file(key1)
+ self.assertEqual(msg1.read().replace(os.linesep, '\n'),
_sample_message)
+ msg0.close()
+ msg1.close()
def test_get_file_can_be_closed_twice(self):
# Issue 11700
@@ -235,8 +249,7 @@ class TestMailbox(TestBase):
count = 0
for value in returned_values:
self.assertEqual(value['from'], 'foo')
- self.assertTrue(int(value.get_payload()) < repetitions,
- (value.get_payload(), repetitions))
+ self.assertLess(int(value.get_payload()), repetitions)
count += 1
self.assertEqual(len(values), count)
@@ -324,15 +337,15 @@ class TestMailbox(TestBase):
self.assertIn(key0, self._box)
key1 = self._box.add(self._template % 1)
self.assertIn(key1, self._box)
- self.assertEqual(self._box.pop(key0).get_payload(), '0')
+ self.assertEqual(self._box.pop(key0).get_payload(), '0\n')
self.assertNotIn(key0, self._box)
self.assertIn(key1, self._box)
key2 = self._box.add(self._template % 2)
self.assertIn(key2, self._box)
- self.assertEqual(self._box.pop(key2).get_payload(), '2')
+ self.assertEqual(self._box.pop(key2).get_payload(), '2\n')
self.assertNotIn(key2, self._box)
self.assertIn(key1, self._box)
- self.assertEqual(self._box.pop(key1).get_payload(), '1')
+ self.assertEqual(self._box.pop(key1).get_payload(), '1\n')
self.assertNotIn(key1, self._box)
self.assertEqual(len(self._box), 0)
@@ -390,6 +403,17 @@ class TestMailbox(TestBase):
# Write changes to disk
self._test_flush_or_close(self._box.flush, True)
+ def test_popitem_and_flush_twice(self):
+ # See #15036.
+ self._box.add(self._template % 0)
+ self._box.add(self._template % 1)
+ self._box.flush()
+
+ self._box.popitem()
+ self._box.flush()
+ self._box.popitem()
+ self._box.flush()
+
def test_lock_unlock(self):
# Lock and unlock the mailbox
self.assertFalse(os.path.exists(self._get_lock_path()))
@@ -407,6 +431,7 @@ class TestMailbox(TestBase):
self._box.add(contents[0])
self._box.add(contents[1])
self._box.add(contents[2])
+ oldbox = self._box
method()
if should_call_close:
self._box.close()
@@ -415,6 +440,7 @@ class TestMailbox(TestBase):
self.assertEqual(len(keys), 3)
for key in keys:
self.assertIn(self._box.get_string(key), contents)
+ oldbox.close()
def test_dump_message(self):
# Write message representations to disk
@@ -433,7 +459,7 @@ class TestMailbox(TestBase):
return self._path + '.lock'
-class TestMailboxSuperclass(TestBase):
+class TestMailboxSuperclass(TestBase, unittest.TestCase):
def test_notimplemented(self):
# Test that all Mailbox methods raise NotImplementedException.
@@ -468,7 +494,7 @@ class TestMailboxSuperclass(TestBase):
self.assertRaises(NotImplementedError, lambda: box.close())
-class TestMaildir(TestMailbox):
+class TestMaildir(TestMailbox, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.Maildir(path, factory)
@@ -510,7 +536,7 @@ class TestMaildir(TestMailbox):
msg_returned = self._box.get_message(key)
self.assertEqual(msg_returned.get_subdir(), 'new')
self.assertEqual(msg_returned.get_flags(), '')
- self.assertEqual(msg_returned.get_payload(), '1')
+ self.assertEqual(msg_returned.get_payload(), '1\n')
msg2 = mailbox.MaildirMessage(self._template % 2)
msg2.set_info('2,S')
self._box[key] = msg2
@@ -518,7 +544,7 @@ class TestMaildir(TestMailbox):
msg_returned = self._box.get_message(key)
self.assertEqual(msg_returned.get_subdir(), 'new')
self.assertEqual(msg_returned.get_flags(), 'S')
- self.assertEqual(msg_returned.get_payload(), '3')
+ self.assertEqual(msg_returned.get_payload(), '3\n')
def test_consistent_factory(self):
# Add a message.
@@ -637,32 +663,32 @@ class TestMaildir(TestMailbox):
"tmp")),
"File in wrong location: '%s'" % head)
match = pattern.match(tail)
- self.assertTrue(match is not None, "Invalid file name: '%s'" % tail)
+ self.assertIsNotNone(match, "Invalid file name: '%s'" % tail)
groups = match.groups()
if previous_groups is not None:
- self.assertTrue(int(groups[0] >= previous_groups[0]),
+ self.assertGreaterEqual(int(groups[0]), int(previous_groups[0]),
"Non-monotonic seconds: '%s' before '%s'" %
(previous_groups[0], groups[0]))
- self.assertTrue(int(groups[1] >= previous_groups[1]) or
- groups[0] != groups[1],
- "Non-monotonic milliseconds: '%s' before '%s'" %
- (previous_groups[1], groups[1]))
- self.assertTrue(int(groups[2]) == pid,
+ if int(groups[0]) == int(previous_groups[0]):
+ self.assertGreaterEqual(int(groups[1]), int(previous_groups[1]),
+ "Non-monotonic milliseconds: '%s' before '%s'" %
+ (previous_groups[1], groups[1]))
+ self.assertEqual(int(groups[2]), pid,
"Process ID mismatch: '%s' should be '%s'" %
(groups[2], pid))
- self.assertTrue(int(groups[3]) == int(previous_groups[3]) + 1,
+ self.assertEqual(int(groups[3]), int(previous_groups[3]) + 1,
"Non-sequential counter: '%s' before '%s'" %
(previous_groups[3], groups[3]))
- self.assertTrue(groups[4] == hostname,
+ self.assertEqual(groups[4], hostname,
"Host name mismatch: '%s' should be '%s'" %
(groups[4], hostname))
previous_groups = groups
tmp_file.write(_sample_message)
tmp_file.seek(0)
- self.assertTrue(tmp_file.read() == _sample_message)
+ self.assertEqual(tmp_file.read(), _sample_message)
tmp_file.close()
file_count = len(os.listdir(os.path.join(self._path, "tmp")))
- self.assertTrue(file_count == repetitions,
+ self.assertEqual(file_count, repetitions,
"Wrong file count: '%s' should be '%s'" %
(file_count, repetitions))
@@ -745,10 +771,10 @@ class TestMaildir(TestMailbox):
for msg in self._box:
pass
+ @unittest.skipUnless(hasattr(os, 'umask'), 'test needs os.umask()')
+ @unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()')
def test_file_permissions(self):
# Verify that message files are created without execute permissions
- if not hasattr(os, "stat") or not hasattr(os, "umask"):
- return
msg = mailbox.MaildirMessage(self._template % 0)
orig_umask = os.umask(0)
try:
@@ -759,12 +785,11 @@ class TestMaildir(TestMailbox):
mode = os.stat(path).st_mode
self.assertEqual(mode & 0111, 0)
+ @unittest.skipUnless(hasattr(os, 'umask'), 'test needs os.umask()')
+ @unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()')
def test_folder_file_perms(self):
# From bug #3228, we want to verify that the file created inside a Maildir
# subfolder isn't marked as executable.
- if not hasattr(os, "stat") or not hasattr(os, "umask"):
- return
-
orig_umask = os.umask(0)
try:
subfolder = self._box.add_folder('subfolder')
@@ -817,7 +842,49 @@ class TestMaildir(TestMailbox):
self._box._refresh()
self.assertTrue(refreshed())
-class _TestMboxMMDF(TestMailbox):
+
+class _TestSingleFile(TestMailbox):
+ '''Common tests for single-file mailboxes'''
+
+ def test_add_doesnt_rewrite(self):
+ # When only adding messages, flush() should not rewrite the
+ # mailbox file. See issue #9559.
+
+ # Inode number changes if the contents are written to another
+ # file which is then renamed over the original file. So we
+ # must check that the inode number doesn't change.
+ inode_before = os.stat(self._path).st_ino
+
+ self._box.add(self._template % 0)
+ self._box.flush()
+
+ inode_after = os.stat(self._path).st_ino
+ self.assertEqual(inode_before, inode_after)
+
+ # Make sure the message was really added
+ self._box.close()
+ self._box = self._factory(self._path)
+ self.assertEqual(len(self._box), 1)
+
+ def test_permissions_after_flush(self):
+ # See issue #5346
+
+ # Make the mailbox world writable. It's unlikely that the new
+ # mailbox file would have these permissions after flush(),
+ # because umask usually prevents it.
+ mode = os.stat(self._path).st_mode | 0o666
+ os.chmod(self._path, mode)
+
+ self._box.add(self._template % 0)
+ i = self._box.add(self._template % 1)
+ # Need to remove one message to make flush() create a new file
+ self._box.remove(i)
+ self._box.flush()
+
+ self.assertEqual(os.stat(self._path).st_mode, mode)
+
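# Minimal sketch of the inode check used in test_add_doesnt_rewrite above:
# appending to a file in place keeps st_ino stable, whereas writing a new file
# and renaming it over the old one would change the inode number.
import os, tempfile
fd, path = tempfile.mkstemp()
os.close(fd)
before = os.stat(path).st_ino
with open(path, 'a') as f:             # append in place
    f.write('From foo\n\nbody\n')
assert os.stat(path).st_ino == before
os.unlink(path)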
+
+class _TestMboxMMDF(_TestSingleFile):
def tearDown(self):
self._box.close()
@@ -827,14 +894,14 @@ class _TestMboxMMDF(TestMailbox):
def test_add_from_string(self):
# Add a string starting with 'From ' to the mailbox
- key = self._box.add('From foo@bar blah\nFrom: foo\n\n0')
+ key = self._box.add('From foo@bar blah\nFrom: foo\n\n0\n')
self.assertEqual(self._box[key].get_from(), 'foo@bar blah')
- self.assertEqual(self._box[key].get_payload(), '0')
+ self.assertEqual(self._box[key].get_payload(), '0\n')
def test_add_mbox_or_mmdf_message(self):
# Add an mboxMessage or MMDFMessage
for class_ in (mailbox.mboxMessage, mailbox.MMDFMessage):
- msg = class_('From foo@bar blah\nFrom: foo\n\n0')
+ msg = class_('From foo@bar blah\nFrom: foo\n\n0\n')
key = self._box.add(msg)
def test_open_close_open(self):
@@ -918,35 +985,59 @@ class _TestMboxMMDF(TestMailbox):
self._box.close()
-class TestMbox(_TestMboxMMDF):
+class TestMbox(_TestMboxMMDF, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.mbox(path, factory)
+ @unittest.skipUnless(hasattr(os, 'umask'), 'test needs os.umask()')
+ @unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()')
def test_file_perms(self):
# From bug #3228, we want to verify that the mailbox file isn't executable,
# even if the umask is set to something that would leave executable bits set.
# We only run this test on platforms that support umask.
- if hasattr(os, 'umask') and hasattr(os, 'stat'):
- try:
- old_umask = os.umask(0077)
- self._box.close()
- os.unlink(self._path)
- self._box = mailbox.mbox(self._path, create=True)
- self._box.add('')
- self._box.close()
- finally:
- os.umask(old_umask)
+ try:
+ old_umask = os.umask(0077)
+ self._box.close()
+ os.unlink(self._path)
+ self._box = mailbox.mbox(self._path, create=True)
+ self._box.add('')
+ self._box.close()
+ finally:
+ os.umask(old_umask)
+
+ st = os.stat(self._path)
+ perms = st.st_mode
+ self.assertFalse((perms & 0111)) # Execute bits should all be off.
+
+ def test_terminating_newline(self):
+ message = email.message.Message()
+ message['From'] = 'john@example.com'
+ message.set_payload('No newline at the end')
+ i = self._box.add(message)
+
+ # A newline should have been appended to the payload
+ message = self._box.get(i)
+ self.assertEqual(message.get_payload(), 'No newline at the end\n')
+
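# Usage sketch of the behaviour covered by test_terminating_newline above: mbox
# storage is line based, so add() appends a newline to a payload lacking one.
# The temporary directory below is only for illustration.
import mailbox, os, shutil, tempfile
tmpdir = tempfile.mkdtemp()
box = mailbox.mbox(os.path.join(tmpdir, 'box'), create=True)
key = box.add('From: foo\n\nNo newline at the end')
assert box.get(key).get_payload() == 'No newline at the end\n'
box.close()
shutil.rmtree(tmpdir)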
+ def test_message_separator(self):
+ # Check there's always a single blank line after each message
+ self._box.add('From: foo\n\n0') # No newline at the end
+ with open(self._path) as f:
+ data = f.read()
+ self.assertEqual(data[-3:], '0\n\n')
+
+ self._box.add('From: foo\n\n0\n') # Newline at the end
+ with open(self._path) as f:
+ data = f.read()
+ self.assertEqual(data[-3:], '0\n\n')
- st = os.stat(self._path)
- perms = st.st_mode
- self.assertFalse((perms & 0111)) # Execute bits should all be off.
-class TestMMDF(_TestMboxMMDF):
+class TestMMDF(_TestMboxMMDF, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.MMDF(path, factory)
-class TestMH(TestMailbox):
+class TestMH(TestMailbox, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.MH(path, factory)
@@ -1078,7 +1169,7 @@ class TestMH(TestMailbox):
return os.path.join(self._path, '.mh_sequences.lock')
-class TestBabyl(TestMailbox):
+class TestBabyl(_TestSingleFile, unittest.TestCase):
_factory = lambda self, path, factory=None: mailbox.Babyl(path, factory)
@@ -1107,7 +1198,7 @@ class TestBabyl(TestMailbox):
self.assertEqual(set(self._box.get_labels()), set(['blah']))
-class TestMessage(TestBase):
+class TestMessage(TestBase, unittest.TestCase):
_factory = mailbox.Message # Overridden by subclasses to reuse tests
@@ -1148,7 +1239,7 @@ class TestMessage(TestBase):
self.assertIsInstance(msg, self._factory)
self.assertEqual(msg.keys(), [])
self.assertFalse(msg.is_multipart())
- self.assertEqual(msg.get_payload(), None)
+ self.assertIsNone(msg.get_payload())
def test_initialize_incorrectly(self):
# Initialize with invalid argument
@@ -1178,7 +1269,7 @@ class TestMessage(TestBase):
pass
-class TestMaildirMessage(TestMessage):
+class TestMaildirMessage(TestMessage, unittest.TestCase):
_factory = mailbox.MaildirMessage
@@ -1221,7 +1312,7 @@ class TestMaildirMessage(TestMessage):
# Use get_date() and set_date()
msg = mailbox.MaildirMessage(_sample_message)
diff = msg.get_date() - time.time()
- self.assertTrue(abs(diff) < 60, diff)
+ self.assertLess(abs(diff), 60, diff)
msg.set_date(0.0)
self.assertEqual(msg.get_date(), 0.0)
@@ -1253,7 +1344,7 @@ class TestMaildirMessage(TestMessage):
self._check_sample(msg)
-class _TestMboxMMDFMessage(TestMessage):
+class _TestMboxMMDFMessage:
_factory = mailbox._mboxMMDFMessage
@@ -1296,16 +1387,17 @@ class _TestMboxMMDFMessage(TestMessage):
# Check contents of "From " line
if sender is None:
sender = "MAILER-DAEMON"
- self.assertTrue(re.match(sender + r" \w{3} \w{3} [\d ]\d [\d ]\d:\d{2}:"
- r"\d{2} \d{4}", msg.get_from()))
+ self.assertIsNotNone(re.match(
+ sender + r" \w{3} \w{3} [\d ]\d [\d ]\d:\d{2}:\d{2} \d{4}",
+ msg.get_from()))
-class TestMboxMessage(_TestMboxMMDFMessage):
+class TestMboxMessage(_TestMboxMMDFMessage, TestMessage):
_factory = mailbox.mboxMessage
-class TestMHMessage(TestMessage):
+class TestMHMessage(TestMessage, unittest.TestCase):
_factory = mailbox.MHMessage
@@ -1336,7 +1428,7 @@ class TestMHMessage(TestMessage):
self.assertEqual(msg.get_sequences(), ['foobar', 'replied'])
-class TestBabylMessage(TestMessage):
+class TestBabylMessage(TestMessage, unittest.TestCase):
_factory = mailbox.BabylMessage
@@ -1371,7 +1463,7 @@ class TestBabylMessage(TestMessage):
msg = mailbox.BabylMessage(_sample_message)
visible = msg.get_visible()
self.assertEqual(visible.keys(), [])
- self.assertIs(visible.get_payload(), None)
+ self.assertIsNone(visible.get_payload())
visible['User-Agent'] = 'FooBar 1.0'
visible['X-Whatever'] = 'Blah'
self.assertEqual(msg.get_visible().keys(), [])
@@ -1380,10 +1472,10 @@ class TestBabylMessage(TestMessage):
self.assertEqual(visible.keys(), ['User-Agent', 'X-Whatever'])
self.assertEqual(visible['User-Agent'], 'FooBar 1.0')
self.assertEqual(visible['X-Whatever'], 'Blah')
- self.assertIs(visible.get_payload(), None)
+ self.assertIsNone(visible.get_payload())
msg.update_visible()
self.assertEqual(visible.keys(), ['User-Agent', 'X-Whatever'])
- self.assertIs(visible.get_payload(), None)
+ self.assertIsNone(visible.get_payload())
visible = msg.get_visible()
self.assertEqual(visible.keys(), ['User-Agent', 'Date', 'From', 'To',
'Subject'])
@@ -1391,12 +1483,12 @@ class TestBabylMessage(TestMessage):
self.assertEqual(visible[header], msg[header])
-class TestMMDFMessage(_TestMboxMMDFMessage):
+class TestMMDFMessage(_TestMboxMMDFMessage, TestMessage):
_factory = mailbox.MMDFMessage
-class TestMessageConversion(TestBase):
+class TestMessageConversion(TestBase, unittest.TestCase):
def test_plain_to_x(self):
# Convert Message to all formats
@@ -1719,7 +1811,7 @@ class TestProxyFileBase(TestBase):
proxy.close()
-class TestProxyFile(TestProxyFileBase):
+class TestProxyFile(TestProxyFileBase, unittest.TestCase):
def setUp(self):
self._path = test_support.TESTFN
@@ -1768,7 +1860,7 @@ class TestProxyFile(TestProxyFileBase):
self._test_close(mailbox._ProxyFile(self._file))
-class TestPartialFile(TestProxyFileBase):
+class TestPartialFile(TestProxyFileBase, unittest.TestCase):
def setUp(self):
self._path = test_support.TESTFN
@@ -1835,6 +1927,10 @@ class MaildirTestCase(unittest.TestCase):
def setUp(self):
# create a new maildir mailbox to work with:
self._dir = test_support.TESTFN
+ if os.path.isdir(self._dir):
+ test_support.rmtree(self._dir)
+ if os.path.isfile(self._dir):
+ test_support.unlink(self._dir)
os.mkdir(self._dir)
os.mkdir(os.path.join(self._dir, "cur"))
os.mkdir(os.path.join(self._dir, "tmp"))
@@ -1844,10 +1940,10 @@ class MaildirTestCase(unittest.TestCase):
def tearDown(self):
map(os.unlink, self._msgfiles)
- os.rmdir(os.path.join(self._dir, "cur"))
- os.rmdir(os.path.join(self._dir, "tmp"))
- os.rmdir(os.path.join(self._dir, "new"))
- os.rmdir(self._dir)
+ test_support.rmdir(os.path.join(self._dir, "cur"))
+ test_support.rmdir(os.path.join(self._dir, "tmp"))
+ test_support.rmdir(os.path.join(self._dir, "new"))
+ test_support.rmdir(self._dir)
def createMessage(self, dir, mbox=False):
t = int(time.time() % 1000000)
@@ -1875,46 +1971,56 @@ class MaildirTestCase(unittest.TestCase):
# Make sure the boxes attribute actually gets set.
self.mbox = mailbox.Maildir(test_support.TESTFN)
#self.assertTrue(hasattr(self.mbox, "boxes"))
- #self.assertTrue(len(self.mbox.boxes) == 0)
- self.assertIs(self.mbox.next(), None)
- self.assertIs(self.mbox.next(), None)
+ #self.assertEqual(len(self.mbox.boxes), 0)
+ self.assertIsNone(self.mbox.next())
+ self.assertIsNone(self.mbox.next())
def test_nonempty_maildir_cur(self):
self.createMessage("cur")
self.mbox = mailbox.Maildir(test_support.TESTFN)
- #self.assertTrue(len(self.mbox.boxes) == 1)
- self.assertIsNot(self.mbox.next(), None)
- self.assertIs(self.mbox.next(), None)
- self.assertIs(self.mbox.next(), None)
+ #self.assertEqual(len(self.mbox.boxes), 1)
+ msg = self.mbox.next()
+ self.assertIsNotNone(msg)
+ msg.fp.close()
+ self.assertIsNone(self.mbox.next())
+ self.assertIsNone(self.mbox.next())
def test_nonempty_maildir_new(self):
self.createMessage("new")
self.mbox = mailbox.Maildir(test_support.TESTFN)
- #self.assertTrue(len(self.mbox.boxes) == 1)
- self.assertIsNot(self.mbox.next(), None)
- self.assertIs(self.mbox.next(), None)
- self.assertIs(self.mbox.next(), None)
+ #self.assertEqual(len(self.mbox.boxes), 1)
+ msg = self.mbox.next()
+ self.assertIsNotNone(msg)
+ msg.fp.close()
+ self.assertIsNone(self.mbox.next())
+ self.assertIsNone(self.mbox.next())
def test_nonempty_maildir_both(self):
self.createMessage("cur")
self.createMessage("new")
self.mbox = mailbox.Maildir(test_support.TESTFN)
- #self.assertTrue(len(self.mbox.boxes) == 2)
- self.assertIsNot(self.mbox.next(), None)
- self.assertIsNot(self.mbox.next(), None)
- self.assertIs(self.mbox.next(), None)
- self.assertIs(self.mbox.next(), None)
+ #self.assertEqual(len(self.mbox.boxes), 2)
+ msg = self.mbox.next()
+ self.assertIsNotNone(msg)
+ msg.fp.close()
+ msg = self.mbox.next()
+ self.assertIsNotNone(msg)
+ msg.fp.close()
+ self.assertIsNone(self.mbox.next())
+ self.assertIsNone(self.mbox.next())
def test_unix_mbox(self):
### should be better!
import email.parser
fname = self.createMessage("cur", True)
n = 0
- for msg in mailbox.PortableUnixMailbox(open(fname),
+ fid = open(fname)
+ for msg in mailbox.PortableUnixMailbox(fid,
email.parser.Parser().parse):
n += 1
self.assertEqual(msg["subject"], "Simple Test")
self.assertEqual(len(str(msg)), len(FROM_)+len(DUMMY_MESSAGE))
+ fid.close()
self.assertEqual(n, 1)
## End: classes from the original module (for backward compatibility).
diff --git a/Lib/test/test_marshal.py b/Lib/test/test_marshal.py
index 744f93c..59fa3eb 100644
--- a/Lib/test/test_marshal.py
+++ b/Lib/test/test_marshal.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
from test import test_support
@@ -269,6 +268,53 @@ class BugsTestCase(unittest.TestCase):
invalid_string = 'l\x02\x00\x00\x00\x00\x00\x00\x00'
self.assertRaises(ValueError, marshal.loads, invalid_string)
+LARGE_SIZE = 2**31
+character_size = 4 if sys.maxunicode > 0xFFFF else 2
+pointer_size = 8 if sys.maxsize > 0xFFFFFFFF else 4
+
+@unittest.skipIf(LARGE_SIZE > sys.maxsize, "test cannot run on 32-bit systems")
+class LargeValuesTestCase(unittest.TestCase):
+ def check_unmarshallable(self, data):
+ f = open(test_support.TESTFN, 'wb')
+ self.addCleanup(test_support.unlink, test_support.TESTFN)
+ with f:
+ self.assertRaises(ValueError, marshal.dump, data, f)
+
+ @test_support.precisionbigmemtest(size=LARGE_SIZE, memuse=1, dry_run=False)
+ def test_string(self, size):
+ self.check_unmarshallable('x' * size)
+
+ @test_support.precisionbigmemtest(size=LARGE_SIZE,
+ memuse=character_size + 2, dry_run=False)
+ def test_unicode(self, size):
+ self.check_unmarshallable(u'x' * size)
+
+ @test_support.precisionbigmemtest(size=LARGE_SIZE,
+ memuse=pointer_size, dry_run=False)
+ def test_tuple(self, size):
+ self.check_unmarshallable((None,) * size)
+
+ @test_support.precisionbigmemtest(size=LARGE_SIZE,
+ memuse=pointer_size, dry_run=False)
+ def test_list(self, size):
+ self.check_unmarshallable([None] * size)
+
+ @test_support.precisionbigmemtest(size=LARGE_SIZE,
+ memuse=pointer_size*12 + sys.getsizeof(LARGE_SIZE-1),
+ dry_run=False)
+ def test_set(self, size):
+ self.check_unmarshallable(set(range(size)))
+
+ @test_support.precisionbigmemtest(size=LARGE_SIZE,
+ memuse=pointer_size*12 + sys.getsizeof(LARGE_SIZE-1),
+ dry_run=False)
+ def test_frozenset(self, size):
+ self.check_unmarshallable(frozenset(range(size)))
+
+ @test_support.precisionbigmemtest(size=LARGE_SIZE, memuse=1, dry_run=False)
+ def test_bytearray(self, size):
+ self.check_unmarshallable(bytearray(size))
+
def test_main():
test_support.run_unittest(IntTestCase,
@@ -277,7 +323,9 @@ def test_main():
CodeTestCase,
ContainerTestCase,
ExceptionTestCase,
- BugsTestCase)
+ BugsTestCase,
+ LargeValuesTestCase,
+ )
if __name__ == "__main__":
test_main()
diff --git a/Lib/test/test_math.py b/Lib/test/test_math.py
index ac4475e..c7f5752 100644
--- a/Lib/test/test_math.py
+++ b/Lib/test/test_math.py
@@ -599,6 +599,9 @@ class MathTests(unittest.TestCase):
self.assertEqual(math.log(INF), INF)
self.assertRaises(ValueError, math.log, NINF)
self.assertTrue(math.isnan(math.log(NAN)))
+ # Log values should match for int and long (issue #18739).
+ for n in range(1, 1000):
+ self.assertEqual(math.log(n), math.log(long(n)))
def testLog1p(self):
self.assertRaises(TypeError, math.log1p)
@@ -621,6 +624,9 @@ class MathTests(unittest.TestCase):
self.assertEqual(math.log(INF), INF)
self.assertRaises(ValueError, math.log10, NINF)
self.assertTrue(math.isnan(math.log10(NAN)))
+ # Log values should match for int and long (issue #18739).
+ for n in range(1, 1000):
+ self.assertEqual(math.log10(n), math.log10(long(n)))
def testModf(self):
self.assertRaises(TypeError, math.modf)
@@ -900,38 +906,37 @@ class MathTests(unittest.TestCase):
# still fails this part of the test on some platforms. For now, we only
# *run* test_exceptions() in verbose mode, so that this isn't normally
# tested.
+ @unittest.skipUnless(verbose, 'requires verbose mode')
+ def test_exceptions(self):
+ try:
+ x = math.exp(-1000000000)
+ except:
+ # mathmodule.c is failing to weed out underflows from libm, or
+ # we've got an fp format with huge dynamic range
+ self.fail("underflowing exp() should not have raised "
+ "an exception")
+ if x != 0:
+ self.fail("underflowing exp() should have returned 0")
+
+ # If this fails, probably using a strict IEEE-754 conforming libm, and x
+ # is +Inf afterwards. But Python wants overflows detected by default.
+ try:
+ x = math.exp(1000000000)
+ except OverflowError:
+ pass
+ else:
+ self.fail("overflowing exp() didn't trigger OverflowError")
- if verbose:
- def test_exceptions(self):
- try:
- x = math.exp(-1000000000)
- except:
- # mathmodule.c is failing to weed out underflows from libm, or
- # we've got an fp format with huge dynamic range
- self.fail("underflowing exp() should not have raised "
- "an exception")
- if x != 0:
- self.fail("underflowing exp() should have returned 0")
-
- # If this fails, probably using a strict IEEE-754 conforming libm, and x
- # is +Inf afterwards. But Python wants overflows detected by default.
- try:
- x = math.exp(1000000000)
- except OverflowError:
- pass
- else:
- self.fail("overflowing exp() didn't trigger OverflowError")
-
- # If this fails, it could be a puzzle. One odd possibility is that
- # mathmodule.c's macros are getting confused while comparing
- # Inf (HUGE_VAL) to a NaN, and artificially setting errno to ERANGE
- # as a result (and so raising OverflowError instead).
- try:
- x = math.sqrt(-1.0)
- except ValueError:
- pass
- else:
- self.fail("sqrt(-1) didn't raise ValueError")
+ # If this fails, it could be a puzzle. One odd possibility is that
+ # mathmodule.c's macros are getting confused while comparing
+ # Inf (HUGE_VAL) to a NaN, and artificially setting errno to ERANGE
+ # as a result (and so raising OverflowError instead).
+ try:
+ x = math.sqrt(-1.0)
+ except ValueError:
+ pass
+ else:
+ self.fail("sqrt(-1) didn't raise ValueError")
@requires_IEEE_754
def test_testfile(self):
diff --git a/Lib/test/test_memoryio.py b/Lib/test/test_memoryio.py
index 68657aa..74a9ffb 100644
--- a/Lib/test/test_memoryio.py
+++ b/Lib/test/test_memoryio.py
@@ -328,9 +328,9 @@ class MemoryTestMixin:
self.assertEqual(memio.isatty(), False)
self.assertEqual(memio.closed, False)
memio.close()
- self.assertEqual(memio.writable(), True)
- self.assertEqual(memio.readable(), True)
- self.assertEqual(memio.seekable(), True)
+ self.assertRaises(ValueError, memio.writable)
+ self.assertRaises(ValueError, memio.readable)
+ self.assertRaises(ValueError, memio.seekable)
self.assertRaises(ValueError, memio.isatty)
self.assertEqual(memio.closed, True)
@@ -522,6 +522,17 @@ class TextIOTestMixin:
self.assertIsNone(memio.errors)
self.assertFalse(memio.line_buffering)
+ def test_newline_default(self):
+ memio = self.ioclass("a\nb\r\nc\rd")
+ self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"])
+ self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd")
+
+ memio = self.ioclass()
+ self.assertEqual(memio.write("a\nb\r\nc\rd"), 8)
+ memio.seek(0)
+ self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"])
+ self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd")
+
def test_newline_none(self):
# newline=None
memio = self.ioclass("a\nb\r\nc\rd", newline=None)
@@ -531,12 +542,16 @@ class TextIOTestMixin:
self.assertEqual(memio.read(2), "\nb")
self.assertEqual(memio.read(2), "\nc")
self.assertEqual(memio.read(1), "\n")
+ self.assertEqual(memio.getvalue(), "a\nb\nc\nd")
+
memio = self.ioclass(newline=None)
self.assertEqual(2, memio.write("a\n"))
self.assertEqual(3, memio.write("b\r\n"))
self.assertEqual(3, memio.write("c\rd"))
memio.seek(0)
self.assertEqual(memio.read(), "a\nb\nc\nd")
+ self.assertEqual(memio.getvalue(), "a\nb\nc\nd")
+
memio = self.ioclass("a\r\nb", newline=None)
self.assertEqual(memio.read(3), "a\nb")
@@ -548,6 +563,8 @@ class TextIOTestMixin:
self.assertEqual(memio.read(4), "a\nb\r")
self.assertEqual(memio.read(2), "\nc")
self.assertEqual(memio.read(1), "\r")
+ self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd")
+
memio = self.ioclass(newline="")
self.assertEqual(2, memio.write("a\n"))
self.assertEqual(2, memio.write("b\r"))
@@ -555,11 +572,19 @@ class TextIOTestMixin:
self.assertEqual(2, memio.write("\rd"))
memio.seek(0)
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"])
+ self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd")
def test_newline_lf(self):
# newline="\n"
- memio = self.ioclass("a\nb\r\nc\rd")
+ memio = self.ioclass("a\nb\r\nc\rd", newline="\n")
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"])
+ self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd")
+
+ memio = self.ioclass(newline="\n")
+ self.assertEqual(memio.write("a\nb\r\nc\rd"), 8)
+ memio.seek(0)
+ self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"])
+ self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd")
def test_newline_cr(self):
# newline="\r"
@@ -567,6 +592,15 @@ class TextIOTestMixin:
self.assertEqual(memio.read(), "a\rb\r\rc\rd")
memio.seek(0)
self.assertEqual(list(memio), ["a\r", "b\r", "\r", "c\r", "d"])
+ self.assertEqual(memio.getvalue(), "a\rb\r\rc\rd")
+
+ memio = self.ioclass(newline="\r")
+ self.assertEqual(memio.write("a\nb\r\nc\rd"), 8)
+ memio.seek(0)
+ self.assertEqual(list(memio), ["a\r", "b\r", "\r", "c\r", "d"])
+ memio.seek(0)
+ self.assertEqual(memio.readlines(), ["a\r", "b\r", "\r", "c\r", "d"])
+ self.assertEqual(memio.getvalue(), "a\rb\r\rc\rd")
def test_newline_crlf(self):
# newline="\r\n"
@@ -574,11 +608,21 @@ class TextIOTestMixin:
self.assertEqual(memio.read(), "a\r\nb\r\r\nc\rd")
memio.seek(0)
self.assertEqual(list(memio), ["a\r\n", "b\r\r\n", "c\rd"])
+ memio.seek(0)
+ self.assertEqual(memio.readlines(), ["a\r\n", "b\r\r\n", "c\rd"])
+ self.assertEqual(memio.getvalue(), "a\r\nb\r\r\nc\rd")
+
+ memio = self.ioclass(newline="\r\n")
+ self.assertEqual(memio.write("a\nb\r\nc\rd"), 8)
+ memio.seek(0)
+ self.assertEqual(list(memio), ["a\r\n", "b\r\r\n", "c\rd"])
+ self.assertEqual(memio.getvalue(), "a\r\nb\r\r\nc\rd")
def test_issue5265(self):
# StringIO can duplicate newlines in universal newlines mode
memio = self.ioclass("a\r\nb\r\n", newline=None)
self.assertEqual(memio.read(5), "a\nb\n")
+ self.assertEqual(memio.getvalue(), "a\nb\n")
class PyStringIOTest(MemoryTestMixin, MemorySeekTestMixin,
@@ -588,6 +632,16 @@ class PyStringIOTest(MemoryTestMixin, MemorySeekTestMixin,
UnsupportedOperation = pyio.UnsupportedOperation
EOF = ""
+ def test_lone_surrogates(self):
+ # Issue #20424
+ surrogate = unichr(0xd800)
+ memio = self.ioclass(surrogate)
+ self.assertEqual(memio.read(), surrogate)
+
+ memio = self.ioclass()
+ memio.write(surrogate)
+ self.assertEqual(memio.getvalue(), surrogate)
+
class PyStringIOPickleTest(TextIOTestMixin, unittest.TestCase):
"""Test if pickle restores properly the internal state of StringIO.
@@ -638,6 +692,16 @@ class CBytesIOTest(PyBytesIOTest):
memio.close()
self.assertRaises(ValueError, memio.__setstate__, (b"closed", 0, None))
+ check_sizeof = support.check_sizeof
+
+ @support.cpython_only
+ def test_sizeof(self):
+ basesize = support.calcobjsize(b'P2PP2P')
+ check = self.check_sizeof
+ self.assertEqual(object.__sizeof__(io.BytesIO()), basesize)
+ check(io.BytesIO(), basesize )
+ check(io.BytesIO(b'a'), basesize + 1 + 1 )
+ check(io.BytesIO(b'a' * 1000), basesize + 1000 + 1 )
class CStringIOTest(PyStringIOTest):
ioclass = io.StringIO
diff --git a/Lib/test/test_memoryview.py b/Lib/test/test_memoryview.py
index 525ddea..f14bafd 100644
--- a/Lib/test/test_memoryview.py
+++ b/Lib/test/test_memoryview.py
@@ -63,7 +63,7 @@ class AbstractMemoryTests:
def test_setitem_readonly(self):
if not self.ro_type:
- return
+ self.skipTest("no read-only type to test")
b = self.ro_type(self._source)
oldrefcount = sys.getrefcount(b)
m = self._view(b)
@@ -77,7 +77,7 @@ class AbstractMemoryTests:
def test_setitem_writable(self):
if not self.rw_type:
- return
+ self.skipTest("no writable type to test")
tp = self.rw_type
b = self.rw_type(self._source)
oldrefcount = sys.getrefcount(b)
@@ -183,13 +183,13 @@ class AbstractMemoryTests:
def test_attributes_readonly(self):
if not self.ro_type:
- return
+ self.skipTest("no read-only type to test")
m = self.check_attributes_with_type(self.ro_type)
self.assertEqual(m.readonly, True)
def test_attributes_writable(self):
if not self.rw_type:
- return
+ self.skipTest("no writable type to test")
m = self.check_attributes_with_type(self.rw_type)
self.assertEqual(m.readonly, False)
@@ -236,7 +236,7 @@ class AbstractMemoryTests:
# buffer as writable causing a segfault if using mmap
tp = self.ro_type
if tp is None:
- return
+ self.skipTest("no read-only type to test")
b = tp(self._source)
m = self._view(b)
i = io.BytesIO(b'ZZZZ')
diff --git a/Lib/test/test_mimetypes.py b/Lib/test/test_mimetypes.py
index 3508b56..e9a7216 100644
--- a/Lib/test/test_mimetypes.py
+++ b/Lib/test/test_mimetypes.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
import mimetypes
import StringIO
import unittest
@@ -21,6 +23,8 @@ class MimeTypesTestCase(unittest.TestCase):
eq(self.db.guess_type("foo.tgz"), ("application/x-tar", "gzip"))
eq(self.db.guess_type("foo.tar.gz"), ("application/x-tar", "gzip"))
eq(self.db.guess_type("foo.tar.Z"), ("application/x-tar", "compress"))
+ eq(self.db.guess_type("foo.tar.bz2"), ("application/x-tar", "bzip2"))
+ eq(self.db.guess_type("foo.tar.xz"), ("application/x-tar", "xz"))
def test_data_urls(self):
eq = self.assertEqual
@@ -69,8 +73,6 @@ class Win32MimeTypesTestCase(unittest.TestCase):
# ensure all entries actually come from the Windows registry
self.original_types_map = mimetypes.types_map.copy()
mimetypes.types_map.clear()
- mimetypes.init()
- self.db = mimetypes.MimeTypes()
def tearDown(self):
# restore default settings
@@ -82,12 +84,87 @@ class Win32MimeTypesTestCase(unittest.TestCase):
# Windows registry is undocumented AFAIK.
# Use file types that should *always* exist:
eq = self.assertEqual
- eq(self.db.guess_type("foo.txt"), ("text/plain", None))
+ mimetypes.init()
+ db = mimetypes.MimeTypes()
+ eq(db.guess_type("foo.txt"), ("text/plain", None))
+ eq(db.guess_type("image.jpg"), ("image/jpeg", None))
+ eq(db.guess_type("image.png"), ("image/png", None))
+
+ def test_non_latin_extension(self):
+ import _winreg
+
+ class MockWinreg(object):
+ def __getattr__(self, name):
+ if name == 'EnumKey':
+ return lambda key, i: _winreg.EnumKey(key, i) + "\xa3"
+ elif name == 'OpenKey':
+ return lambda key, name: _winreg.OpenKey(key, name.rstrip("\xa3"))
+ elif name == 'QueryValueEx':
+ return lambda subkey, label: (u'текст/простой', _winreg.REG_SZ)
+ return getattr(_winreg, name)
+
+ mimetypes._winreg = MockWinreg()
+ try:
+ # this used to throw an exception if registry contained non-Latin
+ # characters in extensions (issue #9291)
+ mimetypes.init()
+ finally:
+ mimetypes._winreg = _winreg
+
+ def test_non_latin_type(self):
+ import _winreg
+
+ class MockWinreg(object):
+ def __getattr__(self, name):
+ if name == 'QueryValueEx':
+ return lambda subkey, label: (u'текст/простой', _winreg.REG_SZ)
+ return getattr(_winreg, name)
+
+ mimetypes._winreg = MockWinreg()
+ try:
+ # this used to throw an exception if registry contained non-Latin
+ # characters in content types (issue #9291)
+ mimetypes.init()
+ finally:
+ mimetypes._winreg = _winreg
+
+ def test_type_map_values(self):
+ import _winreg
+
+ class MockWinreg(object):
+ def __getattr__(self, name):
+ if name == 'QueryValueEx':
+ return lambda subkey, label: (u'text/plain', _winreg.REG_SZ)
+ return getattr(_winreg, name)
+
+ mimetypes._winreg = MockWinreg()
+ try:
+ mimetypes.init()
+ self.assertTrue(isinstance(mimetypes.types_map.values()[0], str))
+ finally:
+ mimetypes._winreg = _winreg
+
+ def test_registry_read_error(self):
+ import _winreg
+
+ class MockWinreg(object):
+ def OpenKey(self, key, name):
+ if key != _winreg.HKEY_CLASSES_ROOT:
+ raise WindowsError(5, "Access is denied")
+ return _winreg.OpenKey(key, name)
+ def __getattr__(self, name):
+ return getattr(_winreg, name)
+
+ mimetypes._winreg = MockWinreg()
+ try:
+ mimetypes.init()
+ finally:
+ mimetypes._winreg = _winreg
def test_main():
test_support.run_unittest(MimeTypesTestCase,
Win32MimeTypesTestCase
- )
+ )
if __name__ == "__main__":
diff --git a/Lib/test/test_minidom.py b/Lib/test/test_minidom.py
index 060c1a5..66973ed 100644
--- a/Lib/test/test_minidom.py
+++ b/Lib/test/test_minidom.py
@@ -340,19 +340,6 @@ class MinidomTest(unittest.TestCase):
and el.getAttribute("spam2") == "bam2")
dom.unlink()
- def testGetAttrList(self):
- pass
-
- def testGetAttrValues(self): pass
-
- def testGetAttrLength(self): pass
-
- def testGetAttribute(self): pass
-
- def testGetAttributeNS(self): pass
-
- def testGetAttributeNode(self): pass
-
def testGetElementsByTagNameNS(self):
d="""<foo xmlns:minidom='http://pyxml.sf.net/minidom'>
<minidom:myelem/>
@@ -423,8 +410,6 @@ class MinidomTest(unittest.TestCase):
self.confirm(str(node) == repr(node))
dom.unlink()
- def testTextNodeRepr(self): pass
-
def testWriteXML(self):
str = '<?xml version="1.0" ?><a b="c"/>'
dom = parseString(str)
@@ -488,14 +473,6 @@ class MinidomTest(unittest.TestCase):
and pi.localName is None
and pi.namespaceURI == xml.dom.EMPTY_NAMESPACE)
- def testProcessingInstructionRepr(self): pass
-
- def testTextRepr(self): pass
-
- def testWriteText(self): pass
-
- def testDocumentElement(self): pass
-
def testTooManyDocumentElements(self):
doc = parseString("<doc/>")
elem = doc.createElement("extra")
@@ -504,26 +481,6 @@ class MinidomTest(unittest.TestCase):
elem.unlink()
doc.unlink()
- def testCreateElementNS(self): pass
-
- def testCreateAttributeNS(self): pass
-
- def testParse(self): pass
-
- def testParseString(self): pass
-
- def testComment(self): pass
-
- def testAttrListItem(self): pass
-
- def testAttrListItems(self): pass
-
- def testAttrListItemNS(self): pass
-
- def testAttrListKeys(self): pass
-
- def testAttrListKeysNS(self): pass
-
def testRemoveNamedItem(self):
doc = parseString("<doc a=''/>")
e = doc.documentElement
@@ -543,32 +500,6 @@ class MinidomTest(unittest.TestCase):
self.assertRaises(xml.dom.NotFoundErr, attrs.removeNamedItemNS,
"http://xml.python.org/", "b")
- def testAttrListValues(self): pass
-
- def testAttrListLength(self): pass
-
- def testAttrList__getitem__(self): pass
-
- def testAttrList__setitem__(self): pass
-
- def testSetAttrValueandNodeValue(self): pass
-
- def testParseElement(self): pass
-
- def testParseAttributes(self): pass
-
- def testParseElementNamespaces(self): pass
-
- def testParseAttributeNamespaces(self): pass
-
- def testParseProcessingInstructions(self): pass
-
- def testChildNodes(self): pass
-
- def testFirstChild(self): pass
-
- def testHasChildNodes(self): pass
-
def _testCloneElementCopiesAttributes(self, e1, e2, test):
attrs1 = e1.attributes
attrs2 = e2.attributes
@@ -1060,7 +991,7 @@ class MinidomTest(unittest.TestCase):
'<?xml version="1.0" encoding="iso-8859-15"?><foo>\xa4</foo>',
"testEncodings - encoding EURO SIGN")
- # Verify that character decoding errors throw exceptions instead
+ # Verify that character decoding errors raise exceptions instead
# of crashing
self.assertRaises(UnicodeDecodeError, parseString,
'<fran\xe7ais>Comment \xe7a va ? Tr\xe8s bien ?</fran\xe7ais>')
diff --git a/Lib/test/test_mmap.py b/Lib/test/test_mmap.py
index 2c2863e..62c65bd 100644
--- a/Lib/test/test_mmap.py
+++ b/Lib/test/test_mmap.py
@@ -320,26 +320,25 @@ class MmapTests(unittest.TestCase):
mf.close()
f.close()
+ @unittest.skipUnless(hasattr(os, "stat"), "needs os.stat()")
def test_entire_file(self):
# test mapping of entire file by passing 0 for map length
- if hasattr(os, "stat"):
- f = open(TESTFN, "w+")
+ f = open(TESTFN, "w+")
- f.write(2**16 * 'm') # Arbitrary character
- f.close()
+ f.write(2**16 * 'm') # Arbitrary character
+ f.close()
- f = open(TESTFN, "rb+")
- mf = mmap.mmap(f.fileno(), 0)
- self.assertEqual(len(mf), 2**16, "Map size should equal file size.")
- self.assertEqual(mf.read(2**16), 2**16 * "m")
- mf.close()
- f.close()
+ f = open(TESTFN, "rb+")
+ mf = mmap.mmap(f.fileno(), 0)
+ self.assertEqual(len(mf), 2**16, "Map size should equal file size.")
+ self.assertEqual(mf.read(2**16), 2**16 * "m")
+ mf.close()
+ f.close()
+ @unittest.skipUnless(hasattr(os, "stat"), "needs os.stat()")
def test_length_0_offset(self):
# Issue #10916: test mapping of remainder of file by passing 0 for
# map length with an offset doesn't cause a segfault.
- if not hasattr(os, "stat"):
- self.skipTest("needs os.stat")
# NOTE: allocation granularity is currently 65536 under Win64,
# and therefore the minimum offset alignment.
with open(TESTFN, "wb") as f:
@@ -352,12 +351,10 @@ class MmapTests(unittest.TestCase):
finally:
mf.close()
+ @unittest.skipUnless(hasattr(os, "stat"), "needs os.stat()")
def test_length_0_large_offset(self):
# Issue #10959: test mapping of a file by passing 0 for
# map length with a large offset doesn't cause a segfault.
- if not hasattr(os, "stat"):
- self.skipTest("needs os.stat")
-
with open(TESTFN, "wb") as f:
f.write(115699 * b'm') # Arbitrary character
@@ -466,6 +463,15 @@ class MmapTests(unittest.TestCase):
f.flush ()
return mmap.mmap (f.fileno(), 0)
+ def test_empty_file (self):
+ f = open (TESTFN, 'w+b')
+ f.close()
+ with open(TESTFN, "rb") as f :
+ self.assertRaisesRegexp(ValueError,
+ "cannot mmap an empty file",
+ mmap.mmap, f.fileno(), 0,
+ access=mmap.ACCESS_READ)
+
def test_offset (self):
f = open (TESTFN, 'w+b')
@@ -529,9 +535,8 @@ class MmapTests(unittest.TestCase):
return mmap.mmap.__new__(klass, -1, *args, **kwargs)
anon_mmap(PAGESIZE)
+ @unittest.skipUnless(hasattr(mmap, 'PROT_READ'), "needs mmap.PROT_READ")
def test_prot_readonly(self):
- if not hasattr(mmap, 'PROT_READ'):
- return
mapsize = 10
open(TESTFN, "wb").write("a"*mapsize)
f = open(TESTFN, "rb")
@@ -575,66 +580,68 @@ class MmapTests(unittest.TestCase):
m.seek(8)
self.assertRaises(ValueError, m.write, "bar")
- if os.name == 'nt':
- def test_tagname(self):
- data1 = "0123456789"
- data2 = "abcdefghij"
- assert len(data1) == len(data2)
-
- # Test same tag
- m1 = mmap.mmap(-1, len(data1), tagname="foo")
- m1[:] = data1
- m2 = mmap.mmap(-1, len(data2), tagname="foo")
- m2[:] = data2
- self.assertEqual(m1[:], data2)
- self.assertEqual(m2[:], data2)
- m2.close()
- m1.close()
-
- # Test different tag
- m1 = mmap.mmap(-1, len(data1), tagname="foo")
- m1[:] = data1
- m2 = mmap.mmap(-1, len(data2), tagname="boo")
- m2[:] = data2
- self.assertEqual(m1[:], data1)
- self.assertEqual(m2[:], data2)
- m2.close()
- m1.close()
-
- def test_crasher_on_windows(self):
- # Should not crash (Issue 1733986)
- m = mmap.mmap(-1, 1000, tagname="foo")
- try:
- mmap.mmap(-1, 5000, tagname="foo")[:] # same tagname, but larger size
- except:
- pass
- m.close()
+ @unittest.skipUnless(os.name == 'nt', 'requires Windows')
+ def test_tagname(self):
+ data1 = "0123456789"
+ data2 = "abcdefghij"
+ assert len(data1) == len(data2)
+
+ # Test same tag
+ m1 = mmap.mmap(-1, len(data1), tagname="foo")
+ m1[:] = data1
+ m2 = mmap.mmap(-1, len(data2), tagname="foo")
+ m2[:] = data2
+ self.assertEqual(m1[:], data2)
+ self.assertEqual(m2[:], data2)
+ m2.close()
+ m1.close()
+
+ # Test different tag
+ m1 = mmap.mmap(-1, len(data1), tagname="foo")
+ m1[:] = data1
+ m2 = mmap.mmap(-1, len(data2), tagname="boo")
+ m2[:] = data2
+ self.assertEqual(m1[:], data1)
+ self.assertEqual(m2[:], data2)
+ m2.close()
+ m1.close()
+
+ @unittest.skipUnless(os.name == 'nt', 'requires Windows')
+ def test_crasher_on_windows(self):
+ # Should not crash (Issue 1733986)
+ m = mmap.mmap(-1, 1000, tagname="foo")
+ try:
+ mmap.mmap(-1, 5000, tagname="foo")[:] # same tagname, but larger size
+ except:
+ pass
+ m.close()
- # Should not crash (Issue 5385)
- open(TESTFN, "wb").write("x"*10)
- f = open(TESTFN, "r+b")
- m = mmap.mmap(f.fileno(), 0)
- f.close()
- try:
- m.resize(0) # will raise WindowsError
- except:
- pass
- try:
- m[:]
- except:
- pass
- m.close()
+ # Should not crash (Issue 5385)
+ open(TESTFN, "wb").write("x"*10)
+ f = open(TESTFN, "r+b")
+ m = mmap.mmap(f.fileno(), 0)
+ f.close()
+ try:
+ m.resize(0) # will raise WindowsError
+ except:
+ pass
+ try:
+ m[:]
+ except:
+ pass
+ m.close()
- def test_invalid_descriptor(self):
- # socket file descriptors are valid, but out of range
- # for _get_osfhandle, causing a crash when validating the
- # parameters to _get_osfhandle.
- s = socket.socket()
- try:
- with self.assertRaises(mmap.error):
- m = mmap.mmap(s.fileno(), 10)
- finally:
- s.close()
+ @unittest.skipUnless(os.name == 'nt', 'requires Windows')
+ def test_invalid_descriptor(self):
+ # socket file descriptors are valid, but out of range
+ # for _get_osfhandle, causing a crash when validating the
+ # parameters to _get_osfhandle.
+ s = socket.socket()
+ try:
+ with self.assertRaises(mmap.error):
+ m = mmap.mmap(s.fileno(), 10)
+ finally:
+ s.close()
class LargeMmapTests(unittest.TestCase):
@@ -669,6 +676,13 @@ class LargeMmapTests(unittest.TestCase):
def test_large_filesize(self):
with self._make_test_file(0x17FFFFFFF, b" ") as f:
+ if sys.maxsize < 0x180000000:
+ # On 32 bit platforms the file is larger than sys.maxsize so
+ # mapping the whole file should fail -- Issue #16743
+ with self.assertRaises(OverflowError):
+ mmap.mmap(f.fileno(), 0x180000000, access=mmap.ACCESS_READ)
+ with self.assertRaises(ValueError):
+ mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
m = mmap.mmap(f.fileno(), 0x10000, access=mmap.ACCESS_READ)
try:
self.assertEqual(m.size(), 0x180000000)
diff --git a/Lib/test/test_multibytecodec.py b/Lib/test/test_multibytecodec.py
index 5e86ca2..8aca381 100644
--- a/Lib/test/test_multibytecodec.py
+++ b/Lib/test/test_multibytecodec.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
# test_multibytecodec.py
# Unit test for multibytecodec itself
#
@@ -157,57 +155,55 @@ class Test_StreamReader(unittest.TestCase):
os.unlink(TESTFN)
class Test_StreamWriter(unittest.TestCase):
- if len(u'\U00012345') == 2: # UCS2
- def test_gb18030(self):
- s = StringIO.StringIO()
- c = codecs.getwriter('gb18030')(s)
- c.write(u'123')
- self.assertEqual(s.getvalue(), '123')
- c.write(u'\U00012345')
- self.assertEqual(s.getvalue(), '123\x907\x959')
+ @unittest.skipUnless(len(u'\U00012345') == 2, 'need a narrow build')
+ def test_gb18030(self):
+ s = StringIO.StringIO()
+ c = codecs.getwriter('gb18030')(s)
+ c.write(u'123')
+ self.assertEqual(s.getvalue(), '123')
+ c.write(u'\U00012345')
+ self.assertEqual(s.getvalue(), '123\x907\x959')
+ c.write(u'\U00012345'[0])
+ self.assertEqual(s.getvalue(), '123\x907\x959')
+ c.write(u'\U00012345'[1] + u'\U00012345' + u'\uac00\u00ac')
+ self.assertEqual(s.getvalue(),
+ '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
+ c.write(u'\U00012345'[0])
+ self.assertEqual(s.getvalue(),
+ '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
+ self.assertRaises(UnicodeError, c.reset)
+ self.assertEqual(s.getvalue(),
+ '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
+
+ @unittest.skipUnless(len(u'\U00012345') == 2, 'need a narrow build')
+ def test_utf_8(self):
+ s = StringIO.StringIO()
+ c = codecs.getwriter('utf-8')(s)
+ c.write(u'123')
+ self.assertEqual(s.getvalue(), '123')
+ c.write(u'\U00012345')
+ self.assertEqual(s.getvalue(), '123\xf0\x92\x8d\x85')
+
+ # Python utf-8 codec can't buffer surrogate pairs yet.
+ if 0:
c.write(u'\U00012345'[0])
- self.assertEqual(s.getvalue(), '123\x907\x959')
+ self.assertEqual(s.getvalue(), '123\xf0\x92\x8d\x85')
c.write(u'\U00012345'[1] + u'\U00012345' + u'\uac00\u00ac')
self.assertEqual(s.getvalue(),
- '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
+ '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
+ '\xea\xb0\x80\xc2\xac')
c.write(u'\U00012345'[0])
self.assertEqual(s.getvalue(),
- '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
- self.assertRaises(UnicodeError, c.reset)
+ '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
+ '\xea\xb0\x80\xc2\xac')
+ c.reset()
self.assertEqual(s.getvalue(),
- '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
-
- def test_utf_8(self):
- s= StringIO.StringIO()
- c = codecs.getwriter('utf-8')(s)
- c.write(u'123')
- self.assertEqual(s.getvalue(), '123')
- c.write(u'\U00012345')
- self.assertEqual(s.getvalue(), '123\xf0\x92\x8d\x85')
-
- # Python utf-8 codec can't buffer surrogate pairs yet.
- if 0:
- c.write(u'\U00012345'[0])
- self.assertEqual(s.getvalue(), '123\xf0\x92\x8d\x85')
- c.write(u'\U00012345'[1] + u'\U00012345' + u'\uac00\u00ac')
- self.assertEqual(s.getvalue(),
- '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
- '\xea\xb0\x80\xc2\xac')
- c.write(u'\U00012345'[0])
- self.assertEqual(s.getvalue(),
- '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
- '\xea\xb0\x80\xc2\xac')
- c.reset()
- self.assertEqual(s.getvalue(),
- '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
- '\xea\xb0\x80\xc2\xac\xed\xa0\x88')
- c.write(u'\U00012345'[1])
- self.assertEqual(s.getvalue(),
- '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
- '\xea\xb0\x80\xc2\xac\xed\xa0\x88\xed\xbd\x85')
-
- else: # UCS4
- pass
+ '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
+ '\xea\xb0\x80\xc2\xac\xed\xa0\x88')
+ c.write(u'\U00012345'[1])
+ self.assertEqual(s.getvalue(),
+ '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
+ '\xea\xb0\x80\xc2\xac\xed\xa0\x88\xed\xbd\x85')
def test_streamwriter_strwrite(self):
s = StringIO.StringIO()
diff --git a/Lib/test/test_multibytecodec_support.py b/Lib/test/test_multibytecodec_support.py
index 52a2e50..c5dcfa3 100644
--- a/Lib/test/test_multibytecodec_support.py
+++ b/Lib/test/test_multibytecodec_support.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
# test_multibytecodec_support.py
# Common Unittest Routines for CJK codecs
#
@@ -67,7 +65,7 @@ class TestBase:
def test_xmlcharrefreplace(self):
if self.has_iso10646:
- return
+ self.skipTest('encoding contains full ISO 10646 map')
s = u"\u0b13\u0b23\u0b60 nd eggs"
self.assertEqual(
@@ -77,7 +75,7 @@ class TestBase:
def test_customreplace_encode(self):
if self.has_iso10646:
- return
+ self.skipTest('encoding contains full ISO 10646 map')
from htmlentitydefs import codepoint2name
diff --git a/Lib/test/test_multiprocessing.py b/Lib/test/test_multiprocessing.py
index e5258bb..42f6dd9 100644
--- a/Lib/test/test_multiprocessing.py
+++ b/Lib/test/test_multiprocessing.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
#
# Unit tests for the multiprocessing package
#
@@ -16,6 +14,7 @@ import socket
import random
import logging
import errno
+import test.script_helper
from test import test_support
from StringIO import StringIO
_multiprocessing = test_support.import_module('_multiprocessing')
@@ -181,7 +180,7 @@ class _TestProcess(BaseTestCase):
def test_current(self):
if self.TYPE == 'threads':
- return
+ self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
@@ -248,7 +247,7 @@ class _TestProcess(BaseTestCase):
def test_terminate(self):
if self.TYPE == 'threads':
- return
+ self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._test_terminate)
p.daemon = True
@@ -325,6 +324,36 @@ class _TestProcess(BaseTestCase):
]
self.assertEqual(result, expected)
+ @classmethod
+ def _test_sys_exit(cls, reason, testfn):
+ sys.stderr = open(testfn, 'w')
+ sys.exit(reason)
+
+ def test_sys_exit(self):
+ # See Issue 13854
+ if self.TYPE == 'threads':
+ self.skipTest('test not appropriate for {}'.format(self.TYPE))
+
+ testfn = test_support.TESTFN
+ self.addCleanup(test_support.unlink, testfn)
+
+ for reason, code in (([1, 2, 3], 1), ('ignore this', 1)):
+ p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
+ p.daemon = True
+ p.start()
+ p.join(5)
+ self.assertEqual(p.exitcode, code)
+
+ with open(testfn, 'r') as f:
+ self.assertEqual(f.read().rstrip(), str(reason))
+
+ for reason in (True, False, 8):
+ p = self.Process(target=sys.exit, args=(reason,))
+ p.daemon = True
+ p.start()
+ p.join(5)
+ self.assertEqual(p.exitcode, reason)
+
#
#
#
@@ -551,7 +580,7 @@ class _TestQueue(BaseTestCase):
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
- return
+ self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
@@ -652,7 +681,7 @@ class _TestSemaphore(BaseTestCase):
def test_timeout(self):
if self.TYPE != 'processes':
- return
+ self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
@@ -1086,6 +1115,16 @@ class _TestPool(BaseTestCase):
self.assertEqual(pmap(sqr, range(100), chunksize=20),
map(sqr, range(100)))
+ def test_map_unpicklable(self):
+ # Issue #19425 -- failure to pickle should not cause a hang
+ if self.TYPE == 'threads':
+ self.skipTest('test not appropriate for {}'.format(self.TYPE))
+ class A(object):
+ def __reduce__(self):
+ raise RuntimeError('cannot pickle')
+ with self.assertRaises(RuntimeError):
+ self.pool.map(sqr, [A()]*10)
+
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
@@ -1099,7 +1138,7 @@ class _TestPool(BaseTestCase):
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
- res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
+ res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
@@ -1135,23 +1174,45 @@ class _TestPool(BaseTestCase):
p.join()
def test_terminate(self):
- if self.TYPE == 'manager':
- # On Unix a forked process increfs each shared object to
- # which its parent process held a reference. If the
- # forked process gets terminated then there is likely to
- # be a reference leak. So to prevent
- # _TestZZZNumberOfObjects from failing we skip this test
- # when using a manager.
- return
-
- result = self.pool.map_async(
+ p = self.Pool(4)
+ result = p.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
- self.pool.terminate()
- join = TimingWrapper(self.pool.join)
+ p.terminate()
+ join = TimingWrapper(p.join)
join()
self.assertTrue(join.elapsed < 0.2)
+ def test_empty_iterable(self):
+ # See Issue 12157
+ p = self.Pool(1)
+
+ self.assertEqual(p.map(sqr, []), [])
+ self.assertEqual(list(p.imap(sqr, [])), [])
+ self.assertEqual(list(p.imap_unordered(sqr, [])), [])
+ self.assertEqual(p.map_async(sqr, []).get(), [])
+
+ p.close()
+ p.join()
+
+def unpickleable_result():
+ return lambda: 42
+
+class _TestPoolWorkerErrors(BaseTestCase):
+ ALLOWED_TYPES = ('processes', )
+
+ def test_unpickleable_result(self):
+ from multiprocessing.pool import MaybeEncodingError
+ p = multiprocessing.Pool(2)
+
+ # Make sure we don't lose pool processes because of encoding errors.
+ for iteration in range(20):
+ res = p.apply_async(unpickleable_result)
+ self.assertRaises(MaybeEncodingError, res.get)
+
+ p.close()
+ p.join()
+
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
@@ -1324,7 +1385,7 @@ class _TestRemoteManager(BaseTestCase):
authkey = os.urandom(32)
manager = QueueManager(
- address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
+ address=(test.test_support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
@@ -1362,7 +1423,7 @@ class _TestManagerRestart(BaseTestCase):
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
- address=('localhost', 0), authkey=authkey, serializer=SERIALIZER)
+ address=(test.test_support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
@@ -1452,6 +1513,7 @@ class _TestConnection(BaseTestCase):
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
+ time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
@@ -1509,7 +1571,7 @@ class _TestConnection(BaseTestCase):
def test_sendbytes(self):
if self.TYPE != 'processes':
- return
+ self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
@@ -1651,6 +1713,23 @@ class _TestListenerClient(BaseTestCase):
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
+
+ def test_issue14725(self):
+ l = self.connection.Listener()
+ p = self.Process(target=self._test, args=(l.address,))
+ p.daemon = True
+ p.start()
+ time.sleep(1)
+ # On Windows the client process should by now have connected,
+ # written data and closed the pipe handle. This causes
+ # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
+ # 14725.
+ conn = l.accept()
+ self.assertEqual(conn.recv(), 'hello')
+ conn.close()
+ p.join()
+ l.close()
+
#
# Test of sending connection and socket objects between processes
#
@@ -2026,6 +2105,38 @@ class _TestLogging(BaseTestCase):
# assert self.__handled
#
+# Check that Process.join() retries if os.waitpid() fails with EINTR
+#
+
+class _TestPollEintr(BaseTestCase):
+
+ ALLOWED_TYPES = ('processes',)
+
+ @classmethod
+ def _killer(cls, pid):
+ time.sleep(0.5)
+ os.kill(pid, signal.SIGUSR1)
+
+ @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
+ def test_poll_eintr(self):
+ got_signal = [False]
+ def record(*args):
+ got_signal[0] = True
+ pid = os.getpid()
+ oldhandler = signal.signal(signal.SIGUSR1, record)
+ try:
+ killer = self.Process(target=self._killer, args=(pid,))
+ killer.start()
+ p = self.Process(target=time.sleep, args=(1,))
+ p.start()
+ p.join()
+ self.assertTrue(got_signal[0])
+ self.assertEqual(p.exitcode, 0)
+ killer.join()
+ finally:
+ signal.signal(signal.SIGUSR1, oldhandler)
+
+#
# Test to verify handle verification, see issue 3321
#
@@ -2078,7 +2189,7 @@ class ProcessesMixin(object):
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'RawValue',
'RawArray', 'current_process', 'active_children', 'Pipe',
- 'connection', 'JoinableQueue'
+ 'connection', 'JoinableQueue', 'Pool'
)))
testcases_processes = create_test_cases(ProcessesMixin, type='processes')
@@ -2092,7 +2203,7 @@ class ManagerMixin(object):
locals().update(get_attributes(manager, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
- 'Namespace', 'JoinableQueue'
+ 'Namespace', 'JoinableQueue', 'Pool'
)))
testcases_manager = create_test_cases(ManagerMixin, type='manager')
@@ -2106,7 +2217,7 @@ class ThreadsMixin(object):
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'current_process',
'active_children', 'Pipe', 'connection', 'dict', 'list',
- 'Namespace', 'JoinableQueue'
+ 'Namespace', 'JoinableQueue', 'Pool'
)))
testcases_threads = create_test_cases(ThreadsMixin, type='threads')
@@ -2176,15 +2287,15 @@ class TestInitializers(unittest.TestCase):
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
-def _ThisSubProcess(q):
+def _this_sub_process(q):
try:
item = q.get(block=False)
except Queue.Empty:
pass
-def _TestProcess(q):
+def _test_process(q):
queue = multiprocessing.Queue()
- subProc = multiprocessing.Process(target=_ThisSubProcess, args=(queue,))
+ subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
@@ -2221,7 +2332,7 @@ class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = multiprocessing.Queue()
- proc = multiprocessing.Process(target=_TestProcess, args=(queue,))
+ proc = multiprocessing.Process(target=_test_process, args=(queue,))
proc.start()
proc.join()
@@ -2238,8 +2349,192 @@ class TestStdinBadfiledescriptor(unittest.TestCase):
flike.flush()
assert sio.getvalue() == 'foo'
+#
+# Test interaction with socket timeouts - see Issue #6056
+#
+
+class TestTimeouts(unittest.TestCase):
+ @classmethod
+ def _test_timeout(cls, child, address):
+ time.sleep(1)
+ child.send(123)
+ child.close()
+ conn = multiprocessing.connection.Client(address)
+ conn.send(456)
+ conn.close()
+
+ def test_timeout(self):
+ old_timeout = socket.getdefaulttimeout()
+ try:
+ socket.setdefaulttimeout(0.1)
+ parent, child = multiprocessing.Pipe(duplex=True)
+ l = multiprocessing.connection.Listener(family='AF_INET')
+ p = multiprocessing.Process(target=self._test_timeout,
+ args=(child, l.address))
+ p.start()
+ child.close()
+ self.assertEqual(parent.recv(), 123)
+ parent.close()
+ conn = l.accept()
+ self.assertEqual(conn.recv(), 456)
+ conn.close()
+ l.close()
+ p.join(10)
+ finally:
+ socket.setdefaulttimeout(old_timeout)
+
+#
+# Test what happens with no "if __name__ == '__main__'"
+#
+
+class TestNoForkBomb(unittest.TestCase):
+ def test_noforkbomb(self):
+ name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
+ if WIN32:
+ rc, out, err = test.script_helper.assert_python_failure(name)
+ self.assertEqual('', out.decode('ascii'))
+ self.assertIn('RuntimeError', err.decode('ascii'))
+ else:
+ rc, out, err = test.script_helper.assert_python_ok(name)
+ self.assertEqual('123', out.decode('ascii').rstrip())
+ self.assertEqual('', err.decode('ascii'))
+
+#
+# Issue 12098: check sys.flags of child matches that for parent
+#
+
+class TestFlags(unittest.TestCase):
+ @classmethod
+ def run_in_grandchild(cls, conn):
+ conn.send(tuple(sys.flags))
+
+ @classmethod
+ def run_in_child(cls):
+ import json
+ r, w = multiprocessing.Pipe(duplex=False)
+ p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
+ p.start()
+ grandchild_flags = r.recv()
+ p.join()
+ r.close()
+ w.close()
+ flags = (tuple(sys.flags), grandchild_flags)
+ print(json.dumps(flags))
+
+ def test_flags(self):
+ import json, subprocess
+ # start child process using unusual flags
+ prog = ('from test.test_multiprocessing import TestFlags; ' +
+ 'TestFlags.run_in_child()')
+ data = subprocess.check_output(
+ [sys.executable, '-E', '-B', '-O', '-c', prog])
+ child_flags, grandchild_flags = json.loads(data.decode('ascii'))
+ self.assertEqual(child_flags, grandchild_flags)
+
+#
+# Issue #17555: ForkAwareThreadLock
+#
+
+class TestForkAwareThreadLock(unittest.TestCase):
+ # We recursively start processes. Issue #17555 meant that the
+ # after fork registry would get duplicate entries for the same
+ # lock. The size of the registry at generation n was ~2**n.
+
+ @classmethod
+ def child(cls, n, conn):
+ if n > 1:
+ p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
+ p.start()
+ p.join()
+ else:
+ conn.send(len(util._afterfork_registry))
+ conn.close()
+
+ def test_lock(self):
+ r, w = multiprocessing.Pipe(False)
+ l = util.ForkAwareThreadLock()
+ old_size = len(util._afterfork_registry)
+ p = multiprocessing.Process(target=self.child, args=(5, w))
+ p.start()
+ new_size = r.recv()
+ p.join()
+ self.assertLessEqual(new_size, old_size)
+
+#
+# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
+#
+
+class TestIgnoreEINTR(unittest.TestCase):
+
+ @classmethod
+ def _test_ignore(cls, conn):
+ def handler(signum, frame):
+ pass
+ signal.signal(signal.SIGUSR1, handler)
+ conn.send('ready')
+ x = conn.recv()
+ conn.send(x)
+ conn.send_bytes(b'x'*(1024*1024)) # sending 1 MB should block
+
+ @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
+ def test_ignore(self):
+ conn, child_conn = multiprocessing.Pipe()
+ try:
+ p = multiprocessing.Process(target=self._test_ignore,
+ args=(child_conn,))
+ p.daemon = True
+ p.start()
+ child_conn.close()
+ self.assertEqual(conn.recv(), 'ready')
+ time.sleep(0.1)
+ os.kill(p.pid, signal.SIGUSR1)
+ time.sleep(0.1)
+ conn.send(1234)
+ self.assertEqual(conn.recv(), 1234)
+ time.sleep(0.1)
+ os.kill(p.pid, signal.SIGUSR1)
+ self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
+ time.sleep(0.1)
+ p.join()
+ finally:
+ conn.close()
+
+ @classmethod
+ def _test_ignore_listener(cls, conn):
+ def handler(signum, frame):
+ pass
+ signal.signal(signal.SIGUSR1, handler)
+ l = multiprocessing.connection.Listener()
+ conn.send(l.address)
+ a = l.accept()
+ a.send('welcome')
+
+ @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
+ def test_ignore_listener(self):
+ conn, child_conn = multiprocessing.Pipe()
+ try:
+ p = multiprocessing.Process(target=self._test_ignore_listener,
+ args=(child_conn,))
+ p.daemon = True
+ p.start()
+ child_conn.close()
+ address = conn.recv()
+ time.sleep(0.1)
+ os.kill(p.pid, signal.SIGUSR1)
+ time.sleep(0.1)
+ client = multiprocessing.connection.Client(address)
+ self.assertEqual(client.recv(), 'welcome')
+ p.join()
+ finally:
+ conn.close()
+
+#
+#
+#
+
testcases_other = [OtherTest, TestInvalidHandle, TestInitializers,
- TestStdinBadfiledescriptor]
+ TestStdinBadfiledescriptor, TestTimeouts, TestNoForkBomb,
+ TestFlags, TestForkAwareThreadLock, TestIgnoreEINTR]
#
#
diff --git a/Lib/test/test_mutex.py b/Lib/test/test_mutex.py
index 2882213..030080e 100644
--- a/Lib/test/test_mutex.py
+++ b/Lib/test/test_mutex.py
@@ -14,7 +14,7 @@ class MutexTest(unittest.TestCase):
m.lock(called_by_mutex2, "eggs")
def called_by_mutex2(some_data):
- self.assertEquals(some_data, "eggs")
+ self.assertEqual(some_data, "eggs")
self.assertTrue(m.test(), "mutex not held")
self.assertTrue(ready_for_2,
"called_by_mutex2 called too soon")
diff --git a/Lib/test/test_netrc.py b/Lib/test/test_netrc.py
index 2795456..4156c53 100644
--- a/Lib/test/test_netrc.py
+++ b/Lib/test/test_netrc.py
@@ -5,9 +5,6 @@ temp_filename = test_support.TESTFN
class NetrcTestCase(unittest.TestCase):
- def tearDown(self):
- os.unlink(temp_filename)
-
def make_nrc(self, test_data):
test_data = textwrap.dedent(test_data)
mode = 'w'
@@ -15,6 +12,7 @@ class NetrcTestCase(unittest.TestCase):
mode += 't'
with open(temp_filename, mode) as fp:
fp.write(test_data)
+ self.addCleanup(os.unlink, temp_filename)
return netrc.netrc(temp_filename)
def test_default(self):
@@ -103,6 +101,28 @@ class NetrcTestCase(unittest.TestCase):
""", '#pass')
+ @unittest.skipUnless(os.name == 'posix', 'POSIX only test')
+ def test_security(self):
+ # This test is incomplete since we are normally not run as root and
+ # therefore can't test the file ownership being wrong.
+ d = test_support.TESTFN
+ os.mkdir(d)
+ self.addCleanup(test_support.rmtree, d)
+ fn = os.path.join(d, '.netrc')
+ with open(fn, 'wt') as f:
+ f.write("""\
+ machine foo.domain.com login bar password pass
+ default login foo password pass
+ """)
+ with test_support.EnvironmentVarGuard() as environ:
+ environ.set('HOME', d)
+ os.chmod(fn, 0600)
+ nrc = netrc.netrc()
+ self.assertEqual(nrc.hosts['foo.domain.com'],
+ ('bar', None, 'pass'))
+ os.chmod(fn, 0o622)
+ self.assertRaises(netrc.NetrcParseError, netrc.netrc)
+
def test_main():
test_support.run_unittest(NetrcTestCase)
diff --git a/Lib/test/test_nis.py b/Lib/test/test_nis.py
index 8d49550..2a9f2a8 100644
--- a/Lib/test/test_nis.py
+++ b/Lib/test/test_nis.py
@@ -9,11 +9,7 @@ class NisTests(unittest.TestCase):
maps = nis.maps()
except nis.error, msg:
# NIS is probably not active, so this test isn't useful
- if test_support.verbose:
- print "Test Skipped:", msg
- # Can't raise SkipTest as regrtest only recognizes the exception
- # import time.
- return
+ self.skipTest(str(msg))
try:
# On some systems, this map is only accessible to the
# super user
diff --git a/Lib/test/test_nntplib.py b/Lib/test/test_nntplib.py
new file mode 100644
index 0000000..a7f2267
--- /dev/null
+++ b/Lib/test/test_nntplib.py
@@ -0,0 +1,73 @@
+import socket
+import nntplib
+import time
+import unittest
+
+try:
+ import threading
+except ImportError:
+ threading = None
+
+
+from unittest import TestCase
+from test import test_support
+
+HOST = test_support.HOST
+
+
+def server(evt, serv, evil=False):
+ serv.listen(5)
+ try:
+ conn, addr = serv.accept()
+ except socket.timeout:
+ pass
+ else:
+ if evil:
+ conn.send("1 I'm too long response" * 3000 + "\n")
+ else:
+ conn.send("1 I'm OK response\n")
+ conn.close()
+ finally:
+ serv.close()
+ evt.set()
+
+
+class BaseServerTest(TestCase):
+ def setUp(self):
+ self.evt = threading.Event()
+ self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.sock.settimeout(3)
+ self.port = test_support.bind_port(self.sock)
+ threading.Thread(
+ target=server,
+ args=(self.evt, self.sock, self.evil)).start()
+ time.sleep(.1)
+
+ def tearDown(self):
+ self.evt.wait()
+
+
+@unittest.skipUnless(threading, 'threading required')
+class ServerTests(BaseServerTest):
+ evil = False
+
+ def test_basic_connect(self):
+ nntp = nntplib.NNTP('localhost', self.port)
+ nntp.sock.close()
+
+
+@unittest.skipUnless(threading, 'threading required')
+class EvilServerTests(BaseServerTest):
+ evil = True
+
+ def test_too_long_line(self):
+ self.assertRaises(nntplib.NNTPDataError,
+ nntplib.NNTP, 'localhost', self.port)
+
+
+def test_main(verbose=None):
+ test_support.run_unittest(EvilServerTests)
+ test_support.run_unittest(ServerTests)
+
+if __name__ == '__main__':
+ test_main()
diff --git a/Lib/test/test_normalization.py b/Lib/test/test_normalization.py
index 3040a08..1c45ad5 100644
--- a/Lib/test/test_normalization.py
+++ b/Lib/test/test_normalization.py
@@ -57,7 +57,7 @@ class NormalizationTest(unittest.TestCase):
c1,c2,c3,c4,c5 = [unistr(x) for x in line.split(';')[:-1]]
except RangeError:
# Skip unsupported characters;
- # try atleast adding c1 if we are in part1
+ # try at least adding c1 if we are in part1
if part == "@Part1":
try:
c1 = unistr(line.split(';')[0])
diff --git a/Lib/test/test_ntpath.py b/Lib/test/test_ntpath.py
index e1852c0..78af18c 100644
--- a/Lib/test/test_ntpath.py
+++ b/Lib/test/test_ntpath.py
@@ -1,16 +1,20 @@
+# coding: utf-8
import ntpath
import os
+import sys
from test.test_support import TestFailed
from test import test_support, test_genericpath
import unittest
+def tester0(fn, wantResult):
+ gotResult = eval(fn)
+ if wantResult != gotResult:
+ raise TestFailed, "%s should return: %r but returned: %r" \
+ %(fn, wantResult, gotResult)
def tester(fn, wantResult):
fn = fn.replace("\\", "\\\\")
- gotResult = eval(fn)
- if wantResult != gotResult:
- raise TestFailed, "%s should return: %s but returned: %s" \
- %(str(fn), str(wantResult), str(gotResult))
+ tester0(fn, wantResult)
class TestNtpath(unittest.TestCase):
@@ -31,12 +35,41 @@ class TestNtpath(unittest.TestCase):
('c:', '\\foo\\bar'))
tester('ntpath.splitdrive("c:/foo/bar")',
('c:', '/foo/bar'))
+ tester('ntpath.splitdrive("\\\\conky\\mountpoint\\foo\\bar")',
+ ('\\\\conky\\mountpoint', '\\foo\\bar'))
+ tester('ntpath.splitdrive("//conky/mountpoint/foo/bar")',
+ ('//conky/mountpoint', '/foo/bar'))
+ tester('ntpath.splitdrive("\\\\\\conky\\mountpoint\\foo\\bar")',
+ ('', '\\\\\\conky\\mountpoint\\foo\\bar'))
+ tester('ntpath.splitdrive("///conky/mountpoint/foo/bar")',
+ ('', '///conky/mountpoint/foo/bar'))
+ tester('ntpath.splitdrive("\\\\conky\\\\mountpoint\\foo\\bar")',
+ ('', '\\\\conky\\\\mountpoint\\foo\\bar'))
+ tester('ntpath.splitdrive("//conky//mountpoint/foo/bar")',
+ ('', '//conky//mountpoint/foo/bar'))
+ # Issue #19911: UNC part containing U+0130
+ self.assertEqual(ntpath.splitdrive(u'//conky/MOUNTPOİNT/foo/bar'),
+ (u'//conky/MOUNTPOİNT', '/foo/bar'))
def test_splitunc(self):
+ tester('ntpath.splitunc("c:\\foo\\bar")',
+ ('', 'c:\\foo\\bar'))
+ tester('ntpath.splitunc("c:/foo/bar")',
+ ('', 'c:/foo/bar'))
tester('ntpath.splitunc("\\\\conky\\mountpoint\\foo\\bar")',
('\\\\conky\\mountpoint', '\\foo\\bar'))
tester('ntpath.splitunc("//conky/mountpoint/foo/bar")',
('//conky/mountpoint', '/foo/bar'))
+ tester('ntpath.splitunc("\\\\\\conky\\mountpoint\\foo\\bar")',
+ ('', '\\\\\\conky\\mountpoint\\foo\\bar'))
+ tester('ntpath.splitunc("///conky/mountpoint/foo/bar")',
+ ('', '///conky/mountpoint/foo/bar'))
+ tester('ntpath.splitunc("\\\\conky\\\\mountpoint\\foo\\bar")',
+ ('', '\\\\conky\\\\mountpoint\\foo\\bar'))
+ tester('ntpath.splitunc("//conky//mountpoint/foo/bar")',
+ ('', '//conky//mountpoint/foo/bar'))
+ self.assertEqual(ntpath.splitunc(u'//conky/MOUNTPO\u0130NT/foo/bar'),
+ (u'//conky/MOUNTPO\u0130NT', u'/foo/bar'))
def test_split(self):
tester('ntpath.split("c:\\foo\\bar")', ('c:\\foo', 'bar'))
@@ -45,10 +78,10 @@ class TestNtpath(unittest.TestCase):
tester('ntpath.split("c:\\")', ('c:\\', ''))
tester('ntpath.split("\\\\conky\\mountpoint\\")',
- ('\\\\conky\\mountpoint', ''))
+ ('\\\\conky\\mountpoint\\', ''))
tester('ntpath.split("c:/")', ('c:/', ''))
- tester('ntpath.split("//conky/mountpoint/")', ('//conky/mountpoint', ''))
+ tester('ntpath.split("//conky/mountpoint/")', ('//conky/mountpoint/', ''))
def test_isabs(self):
tester('ntpath.isabs("c:\\")', 1)
@@ -71,10 +104,7 @@ class TestNtpath(unittest.TestCase):
tester('ntpath.join("/a")', '/a')
tester('ntpath.join("\\a")', '\\a')
tester('ntpath.join("a:")', 'a:')
- tester('ntpath.join("a:", "b")', 'a:b')
- tester('ntpath.join("a:", "/b")', 'a:/b')
tester('ntpath.join("a:", "\\b")', 'a:\\b')
- tester('ntpath.join("a", "/b")', '/b')
tester('ntpath.join("a", "\\b")', '\\b')
tester('ntpath.join("a", "b", "c")', 'a\\b\\c')
tester('ntpath.join("a\\", "b", "c")', 'a\\b\\c')
@@ -82,22 +112,46 @@ class TestNtpath(unittest.TestCase):
tester('ntpath.join("a", "b", "\\c")', '\\c')
tester('ntpath.join("d:\\", "\\pleep")', 'd:\\pleep')
tester('ntpath.join("d:\\", "a", "b")', 'd:\\a\\b')
- tester("ntpath.join('c:', '/a')", 'c:/a')
- tester("ntpath.join('c:/', '/a')", 'c:/a')
- tester("ntpath.join('c:/a', '/b')", '/b')
- tester("ntpath.join('c:', 'd:/')", 'd:/')
- tester("ntpath.join('c:/', 'd:/')", 'd:/')
- tester("ntpath.join('c:/', 'd:/a/b')", 'd:/a/b')
-
- tester("ntpath.join('')", '')
- tester("ntpath.join('', '', '', '', '')", '')
- tester("ntpath.join('a')", 'a')
+
tester("ntpath.join('', 'a')", 'a')
tester("ntpath.join('', '', '', '', 'a')", 'a')
tester("ntpath.join('a', '')", 'a\\')
tester("ntpath.join('a', '', '', '', '')", 'a\\')
tester("ntpath.join('a\\', '')", 'a\\')
tester("ntpath.join('a\\', '', '', '', '')", 'a\\')
+ tester("ntpath.join('a/', '')", 'a/')
+
+ tester("ntpath.join('a/b', 'x/y')", 'a/b\\x/y')
+ tester("ntpath.join('/a/b', 'x/y')", '/a/b\\x/y')
+ tester("ntpath.join('/a/b/', 'x/y')", '/a/b/x/y')
+ tester("ntpath.join('c:', 'x/y')", 'c:x/y')
+ tester("ntpath.join('c:a/b', 'x/y')", 'c:a/b\\x/y')
+ tester("ntpath.join('c:a/b/', 'x/y')", 'c:a/b/x/y')
+ tester("ntpath.join('c:/', 'x/y')", 'c:/x/y')
+ tester("ntpath.join('c:/a/b', 'x/y')", 'c:/a/b\\x/y')
+ tester("ntpath.join('c:/a/b/', 'x/y')", 'c:/a/b/x/y')
+ tester("ntpath.join('//computer/share', 'x/y')", '//computer/share\\x/y')
+ tester("ntpath.join('//computer/share/', 'x/y')", '//computer/share/x/y')
+ tester("ntpath.join('//computer/share/a/b', 'x/y')", '//computer/share/a/b\\x/y')
+
+ tester("ntpath.join('a/b', '/x/y')", '/x/y')
+ tester("ntpath.join('/a/b', '/x/y')", '/x/y')
+ tester("ntpath.join('c:', '/x/y')", 'c:/x/y')
+ tester("ntpath.join('c:a/b', '/x/y')", 'c:/x/y')
+ tester("ntpath.join('c:/', '/x/y')", 'c:/x/y')
+ tester("ntpath.join('c:/a/b', '/x/y')", 'c:/x/y')
+ tester("ntpath.join('//computer/share', '/x/y')", '//computer/share/x/y')
+ tester("ntpath.join('//computer/share/', '/x/y')", '//computer/share/x/y')
+ tester("ntpath.join('//computer/share/a', '/x/y')", '//computer/share/x/y')
+
+ tester("ntpath.join('c:', 'C:x/y')", 'C:x/y')
+ tester("ntpath.join('c:a/b', 'C:x/y')", 'C:a/b\\x/y')
+ tester("ntpath.join('c:/', 'C:x/y')", 'C:/x/y')
+ tester("ntpath.join('c:/a/b', 'C:x/y')", 'C:/a/b\\x/y')
+
+ for x in ('', 'a/b', '/a/b', 'c:', 'c:a/b', 'c:/', 'c:/a/b'):
+ for y in ('d:', 'd:x/y', 'd:/', 'd:/x/y'):
+ tester("ntpath.join(%r, %r)" % (x, y), y)
def test_normpath(self):
tester("ntpath.normpath('A//////././//.//B')", r'A\B')
@@ -138,7 +192,6 @@ class TestNtpath(unittest.TestCase):
tester('ntpath.expandvars("$[foo]bar")', "$[foo]bar")
tester('ntpath.expandvars("$bar bar")', "$bar bar")
tester('ntpath.expandvars("$?bar")', "$?bar")
- tester('ntpath.expandvars("${foo}bar")', "barbar")
tester('ntpath.expandvars("$foo}bar")', "bar}bar")
tester('ntpath.expandvars("${foo")', "${foo")
tester('ntpath.expandvars("${{foo}}")', "baz1}")
@@ -152,6 +205,65 @@ class TestNtpath(unittest.TestCase):
tester('ntpath.expandvars("%foo%%bar")', "bar%bar")
tester('ntpath.expandvars("\'%foo%\'%bar")', "\'%foo%\'%bar")
+ @unittest.skipUnless(test_support.FS_NONASCII, 'need test_support.FS_NONASCII')
+ def test_expandvars_nonascii(self):
+ encoding = sys.getfilesystemencoding()
+ def check(value, expected):
+ tester0("ntpath.expandvars(%r)" % value, expected)
+ tester0("ntpath.expandvars(%r)" % value.decode(encoding),
+ expected.decode(encoding))
+ with test_support.EnvironmentVarGuard() as env:
+ env.clear()
+ unonascii = test_support.FS_NONASCII
+ snonascii = unonascii.encode(encoding)
+ env['spam'] = snonascii
+ env[snonascii] = 'ham' + snonascii
+ check('$spam bar', '%s bar' % snonascii)
+ check('$%s bar' % snonascii, '$%s bar' % snonascii)
+ check('${spam}bar', '%sbar' % snonascii)
+ check('${%s}bar' % snonascii, 'ham%sbar' % snonascii)
+ check('$spam}bar', '%s}bar' % snonascii)
+ check('$%s}bar' % snonascii, '$%s}bar' % snonascii)
+ check('%spam% bar', '%s bar' % snonascii)
+ check('%{}% bar'.format(snonascii), 'ham%s bar' % snonascii)
+ check('%spam%bar', '%sbar' % snonascii)
+ check('%{}%bar'.format(snonascii), 'ham%sbar' % snonascii)
+
+ def test_expanduser(self):
+ tester('ntpath.expanduser("test")', 'test')
+
+ with test_support.EnvironmentVarGuard() as env:
+ env.clear()
+ tester('ntpath.expanduser("~test")', '~test')
+
+ env['HOMEPATH'] = 'eric\\idle'
+ env['HOMEDRIVE'] = 'C:\\'
+ tester('ntpath.expanduser("~test")', 'C:\\eric\\test')
+ tester('ntpath.expanduser("~")', 'C:\\eric\\idle')
+
+ del env['HOMEDRIVE']
+ tester('ntpath.expanduser("~test")', 'eric\\test')
+ tester('ntpath.expanduser("~")', 'eric\\idle')
+
+ env.clear()
+ env['USERPROFILE'] = 'C:\\eric\\idle'
+ tester('ntpath.expanduser("~test")', 'C:\\eric\\test')
+ tester('ntpath.expanduser("~")', 'C:\\eric\\idle')
+
+ env.clear()
+ env['HOME'] = 'C:\\idle\\eric'
+ tester('ntpath.expanduser("~test")', 'C:\\idle\\test')
+ tester('ntpath.expanduser("~")', 'C:\\idle\\eric')
+
+ tester('ntpath.expanduser("~test\\foo\\bar")',
+ 'C:\\idle\\test\\foo\\bar')
+ tester('ntpath.expanduser("~test/foo/bar")',
+ 'C:\\idle\\test/foo/bar')
+ tester('ntpath.expanduser("~\\foo\\bar")',
+ 'C:\\idle\\eric\\foo\\bar')
+ tester('ntpath.expanduser("~/foo/bar")',
+ 'C:\\idle\\eric/foo/bar')
+
def test_abspath(self):
# ntpath.abspath() can only be used on a system with the "nt" module
# (reasonably), so we protect this test with "import nt". This allows
diff --git a/Lib/test/test_old_mailbox.py b/Lib/test/test_old_mailbox.py
index e8dff50..14ba9c0 100644
--- a/Lib/test/test_old_mailbox.py
+++ b/Lib/test/test_old_mailbox.py
@@ -73,7 +73,9 @@ class MaildirTestCase(unittest.TestCase):
self.createMessage("cur")
self.mbox = mailbox.Maildir(test_support.TESTFN)
self.assertTrue(len(self.mbox) == 1)
- self.assertTrue(self.mbox.next() is not None)
+ msg = self.mbox.next()
+ self.assertTrue(msg is not None)
+ msg.fp.close()
self.assertTrue(self.mbox.next() is None)
self.assertTrue(self.mbox.next() is None)
@@ -81,7 +83,9 @@ class MaildirTestCase(unittest.TestCase):
self.createMessage("new")
self.mbox = mailbox.Maildir(test_support.TESTFN)
self.assertTrue(len(self.mbox) == 1)
- self.assertTrue(self.mbox.next() is not None)
+ msg = self.mbox.next()
+ self.assertTrue(msg is not None)
+ msg.fp.close()
self.assertTrue(self.mbox.next() is None)
self.assertTrue(self.mbox.next() is None)
@@ -90,8 +94,12 @@ class MaildirTestCase(unittest.TestCase):
self.createMessage("new")
self.mbox = mailbox.Maildir(test_support.TESTFN)
self.assertTrue(len(self.mbox) == 2)
- self.assertTrue(self.mbox.next() is not None)
- self.assertTrue(self.mbox.next() is not None)
+ msg = self.mbox.next()
+ self.assertTrue(msg is not None)
+ msg.fp.close()
+ msg = self.mbox.next()
+ self.assertTrue(msg is not None)
+ msg.fp.close()
self.assertTrue(self.mbox.next() is None)
self.assertTrue(self.mbox.next() is None)
diff --git a/Lib/test/test_openpty.py b/Lib/test/test_openpty.py
index 20c4fe2..4b34b3a 100644
--- a/Lib/test/test_openpty.py
+++ b/Lib/test/test_openpty.py
@@ -10,6 +10,8 @@ if not hasattr(os, "openpty"):
class OpenptyTest(unittest.TestCase):
def test(self):
master, slave = os.openpty()
+ self.addCleanup(os.close, master)
+ self.addCleanup(os.close, slave)
if not os.isatty(slave):
self.fail("Slave-end of pty is not a terminal.")
diff --git a/Lib/test/test_optparse.py b/Lib/test/test_optparse.py
index f86ea01..dc2ef0b 100644
--- a/Lib/test/test_optparse.py
+++ b/Lib/test/test_optparse.py
@@ -383,6 +383,7 @@ class TestOptionParser(BaseTest):
self.assertRaises(self.parser.remove_option, ('foo',), None,
ValueError, "no such option 'foo'")
+ @test_support.impl_detail('Relies on sys.getrefcount', cpython=True)
def test_refleak(self):
# If an OptionParser is carrying around a reference to a large
# object, various cycles can prevent it from being GC'd in
@@ -769,6 +770,13 @@ class TestStandard(BaseTest):
self.assertParseFail(["-test"],
"no such option: -e")
+ def test_add_option_accepts_unicode(self):
+ self.parser.add_option(u"-u", u"--unicode", action="store_true")
+ self.assertParseOK(["-u"],
+ {'a': None, 'boo': None, 'foo': None, 'unicode': True},
+ [])
+
+
class TestBool(BaseTest):
def setUp(self):
options = [make_option("-v",
@@ -1437,6 +1445,39 @@ Options:
-h, --help show this help message and exit
"""
+_expected_very_help_short_lines = """\
+Usage: bar.py [options]
+
+Options:
+ -a APPLE
+ throw
+ APPLEs at
+ basket
+ -b NUM, --boo=NUM
+ shout
+ "boo!" NUM
+ times (in
+ order to
+ frighten
+ away all
+ the evil
+ spirits
+ that cause
+ trouble and
+ mayhem)
+ --foo=FOO
+ store FOO
+ in the foo
+ list for
+ later
+ fooing
+ -h, --help
+ show this
+ help
+ message and
+ exit
+"""
+
class TestHelp(BaseTest):
def setUp(self):
self.parser = self.make_parser(80)
@@ -1498,6 +1539,8 @@ class TestHelp(BaseTest):
# we look at $COLUMNS.
self.parser = self.make_parser(60)
self.assertHelpEquals(_expected_help_short_lines)
+ self.parser = self.make_parser(0)
+ self.assertHelpEquals(_expected_very_help_short_lines)
def test_help_unicode(self):
self.parser = InterceptingOptionParser(usage=SUPPRESS_USAGE)
diff --git a/Lib/test/test_os.py b/Lib/test/test_os.py
index 1d673f6..023ebf4 100644
--- a/Lib/test/test_os.py
+++ b/Lib/test/test_os.py
@@ -10,8 +10,13 @@ import sys
import signal
import subprocess
import time
+try:
+ import resource
+except ImportError:
+ resource = None
from test import test_support
+from test.script_helper import assert_python_ok
import mmap
import uuid
@@ -78,9 +83,8 @@ class TemporaryFileTests(unittest.TestCase):
open(name, "w")
self.files.append(name)
+ @unittest.skipUnless(hasattr(os, 'tempnam'), 'test needs os.tempnam()')
def test_tempnam(self):
- if not hasattr(os, "tempnam"):
- return
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "tempnam", RuntimeWarning,
r"test_os$")
@@ -94,9 +98,8 @@ class TemporaryFileTests(unittest.TestCase):
self.assertTrue(os.path.basename(name)[:3] == "pfx")
self.check_tempfile(name)
+ @unittest.skipUnless(hasattr(os, 'tmpfile'), 'test needs os.tmpfile()')
def test_tmpfile(self):
- if not hasattr(os, "tmpfile"):
- return
# As with test_tmpnam() below, the Windows implementation of tmpfile()
# attempts to create a file in the root directory of the current drive.
# On Vista and Server 2008, this test will always fail for normal users
@@ -145,9 +148,8 @@ class TemporaryFileTests(unittest.TestCase):
fp.close()
self.assertTrue(s == "foobar")
+ @unittest.skipUnless(hasattr(os, 'tmpnam'), 'test needs os.tmpnam()')
def test_tmpnam(self):
- if not hasattr(os, "tmpnam"):
- return
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning,
r"test_os$")
@@ -188,10 +190,8 @@ class StatAttributeTests(unittest.TestCase):
os.unlink(self.fname)
os.rmdir(test_support.TESTFN)
+ @unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()')
def test_stat_attributes(self):
- if not hasattr(os, "stat"):
- return
-
import stat
result = os.stat(self.fname)
@@ -214,33 +214,33 @@ class StatAttributeTests(unittest.TestCase):
try:
result[200]
- self.fail("No exception thrown")
+ self.fail("No exception raised")
except IndexError:
pass
# Make sure that assignment fails
try:
result.st_mode = 1
- self.fail("No exception thrown")
+ self.fail("No exception raised")
except (AttributeError, TypeError):
pass
try:
result.st_rdev = 1
- self.fail("No exception thrown")
+ self.fail("No exception raised")
except (AttributeError, TypeError):
pass
try:
result.parrot = 1
- self.fail("No exception thrown")
+ self.fail("No exception raised")
except AttributeError:
pass
# Use the stat_result constructor with a too-short tuple.
try:
result2 = os.stat_result((10,))
- self.fail("No exception thrown")
+ self.fail("No exception raised")
except TypeError:
pass
@@ -251,16 +251,14 @@ class StatAttributeTests(unittest.TestCase):
pass
+ @unittest.skipUnless(hasattr(os, 'statvfs'), 'test needs os.statvfs()')
def test_statvfs_attributes(self):
- if not hasattr(os, "statvfs"):
- return
-
try:
result = os.statvfs(self.fname)
except OSError, e:
# On AtheOS, glibc always returns ENOSYS
if e.errno == errno.ENOSYS:
- return
+ self.skipTest('glibc always returns ENOSYS on AtheOS')
# Make sure direct access works
self.assertEqual(result.f_bfree, result[3])
@@ -274,20 +272,20 @@ class StatAttributeTests(unittest.TestCase):
# Make sure that assignment really fails
try:
result.f_bfree = 1
- self.fail("No exception thrown")
+ self.fail("No exception raised")
except TypeError:
pass
try:
result.parrot = 1
- self.fail("No exception thrown")
+ self.fail("No exception raised")
except AttributeError:
pass
# Use the constructor with a too-short tuple.
try:
result2 = os.statvfs_result((10,))
- self.fail("No exception thrown")
+ self.fail("No exception raised")
except TypeError:
pass
@@ -306,10 +304,10 @@ class StatAttributeTests(unittest.TestCase):
st2 = os.stat(test_support.TESTFN)
self.assertEqual(st2.st_mtime, int(st.st_mtime-delta))
- # Restrict test to Win32, since there is no guarantee other
+ # Restrict tests to Win32, since there is no guarantee other
# systems support centiseconds
- if sys.platform == 'win32':
- def get_file_system(path):
+ def get_file_system(path):
+ if sys.platform == 'win32':
root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
import ctypes
kernel32 = ctypes.windll.kernel32
@@ -317,25 +315,31 @@ class StatAttributeTests(unittest.TestCase):
if kernel32.GetVolumeInformationA(root, None, 0, None, None, None, buf, len(buf)):
return buf.value
- if get_file_system(test_support.TESTFN) == "NTFS":
- def test_1565150(self):
- t1 = 1159195039.25
- os.utime(self.fname, (t1, t1))
- self.assertEqual(os.stat(self.fname).st_mtime, t1)
-
- def test_large_time(self):
- t1 = 5000000000 # some day in 2128
- os.utime(self.fname, (t1, t1))
- self.assertEqual(os.stat(self.fname).st_mtime, t1)
-
- def test_1686475(self):
- # Verify that an open file can be stat'ed
- try:
- os.stat(r"c:\pagefile.sys")
- except WindowsError, e:
- if e.errno == 2: # file does not exist; cannot run test
- return
- self.fail("Could not stat pagefile.sys")
+ @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
+ @unittest.skipUnless(get_file_system(test_support.TESTFN) == "NTFS",
+ "requires NTFS")
+ def test_1565150(self):
+ t1 = 1159195039.25
+ os.utime(self.fname, (t1, t1))
+ self.assertEqual(os.stat(self.fname).st_mtime, t1)
+
+ @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
+ @unittest.skipUnless(get_file_system(test_support.TESTFN) == "NTFS",
+ "requires NTFS")
+ def test_large_time(self):
+ t1 = 5000000000 # some day in 2128
+ os.utime(self.fname, (t1, t1))
+ self.assertEqual(os.stat(self.fname).st_mtime, t1)
+
+ @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
+ def test_1686475(self):
+ # Verify that an open file can be stat'ed
+ try:
+ os.stat(r"c:\pagefile.sys")
+ except WindowsError, e:
+ if e.errno == 2: # file does not exist; cannot run test
+ self.skipTest(r'c:\pagefile.sys does not exist')
+ self.fail("Could not stat pagefile.sys")
from test import mapping_tests
@@ -563,9 +567,37 @@ class URandomTests (unittest.TestCase):
data2 = self.get_urandom_subprocess(16)
self.assertNotEqual(data1, data2)
+ @unittest.skipUnless(resource, "test requires the resource module")
+ def test_urandom_failure(self):
+ # Check urandom() failing when it is not able to open /dev/random.
+ # We spawn a new process to make the test more robust (if getrlimit()
+ # failed to restore the file descriptor limit after this, the whole
+ # test suite would crash; this actually happened on the OS X Tiger
+ # buildbot).
+ code = """if 1:
+ import errno
+ import os
+ import resource
+
+ soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
+ resource.setrlimit(resource.RLIMIT_NOFILE, (1, hard_limit))
+ try:
+ os.urandom(16)
+ except OSError as e:
+ assert e.errno == errno.EMFILE, e.errno
+ else:
+ raise AssertionError("OSError not raised")
+ """
+ assert_python_ok('-c', code)
+
+
+class ExecvpeTests(unittest.TestCase):
+
def test_execvpe_with_bad_arglist(self):
self.assertRaises(ValueError, os.execvpe, 'notepad', [], None)
+
+@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32ErrorTests(unittest.TestCase):
def test_rename(self):
self.assertRaises(WindowsError, os.rename, test_support.TESTFN, test_support.TESTFN+".bak")
@@ -612,121 +644,118 @@ class TestInvalidFD(unittest.TestCase):
self.fail("%r didn't raise a OSError with a bad file descriptor"
% f)
+ @unittest.skipUnless(hasattr(os, 'isatty'), 'test needs os.isatty()')
def test_isatty(self):
- if hasattr(os, "isatty"):
- self.assertEqual(os.isatty(test_support.make_bad_fd()), False)
+ self.assertEqual(os.isatty(test_support.make_bad_fd()), False)
+ @unittest.skipUnless(hasattr(os, 'closerange'), 'test needs os.closerange()')
def test_closerange(self):
- if hasattr(os, "closerange"):
- fd = test_support.make_bad_fd()
- # Make sure none of the descriptors we are about to close are
- # currently valid (issue 6542).
- for i in range(10):
- try: os.fstat(fd+i)
- except OSError:
- pass
- else:
- break
- if i < 2:
- raise unittest.SkipTest(
- "Unable to acquire a range of invalid file descriptors")
- self.assertEqual(os.closerange(fd, fd + i-1), None)
+ fd = test_support.make_bad_fd()
+ # Make sure none of the descriptors we are about to close are
+ # currently valid (issue 6542).
+ for i in range(10):
+ try: os.fstat(fd+i)
+ except OSError:
+ pass
+ else:
+ break
+ if i < 2:
+ raise unittest.SkipTest(
+ "Unable to acquire a range of invalid file descriptors")
+ self.assertEqual(os.closerange(fd, fd + i-1), None)
+ @unittest.skipUnless(hasattr(os, 'dup2'), 'test needs os.dup2()')
def test_dup2(self):
- if hasattr(os, "dup2"):
- self.check(os.dup2, 20)
+ self.check(os.dup2, 20)
+ @unittest.skipUnless(hasattr(os, 'fchmod'), 'test needs os.fchmod()')
def test_fchmod(self):
- if hasattr(os, "fchmod"):
- self.check(os.fchmod, 0)
+ self.check(os.fchmod, 0)
+ @unittest.skipUnless(hasattr(os, 'fchown'), 'test needs os.fchown()')
def test_fchown(self):
- if hasattr(os, "fchown"):
- self.check(os.fchown, -1, -1)
+ self.check(os.fchown, -1, -1)
+ @unittest.skipUnless(hasattr(os, 'fpathconf'), 'test needs os.fpathconf()')
def test_fpathconf(self):
- if hasattr(os, "fpathconf"):
- self.check(os.fpathconf, "PC_NAME_MAX")
+ self.check(os.fpathconf, "PC_NAME_MAX")
+ @unittest.skipUnless(hasattr(os, 'ftruncate'), 'test needs os.ftruncate()')
def test_ftruncate(self):
- if hasattr(os, "ftruncate"):
- self.check(os.ftruncate, 0)
+ self.check(os.ftruncate, 0)
+ @unittest.skipUnless(hasattr(os, 'lseek'), 'test needs os.lseek()')
def test_lseek(self):
- if hasattr(os, "lseek"):
- self.check(os.lseek, 0, 0)
+ self.check(os.lseek, 0, 0)
+ @unittest.skipUnless(hasattr(os, 'read'), 'test needs os.read()')
def test_read(self):
- if hasattr(os, "read"):
- self.check(os.read, 1)
+ self.check(os.read, 1)
+ @unittest.skipUnless(hasattr(os, 'tcsetpgrp'), 'test needs os.tcsetpgrp()')
def test_tcsetpgrpt(self):
- if hasattr(os, "tcsetpgrp"):
- self.check(os.tcsetpgrp, 0)
+ self.check(os.tcsetpgrp, 0)
+ @unittest.skipUnless(hasattr(os, 'write'), 'test needs os.write()')
def test_write(self):
- if hasattr(os, "write"):
- self.check(os.write, " ")
-
-if sys.platform != 'win32':
- class Win32ErrorTests(unittest.TestCase):
- pass
-
- class PosixUidGidTests(unittest.TestCase):
- if hasattr(os, 'setuid'):
- def test_setuid(self):
- if os.getuid() != 0:
- self.assertRaises(os.error, os.setuid, 0)
- self.assertRaises(OverflowError, os.setuid, 1<<32)
-
- if hasattr(os, 'setgid'):
- def test_setgid(self):
- if os.getuid() != 0:
- self.assertRaises(os.error, os.setgid, 0)
- self.assertRaises(OverflowError, os.setgid, 1<<32)
-
- if hasattr(os, 'seteuid'):
- def test_seteuid(self):
- if os.getuid() != 0:
- self.assertRaises(os.error, os.seteuid, 0)
- self.assertRaises(OverflowError, os.seteuid, 1<<32)
-
- if hasattr(os, 'setegid'):
- def test_setegid(self):
- if os.getuid() != 0:
- self.assertRaises(os.error, os.setegid, 0)
- self.assertRaises(OverflowError, os.setegid, 1<<32)
-
- if hasattr(os, 'setreuid'):
- def test_setreuid(self):
- if os.getuid() != 0:
- self.assertRaises(os.error, os.setreuid, 0, 0)
- self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
- self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)
-
- def test_setreuid_neg1(self):
- # Needs to accept -1. We run this in a subprocess to avoid
- # altering the test runner's process state (issue8045).
- subprocess.check_call([
- sys.executable, '-c',
- 'import os,sys;os.setreuid(-1,-1);sys.exit(0)'])
-
- if hasattr(os, 'setregid'):
- def test_setregid(self):
- if os.getuid() != 0:
- self.assertRaises(os.error, os.setregid, 0, 0)
- self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
- self.assertRaises(OverflowError, os.setregid, 0, 1<<32)
-
- def test_setregid_neg1(self):
- # Needs to accept -1. We run this in a subprocess to avoid
- # altering the test runner's process state (issue8045).
- subprocess.check_call([
- sys.executable, '-c',
- 'import os,sys;os.setregid(-1,-1);sys.exit(0)'])
-else:
- class PosixUidGidTests(unittest.TestCase):
- pass
+ self.check(os.write, " ")
+
+@unittest.skipIf(sys.platform == "win32", "Posix specific tests")
+class PosixUidGidTests(unittest.TestCase):
+ @unittest.skipUnless(hasattr(os, 'setuid'), 'test needs os.setuid()')
+ def test_setuid(self):
+ if os.getuid() != 0:
+ self.assertRaises(os.error, os.setuid, 0)
+ self.assertRaises(OverflowError, os.setuid, 1<<32)
+
+ @unittest.skipUnless(hasattr(os, 'setgid'), 'test needs os.setgid()')
+ def test_setgid(self):
+ if os.getuid() != 0:
+ self.assertRaises(os.error, os.setgid, 0)
+ self.assertRaises(OverflowError, os.setgid, 1<<32)
+
+ @unittest.skipUnless(hasattr(os, 'seteuid'), 'test needs os.seteuid()')
+ def test_seteuid(self):
+ if os.getuid() != 0:
+ self.assertRaises(os.error, os.seteuid, 0)
+ self.assertRaises(OverflowError, os.seteuid, 1<<32)
+
+ @unittest.skipUnless(hasattr(os, 'setegid'), 'test needs os.setegid()')
+ def test_setegid(self):
+ if os.getuid() != 0:
+ self.assertRaises(os.error, os.setegid, 0)
+ self.assertRaises(OverflowError, os.setegid, 1<<32)
+
+ @unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()')
+ def test_setreuid(self):
+ if os.getuid() != 0:
+ self.assertRaises(os.error, os.setreuid, 0, 0)
+ self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
+ self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)
+
+ @unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()')
+ def test_setreuid_neg1(self):
+ # Needs to accept -1. We run this in a subprocess to avoid
+ # altering the test runner's process state (issue8045).
+ subprocess.check_call([
+ sys.executable, '-c',
+ 'import os,sys;os.setreuid(-1,-1);sys.exit(0)'])
+
+ @unittest.skipUnless(hasattr(os, 'setregid'), 'test needs os.setregid()')
+ def test_setregid(self):
+ if os.getuid() != 0:
+ self.assertRaises(os.error, os.setregid, 0, 0)
+ self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
+ self.assertRaises(OverflowError, os.setregid, 0, 1<<32)
+
+ @unittest.skipUnless(hasattr(os, 'setregid'), 'test needs os.setregid()')
+ def test_setregid_neg1(self):
+ # Needs to accept -1. We run this in a subprocess to avoid
+ # altering the test runner's process state (issue8045).
+ subprocess.check_call([
+ sys.executable, '-c',
+ 'import os,sys;os.setregid(-1,-1);sys.exit(0)'])
+
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32KillTests(unittest.TestCase):
@@ -852,6 +881,7 @@ def test_main():
MakedirTests,
DevNullTests,
URandomTests,
+ ExecvpeTests,
Win32ErrorTests,
TestInvalidFD,
PosixUidGidTests,
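
Most of the test_os churn above replaces "if not hasattr(os, ...): return" guards with unittest.skipUnless decorators, so a missing API is reported as a skip with a reason rather than a silent pass, and the urandom failure case is pushed into a child interpreter via assert_python_ok. A minimal sketch of the decorator pattern, using os.statvfs purely as an example:

    import os
    import unittest

    class SkipUnlessExample(unittest.TestCase):
        @unittest.skipUnless(hasattr(os, 'statvfs'), 'test needs os.statvfs()')
        def test_statvfs_block_counts(self):
            result = os.statvfs(os.curdir)
            # Free blocks can never exceed total blocks on a sane filesystem.
            self.assertGreaterEqual(result.f_blocks, result.f_bfree)

    if __name__ == '__main__':
        unittest.main()
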
diff --git a/Lib/test/test_parser.py b/Lib/test/test_parser.py
index 33b91bd..65a762c 100644
--- a/Lib/test/test_parser.py
+++ b/Lib/test/test_parser.py
@@ -1,7 +1,9 @@
import parser
import unittest
import sys
-from test import test_support
+import struct
+from test import test_support as support
+from test.script_helper import assert_python_failure
#
# First, we test that we can generate trees from valid source fragments,
@@ -566,8 +568,19 @@ class CompileTestCase(unittest.TestCase):
st = parser.suite('a = u"\u1"')
self.assertRaises(SyntaxError, parser.compilest, st)
+ def test_issue_9011(self):
+ # Issue 9011: compilation of an unary minus expression changed
+ # the meaning of the ST, so that a second compilation produced
+ # incorrect results.
+ st = parser.expr('-3')
+ code1 = parser.compilest(st)
+ self.assertEqual(eval(code1), -3)
+ code2 = parser.compilest(st)
+ self.assertEqual(eval(code2), -3)
+
+
class ParserStackLimitTestCase(unittest.TestCase):
- """try to push the parser to/over it's limits.
+ """try to push the parser to/over its limits.
see http://bugs.python.org/issue1881 for a discussion
"""
def _nested_expression(self, level):
@@ -580,15 +593,63 @@ class ParserStackLimitTestCase(unittest.TestCase):
def test_trigger_memory_error(self):
e = self._nested_expression(100)
- print >>sys.stderr, "Expecting 's_push: parser stack overflow' in next line"
- self.assertRaises(MemoryError, parser.expr, e)
+ rc, out, err = assert_python_failure('-c', e)
+ # parsing the expression will result in an error message
+ # followed by a MemoryError (see #11963)
+ self.assertIn(b's_push: parser stack overflow', err)
+ self.assertIn(b'MemoryError', err)
+
+class STObjectTestCase(unittest.TestCase):
+ """Test operations on ST objects themselves"""
+
+ check_sizeof = support.check_sizeof
+
+ @support.cpython_only
+ def test_sizeof(self):
+ def XXXROUNDUP(n):
+ if n <= 1:
+ return n
+ if n <= 128:
+ return (n + 3) & ~3
+ return 1 << (n - 1).bit_length()
+
+ basesize = support.calcobjsize('Pii')
+ nodesize = struct.calcsize('hP3iP0h')
+ def sizeofchildren(node):
+ if node is None:
+ return 0
+ res = 0
+ hasstr = len(node) > 1 and isinstance(node[-1], str)
+ if hasstr:
+ res += len(node[-1]) + 1
+ children = node[1:-1] if hasstr else node[1:]
+ if children:
+ res += XXXROUNDUP(len(children)) * nodesize
+ for child in children:
+ res += sizeofchildren(child)
+ return res
+
+ def check_st_sizeof(st):
+ self.check_sizeof(st, basesize + nodesize +
+ sizeofchildren(st.totuple()))
+
+ check_st_sizeof(parser.expr('2 + 3'))
+ check_st_sizeof(parser.expr('2 + 3 + 4'))
+ check_st_sizeof(parser.suite('x = 2 + 3'))
+ check_st_sizeof(parser.suite(''))
+ check_st_sizeof(parser.suite('# -*- coding: utf-8 -*-'))
+ check_st_sizeof(parser.expr('[' + '2,' * 1000 + ']'))
+
+
+ # XXX tests for pickling and unpickling of ST objects should go here
def test_main():
- test_support.run_unittest(
+ support.run_unittest(
RoundtripLegalSyntaxTestCase,
IllegalSyntaxTestCase,
CompileTestCase,
ParserStackLimitTestCase,
+ STObjectTestCase,
)
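
The parser stack-overflow test now runs the deeply nested expression under a fresh interpreter via test.script_helper.assert_python_failure, so the expected "s_push: parser stack overflow" noise and the MemoryError stay confined to the child process. A rough stand-in for that helper using only the stdlib (the snippet passed to it here is illustrative):

    import subprocess
    import sys

    def run_python_snippet(code):
        # Run `code` under a fresh interpreter and return (rc, stdout, stderr),
        # so an expected crash cannot take the parent test run down with it.
        proc = subprocess.Popen([sys.executable, '-c', code],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        return proc.returncode, out, err

    rc, out, err = run_python_snippet('raise MemoryError')
    assert rc != 0 and b'MemoryError' in err
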
diff --git a/Lib/test/test_pdb.py b/Lib/test/test_pdb.py
index 5a9091f..559f756 100644
--- a/Lib/test/test_pdb.py
+++ b/Lib/test/test_pdb.py
@@ -6,12 +6,69 @@ import sys
import os
import unittest
import subprocess
+import textwrap
from test import test_support
# This little helper class is essential for testing pdb under doctest.
from test_doctest import _FakeInput
+class PdbTestCase(unittest.TestCase):
+
+ def run_pdb(self, script, commands):
+ """Run 'script' lines with pdb and the pdb 'commands'."""
+ filename = 'main.py'
+ with open(filename, 'w') as f:
+ f.write(textwrap.dedent(script))
+ self.addCleanup(test_support.unlink, filename)
+ cmd = [sys.executable, '-m', 'pdb', filename]
+ stdout = stderr = None
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ stdout, stderr = proc.communicate(commands)
+ proc.stdout.close()
+ proc.stdin.close()
+ return stdout, stderr
+
+ def test_issue13183(self):
+ script = """
+ from bar import bar
+
+ def foo():
+ bar()
+
+ def nope():
+ pass
+
+ def foobar():
+ foo()
+ nope()
+
+ foobar()
+ """
+ commands = """
+ from bar import bar
+ break bar
+ continue
+ step
+ step
+ quit
+ """
+ bar = """
+ def bar():
+ pass
+ """
+ with open('bar.py', 'w') as f:
+ f.write(textwrap.dedent(bar))
+ self.addCleanup(test_support.unlink, 'bar.py')
+ stdout, stderr = self.run_pdb(script, commands)
+ self.assertTrue(
+ any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
+ 'Fail to step into the caller after a return')
+
+
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
@@ -309,7 +366,9 @@ class ModuleInitTester(unittest.TestCase):
def test_main():
from test import test_pdb
test_support.run_doctest(test_pdb, verbosity=True)
- test_support.run_unittest(ModuleInitTester)
+ test_support.run_unittest(
+ PdbTestCase,
+ ModuleInitTester)
if __name__ == '__main__':
test_main()
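
The new PdbTestCase.run_pdb() drives the debugger as a child process and feeds commands over stdin, which is what makes the issue13183 regression test possible without a terminal. A trimmed-down version of the same mechanics; the file name, script body and breakpoint location are illustrative:

    import os
    import subprocess
    import sys
    import textwrap

    with open('pdb_target.py', 'w') as f:
        f.write(textwrap.dedent("""\
            def greet():
                print('hello')
            greet()
            """))
    proc = subprocess.Popen([sys.executable, '-m', 'pdb', 'pdb_target.py'],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    # Break on the body of greet(), run to the breakpoint, finish, then quit.
    out, _ = proc.communicate(b'break pdb_target.py:2\ncontinue\ncontinue\nquit\n')
    print(out)
    os.remove('pdb_target.py')
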
diff --git a/Lib/test/test_peepholer.py b/Lib/test/test_peepholer.py
index 3e8b7ae..7e05f49 100644
--- a/Lib/test/test_peepholer.py
+++ b/Lib/test/test_peepholer.py
@@ -138,21 +138,22 @@ class TestTranforms(unittest.TestCase):
self.assertIn('(1000)', asm)
def test_binary_subscr_on_unicode(self):
- # valid code get optimized
+ # unicode strings don't get optimized
asm = dis_single('u"foo"[0]')
- self.assertIn("(u'f')", asm)
- self.assertNotIn('BINARY_SUBSCR', asm)
+ self.assertNotIn("(u'f')", asm)
+ self.assertIn('BINARY_SUBSCR', asm)
asm = dis_single('u"\u0061\uffff"[1]')
- self.assertIn("(u'\\uffff')", asm)
- self.assertNotIn('BINARY_SUBSCR', asm)
+ self.assertNotIn("(u'\\uffff')", asm)
+ self.assertIn('BINARY_SUBSCR', asm)
- # invalid code doesn't get optimized
# out of range
asm = dis_single('u"fuu"[10]')
self.assertIn('BINARY_SUBSCR', asm)
# non-BMP char (see #5057)
asm = dis_single('u"\U00012345"[0]')
self.assertIn('BINARY_SUBSCR', asm)
+ asm = dis_single('u"\U00012345abcdef"[3]')
+ self.assertIn('BINARY_SUBSCR', asm)
def test_folding_of_unaryops_on_constants(self):
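
The peephole changes invert the unicode-subscript expectations: constant folding of u"..."[i] was removed (it misbehaved on non-BMP strings), so BINARY_SUBSCR must now survive in the bytecode. A hand-rolled version of the dis_single() helper those assertions rely on, for inspecting this interactively:

    import dis

    def dis_single(expr):
        # Compile one expression and print its bytecode, so you can see
        # whether a constant subscription was folded away (no BINARY_SUBSCR)
        # or kept.
        dis.dis(compile(expr, '<peephole-example>', 'eval'))

    dis_single('"abc"[0]')    # byte-string subscript
    dis_single('u"abc"[0]')   # unicode subscript; kept unfolded per the patch
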
diff --git a/Lib/test/test_pep263.py b/Lib/test/test_pep263.py
index 9286467..a3abc3c 100644
--- a/Lib/test/test_pep263.py
+++ b/Lib/test/test_pep263.py
@@ -41,6 +41,29 @@ class PEP263Test(unittest.TestCase):
# two bytes in common with the UTF-8 BOM
self.assertRaises(SyntaxError, eval, '\xef\xbb\x20')
+ def test_error_message(self):
+ compile('# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec')
+ compile('\xef\xbb\xbf\n', 'dummy', 'exec')
+ compile('\xef\xbb\xbf# -*- coding: utf-8 -*-\n', 'dummy', 'exec')
+ with self.assertRaisesRegexp(SyntaxError, 'fake'):
+ compile('# -*- coding: fake -*-\n', 'dummy', 'exec')
+ with self.assertRaisesRegexp(SyntaxError, 'iso-8859-15'):
+ compile('\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n',
+ 'dummy', 'exec')
+ with self.assertRaisesRegexp(SyntaxError, 'BOM'):
+ compile('\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n',
+ 'dummy', 'exec')
+ with self.assertRaisesRegexp(SyntaxError, 'fake'):
+ compile('\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec')
+ with self.assertRaisesRegexp(SyntaxError, 'BOM'):
+ compile('\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec')
+
+ def test_non_unicode_codec(self):
+ with self.assertRaisesRegexp(SyntaxError,
+ 'codec did not return a unicode'):
+ from test import bad_coding3
+
+
def test_main():
test_support.run_unittest(PEP263Test)
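
test_error_message feeds byte strings carrying a UTF-8 BOM, a coding cookie, or both to compile() and checks which encoding name the resulting SyntaxError mentions. The same probe by hand; this assumes CPython 2.7 byte-string compile() semantics, so the exact message differs on other versions:

    source = '\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\nx = 1\n'
    try:
        compile(source, '<pep263-example>', 'exec')
    except SyntaxError as exc:
        # On 2.7 the message points at the BOM vs. iso-8859-15 conflict.
        print(exc)
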
diff --git a/Lib/test/test_pickle.py b/Lib/test/test_pickle.py
index 6c83811..c312649 100644
--- a/Lib/test/test_pickle.py
+++ b/Lib/test/test_pickle.py
@@ -3,10 +3,11 @@ from cStringIO import StringIO
from test import test_support
-from test.pickletester import AbstractPickleTests
-from test.pickletester import AbstractPickleModuleTests
-from test.pickletester import AbstractPersistentPicklerTests
-from test.pickletester import AbstractPicklerUnpicklerObjectTests
+from test.pickletester import (AbstractPickleTests,
+ AbstractPickleModuleTests,
+ AbstractPersistentPicklerTests,
+ AbstractPicklerUnpicklerObjectTests,
+ BigmemPickleTests)
class PickleTests(AbstractPickleTests, AbstractPickleModuleTests):
@@ -66,6 +67,16 @@ class PicklerUnpicklerObjectTests(AbstractPicklerUnpicklerObjectTests):
pickler_class = pickle.Pickler
unpickler_class = pickle.Unpickler
+class PickleBigmemPickleTests(BigmemPickleTests):
+
+ def dumps(self, arg, proto=0, fast=0):
+ # Ignore fast
+ return pickle.dumps(arg, proto)
+
+ def loads(self, buf):
+ # Ignore fast
+ return pickle.loads(buf)
+
def test_main():
test_support.run_unittest(
@@ -73,6 +84,7 @@ def test_main():
PicklerTests,
PersPicklerTests,
PicklerUnpicklerObjectTests,
+ PickleBigmemPickleTests,
)
test_support.run_doctest(pickle)
diff --git a/Lib/test/test_platform.py b/Lib/test/test_platform.py
index 941d428..331995f 100644
--- a/Lib/test/test_platform.py
+++ b/Lib/test/test_platform.py
@@ -84,15 +84,28 @@ class PlatformTest(unittest.TestCase):
("CPython", "2.6.1", "tags/r261", "67515",
('r261:67515', 'Dec 6 2008 15:26:00'),
'GCC 4.0.1 (Apple Computer, Inc. build 5370)'),
+
("IronPython 2.0 (2.0.0.0) on .NET 2.0.50727.3053", None, "cli")
:
("IronPython", "2.0.0", "", "", ("", ""),
".NET 2.0.50727.3053"),
+
+ ("2.6.1 (IronPython 2.6.1 (2.6.10920.0) on .NET 2.0.50727.1433)", None, "cli")
+ :
+ ("IronPython", "2.6.1", "", "", ("", ""),
+ ".NET 2.0.50727.1433"),
+
+ ("2.7.4 (IronPython 2.7.4 (2.7.0.40) on Mono 4.0.30319.1 (32-bit))", None, "cli")
+ :
+ ("IronPython", "2.7.4", "", "", ("", ""),
+ "Mono 4.0.30319.1 (32-bit)"),
+
("2.5 (trunk:6107, Mar 26 2009, 13:02:18) \n[Java HotSpot(TM) Client VM (\"Apple Computer, Inc.\")]",
('Jython', 'trunk', '6107'), "java1.5.0_16")
:
("Jython", "2.5.0", "trunk", "6107",
('trunk:6107', 'Mar 26 2009'), "java1.5.0_16"),
+
("2.5.2 (63378, Mar 26 2009, 18:03:29)\n[PyPy 1.0.0]",
('PyPy', 'trunk', '63378'), self.save_platform)
:
diff --git a/Lib/test/test_plistlib.py b/Lib/test/test_plistlib.py
index e6e0f86..7859ad0 100644
--- a/Lib/test/test_plistlib.py
+++ b/Lib/test/test_plistlib.py
@@ -135,6 +135,18 @@ class TestPlistlib(unittest.TestCase):
data2 = plistlib.writePlistToString(pl2)
self.assertEqual(data, data2)
+ def test_indentation_array(self):
+ data = [[[[[[[[{'test': plistlib.Data(b'aaaaaa')}]]]]]]]]
+ self.assertEqual(plistlib.readPlistFromString(plistlib.writePlistToString(data)), data)
+
+ def test_indentation_dict(self):
+ data = {'1': {'2': {'3': {'4': {'5': {'6': {'7': {'8': {'9': plistlib.Data(b'aaaaaa')}}}}}}}}}
+ self.assertEqual(plistlib.readPlistFromString(plistlib.writePlistToString(data)), data)
+
+ def test_indentation_dict_mix(self):
+ data = {'1': {'2': [{'3': [[[[[{'test': plistlib.Data(b'aaaaaa')}]]]]]}]}}
+ self.assertEqual(plistlib.readPlistFromString(plistlib.writePlistToString(data)), data)
+
def test_appleformatting(self):
pl = plistlib.readPlistFromString(TESTDATA)
data = plistlib.writePlistToString(pl)
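
The three new plistlib tests are round-trips of deeply nested containers through the serializer, guarding against the indentation bug they were written for. The same check in miniature; writePlistToString/readPlistFromString are the Python 2 spellings of what later became dumps/loads:

    import plistlib

    data = {'outer': [{'inner': [1, 2, 3]}, {'more': {'depth': ['x']}}]}
    blob = plistlib.writePlistToString(data)
    assert plistlib.readPlistFromString(blob) == data
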
diff --git a/Lib/test/test_poll.py b/Lib/test/test_poll.py
index d33af91..1e195ed 100644
--- a/Lib/test/test_poll.py
+++ b/Lib/test/test_poll.py
@@ -1,7 +1,15 @@
# Test case for the os.poll() function
-import os, select, random, unittest
-from test.test_support import TESTFN, run_unittest
+import os
+import random
+import select
+try:
+ import threading
+except ImportError:
+ threading = None
+import time
+import unittest
+from test.test_support import TESTFN, run_unittest, reap_threads, cpython_only
try:
select.poll
@@ -150,6 +158,54 @@ class PollTests(unittest.TestCase):
if x != 5:
self.fail('Overflow must have occurred')
+ # Issues #15989, #17919
+ self.assertRaises(OverflowError, pollster.register, 0, -1)
+ self.assertRaises(OverflowError, pollster.register, 0, 1 << 64)
+ self.assertRaises(OverflowError, pollster.modify, 1, -1)
+ self.assertRaises(OverflowError, pollster.modify, 1, 1 << 64)
+
+ @cpython_only
+ def test_poll_c_limits(self):
+ from _testcapi import USHRT_MAX, INT_MAX, UINT_MAX
+ pollster = select.poll()
+ pollster.register(1)
+
+ # Issues #15989, #17919
+ self.assertRaises(OverflowError, pollster.register, 0, USHRT_MAX + 1)
+ self.assertRaises(OverflowError, pollster.modify, 1, USHRT_MAX + 1)
+ self.assertRaises(OverflowError, pollster.poll, INT_MAX + 1)
+ self.assertRaises(OverflowError, pollster.poll, UINT_MAX + 1)
+
+ @unittest.skipUnless(threading, 'Threading required for this test.')
+ @reap_threads
+ def test_threaded_poll(self):
+ r, w = os.pipe()
+ self.addCleanup(os.close, r)
+ self.addCleanup(os.close, w)
+ rfds = []
+ for i in range(10):
+ fd = os.dup(r)
+ self.addCleanup(os.close, fd)
+ rfds.append(fd)
+ pollster = select.poll()
+ for fd in rfds:
+ pollster.register(fd, select.POLLIN)
+
+ t = threading.Thread(target=pollster.poll)
+ t.start()
+ try:
+ time.sleep(0.5)
+ # trigger ufds array reallocation
+ for fd in rfds:
+ pollster.unregister(fd)
+ pollster.register(w, select.POLLOUT)
+ self.assertRaises(RuntimeError, pollster.poll)
+ finally:
+ # and make the call to poll() from the thread return
+ os.write(w, b'spam')
+ t.join()
+
+
def test_main():
run_unittest(PollTests)
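
The new poll tests add overflow checks on the event mask and a threaded scenario that reallocates the ufds array mid-poll. Both build on the basic poll-object workflow, sketched here for a single pipe (Unix-only, where select.poll() exists):

    import os
    import select

    r, w = os.pipe()
    try:
        pollster = select.poll()
        pollster.register(r, select.POLLIN)
        os.write(w, b'x')
        events = pollster.poll(1000)   # timeout in milliseconds
        # The read end must be reported ready once a byte is in the pipe.
        assert events and events[0][0] == r
    finally:
        os.close(r)
        os.close(w)
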
diff --git a/Lib/test/test_popen.py b/Lib/test/test_popen.py
index 92b5e92..82d4621 100644
--- a/Lib/test/test_popen.py
+++ b/Lib/test/test_popen.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""Basic tests for os.popen()
Particularly useful for platforms that fake popen.
diff --git a/Lib/test/test_popen2.py b/Lib/test/test_popen2.py
index ea41075..4a745e2 100644
--- a/Lib/test/test_popen2.py
+++ b/Lib/test/test_popen2.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""Test script for popen2.py"""
import warnings
diff --git a/Lib/test/test_poplib.py b/Lib/test/test_poplib.py
index 9505c22..af48fdd 100644
--- a/Lib/test/test_poplib.py
+++ b/Lib/test/test_poplib.py
@@ -11,7 +11,7 @@ import os
import time
import errno
-from unittest import TestCase
+from unittest import TestCase, skipUnless
from test import test_support
from test.test_support import HOST
threading = test_support.import_module('threading')
@@ -263,17 +263,20 @@ if hasattr(poplib, 'POP3_SSL'):
else:
DummyPOP3Handler.handle_read(self)
- class TestPOP3_SSLClass(TestPOP3Class):
- # repeat previous tests by using poplib.POP3_SSL
+requires_ssl = skipUnless(SUPPORTS_SSL, 'SSL not supported')
- def setUp(self):
- self.server = DummyPOP3Server((HOST, 0))
- self.server.handler = DummyPOP3_SSLHandler
- self.server.start()
- self.client = poplib.POP3_SSL(self.server.host, self.server.port)
+@requires_ssl
+class TestPOP3_SSLClass(TestPOP3Class):
+ # repeat previous tests by using poplib.POP3_SSL
- def test__all__(self):
- self.assertIn('POP3_SSL', poplib.__all__)
+ def setUp(self):
+ self.server = DummyPOP3Server((HOST, 0))
+ self.server.handler = DummyPOP3_SSLHandler
+ self.server.start()
+ self.client = poplib.POP3_SSL(self.server.host, self.server.port)
+
+ def test__all__(self):
+ self.assertIn('POP3_SSL', poplib.__all__)
class TestTimeouts(TestCase):
@@ -305,7 +308,7 @@ class TestTimeouts(TestCase):
serv.close()
def testTimeoutDefault(self):
- self.assertTrue(socket.getdefaulttimeout() is None)
+ self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port)
@@ -315,13 +318,13 @@ class TestTimeouts(TestCase):
pop.sock.close()
def testTimeoutNone(self):
- self.assertTrue(socket.getdefaulttimeout() is None)
+ self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
- self.assertTrue(pop.sock.gettimeout() is None)
+ self.assertIsNone(pop.sock.gettimeout())
pop.sock.close()
def testTimeoutValue(self):
@@ -331,9 +334,8 @@ class TestTimeouts(TestCase):
def test_main():
- tests = [TestPOP3Class, TestTimeouts]
- if SUPPORTS_SSL:
- tests.append(TestPOP3_SSLClass)
+ tests = [TestPOP3Class, TestTimeouts,
+ TestPOP3_SSLClass]
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
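
The poplib change builds a reusable requires_ssl decorator out of skipUnless and applies it to the whole SSL test class, instead of conditionally appending the class in test_main(). The pattern in isolation, with a made-up feature flag standing in for SUPPORTS_SSL:

    import unittest

    HAVE_FEATURE = False   # stand-in for a probe such as SUPPORTS_SSL

    requires_feature = unittest.skipUnless(HAVE_FEATURE,
                                           'feature not available')

    @requires_feature
    class FeatureTests(unittest.TestCase):
        # The whole class is reported as skipped, with a reason, when the
        # feature probe fails.
        def test_uses_the_feature(self):
            self.assertTrue(HAVE_FEATURE)

    if __name__ == '__main__':
        unittest.main()
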
diff --git a/Lib/test/test_posix.py b/Lib/test/test_posix.py
index a6a7306..df122f7 100644
--- a/Lib/test/test_posix.py
+++ b/Lib/test/test_posix.py
@@ -9,6 +9,7 @@ import errno
import sys
import time
import os
+import platform
import pwd
import shutil
import stat
@@ -52,47 +53,55 @@ class PosixTester(unittest.TestCase):
posix_func()
self.assertRaises(TypeError, posix_func, 1)
- if hasattr(posix, 'getresuid'):
- def test_getresuid(self):
- user_ids = posix.getresuid()
- self.assertEqual(len(user_ids), 3)
- for val in user_ids:
- self.assertGreaterEqual(val, 0)
-
- if hasattr(posix, 'getresgid'):
- def test_getresgid(self):
- group_ids = posix.getresgid()
- self.assertEqual(len(group_ids), 3)
- for val in group_ids:
- self.assertGreaterEqual(val, 0)
-
- if hasattr(posix, 'setresuid'):
- def test_setresuid(self):
- current_user_ids = posix.getresuid()
- self.assertIsNone(posix.setresuid(*current_user_ids))
- # -1 means don't change that value.
- self.assertIsNone(posix.setresuid(-1, -1, -1))
-
- def test_setresuid_exception(self):
- # Don't do this test if someone is silly enough to run us as root.
- current_user_ids = posix.getresuid()
- if 0 not in current_user_ids:
- new_user_ids = (current_user_ids[0]+1, -1, -1)
- self.assertRaises(OSError, posix.setresuid, *new_user_ids)
-
- if hasattr(posix, 'setresgid'):
- def test_setresgid(self):
- current_group_ids = posix.getresgid()
- self.assertIsNone(posix.setresgid(*current_group_ids))
- # -1 means don't change that value.
- self.assertIsNone(posix.setresgid(-1, -1, -1))
-
- def test_setresgid_exception(self):
- # Don't do this test if someone is silly enough to run us as root.
- current_group_ids = posix.getresgid()
- if 0 not in current_group_ids:
- new_group_ids = (current_group_ids[0]+1, -1, -1)
- self.assertRaises(OSError, posix.setresgid, *new_group_ids)
+ @unittest.skipUnless(hasattr(posix, 'getresuid'),
+ 'test needs posix.getresuid()')
+ def test_getresuid(self):
+ user_ids = posix.getresuid()
+ self.assertEqual(len(user_ids), 3)
+ for val in user_ids:
+ self.assertGreaterEqual(val, 0)
+
+ @unittest.skipUnless(hasattr(posix, 'getresgid'),
+ 'test needs posix.getresgid()')
+ def test_getresgid(self):
+ group_ids = posix.getresgid()
+ self.assertEqual(len(group_ids), 3)
+ for val in group_ids:
+ self.assertGreaterEqual(val, 0)
+
+ @unittest.skipUnless(hasattr(posix, 'setresuid'),
+ 'test needs posix.setresuid()')
+ def test_setresuid(self):
+ current_user_ids = posix.getresuid()
+ self.assertIsNone(posix.setresuid(*current_user_ids))
+ # -1 means don't change that value.
+ self.assertIsNone(posix.setresuid(-1, -1, -1))
+
+ @unittest.skipUnless(hasattr(posix, 'setresuid'),
+ 'test needs posix.setresuid()')
+ def test_setresuid_exception(self):
+ # Don't do this test if someone is silly enough to run us as root.
+ current_user_ids = posix.getresuid()
+ if 0 not in current_user_ids:
+ new_user_ids = (current_user_ids[0]+1, -1, -1)
+ self.assertRaises(OSError, posix.setresuid, *new_user_ids)
+
+ @unittest.skipUnless(hasattr(posix, 'setresgid'),
+ 'test needs posix.setresgid()')
+ def test_setresgid(self):
+ current_group_ids = posix.getresgid()
+ self.assertIsNone(posix.setresgid(*current_group_ids))
+ # -1 means don't change that value.
+ self.assertIsNone(posix.setresgid(-1, -1, -1))
+
+ @unittest.skipUnless(hasattr(posix, 'setresgid'),
+ 'test needs posix.setresgid()')
+ def test_setresgid_exception(self):
+ # Don't do this test if someone is silly enough to run us as root.
+ current_group_ids = posix.getresgid()
+ if 0 not in current_group_ids:
+ new_group_ids = (current_group_ids[0]+1, -1, -1)
+ self.assertRaises(OSError, posix.setresgid, *new_group_ids)
@unittest.skipUnless(hasattr(posix, 'initgroups'),
"test needs os.initgroups()")
@@ -107,7 +116,11 @@ class PosixTester(unittest.TestCase):
# If a non-privileged user invokes it, it should fail with OSError
# EPERM.
if os.getuid() != 0:
- name = pwd.getpwuid(posix.getuid()).pw_name
+ try:
+ name = pwd.getpwuid(posix.getuid()).pw_name
+ except KeyError:
+ # the current UID may not have a pwd entry
+ raise unittest.SkipTest("need a pwd entry")
try:
posix.initgroups(name, 13)
except OSError as e:
@@ -115,128 +128,185 @@ class PosixTester(unittest.TestCase):
else:
self.fail("Expected OSError to be raised by initgroups")
+ @unittest.skipUnless(hasattr(posix, 'statvfs'),
+ 'test needs posix.statvfs()')
def test_statvfs(self):
- if hasattr(posix, 'statvfs'):
- self.assertTrue(posix.statvfs(os.curdir))
+ self.assertTrue(posix.statvfs(os.curdir))
+ @unittest.skipUnless(hasattr(posix, 'fstatvfs'),
+ 'test needs posix.fstatvfs()')
def test_fstatvfs(self):
- if hasattr(posix, 'fstatvfs'):
- fp = open(test_support.TESTFN)
- try:
- self.assertTrue(posix.fstatvfs(fp.fileno()))
- finally:
- fp.close()
+ fp = open(test_support.TESTFN)
+ try:
+ self.assertTrue(posix.fstatvfs(fp.fileno()))
+ finally:
+ fp.close()
+ @unittest.skipUnless(hasattr(posix, 'ftruncate'),
+ 'test needs posix.ftruncate()')
def test_ftruncate(self):
- if hasattr(posix, 'ftruncate'):
- fp = open(test_support.TESTFN, 'w+')
- try:
- # we need to have some data to truncate
- fp.write('test')
- fp.flush()
- posix.ftruncate(fp.fileno(), 0)
- finally:
- fp.close()
+ fp = open(test_support.TESTFN, 'w+')
+ try:
+ # we need to have some data to truncate
+ fp.write('test')
+ fp.flush()
+ posix.ftruncate(fp.fileno(), 0)
+ finally:
+ fp.close()
+ @unittest.skipUnless(hasattr(posix, 'dup'),
+ 'test needs posix.dup()')
def test_dup(self):
- if hasattr(posix, 'dup'):
- fp = open(test_support.TESTFN)
- try:
- fd = posix.dup(fp.fileno())
- self.assertIsInstance(fd, int)
- os.close(fd)
- finally:
- fp.close()
+ fp = open(test_support.TESTFN)
+ try:
+ fd = posix.dup(fp.fileno())
+ self.assertIsInstance(fd, int)
+ os.close(fd)
+ finally:
+ fp.close()
+ @unittest.skipUnless(hasattr(posix, 'confstr'),
+ 'test needs posix.confstr()')
def test_confstr(self):
- if hasattr(posix, 'confstr'):
- self.assertRaises(ValueError, posix.confstr, "CS_garbage")
- self.assertEqual(len(posix.confstr("CS_PATH")) > 0, True)
+ self.assertRaises(ValueError, posix.confstr, "CS_garbage")
+ self.assertEqual(len(posix.confstr("CS_PATH")) > 0, True)
+ @unittest.skipUnless(hasattr(posix, 'dup2'),
+ 'test needs posix.dup2()')
def test_dup2(self):
- if hasattr(posix, 'dup2'):
- fp1 = open(test_support.TESTFN)
- fp2 = open(test_support.TESTFN)
- try:
- posix.dup2(fp1.fileno(), fp2.fileno())
- finally:
- fp1.close()
- fp2.close()
+ fp1 = open(test_support.TESTFN)
+ fp2 = open(test_support.TESTFN)
+ try:
+ posix.dup2(fp1.fileno(), fp2.fileno())
+ finally:
+ fp1.close()
+ fp2.close()
def fdopen_helper(self, *args):
fd = os.open(test_support.TESTFN, os.O_RDONLY)
fp2 = posix.fdopen(fd, *args)
fp2.close()
+ @unittest.skipUnless(hasattr(posix, 'fdopen'),
+ 'test needs posix.fdopen()')
def test_fdopen(self):
- if hasattr(posix, 'fdopen'):
- self.fdopen_helper()
- self.fdopen_helper('r')
- self.fdopen_helper('r', 100)
+ self.fdopen_helper()
+ self.fdopen_helper('r')
+ self.fdopen_helper('r', 100)
+
+ @unittest.skipUnless(hasattr(posix, 'fdopen') and
+ not sys.platform.startswith("sunos"),
+ 'test needs posix.fdopen()')
+ def test_fdopen_keeps_fd_open_on_errors(self):
+ fd = os.open(test_support.TESTFN, os.O_RDONLY)
+ self.assertRaises(OSError, posix.fdopen, fd, 'w')
+ os.close(fd) # fd should not be closed.
+ @unittest.skipUnless(hasattr(posix, 'O_EXLOCK'),
+ 'test needs posix.O_EXLOCK')
def test_osexlock(self):
- if hasattr(posix, "O_EXLOCK"):
+ fd = os.open(test_support.TESTFN,
+ os.O_WRONLY|os.O_EXLOCK|os.O_CREAT)
+ self.assertRaises(OSError, os.open, test_support.TESTFN,
+ os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
+ os.close(fd)
+
+ if hasattr(posix, "O_SHLOCK"):
fd = os.open(test_support.TESTFN,
- os.O_WRONLY|os.O_EXLOCK|os.O_CREAT)
+ os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, test_support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
- if hasattr(posix, "O_SHLOCK"):
- fd = os.open(test_support.TESTFN,
- os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
- self.assertRaises(OSError, os.open, test_support.TESTFN,
- os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
- os.close(fd)
-
+ @unittest.skipUnless(hasattr(posix, 'O_SHLOCK'),
+ 'test needs posix.O_SHLOCK')
def test_osshlock(self):
- if hasattr(posix, "O_SHLOCK"):
- fd1 = os.open(test_support.TESTFN,
+ fd1 = os.open(test_support.TESTFN,
+ os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
+ fd2 = os.open(test_support.TESTFN,
+ os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
+ os.close(fd2)
+ os.close(fd1)
+
+ if hasattr(posix, "O_EXLOCK"):
+ fd = os.open(test_support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
- fd2 = os.open(test_support.TESTFN,
- os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
- os.close(fd2)
- os.close(fd1)
-
- if hasattr(posix, "O_EXLOCK"):
- fd = os.open(test_support.TESTFN,
- os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
- self.assertRaises(OSError, os.open, test_support.TESTFN,
- os.O_RDONLY|os.O_EXLOCK|os.O_NONBLOCK)
- os.close(fd)
+ self.assertRaises(OSError, os.open, test_support.TESTFN,
+ os.O_RDONLY|os.O_EXLOCK|os.O_NONBLOCK)
+ os.close(fd)
+ @unittest.skipUnless(hasattr(posix, 'fstat'),
+ 'test needs posix.fstat()')
def test_fstat(self):
- if hasattr(posix, 'fstat'):
- fp = open(test_support.TESTFN)
- try:
- self.assertTrue(posix.fstat(fp.fileno()))
- finally:
- fp.close()
+ fp = open(test_support.TESTFN)
+ try:
+ self.assertTrue(posix.fstat(fp.fileno()))
+ finally:
+ fp.close()
+ @unittest.skipUnless(hasattr(posix, 'stat'),
+ 'test needs posix.stat()')
def test_stat(self):
- if hasattr(posix, 'stat'):
- self.assertTrue(posix.stat(test_support.TESTFN))
+ self.assertTrue(posix.stat(test_support.TESTFN))
- def _test_all_chown_common(self, chown_func, first_param):
+ def _test_all_chown_common(self, chown_func, first_param, stat_func):
"""Common code for chown, fchown and lchown tests."""
- if os.getuid() == 0:
- try:
- # Many linux distros have a nfsnobody user as MAX_UID-2
- # that makes a good test case for signedness issues.
- # http://bugs.python.org/issue1747858
- # This part of the test only runs when run as root.
- # Only scary people run their tests as root.
- ent = pwd.getpwnam('nfsnobody')
- chown_func(first_param, ent.pw_uid, ent.pw_gid)
- except KeyError:
- pass
+ def check_stat(uid, gid):
+ if stat_func is not None:
+ stat = stat_func(first_param)
+ self.assertEqual(stat.st_uid, uid)
+ self.assertEqual(stat.st_gid, gid)
+ uid = os.getuid()
+ gid = os.getgid()
+ # test a successful chown call
+ chown_func(first_param, uid, gid)
+ check_stat(uid, gid)
+ chown_func(first_param, -1, gid)
+ check_stat(uid, gid)
+ chown_func(first_param, uid, -1)
+ check_stat(uid, gid)
+
+ if uid == 0:
+ # Try an amusingly large uid/gid to make sure we handle
+ # large unsigned values. (chown lets you use any
+ # uid/gid you like, even if they aren't defined.)
+ #
+ # This problem keeps coming up:
+ # http://bugs.python.org/issue1747858
+ # http://bugs.python.org/issue4591
+ # http://bugs.python.org/issue15301
+ # Hopefully the fix in 4591 fixes it for good!
+ #
+ # This part of the test only runs when run as root.
+ # Only scary people run their tests as root.
+
+ big_value = 2**31
+ chown_func(first_param, big_value, big_value)
+ check_stat(big_value, big_value)
+ chown_func(first_param, -1, -1)
+ check_stat(big_value, big_value)
+ chown_func(first_param, uid, gid)
+ check_stat(uid, gid)
+ elif platform.system() in ('HP-UX', 'SunOS'):
+ # HP-UX and Solaris can allow a non-root user to chown() to root
+ # (issue #5113)
+ raise unittest.SkipTest("Skipping because of non-standard chown() "
+ "behavior")
else:
# non-root cannot chown to root, raises OSError
- self.assertRaises(OSError, chown_func,
- first_param, 0, 0)
-
- # test a successful chown call
- chown_func(first_param, os.getuid(), os.getgid())
+ self.assertRaises(OSError, chown_func, first_param, 0, 0)
+ check_stat(uid, gid)
+ self.assertRaises(OSError, chown_func, first_param, 0, -1)
+ check_stat(uid, gid)
+ if 0 not in os.getgroups():
+ self.assertRaises(OSError, chown_func, first_param, -1, 0)
+ check_stat(uid, gid)
+ # test illegal types
+ for t in str, float:
+ self.assertRaises(TypeError, chown_func, first_param, t(uid), gid)
+ check_stat(uid, gid)
+ self.assertRaises(TypeError, chown_func, first_param, uid, t(gid))
+ check_stat(uid, gid)
@unittest.skipUnless(hasattr(posix, 'chown'), "test needs os.chown()")
def test_chown(self):
@@ -246,7 +316,8 @@ class PosixTester(unittest.TestCase):
# re-create the file
open(test_support.TESTFN, 'w').close()
- self._test_all_chown_common(posix.chown, test_support.TESTFN)
+ self._test_all_chown_common(posix.chown, test_support.TESTFN,
+ getattr(posix, 'stat', None))
@unittest.skipUnless(hasattr(posix, 'fchown'), "test needs os.fchown()")
def test_fchown(self):
@@ -256,7 +327,8 @@ class PosixTester(unittest.TestCase):
test_file = open(test_support.TESTFN, 'w')
try:
fd = test_file.fileno()
- self._test_all_chown_common(posix.fchown, fd)
+ self._test_all_chown_common(posix.fchown, fd,
+ getattr(posix, 'fstat', None))
finally:
test_file.close()
@@ -265,66 +337,79 @@ class PosixTester(unittest.TestCase):
os.unlink(test_support.TESTFN)
# create a symlink
os.symlink(_DUMMY_SYMLINK, test_support.TESTFN)
- self._test_all_chown_common(posix.lchown, test_support.TESTFN)
+ self._test_all_chown_common(posix.lchown, test_support.TESTFN,
+ getattr(posix, 'lstat', None))
+ @unittest.skipUnless(hasattr(posix, 'chdir'), 'test needs posix.chdir()')
def test_chdir(self):
- if hasattr(posix, 'chdir'):
- posix.chdir(os.curdir)
- self.assertRaises(OSError, posix.chdir, test_support.TESTFN)
+ posix.chdir(os.curdir)
+ self.assertRaises(OSError, posix.chdir, test_support.TESTFN)
+ @unittest.skipUnless(hasattr(posix, 'lsdir'), 'test needs posix.lsdir()')
def test_lsdir(self):
- if hasattr(posix, 'lsdir'):
- self.assertIn(test_support.TESTFN, posix.lsdir(os.curdir))
+ self.assertIn(test_support.TESTFN, posix.lsdir(os.curdir))
+ @unittest.skipUnless(hasattr(posix, 'access'), 'test needs posix.access()')
def test_access(self):
- if hasattr(posix, 'access'):
- self.assertTrue(posix.access(test_support.TESTFN, os.R_OK))
+ self.assertTrue(posix.access(test_support.TESTFN, os.R_OK))
+ @unittest.skipUnless(hasattr(posix, 'umask'), 'test needs posix.umask()')
def test_umask(self):
- if hasattr(posix, 'umask'):
- old_mask = posix.umask(0)
- self.assertIsInstance(old_mask, int)
- posix.umask(old_mask)
+ old_mask = posix.umask(0)
+ self.assertIsInstance(old_mask, int)
+ posix.umask(old_mask)
+ @unittest.skipUnless(hasattr(posix, 'strerror'),
+ 'test needs posix.strerror()')
def test_strerror(self):
- if hasattr(posix, 'strerror'):
- self.assertTrue(posix.strerror(0))
+ self.assertTrue(posix.strerror(0))
+ @unittest.skipUnless(hasattr(posix, 'pipe'), 'test needs posix.pipe()')
def test_pipe(self):
- if hasattr(posix, 'pipe'):
- reader, writer = posix.pipe()
- os.close(reader)
- os.close(writer)
+ reader, writer = posix.pipe()
+ os.close(reader)
+ os.close(writer)
+ @unittest.skipUnless(hasattr(posix, 'tempnam'),
+ 'test needs posix.tempnam()')
def test_tempnam(self):
- if hasattr(posix, 'tempnam'):
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore", "tempnam", DeprecationWarning)
- self.assertTrue(posix.tempnam())
- self.assertTrue(posix.tempnam(os.curdir))
- self.assertTrue(posix.tempnam(os.curdir, 'blah'))
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", "tempnam", DeprecationWarning)
+ self.assertTrue(posix.tempnam())
+ self.assertTrue(posix.tempnam(os.curdir))
+ self.assertTrue(posix.tempnam(os.curdir, 'blah'))
+ @unittest.skipUnless(hasattr(posix, 'tmpfile'),
+ 'test needs posix.tmpfile()')
def test_tmpfile(self):
- if hasattr(posix, 'tmpfile'):
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore", "tmpfile", DeprecationWarning)
- fp = posix.tmpfile()
- fp.close()
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", "tmpfile", DeprecationWarning)
+ fp = posix.tmpfile()
+ fp.close()
+ @unittest.skipUnless(hasattr(posix, 'utime'), 'test needs posix.utime()')
def test_utime(self):
- if hasattr(posix, 'utime'):
- now = time.time()
- posix.utime(test_support.TESTFN, None)
- self.assertRaises(TypeError, posix.utime, test_support.TESTFN, (None, None))
- self.assertRaises(TypeError, posix.utime, test_support.TESTFN, (now, None))
- self.assertRaises(TypeError, posix.utime, test_support.TESTFN, (None, now))
- posix.utime(test_support.TESTFN, (int(now), int(now)))
- posix.utime(test_support.TESTFN, (now, now))
+ now = time.time()
+ posix.utime(test_support.TESTFN, None)
+ self.assertRaises(TypeError, posix.utime, test_support.TESTFN, (None, None))
+ self.assertRaises(TypeError, posix.utime, test_support.TESTFN, (now, None))
+ self.assertRaises(TypeError, posix.utime, test_support.TESTFN, (None, now))
+ posix.utime(test_support.TESTFN, (int(now), int(now)))
+ posix.utime(test_support.TESTFN, (now, now))
def _test_chflags_regular_file(self, chflags_func, target_file):
st = os.stat(target_file)
self.assertTrue(hasattr(st, 'st_flags'))
- chflags_func(target_file, st.st_flags | stat.UF_IMMUTABLE)
+
+ # ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
+ try:
+ chflags_func(target_file, st.st_flags | stat.UF_IMMUTABLE)
+ except OSError as err:
+ if err.errno != errno.EOPNOTSUPP:
+ raise
+ msg = 'chflag UF_IMMUTABLE not supported by underlying fs'
+ self.skipTest(msg)
+
try:
new_st = os.stat(target_file)
self.assertEqual(st.st_flags | stat.UF_IMMUTABLE, new_st.st_flags)
@@ -353,8 +438,16 @@ class PosixTester(unittest.TestCase):
self.teardown_files.append(_DUMMY_SYMLINK)
dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
- posix.lchflags(_DUMMY_SYMLINK,
- dummy_symlink_st.st_flags | stat.UF_IMMUTABLE)
+ # ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
+ try:
+ posix.lchflags(_DUMMY_SYMLINK,
+ dummy_symlink_st.st_flags | stat.UF_IMMUTABLE)
+ except OSError as err:
+ if err.errno != errno.EOPNOTSUPP:
+ raise
+ msg = 'chflag UF_IMMUTABLE not supported by underlying fs'
+ self.skipTest(msg)
+
try:
new_testfn_st = os.stat(test_support.TESTFN)
new_dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
@@ -365,57 +458,71 @@ class PosixTester(unittest.TestCase):
finally:
posix.lchflags(_DUMMY_SYMLINK, dummy_symlink_st.st_flags)
+ @unittest.skipUnless(hasattr(posix, 'getcwd'),
+ 'test needs posix.getcwd()')
def test_getcwd_long_pathnames(self):
- if hasattr(posix, 'getcwd'):
- dirname = 'getcwd-test-directory-0123456789abcdef-01234567890abcdef'
- curdir = os.getcwd()
- base_path = os.path.abspath(test_support.TESTFN) + '.getcwd'
+ dirname = 'getcwd-test-directory-0123456789abcdef-01234567890abcdef'
+ curdir = os.getcwd()
+ base_path = os.path.abspath(test_support.TESTFN) + '.getcwd'
- try:
- os.mkdir(base_path)
- os.chdir(base_path)
- except:
-# Just returning nothing instead of the SkipTest exception,
-# because the test results in Error in that case.
-# Is that ok?
-# raise unittest.SkipTest, "cannot create directory for testing"
- return
+ try:
+ os.mkdir(base_path)
+ os.chdir(base_path)
+ except:
+ self.skipTest("cannot create directory for testing")
- try:
- def _create_and_do_getcwd(dirname, current_path_length = 0):
- try:
- os.mkdir(dirname)
- except:
- raise unittest.SkipTest, "mkdir cannot create directory sufficiently deep for getcwd test"
-
- os.chdir(dirname)
- try:
- os.getcwd()
- if current_path_length < 4099:
- _create_and_do_getcwd(dirname, current_path_length + len(dirname) + 1)
- except OSError as e:
- expected_errno = errno.ENAMETOOLONG
- if 'sunos' in sys.platform or 'openbsd' in sys.platform:
- expected_errno = errno.ERANGE # Issue 9185
- self.assertEqual(e.errno, expected_errno)
- finally:
- os.chdir('..')
- os.rmdir(dirname)
-
- _create_and_do_getcwd(dirname)
-
- finally:
- os.chdir(curdir)
- shutil.rmtree(base_path)
+ try:
+ def _create_and_do_getcwd(dirname, current_path_length = 0):
+ try:
+ os.mkdir(dirname)
+ except:
+ self.skipTest("mkdir cannot create directory sufficiently "
+ "deep for getcwd test")
+
+ os.chdir(dirname)
+ try:
+ os.getcwd()
+ if current_path_length < 4099:
+ _create_and_do_getcwd(dirname, current_path_length + len(dirname) + 1)
+ except OSError as e:
+ expected_errno = errno.ENAMETOOLONG
+ # The following platforms have quirky getcwd()
+ # behaviour -- see issue 9185 and 15765 for
+ # more information.
+ quirky_platform = (
+ 'sunos' in sys.platform or
+ 'netbsd' in sys.platform or
+ 'openbsd' in sys.platform
+ )
+ if quirky_platform:
+ expected_errno = errno.ERANGE
+ self.assertEqual(e.errno, expected_errno)
+ finally:
+ os.chdir('..')
+ os.rmdir(dirname)
+
+ _create_and_do_getcwd(dirname)
+
+ finally:
+ os.chdir(curdir)
+ shutil.rmtree(base_path)
@unittest.skipUnless(hasattr(os, 'getegid'), "test needs os.getegid()")
def test_getgroups(self):
- with os.popen('id -G') as idg:
+ with os.popen('id -G 2>/dev/null') as idg:
groups = idg.read().strip()
+ ret = idg.close()
- if not groups:
+ if ret != None or not groups:
raise unittest.SkipTest("need working 'id -G'")
+ # Issues 16698: OS X ABIs prior to 10.6 have limits on getgroups()
+ if sys.platform == 'darwin':
+ import sysconfig
+ dt = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') or '10.0'
+ if tuple(int(n) for n in dt.split('.')[0:2]) < (10, 6):
+ raise unittest.SkipTest("getgroups(2) is broken prior to 10.6")
+
# 'id -G' and 'os.getgroups()' should return the same
# groups, ignoring order and duplicates.
# #10822 - it is implementation defined whether posix.getgroups()
@@ -443,17 +550,17 @@ class PosixGroupsTester(unittest.TestCase):
posix.initgroups(name, self.saved_groups[0])
@unittest.skipUnless(hasattr(posix, 'initgroups'),
- "test needs posix.initgroups()")
+ 'test needs posix.initgroups()')
def test_initgroups(self):
# find missing group
- g = max(self.saved_groups) + 1
+ g = max(self.saved_groups or [0]) + 1
name = pwd.getpwuid(posix.getuid()).pw_name
posix.initgroups(name, g)
self.assertIn(g, posix.getgroups())
@unittest.skipUnless(hasattr(posix, 'setgroups'),
- "test needs posix.setgroups()")
+ 'test needs posix.setgroups()')
def test_setgroups(self):
for groups in [[0], range(16)]:
posix.setgroups(groups)
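
Besides the wholesale hasattr-to-skipUnless conversion, the posix changes turn environmental failures (a filesystem that rejects UF_IMMUTABLE with EOPNOTSUPP, a missing pwd entry, an unusable 'id -G') into skips. A sketch of that convert-to-skip pattern on POSIX systems; set_immutable_flag() is a hypothetical stand-in, not os.chflags itself:

    import errno
    import os
    import unittest

    def set_immutable_flag(path):
        # Hypothetical helper that behaves like a filesystem rejecting the
        # flag, the way ZFS rejects UF_IMMUTABLE in the tests above.
        raise OSError(errno.EOPNOTSUPP, os.strerror(errno.EOPNOTSUPP), path)

    class SkipOnUnsupportedFS(unittest.TestCase):
        def test_immutable_flag(self):
            try:
                set_immutable_flag('somefile')
            except OSError as err:
                if err.errno != errno.EOPNOTSUPP:
                    raise
                self.skipTest('flag not supported by the underlying fs')

    if __name__ == '__main__':
        unittest.main()
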
diff --git a/Lib/test/test_posixpath.py b/Lib/test/test_posixpath.py
index 8bb78d6..f74dc14 100644
--- a/Lib/test/test_posixpath.py
+++ b/Lib/test/test_posixpath.py
@@ -9,6 +9,16 @@ from posixpath import realpath, abspath, dirname, basename
ABSTFN = abspath(test_support.TESTFN)
+def skip_if_ABSTFN_contains_backslash(test):
+ """
+ On Windows, posixpath.abspath still returns paths with backslashes
+ instead of posix forward slashes. If this is the case, several tests
+ fail, so skip them.
+ """
+ found_backslash = '\\' in ABSTFN
+ msg = "ABSTFN is not a posix path - tests fail"
+ return [test, unittest.skip(msg)(test)][found_backslash]
+
def safe_rmdir(dirname):
try:
os.rmdir(dirname)
@@ -110,8 +120,10 @@ class PosixPathTest(unittest.TestCase):
),
True
)
- # If we don't have links, assume that os.stat doesn't return resonable
- # inode information and thus, that samefile() doesn't work
+
+ # If we don't have links, assume that os.stat doesn't return
+ # reasonable inode information and thus, that samefile() doesn't
+ # work.
if hasattr(os, "symlink"):
os.symlink(
test_support.TESTFN + "1",
@@ -151,19 +163,19 @@ class PosixPathTest(unittest.TestCase):
),
True
)
- # If we don't have links, assume that os.stat() doesn't return resonable
- # inode information and thus, that samefile() doesn't work
+ # If we don't have links, assume that os.stat() doesn't return
+ # reasonable inode information and thus, that samestat() doesn't
+ # work.
if hasattr(os, "symlink"):
- if hasattr(os, "symlink"):
- os.symlink(test_support.TESTFN + "1", test_support.TESTFN + "2")
- self.assertIs(
- posixpath.samestat(
- os.stat(test_support.TESTFN + "1"),
- os.stat(test_support.TESTFN + "2")
- ),
- True
- )
- os.remove(test_support.TESTFN + "2")
+ os.symlink(test_support.TESTFN + "1", test_support.TESTFN + "2")
+ self.assertIs(
+ posixpath.samestat(
+ os.stat(test_support.TESTFN + "1"),
+ os.stat(test_support.TESTFN + "2")
+ ),
+ True
+ )
+ os.remove(test_support.TESTFN + "2")
f = open(test_support.TESTFN + "2", "wb")
f.write("bar")
f.close()
@@ -201,6 +213,7 @@ class PosixPathTest(unittest.TestCase):
with test_support.EnvironmentVarGuard() as env:
env['HOME'] = '/'
self.assertEqual(posixpath.expanduser("~"), "/")
+ self.assertEqual(posixpath.expanduser("~/foo"), "/foo")
def test_normpath(self):
self.assertEqual(posixpath.normpath(""), ".")
@@ -211,6 +224,18 @@ class PosixPathTest(unittest.TestCase):
self.assertEqual(posixpath.normpath("///foo/.//bar//.//..//.//baz"), "/foo/baz")
self.assertEqual(posixpath.normpath("///..//./foo/.//bar"), "/foo/bar")
+ @skip_if_ABSTFN_contains_backslash
+ def test_realpath_curdir(self):
+ self.assertEqual(realpath('.'), os.getcwd())
+ self.assertEqual(realpath('./.'), os.getcwd())
+ self.assertEqual(realpath('/'.join(['.'] * 100)), os.getcwd())
+
+ @skip_if_ABSTFN_contains_backslash
+ def test_realpath_pardir(self):
+ self.assertEqual(realpath('..'), dirname(os.getcwd()))
+ self.assertEqual(realpath('../..'), dirname(dirname(os.getcwd())))
+ self.assertEqual(realpath('/'.join(['..'] * 100)), '/')
+
if hasattr(os, "symlink"):
def test_realpath_basic(self):
# Basic operation.
@@ -233,6 +258,22 @@ class PosixPathTest(unittest.TestCase):
self.assertEqual(realpath(ABSTFN+"1"), ABSTFN+"1")
self.assertEqual(realpath(ABSTFN+"2"), ABSTFN+"2")
+ self.assertEqual(realpath(ABSTFN+"1/x"), ABSTFN+"1/x")
+ self.assertEqual(realpath(ABSTFN+"1/.."), dirname(ABSTFN))
+ self.assertEqual(realpath(ABSTFN+"1/../x"), dirname(ABSTFN) + "/x")
+ os.symlink(ABSTFN+"x", ABSTFN+"y")
+ self.assertEqual(realpath(ABSTFN+"1/../" + basename(ABSTFN) + "y"),
+ ABSTFN + "y")
+ self.assertEqual(realpath(ABSTFN+"1/../" + basename(ABSTFN) + "1"),
+ ABSTFN + "1")
+
+ os.symlink(basename(ABSTFN) + "a/b", ABSTFN+"a")
+ self.assertEqual(realpath(ABSTFN+"a"), ABSTFN+"a/b")
+
+ os.symlink("../" + basename(dirname(ABSTFN)) + "/" +
+ basename(ABSTFN) + "c", ABSTFN+"c")
+ self.assertEqual(realpath(ABSTFN+"c"), ABSTFN+"c")
+
# Test using relative path as well.
os.chdir(dirname(ABSTFN))
self.assertEqual(realpath(basename(ABSTFN)), ABSTFN)
@@ -241,6 +282,40 @@ class PosixPathTest(unittest.TestCase):
test_support.unlink(ABSTFN)
test_support.unlink(ABSTFN+"1")
test_support.unlink(ABSTFN+"2")
+ test_support.unlink(ABSTFN+"y")
+ test_support.unlink(ABSTFN+"c")
+ test_support.unlink(ABSTFN+"a")
+
+ def test_realpath_repeated_indirect_symlinks(self):
+ # Issue #6975.
+ try:
+ os.mkdir(ABSTFN)
+ os.symlink('../' + basename(ABSTFN), ABSTFN + '/self')
+ os.symlink('self/self/self', ABSTFN + '/link')
+ self.assertEqual(realpath(ABSTFN + '/link'), ABSTFN)
+ finally:
+ test_support.unlink(ABSTFN + '/self')
+ test_support.unlink(ABSTFN + '/link')
+ safe_rmdir(ABSTFN)
+
+ def test_realpath_deep_recursion(self):
+ depth = 10
+ old_path = abspath('.')
+ try:
+ os.mkdir(ABSTFN)
+ for i in range(depth):
+ os.symlink('/'.join(['%d' % i] * 10), ABSTFN + '/%d' % (i + 1))
+ os.symlink('.', ABSTFN + '/0')
+ self.assertEqual(realpath(ABSTFN + '/%d' % depth), ABSTFN)
+
+ # Test using relative path as well.
+ os.chdir(ABSTFN)
+ self.assertEqual(realpath('%d' % depth), ABSTFN)
+ finally:
+ os.chdir(old_path)
+ for i in range(depth + 1):
+ test_support.unlink(ABSTFN + '/%d' % i)
+ safe_rmdir(ABSTFN)
def test_realpath_resolve_parents(self):
# We also need to resolve any symlinks in the parents of a relative
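# Standalone sketch (not part of the patch) of the symlink chain that the
# test_realpath_repeated_indirect_symlinks case above builds for issue #6975:
# a directory whose 'self' entry points back at the directory itself, so a
# path through 'self/self/self' must still resolve to that directory.
# POSIX-only; the temporary directory name is whatever mkdtemp() picks.
import os
import os.path
import shutil
import tempfile

base = tempfile.mkdtemp()
try:
    target = os.path.join(base, 'd')
    os.mkdir(target)
    # A relative symlink that re-enters the same directory via its parent.
    os.symlink('../d', os.path.join(target, 'self'))
    os.symlink('self/self/self', os.path.join(target, 'link'))
    assert (os.path.realpath(os.path.join(target, 'link')) ==
            os.path.realpath(target))
finally:
    shutil.rmtree(base)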
diff --git a/Lib/test/test_pprint.py b/Lib/test/test_pprint.py
index e0137df..50493f6 100644
--- a/Lib/test/test_pprint.py
+++ b/Lib/test/test_pprint.py
@@ -24,6 +24,20 @@ class tuple3(tuple):
def __repr__(self):
return tuple.__repr__(self)
+class set2(set):
+ pass
+
+class set3(set):
+ def __repr__(self):
+ return set.__repr__(self)
+
+class frozenset2(frozenset):
+ pass
+
+class frozenset3(frozenset):
+ def __repr__(self):
+ return frozenset.__repr__(self)
+
class dict2(dict):
pass
@@ -114,22 +128,24 @@ class QueryTestCase(unittest.TestCase):
for simple in (0, 0L, 0+0j, 0.0, "", uni(""),
(), tuple2(), tuple3(),
[], list2(), list3(),
+ set(), set2(), set3(),
+ frozenset(), frozenset2(), frozenset3(),
{}, dict2(), dict3(),
self.assertTrue, pprint,
-6, -6L, -6-6j, -1.5, "x", uni("x"), (3,), [3], {3: 6},
(1,2), [3,4], {5: 6},
tuple2((1,2)), tuple3((1,2)), tuple3(range(100)),
[3,4], list2([3,4]), list3([3,4]), list3(range(100)),
+ set({7}), set2({7}), set3({7}),
+ frozenset({8}), frozenset2({8}), frozenset3({8}),
dict2({5: 6}), dict3({5: 6}),
range(10, -11, -1)
):
native = repr(simple)
- for function in "pformat", "saferepr":
- f = getattr(pprint, function)
- got = f(simple)
- self.assertEqual(native, got,
- "expected %s got %s from pprint.%s" %
- (native, got, function))
+ self.assertEqual(pprint.pformat(simple), native)
+ self.assertEqual(pprint.pformat(simple, width=1, indent=0)
+ .replace('\n', ' '), native)
+ self.assertEqual(pprint.saferepr(simple), native)
def test_basic_line_wrap(self):
# verify basic line-wrapping operation
@@ -205,19 +221,59 @@ class QueryTestCase(unittest.TestCase):
self.assertEqual(DottedPrettyPrinter().pformat(o), exp)
def test_set_reprs(self):
- self.assertEqual(pprint.pformat(set()), 'set()')
+ self.assertEqual(pprint.pformat(set()), 'set([])')
self.assertEqual(pprint.pformat(set(range(3))), 'set([0, 1, 2])')
- self.assertEqual(pprint.pformat(frozenset()), 'frozenset()')
- self.assertEqual(pprint.pformat(frozenset(range(3))), 'frozenset([0, 1, 2])')
+ self.assertEqual(pprint.pformat(set(range(7)), width=20), '''\
+set([0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6])''')
+ self.assertEqual(pprint.pformat(set2(range(7)), width=20), '''\
+set2([0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6])''')
+ self.assertEqual(pprint.pformat(set3(range(7)), width=20),
+ 'set3([0, 1, 2, 3, 4, 5, 6])')
+
+ self.assertEqual(pprint.pformat(frozenset()), 'frozenset([])')
+ self.assertEqual(pprint.pformat(frozenset(range(3))),
+ 'frozenset([0, 1, 2])')
+ self.assertEqual(pprint.pformat(frozenset(range(7)), width=20), '''\
+frozenset([0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6])''')
+ self.assertEqual(pprint.pformat(frozenset2(range(7)), width=20), '''\
+frozenset2([0,
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6])''')
+ self.assertEqual(pprint.pformat(frozenset3(range(7)), width=20),
+ 'frozenset3([0, 1, 2, 3, 4, 5, 6])')
+
+ def test_set_of_sets_reprs(self):
cube_repr_tgt = """\
{frozenset([]): frozenset([frozenset([2]), frozenset([0]), frozenset([1])]),
- frozenset([0]): frozenset([frozenset(),
+ frozenset([0]): frozenset([frozenset([]),
frozenset([0, 2]),
frozenset([0, 1])]),
- frozenset([1]): frozenset([frozenset(),
+ frozenset([1]): frozenset([frozenset([]),
frozenset([1, 2]),
frozenset([0, 1])]),
- frozenset([2]): frozenset([frozenset(),
+ frozenset([2]): frozenset([frozenset([]),
frozenset([1, 2]),
frozenset([0, 2])]),
frozenset([1, 2]): frozenset([frozenset([2]),
@@ -243,7 +299,7 @@ class QueryTestCase(unittest.TestCase):
frozenset([frozenset([0]),
frozenset([0,
1])]),
- frozenset([frozenset(),
+ frozenset([frozenset([]),
frozenset([0])]),
frozenset([frozenset([2]),
frozenset([0,
@@ -259,7 +315,7 @@ class QueryTestCase(unittest.TestCase):
frozenset([frozenset([1]),
frozenset([1,
2])]),
- frozenset([frozenset(),
+ frozenset([frozenset([]),
frozenset([1])])]),
frozenset([frozenset([1, 2]), frozenset([1])]): frozenset([frozenset([frozenset([1,
2]),
@@ -269,7 +325,7 @@ class QueryTestCase(unittest.TestCase):
frozenset([frozenset([2]),
frozenset([1,
2])]),
- frozenset([frozenset(),
+ frozenset([frozenset([]),
frozenset([1])]),
frozenset([frozenset([1]),
frozenset([0,
@@ -285,7 +341,7 @@ class QueryTestCase(unittest.TestCase):
frozenset([frozenset([2]),
frozenset([0,
2])]),
- frozenset([frozenset(),
+ frozenset([frozenset([]),
frozenset([2])])]),
frozenset([frozenset([]), frozenset([0])]): frozenset([frozenset([frozenset([0]),
frozenset([0,
@@ -293,16 +349,16 @@ class QueryTestCase(unittest.TestCase):
frozenset([frozenset([0]),
frozenset([0,
2])]),
- frozenset([frozenset(),
+ frozenset([frozenset([]),
frozenset([1])]),
- frozenset([frozenset(),
+ frozenset([frozenset([]),
frozenset([2])])]),
- frozenset([frozenset([]), frozenset([1])]): frozenset([frozenset([frozenset(),
+ frozenset([frozenset([]), frozenset([1])]): frozenset([frozenset([frozenset([]),
frozenset([0])]),
frozenset([frozenset([1]),
frozenset([1,
2])]),
- frozenset([frozenset(),
+ frozenset([frozenset([]),
frozenset([2])]),
frozenset([frozenset([1]),
frozenset([0,
@@ -310,9 +366,9 @@ class QueryTestCase(unittest.TestCase):
frozenset([frozenset([2]), frozenset([])]): frozenset([frozenset([frozenset([2]),
frozenset([1,
2])]),
- frozenset([frozenset(),
+ frozenset([frozenset([]),
frozenset([0])]),
- frozenset([frozenset(),
+ frozenset([frozenset([]),
frozenset([1])]),
frozenset([frozenset([2]),
frozenset([0,
@@ -333,7 +389,7 @@ class QueryTestCase(unittest.TestCase):
frozenset([frozenset([1]),
frozenset([0,
1])])]),
- frozenset([frozenset([0]), frozenset([0, 1])]): frozenset([frozenset([frozenset(),
+ frozenset([frozenset([0]), frozenset([0, 1])]): frozenset([frozenset([frozenset([]),
frozenset([0])]),
frozenset([frozenset([0,
1]),
@@ -357,7 +413,7 @@ class QueryTestCase(unittest.TestCase):
frozenset([frozenset([0]),
frozenset([0,
2])]),
- frozenset([frozenset(),
+ frozenset([frozenset([]),
frozenset([2])])]),
frozenset([frozenset([0, 1, 2]), frozenset([0, 2])]): frozenset([frozenset([frozenset([1,
2]),
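# A small standalone sketch of the set/frozenset behaviour the new pprint test
# cases above pin down: 2.7's pprint renders sets in the set([...]) and
# frozenset([...]) constructor forms and, when the elements do not fit the
# requested width, wraps them one per line aligned under the opening bracket.
import pprint

assert pprint.pformat(set(range(3))) == 'set([0, 1, 2])'
assert pprint.pformat(frozenset(range(3))) == 'frozenset([0, 1, 2])'
assert pprint.pformat(set(range(7)), width=20) == (
    'set([0,\n'
    '     1,\n'
    '     2,\n'
    '     3,\n'
    '     4,\n'
    '     5,\n'
    '     6])')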
diff --git a/Lib/test/test_property.py b/Lib/test/test_property.py
index e5fc174..e5a029b 100644
--- a/Lib/test/test_property.py
+++ b/Lib/test/test_property.py
@@ -163,7 +163,7 @@ class PropertySubclassTests(unittest.TestCase):
Foo.spam.__doc__,
"spam wrapped in property subclass")
- @unittest.skipIf(sys.flags.optimize <= 2,
+ @unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_setter_copies_getter_docstring(self):
class Foo(object):
@@ -196,7 +196,7 @@ class PropertySubclassTests(unittest.TestCase):
FooSub.spam.__doc__,
"spam wrapped in property subclass")
- @unittest.skipIf(sys.flags.optimize <= 2,
+ @unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_new_getter_new_docstring(self):
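# Aside on the skipIf fix above: docstrings are only stripped at optimization
# level 2 (-OO) and higher, so the guard has to read ">= 2"; the old "<= 2"
# condition skipped these tests on every normal run instead.  Minimal sketch
# of the corrected decorator on a hypothetical docstring-dependent test:
import sys
import unittest

class DocstringDependentTest(unittest.TestCase):
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_docstring_survives(self):
        def f():
            """doc"""
        self.assertEqual(f.__doc__, "doc")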
diff --git a/Lib/test/test_pty.py b/Lib/test/test_pty.py
index 623f23b..bec38c4 100644
--- a/Lib/test/test_pty.py
+++ b/Lib/test/test_pty.py
@@ -152,7 +152,7 @@ class PtyTest(unittest.TestCase):
# platform-dependent amount of data is written to its fd. On
# Linux 2.6, it's 4000 bytes and the child won't block, but on OS
# X even the small writes in the child above will block it. Also
- # on Linux, the read() will throw an OSError (input/output error)
+ # on Linux, the read() will raise an OSError (input/output error)
# when it tries to read past the end of the buffer but the child's
# already exited, so catch and discard those exceptions. It's not
# worth checking for EIO.
diff --git a/Lib/test/test_pwd.py b/Lib/test/test_pwd.py
index 67e11b6..828f6de 100644
--- a/Lib/test/test_pwd.py
+++ b/Lib/test/test_pwd.py
@@ -8,8 +8,6 @@ class PwdTest(unittest.TestCase):
def test_values(self):
entries = pwd.getpwall()
- entriesbyname = {}
- entriesbyuid = {}
for e in entries:
self.assertEqual(len(e), 7)
@@ -18,9 +16,9 @@ class PwdTest(unittest.TestCase):
self.assertEqual(e[1], e.pw_passwd)
self.assertIsInstance(e.pw_passwd, basestring)
self.assertEqual(e[2], e.pw_uid)
- self.assertIsInstance(e.pw_uid, int)
+ self.assertIsInstance(e.pw_uid, (int, long))
self.assertEqual(e[3], e.pw_gid)
- self.assertIsInstance(e.pw_gid, int)
+ self.assertIsInstance(e.pw_gid, (int, long))
self.assertEqual(e[4], e.pw_gecos)
self.assertIsInstance(e.pw_gecos, basestring)
self.assertEqual(e[5], e.pw_dir)
@@ -32,13 +30,20 @@ class PwdTest(unittest.TestCase):
# for one uid
# self.assertEqual(pwd.getpwuid(e.pw_uid), e)
# instead of this collect all entries for one uid
- # and check afterwards
+ # and check afterwards (done in test_values_extended)
+
+ def test_values_extended(self):
+ entries = pwd.getpwall()
+ entriesbyname = {}
+ entriesbyuid = {}
+
+ if len(entries) > 1000: # Huge passwd file (NIS?) -- skip this test
+ self.skipTest('passwd file is huge; extended test skipped')
+
+ for e in entries:
entriesbyname.setdefault(e.pw_name, []).append(e)
entriesbyuid.setdefault(e.pw_uid, []).append(e)
- if len(entries) > 1000: # Huge passwd file (NIS?) -- skip the rest
- return
-
# check whether the entry returned by getpwuid()
# for each uid is among those from getpwall() for this uid
for e in entries:
@@ -49,7 +54,9 @@ class PwdTest(unittest.TestCase):
def test_errors(self):
self.assertRaises(TypeError, pwd.getpwuid)
+ self.assertRaises(TypeError, pwd.getpwuid, 3.14)
self.assertRaises(TypeError, pwd.getpwnam)
+ self.assertRaises(TypeError, pwd.getpwnam, 42)
self.assertRaises(TypeError, pwd.getpwall, 42)
# try to get some errors
@@ -93,6 +100,13 @@ class PwdTest(unittest.TestCase):
self.assertNotIn(fakeuid, byuids)
self.assertRaises(KeyError, pwd.getpwuid, fakeuid)
+ # -1 shouldn't be a valid uid because it has a special meaning in many
+ # uid-related functions
+ self.assertRaises(KeyError, pwd.getpwuid, -1)
+ # should be out of uid_t range
+ self.assertRaises(KeyError, pwd.getpwuid, 2**128)
+ self.assertRaises(KeyError, pwd.getpwuid, -2**128)
+
def test_main():
test_support.run_unittest(PwdTest)
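# Standalone sketch of the extra error checks added to PwdTest above: uids
# that cannot exist (-1, or values far outside uid_t) raise KeyError rather
# than overflowing, and a non-string argument to getpwnam raises TypeError.
import pwd

for bad_uid in (-1, 2**128, -2**128):
    try:
        pwd.getpwuid(bad_uid)
    except KeyError:
        pass                    # expected: no such uid
    else:
        raise AssertionError('expected KeyError for %r' % bad_uid)
try:
    pwd.getpwnam(42)            # the name must be a string
except TypeError:
    pass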
diff --git a/Lib/test/test_py3kwarn.py b/Lib/test/test_py3kwarn.py
index 2afd8a1..5aee6a5 100644
--- a/Lib/test/test_py3kwarn.py
+++ b/Lib/test/test_py3kwarn.py
@@ -307,6 +307,11 @@ class TestPy3KWarnings(unittest.TestCase):
w.reset()
self.assertWarning(sequenceIncludes(range(3), 2), w, seq_warn)
+ def test_nonascii_bytes_literals(self):
+ expected = "non-ascii bytes literals not supported in 3.x"
+ with check_py3k_warnings((expected, SyntaxWarning)):
+ exec "b'\xbd'"
+
class TestStdlibRemovals(unittest.TestCase):
diff --git a/Lib/test/test_pyclbr.py b/Lib/test/test_pyclbr.py
index 6aa96d5..7bdc555 100644
--- a/Lib/test/test_pyclbr.py
+++ b/Lib/test/test_pyclbr.py
@@ -188,6 +188,11 @@ class PyclbrTest(TestCase):
cm('email.parser')
cm('test.test_pyclbr')
+ def test_issue_14798(self):
+ # test ImportError is raised when the first part of a dotted name is
+ # not a package
+ self.assertRaises(ImportError, pyclbr.readmodule_ex, 'asyncore.foo')
+
def test_main():
run_unittest(PyclbrTest)
diff --git a/Lib/test/test_pydoc.py b/Lib/test/test_pydoc.py
index 59cbffe..c11c06b 100644
--- a/Lib/test/test_pydoc.py
+++ b/Lib/test/test_pydoc.py
@@ -4,18 +4,29 @@ import difflib
import __builtin__
import re
import pydoc
+import contextlib
import inspect
import keyword
+import pkgutil
import unittest
import xml.etree
+import types
import test.test_support
from collections import namedtuple
from test.script_helper import assert_python_ok
-from test.test_support import (
- TESTFN, rmtree, reap_children, captured_stdout)
+from test.test_support import (TESTFN, rmtree, reap_children, captured_stdout,
+ captured_stderr, requires_docstrings)
from test import pydoc_mod
+if test.test_support.HAVE_DOCSTRINGS:
+ expected_data_docstrings = (
+ 'dictionary for instance variables (if defined)',
+ 'list of weak references to the object (if defined)',
+ )
+else:
+ expected_data_docstrings = ('', '')
+
expected_text_pattern = \
"""
NAME
@@ -27,6 +38,7 @@ FILE
CLASSES
__builtin__.object
B
+ C
A
\x20\x20\x20\x20
class A
@@ -40,16 +52,34 @@ CLASSES
class B(__builtin__.object)
| Data descriptors defined here:
|\x20\x20
- | __dict__
- | dictionary for instance variables (if defined)
+ | __dict__%s
|\x20\x20
- | __weakref__
- | list of weak references to the object (if defined)
+ | __weakref__%s
|\x20\x20
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|\x20\x20
| NO_MEANING = 'eggs'
+\x20\x20\x20\x20
+ class C(__builtin__.object)
+ | Methods defined here:
+ |\x20\x20
+ | get_answer(self)
+ | Return say_no()
+ |\x20\x20
+ | is_it_true(self)
+ | Return self.get_answer()
+ |\x20\x20
+ | say_no(self)
+ |\x20\x20
+ | ----------------------------------------------------------------------
+ | Data descriptors defined here:
+ |\x20\x20
+ | __dict__
+ | dictionary for instance variables (if defined)
+ |\x20\x20
+ | __weakref__
+ | list of weak references to the object (if defined)
FUNCTIONS
doc_func()
@@ -75,6 +105,9 @@ CREDITS
Nobody
""".strip()
+expected_text_data_docstrings = tuple('\n | ' + s if s else ''
+ for s in expected_data_docstrings)
+
expected_html_pattern = \
"""
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
@@ -96,6 +129,7 @@ expected_html_pattern = \
</font></dt><dd>
<dl>
<dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#B">B</a>
+</font></dt><dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#C">C</a>
</font></dt></dl>
</dd>
<dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#A">A</a>
@@ -121,15 +155,37 @@ expected_html_pattern = \
<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
<td width="100%%">Data descriptors defined here:<br>
<dl><dt><strong>__dict__</strong></dt>
-<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+<dd><tt>%s</tt></dd>
</dl>
<dl><dt><strong>__weakref__</strong></dt>
-<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+<dd><tt>%s</tt></dd>
</dl>
<hr>
Data and other attributes defined here:<br>
<dl><dt><strong>NO_MEANING</strong> = 'eggs'</dl>
+</td></tr></table> <p>
+<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
+<tr bgcolor="#ffc8d8">
+<td colspan=3 valign=bottom>&nbsp;<br>
+<font color="#000000" face="helvetica, arial"><a name="C">class <strong>C</strong></a>(<a href="__builtin__.html#object">__builtin__.object</a>)</font></td></tr>
+\x20\x20\x20\x20
+<tr><td bgcolor="#ffc8d8"><tt>&nbsp;&nbsp;&nbsp;</tt></td><td>&nbsp;</td>
+<td width="100%%">Methods defined here:<br>
+<dl><dt><a name="C-get_answer"><strong>get_answer</strong></a>(self)</dt><dd><tt>Return&nbsp;<a href="#C-say_no">say_no</a>()</tt></dd></dl>
+
+<dl><dt><a name="C-is_it_true"><strong>is_it_true</strong></a>(self)</dt><dd><tt>Return&nbsp;self.<a href="#C-get_answer">get_answer</a>()</tt></dd></dl>
+
+<dl><dt><a name="C-say_no"><strong>say_no</strong></a>(self)</dt></dl>
+
+<hr>
+Data descriptors defined here:<br>
+<dl><dt><strong>__dict__</strong></dt>
+<dd><tt>dictionary&nbsp;for&nbsp;instance&nbsp;variables&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
+<dl><dt><strong>__weakref__</strong></dt>
+<dd><tt>list&nbsp;of&nbsp;weak&nbsp;references&nbsp;to&nbsp;the&nbsp;object&nbsp;(if&nbsp;defined)</tt></dd>
+</dl>
</td></tr></table></td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#eeaa77">
@@ -168,6 +224,8 @@ war</tt></dd></dl>
<td width="100%%">Nobody</td></tr></table>
""".strip()
+expected_html_data_docstrings = tuple(s.replace(' ', '&nbsp;')
+ for s in expected_data_docstrings)
# output pattern for missing module
missing_pattern = "no Python documentation found for '%s'"
@@ -217,8 +275,32 @@ def print_diffs(text1, text2):
print '\n' + ''.join(diffs)
-class PyDocDocTest(unittest.TestCase):
+class PydocBaseTest(unittest.TestCase):
+
+ def _restricted_walk_packages(self, walk_packages, path=None):
+ """
+ A version of pkgutil.walk_packages() that will restrict itself to
+ a given path.
+ """
+ default_path = path or [os.path.dirname(__file__)]
+ def wrapper(path=None, prefix='', onerror=None):
+ return walk_packages(path or default_path, prefix, onerror)
+ return wrapper
+
+ @contextlib.contextmanager
+ def restrict_walk_packages(self, path=None):
+ walk_packages = pkgutil.walk_packages
+ pkgutil.walk_packages = self._restricted_walk_packages(walk_packages,
+ path)
+ try:
+ yield
+ finally:
+ pkgutil.walk_packages = walk_packages
+
+class PydocDocTest(unittest.TestCase):
+
+ @requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_html_doc(self):
@@ -229,17 +311,21 @@ class PyDocDocTest(unittest.TestCase):
mod_url = nturl2path.pathname2url(mod_file)
else:
mod_url = mod_file
- expected_html = expected_html_pattern % (mod_url, mod_file, doc_loc)
+ expected_html = expected_html_pattern % (
+ (mod_url, mod_file, doc_loc) +
+ expected_html_data_docstrings)
if result != expected_html:
print_diffs(expected_html, result)
self.fail("outputs are not equal, see diff above")
+ @requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_text_doc(self):
result, doc_loc = get_pydoc_text(pydoc_mod)
- expected_text = expected_text_pattern % \
- (inspect.getabsfile(pydoc_mod), doc_loc)
+ expected_text = expected_text_pattern % (
+ (inspect.getabsfile(pydoc_mod), doc_loc) +
+ expected_text_data_docstrings)
if result != expected_text:
print_diffs(expected_text, result)
self.fail("outputs are not equal, see diff above")
@@ -249,6 +335,25 @@ class PyDocDocTest(unittest.TestCase):
result, doc_loc = get_pydoc_text(xml.etree)
self.assertEqual(doc_loc, "", "MODULE DOCS incorrectly includes a link")
+ def test_getpager_with_stdin_none(self):
+ previous_stdin = sys.stdin
+ try:
+ sys.stdin = None
+ pydoc.getpager() # Shouldn't fail.
+ finally:
+ sys.stdin = previous_stdin
+
+ def test_non_str_name(self):
+ # issue14638
+ # Treat illegal (non-str) name like no name
+ class A:
+ __name__ = 42
+ class B:
+ pass
+ adoc = pydoc.render_doc(A())
+ bdoc = pydoc.render_doc(B())
+ self.assertEqual(adoc.replace("A", "B"), bdoc)
+
def test_not_here(self):
missing_module = "test.i_am_not_here"
result = run_pydoc(missing_module)
@@ -278,7 +383,7 @@ class PyDocDocTest(unittest.TestCase):
"<type 'exceptions.Exception'>")
-class PydocImportTest(unittest.TestCase):
+class PydocImportTest(PydocBaseTest):
def setUp(self):
self.test_dir = os.mkdir(TESTFN)
@@ -313,8 +418,19 @@ class PydocImportTest(unittest.TestCase):
badsyntax = os.path.join(pkgdir, "__init__") + os.extsep + "py"
with open(badsyntax, 'w') as f:
f.write("invalid python syntax = $1\n")
- result = run_pydoc('zqwykjv', '-k', PYTHONPATH=TESTFN)
- self.assertEqual('', result)
+ with self.restrict_walk_packages(path=[TESTFN]):
+ with captured_stdout() as out:
+ with captured_stderr() as err:
+ pydoc.apropos('xyzzy')
+ # No result, no error
+ self.assertEqual(out.getvalue(), '')
+ self.assertEqual(err.getvalue(), '')
+ # The package name is still matched
+ with captured_stdout() as out:
+ with captured_stderr() as err:
+ pydoc.apropos('syntaxerr')
+ self.assertEqual(out.getvalue().strip(), 'syntaxerr')
+ self.assertEqual(err.getvalue(), '')
def test_apropos_with_unreadable_dir(self):
# Issue 7367 - pydoc -k failed when unreadable dir on path
@@ -323,8 +439,13 @@ class PydocImportTest(unittest.TestCase):
self.addCleanup(os.rmdir, self.unreadable_dir)
# Note, on Windows the directory appears to be still
# readable so this is not really testing the issue there
- result = run_pydoc('zqwykjv', '-k', PYTHONPATH=TESTFN)
- self.assertEqual('', result)
+ with self.restrict_walk_packages(path=[TESTFN]):
+ with captured_stdout() as out:
+ with captured_stderr() as err:
+ pydoc.apropos('SOMEKEY')
+ # No result, no error
+ self.assertEqual(out.getvalue(), '')
+ self.assertEqual(err.getvalue(), '')
class TestDescriptions(unittest.TestCase):
@@ -355,13 +476,103 @@ class TestDescriptions(unittest.TestCase):
def test_namedtuple_public_underscore(self):
NT = namedtuple('NT', ['abc', 'def'], rename=True)
with captured_stdout() as help_io:
- help(NT)
+ pydoc.help(NT)
helptext = help_io.getvalue()
self.assertIn('_1', helptext)
self.assertIn('_replace', helptext)
self.assertIn('_asdict', helptext)
+@unittest.skipUnless(test.test_support.have_unicode,
+ "test requires unicode support")
+class TestUnicode(unittest.TestCase):
+
+ def setUp(self):
+ # Better not to use unicode escapes in literals, lest the
+ # parser choke on it if Python has been built without
+ # unicode support.
+ self.Q = types.ModuleType(
+ 'Q', 'Rational numbers: \xe2\x84\x9a'.decode('utf8'))
+ self.Q.__version__ = '\xe2\x84\x9a'.decode('utf8')
+ self.Q.__date__ = '\xe2\x84\x9a'.decode('utf8')
+ self.Q.__author__ = '\xe2\x84\x9a'.decode('utf8')
+ self.Q.__credits__ = '\xe2\x84\x9a'.decode('utf8')
+
+ self.assertIsInstance(self.Q.__doc__, unicode)
+
+ def test_render_doc(self):
+ # render_doc is robust against unicode in docstrings
+ doc = pydoc.render_doc(self.Q)
+ self.assertIsInstance(doc, str)
+
+ def test_encode(self):
+ # _encode is robust against characters out the specified encoding
+ self.assertEqual(pydoc._encode(self.Q.__doc__, 'ascii'), 'Rational numbers: &#8474;')
+
+ def test_pipepager(self):
+ # pipepager does not choke on unicode
+ doc = pydoc.render_doc(self.Q)
+
+ saved, os.popen = os.popen, open
+ try:
+ with test.test_support.temp_cwd():
+ pydoc.pipepager(doc, 'pipe')
+ self.assertEqual(open('pipe').read(), pydoc._encode(doc))
+ finally:
+ os.popen = saved
+
+ def test_tempfilepager(self):
+ # tempfilepager does not choke on unicode
+ doc = pydoc.render_doc(self.Q)
+
+ output = {}
+ def mock_system(cmd):
+ filename = cmd.strip()[1:-1]
+ self.assertEqual('"' + filename + '"', cmd.strip())
+ output['content'] = open(filename).read()
+ saved, os.system = os.system, mock_system
+ try:
+ pydoc.tempfilepager(doc, '')
+ self.assertEqual(output['content'], pydoc._encode(doc))
+ finally:
+ os.system = saved
+
+ def test_plainpager(self):
+ # plainpager does not choke on unicode
+ doc = pydoc.render_doc(self.Q)
+
+ # Note: captured_stdout is too permissive when it comes to
+ # unicode, and using it here would make the test always
+ # pass.
+ with test.test_support.temp_cwd():
+ with open('output', 'w') as f:
+ saved, sys.stdout = sys.stdout, f
+ try:
+ pydoc.plainpager(doc)
+ finally:
+ sys.stdout = saved
+ self.assertIn('Rational numbers:', open('output').read())
+
+ def test_ttypager(self):
+ # ttypager does not choke on unicode
+ doc = pydoc.render_doc(self.Q)
+ # Test ttypager
+ with test.test_support.temp_cwd(), test.test_support.captured_stdin():
+ with open('output', 'w') as f:
+ saved, sys.stdout = sys.stdout, f
+ try:
+ pydoc.ttypager(doc)
+ finally:
+ sys.stdout = saved
+ self.assertIn('Rational numbers:', open('output').read())
+
+ def test_htmlpage(self):
+ # html.page does not choke on unicode
+ with test.test_support.temp_cwd():
+ with captured_stdout() as output:
+ pydoc.writedoc(self.Q)
+ self.assertEqual(output.getvalue(), 'wrote Q.html\n')
+
class TestHelper(unittest.TestCase):
def test_keywords(self):
self.assertEqual(sorted(pydoc.Helper.keywords),
@@ -376,7 +587,7 @@ class TestHelper(unittest.TestCase):
try:
pydoc.render_doc(name)
except ImportError:
- self.fail('finding the doc of {!r} failed'.format(o))
+ self.fail('finding the doc of {!r} failed'.format(name))
for name in ('not__builtin__', 'strrr', 'strr.translate',
'str.trrrranslate', '__builtin__.strrr',
@@ -387,9 +598,10 @@ class TestHelper(unittest.TestCase):
def test_main():
try:
- test.test_support.run_unittest(PyDocDocTest,
+ test.test_support.run_unittest(PydocDocTest,
PydocImportTest,
TestDescriptions,
+ TestUnicode,
TestHelper)
finally:
reap_children()
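# The PydocBaseTest helper above swaps out pkgutil.walk_packages so that
# pydoc.apropos() only scans a directory the test controls.  A standalone
# sketch of the same monkey-patching pattern, reduced to its core; the empty
# temporary directory is just a stand-in search path.
import contextlib
import os
import pkgutil
import tempfile

@contextlib.contextmanager
def restricted_walk_packages(path):
    original = pkgutil.walk_packages
    def wrapper(p=None, prefix='', onerror=None):
        # Ignore any caller-supplied path and search only `path`.
        return original(p or path, prefix, onerror)
    pkgutil.walk_packages = wrapper
    try:
        yield
    finally:
        pkgutil.walk_packages = original      # always restore the real one

tmp = tempfile.mkdtemp()
try:
    with restricted_walk_packages([tmp]):
        assert list(pkgutil.walk_packages()) == []   # empty dir, nothing found
finally:
    os.rmdir(tmp)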
diff --git a/Lib/test/test_pyexpat.py b/Lib/test/test_pyexpat.py
index 75b031a..9f63d4e 100644
--- a/Lib/test/test_pyexpat.py
+++ b/Lib/test/test_pyexpat.py
@@ -228,6 +228,17 @@ class ParseTest(unittest.TestCase):
finally:
test_support.unlink(test_support.TESTFN)
+ def test_parse_again(self):
+ parser = expat.ParserCreate()
+ file = StringIO.StringIO(data)
+ parser.ParseFile(file)
+ # Issue 6676: ensure a meaningful exception is raised when attempting
+ # to parse more than one XML document per xmlparser instance,
+ # a limitation of the Expat library.
+ with self.assertRaises(expat.error) as cm:
+ parser.ParseFile(file)
+ self.assertEqual(expat.ErrorString(cm.exception.code),
+ expat.errors.XML_ERROR_FINISHED)
class NamespaceSeparatorTest(unittest.TestCase):
def test_legal(self):
@@ -588,6 +599,58 @@ class MalformedInputText(unittest.TestCase):
except expat.ExpatError as e:
self.assertEqual(str(e), 'XML declaration not well-formed: line 1, column 14')
+class ForeignDTDTests(unittest.TestCase):
+ """
+ Tests for the UseForeignDTD method of expat parser objects.
+ """
+ def test_use_foreign_dtd(self):
+ """
+ If UseForeignDTD is passed True and a document without an external
+ entity reference is parsed, ExternalEntityRefHandler is first called
+ with None for the public and system ids.
+ """
+ handler_call_args = []
+ def resolve_entity(context, base, system_id, public_id):
+ handler_call_args.append((public_id, system_id))
+ return 1
+
+ parser = expat.ParserCreate()
+ parser.UseForeignDTD(True)
+ parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
+ parser.ExternalEntityRefHandler = resolve_entity
+ parser.Parse("<?xml version='1.0'?><element/>")
+ self.assertEqual(handler_call_args, [(None, None)])
+
+ # test UseForeignDTD() is equal to UseForeignDTD(True)
+ handler_call_args[:] = []
+
+ parser = expat.ParserCreate()
+ parser.UseForeignDTD()
+ parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
+ parser.ExternalEntityRefHandler = resolve_entity
+ parser.Parse("<?xml version='1.0'?><element/>")
+ self.assertEqual(handler_call_args, [(None, None)])
+
+ def test_ignore_use_foreign_dtd(self):
+ """
+ If UseForeignDTD is passed True and a document with an external
+ entity reference is parsed, ExternalEntityRefHandler is called with
+ the public and system ids from the document.
+ """
+ handler_call_args = []
+ def resolve_entity(context, base, system_id, public_id):
+ handler_call_args.append((public_id, system_id))
+ return 1
+
+ parser = expat.ParserCreate()
+ parser.UseForeignDTD(True)
+ parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
+ parser.ExternalEntityRefHandler = resolve_entity
+ parser.Parse(
+ "<?xml version='1.0'?><!DOCTYPE foo PUBLIC 'bar' 'baz'><element/>")
+ self.assertEqual(handler_call_args, [("bar", "baz")])
+
+
def test_main():
run_unittest(SetAttributeTest,
ParseTest,
@@ -598,7 +661,8 @@ def test_main():
PositionTest,
sf1296433Test,
ChardataBufferTest,
- MalformedInputText)
+ MalformedInputText,
+ ForeignDTDTests)
if __name__ == "__main__":
test_main()
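# Standalone sketch of what ForeignDTDTests above exercises: with
# UseForeignDTD(True) and parameter-entity parsing enabled, Expat calls the
# ExternalEntityRefHandler for a DTD even though the document declares none,
# passing None for both the public and the system id.
from xml.parsers import expat

seen = []
def resolve_entity(context, base, system_id, public_id):
    seen.append((public_id, system_id))
    return 1                     # report the external entity as handled

parser = expat.ParserCreate()
parser.UseForeignDTD(True)
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.ExternalEntityRefHandler = resolve_entity
parser.Parse("<?xml version='1.0'?><element/>", True)
assert seen == [(None, None)]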
diff --git a/Lib/test/test_queue.py b/Lib/test/test_queue.py
index 0b38e7e..34a4aef 100644
--- a/Lib/test/test_queue.py
+++ b/Lib/test/test_queue.py
@@ -43,6 +43,9 @@ class _TriggerThread(threading.Thread):
class BlockingTestMixin:
+ def tearDown(self):
+ self.t = None
+
def do_blocking_test(self, block_func, block_args, trigger_func, trigger_args):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
@@ -79,7 +82,7 @@ class BlockingTestMixin:
self.fail("trigger thread ended but event never set")
-class BaseQueueTest(unittest.TestCase, BlockingTestMixin):
+class BaseQueueTest(BlockingTestMixin):
def setUp(self):
self.cum = 0
self.cumlock = threading.Lock()
@@ -191,13 +194,13 @@ class BaseQueueTest(unittest.TestCase, BlockingTestMixin):
self.simple_queue_test(q)
-class QueueTest(BaseQueueTest):
+class QueueTest(BaseQueueTest, unittest.TestCase):
type2test = Queue.Queue
-class LifoQueueTest(BaseQueueTest):
+class LifoQueueTest(BaseQueueTest, unittest.TestCase):
type2test = Queue.LifoQueue
-class PriorityQueueTest(BaseQueueTest):
+class PriorityQueueTest(BaseQueueTest, unittest.TestCase):
type2test = Queue.PriorityQueue
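# The reshuffle above is the usual mixin arrangement: BlockingTestMixin no
# longer derives from TestCase, and the concrete classes name the mixin before
# unittest.TestCase so its setUp/tearDown come first in the MRO.  Minimal
# sketch of the pattern with a stand-in mixin:
import unittest

class CleanupMixin(object):
    def tearDown(self):
        self.t = None            # drop the helper-thread reference

class ConcreteQueueTest(CleanupMixin, unittest.TestCase):
    def test_something(self):
        self.t = object()        # stand-in for a trigger thread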
@@ -222,7 +225,7 @@ class FailingQueue(Queue.Queue):
raise FailingQueueException, "You Lose"
return Queue.Queue._get(self)
-class FailingQueueTest(unittest.TestCase, BlockingTestMixin):
+class FailingQueueTest(BlockingTestMixin, unittest.TestCase):
def failing_queue_test(self, q):
if not q.empty():
diff --git a/Lib/test/test_random.py b/Lib/test/test_random.py
index df82990..1a5a86b 100644
--- a/Lib/test/test_random.py
+++ b/Lib/test/test_random.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
import unittest
import random
import time
@@ -57,6 +55,14 @@ class TestBasicOps(unittest.TestCase):
self.assertRaises(TypeError, self.gen.jumpahead) # needs an arg
self.assertRaises(TypeError, self.gen.jumpahead, 2, 3) # too many
+ def test_jumpahead_produces_valid_state(self):
+ # From http://bugs.python.org/issue14591.
+ self.gen.seed(199210368)
+ self.gen.jumpahead(13550674232554645900)
+ for i in range(500):
+ val = self.gen.random()
+ self.assertLess(val, 1.0)
+
def test_sample(self):
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
@@ -243,10 +249,10 @@ class SystemRandom_TestBasicOps(TestBasicOps):
def test_bigrand_ranges(self):
for i in [40,80, 160, 200, 211, 250, 375, 512, 550]:
- start = self.gen.randrange(2 ** i)
- stop = self.gen.randrange(2 ** (i-2))
+ start = self.gen.randrange(2 ** (i-2))
+ stop = self.gen.randrange(2 ** i)
if stop <= start:
- return
+ continue
self.assertTrue(start <= self.gen.randrange(start, stop) < stop)
def test_rangelimits(self):
@@ -395,10 +401,10 @@ class MersenneTwister_TestBasicOps(TestBasicOps):
def test_bigrand_ranges(self):
for i in [40,80, 160, 200, 211, 250, 375, 512, 550]:
- start = self.gen.randrange(2 ** i)
- stop = self.gen.randrange(2 ** (i-2))
+ start = self.gen.randrange(2 ** (i-2))
+ stop = self.gen.randrange(2 ** i)
if stop <= start:
- return
+ continue
self.assertTrue(start <= self.gen.randrange(start, stop) < stop)
def test_rangelimits(self):
@@ -486,6 +492,7 @@ class TestDistributions(unittest.TestCase):
g.random = x[:].pop; g.paretovariate(1.0)
g.random = x[:].pop; g.expovariate(1.0)
g.random = x[:].pop; g.weibullvariate(1.0, 1.0)
+ g.random = x[:].pop; g.vonmisesvariate(1.0, 1.0)
g.random = x[:].pop; g.normalvariate(0.0, 1.0)
g.random = x[:].pop; g.gauss(0.0, 1.0)
g.random = x[:].pop; g.lognormvariate(0.0, 1.0)
@@ -506,6 +513,7 @@ class TestDistributions(unittest.TestCase):
(g.uniform, (1.0,10.0), (10.0+1.0)/2, (10.0-1.0)**2/12),
(g.triangular, (0.0, 1.0, 1.0/3.0), 4.0/9.0, 7.0/9.0/18.0),
(g.expovariate, (1.5,), 1/1.5, 1/1.5**2),
+ (g.vonmisesvariate, (1.23, 0), pi, pi**2/3),
(g.paretovariate, (5.0,), 5.0/(5.0-1),
5.0/((5.0-1)**2*(5.0-2))),
(g.weibullvariate, (1.0, 3.0), gamma(1+1/3.0),
@@ -522,8 +530,50 @@ class TestDistributions(unittest.TestCase):
s1 += e
s2 += (e - mu) ** 2
N = len(y)
- self.assertAlmostEqual(s1/N, mu, 2)
- self.assertAlmostEqual(s2/(N-1), sigmasqrd, 2)
+ self.assertAlmostEqual(s1/N, mu, places=2,
+ msg='%s%r' % (variate.__name__, args))
+ self.assertAlmostEqual(s2/(N-1), sigmasqrd, places=2,
+ msg='%s%r' % (variate.__name__, args))
+
+ def test_constant(self):
+ g = random.Random()
+ N = 100
+ for variate, args, expected in [
+ (g.uniform, (10.0, 10.0), 10.0),
+ (g.triangular, (10.0, 10.0), 10.0),
+ (g.triangular, (10.0, 10.0, 10.0), 10.0),
+ (g.expovariate, (float('inf'),), 0.0),
+ (g.vonmisesvariate, (3.0, float('inf')), 3.0),
+ (g.gauss, (10.0, 0.0), 10.0),
+ (g.lognormvariate, (0.0, 0.0), 1.0),
+ (g.lognormvariate, (-float('inf'), 0.0), 0.0),
+ (g.normalvariate, (10.0, 0.0), 10.0),
+ (g.paretovariate, (float('inf'),), 1.0),
+ (g.weibullvariate, (10.0, float('inf')), 10.0),
+ (g.weibullvariate, (0.0, 10.0), 0.0),
+ ]:
+ for i in range(N):
+ self.assertEqual(variate(*args), expected)
+
+ def test_von_mises_range(self):
+ # Issue 17149: von mises variates were not consistently in the
+ # range [0, 2*PI].
+ g = random.Random()
+ N = 100
+ for mu in 0.0, 0.1, 3.1, 6.2:
+ for kappa in 0.0, 2.3, 500.0:
+ for _ in range(N):
+ sample = g.vonmisesvariate(mu, kappa)
+ self.assertTrue(
+ 0 <= sample <= random.TWOPI,
+ msg=("vonmisesvariate({}, {}) produced a result {} out"
+ " of range [0, 2*pi]").format(mu, kappa, sample))
+
+ def test_von_mises_large_kappa(self):
+ # Issue #17141: vonmisesvariate() used to hang for large kappas
+ random.vonmisesvariate(0, 1e15)
+ random.vonmisesvariate(0, 1e100)
+
class TestModule(unittest.TestCase):
def testMagicConstants(self):
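# A quick standalone check mirroring the new von Mises cases above: samples
# should always land in [0, 2*pi], including for kappa values that previously
# produced out-of-range results (issue 17149) or hung (issue #17141 for very
# large kappa).  TWOPI is recomputed here to keep the sketch self-contained.
import math
import random

TWOPI = 2 * math.pi
g = random.Random(12345)
for kappa in (0.0, 2.3, 500.0, 1e15):
    for _ in range(100):
        sample = g.vonmisesvariate(3.1, kappa)
        assert 0.0 <= sample <= TWOPI, sample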
diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py
index 1f73967..d769288 100644
--- a/Lib/test/test_re.py
+++ b/Lib/test/test_re.py
@@ -1,11 +1,15 @@
from test.test_support import verbose, run_unittest, import_module
+from test.test_support import precisionbigmemtest, _2G, cpython_only
+from test.test_support import captured_stdout
import re
from re import Scanner
+import sre_constants
import sys
import string
import traceback
from weakref import proxy
+
# Misc tests from Tim Peters' re.doc
# WARNING: Don't change details in these tests if you don't know
@@ -174,11 +178,31 @@ class ReTests(unittest.TestCase):
self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-')
self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d')
+ def test_symbolic_groups(self):
+ re.compile('(?P<a>x)(?P=a)(?(a)y)')
+ re.compile('(?P<a1>x)(?P=a1)(?(a1)y)')
+ self.assertRaises(re.error, re.compile, '(?P<a>)(?P<a>)')
+ self.assertRaises(re.error, re.compile, '(?Px)')
+ self.assertRaises(re.error, re.compile, '(?P=)')
+ self.assertRaises(re.error, re.compile, '(?P=1)')
+ self.assertRaises(re.error, re.compile, '(?P=a)')
+ self.assertRaises(re.error, re.compile, '(?P=a1)')
+ self.assertRaises(re.error, re.compile, '(?P=a.)')
+ self.assertRaises(re.error, re.compile, '(?P<)')
+ self.assertRaises(re.error, re.compile, '(?P<>)')
+ self.assertRaises(re.error, re.compile, '(?P<1>)')
+ self.assertRaises(re.error, re.compile, '(?P<a.>)')
+ self.assertRaises(re.error, re.compile, '(?())')
+ self.assertRaises(re.error, re.compile, '(?(a))')
+ self.assertRaises(re.error, re.compile, '(?(1a))')
+ self.assertRaises(re.error, re.compile, '(?(a.))')
+
def test_symbolic_refs(self):
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a a>', 'xx')
+ self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<1a1>', 'xx')
self.assertRaises(IndexError, re.sub, '(?P<a>x)', '\g<ab>', 'xx')
self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')
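# Standalone sketch of the named-group syntax the new test_symbolic_groups
# case above validates: (?P<name>...) defines a group, (?P=name) is a
# backreference to it, and (?(name)yes|no) branches on whether it matched.
import re

quoted = re.compile(r'(?P<q>["\']?)\w+(?P=q)')          # optionally quoted word
assert quoted.match('"spam"')
assert quoted.match('spam')
assert quoted.match('"spam') is None                    # unbalanced quote

balanced = re.compile(r'(?P<open>\()?\d+(?(open)\))')   # optional parentheses
assert balanced.match('(42)')
assert balanced.match('42')
assert balanced.match('(42') is None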
@@ -373,11 +397,45 @@ class ReTests(unittest.TestCase):
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.UNICODE).group(0), "1aa! a")
+ def test_string_boundaries(self):
+ # See http://bugs.python.org/issue10713
+ self.assertEqual(re.search(r"\b(abc)\b", "abc").group(1),
+ "abc")
+ # There's a word boundary at the start of a string.
+ self.assertTrue(re.match(r"\b", "abc"))
+ # A non-empty string includes a non-boundary zero-length match.
+ self.assertTrue(re.search(r"\B", "abc"))
+ # There is no non-boundary match at the start of a string.
+ self.assertFalse(re.match(r"\B", "abc"))
+ # However, an empty string contains no word boundaries, and also no
+ # non-boundaries.
+ self.assertEqual(re.search(r"\B", ""), None)
+ # This one is questionable and different from the perlre behaviour,
+ # but describes current behavior.
+ self.assertEqual(re.search(r"\b", ""), None)
+ # A single word-character string has two boundaries, but no
+ # non-boundary gaps.
+ self.assertEqual(len(re.findall(r"\b", "a")), 2)
+ self.assertEqual(len(re.findall(r"\B", "a")), 0)
+ # If there are no words, there are no boundaries
+ self.assertEqual(len(re.findall(r"\b", " ")), 0)
+ self.assertEqual(len(re.findall(r"\b", " ")), 0)
+ # Can match around the whitespace.
+ self.assertEqual(len(re.findall(r"\B", " ")), 2)
+
def test_bigcharset(self):
self.assertEqual(re.match(u"([\u2222\u2223])",
u"\u2222").group(1), u"\u2222")
self.assertEqual(re.match(u"([\u2222\u2223])",
u"\u2222", re.UNICODE).group(1), u"\u2222")
+ r = u'[%s]' % u''.join(map(unichr, range(256, 2**16, 255)))
+ self.assertEqual(re.match(r, u"\uff01", re.UNICODE).group(), u"\uff01")
+
+ def test_big_codesize(self):
+ # Issue #1160
+ r = re.compile('|'.join(('%d'%x for x in range(10000))))
+ self.assertIsNotNone(r.match('1000'))
+ self.assertIsNotNone(r.match('9999'))
def test_anyall(self):
self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0),
@@ -574,6 +632,15 @@ class ReTests(unittest.TestCase):
self.assertEqual(re.match('(x)*y', 50000*'x'+'y').group(1), 'x')
self.assertEqual(re.match('(x)*?y', 50000*'x'+'y').group(1), 'x')
+ def test_unlimited_zero_width_repeat(self):
+ # Issue #9669
+ self.assertIsNone(re.match(r'(?:a?)*y', 'z'))
+ self.assertIsNone(re.match(r'(?:a?)+y', 'z'))
+ self.assertIsNone(re.match(r'(?:a?){2,}y', 'z'))
+ self.assertIsNone(re.match(r'(?:a?)*?y', 'z'))
+ self.assertIsNone(re.match(r'(?:a?)+?y', 'z'))
+ self.assertIsNone(re.match(r'(?:a?){2,}?y', 'z'))
+
def test_scanner(self):
def s_ident(scanner, token): return token
def s_operator(scanner, token): return "op%s" % token
@@ -634,7 +701,7 @@ class ReTests(unittest.TestCase):
try:
unicode
except NameError:
- return # no problem if we have no unicode
+ self.skipTest('no problem if we have no unicode')
class my_unicode(unicode): pass
pat = re.compile(my_unicode("abc"))
self.assertEqual(pat.match("xyz"), None)
@@ -648,7 +715,7 @@ class ReTests(unittest.TestCase):
try:
unicode
except NameError:
- return # no problem if we have no unicode
+ self.skipTest('no problem if we have no unicode')
self.assertTrue(re.compile('bug_926075') is not
re.compile(eval("u'bug_926075'")))
@@ -656,7 +723,7 @@ class ReTests(unittest.TestCase):
try:
unicode
except NameError:
- pass
+ self.skipTest('no problem if we have no unicode')
pattern = eval('u"[\u002E\u3002\uFF0E\uFF61]"')
self.assertEqual(re.compile(pattern).split("a.b.c"),
['a','b','c'])
@@ -757,6 +824,129 @@ class ReTests(unittest.TestCase):
self.assertRaises(TypeError, re.finditer, "a", {})
self.assertRaises(OverflowError, _sre.compile, "abc", 0, [long_overflow])
+ def test_compile(self):
+ # Test return value when given string and pattern as parameter
+ pattern = re.compile('random pattern')
+ self.assertIsInstance(pattern, re._pattern_type)
+ same_pattern = re.compile(pattern)
+ self.assertIsInstance(same_pattern, re._pattern_type)
+ self.assertIs(same_pattern, pattern)
+ # Test behaviour when not given a string or pattern as parameter
+ self.assertRaises(TypeError, re.compile, 0)
+
+ def test_bug_13899(self):
+ # Issue #13899: re pattern r"[\A]" should work like "A" but matches
+ # nothing. Ditto B and Z.
+ self.assertEqual(re.findall(r'[\A\B\b\C\Z]', 'AB\bCZ'),
+ ['A', 'B', '\b', 'C', 'Z'])
+
+ @precisionbigmemtest(size=_2G, memuse=1)
+ def test_large_search(self, size):
+ # Issue #10182: indices were 32-bit-truncated.
+ s = 'a' * size
+ m = re.search('$', s)
+ self.assertIsNotNone(m)
+ self.assertEqual(m.start(), size)
+ self.assertEqual(m.end(), size)
+
+ # The huge memuse is because of re.sub() using a list and a join()
+ # to create the replacement result.
+ @precisionbigmemtest(size=_2G, memuse=16 + 2)
+ def test_large_subn(self, size):
+ # Issue #10182: indices were 32-bit-truncated.
+ s = 'a' * size
+ r, n = re.subn('', '', s)
+ self.assertEqual(r, s)
+ self.assertEqual(n, size + 1)
+
+
+ def test_repeat_minmax_overflow(self):
+ # Issue #13169
+ string = "x" * 100000
+ self.assertEqual(re.match(r".{65535}", string).span(), (0, 65535))
+ self.assertEqual(re.match(r".{,65535}", string).span(), (0, 65535))
+ self.assertEqual(re.match(r".{65535,}?", string).span(), (0, 65535))
+ self.assertEqual(re.match(r".{65536}", string).span(), (0, 65536))
+ self.assertEqual(re.match(r".{,65536}", string).span(), (0, 65536))
+ self.assertEqual(re.match(r".{65536,}?", string).span(), (0, 65536))
+ # 2**128 should be big enough to overflow both SRE_CODE and Py_ssize_t.
+ self.assertRaises(OverflowError, re.compile, r".{%d}" % 2**128)
+ self.assertRaises(OverflowError, re.compile, r".{,%d}" % 2**128)
+ self.assertRaises(OverflowError, re.compile, r".{%d,}?" % 2**128)
+ self.assertRaises(OverflowError, re.compile, r".{%d,%d}" % (2**129, 2**128))
+
+ @cpython_only
+ def test_repeat_minmax_overflow_maxrepeat(self):
+ try:
+ from _sre import MAXREPEAT
+ except ImportError:
+ self.skipTest('requires _sre.MAXREPEAT constant')
+ string = "x" * 100000
+ self.assertIsNone(re.match(r".{%d}" % (MAXREPEAT - 1), string))
+ self.assertEqual(re.match(r".{,%d}" % (MAXREPEAT - 1), string).span(),
+ (0, 100000))
+ self.assertIsNone(re.match(r".{%d,}?" % (MAXREPEAT - 1), string))
+ self.assertRaises(OverflowError, re.compile, r".{%d}" % MAXREPEAT)
+ self.assertRaises(OverflowError, re.compile, r".{,%d}" % MAXREPEAT)
+ self.assertRaises(OverflowError, re.compile, r".{%d,}?" % MAXREPEAT)
+
+ def test_backref_group_name_in_exception(self):
+ # Issue 17341: Poor error message when compiling invalid regex
+ with self.assertRaisesRegexp(sre_constants.error, '<foo>'):
+ re.compile('(?P=<foo>)')
+
+ def test_group_name_in_exception(self):
+ # Issue 17341: Poor error message when compiling invalid regex
+ with self.assertRaisesRegexp(sre_constants.error, '\?foo'):
+ re.compile('(?P<?foo>)')
+
+ def test_issue17998(self):
+ for reps in '*', '+', '?', '{1}':
+ for mod in '', '?':
+ pattern = '.' + reps + mod + 'yz'
+ self.assertEqual(re.compile(pattern, re.S).findall('xyz'),
+ ['xyz'], msg=pattern)
+ pattern = pattern.encode()
+ self.assertEqual(re.compile(pattern, re.S).findall(b'xyz'),
+ [b'xyz'], msg=pattern)
+
+
+ def test_bug_2537(self):
+ # issue 2537: empty submatches
+ for outer_op in ('{0,}', '*', '+', '{1,187}'):
+ for inner_op in ('{0,}', '*', '?'):
+ r = re.compile("^((x|y)%s)%s" % (inner_op, outer_op))
+ m = r.match("xyyzy")
+ self.assertEqual(m.group(0), "xyy")
+ self.assertEqual(m.group(1), "")
+ self.assertEqual(m.group(2), "y")
+
+ def test_debug_flag(self):
+ with captured_stdout() as out:
+ re.compile('foo', re.DEBUG)
+ self.assertEqual(out.getvalue().splitlines(),
+ ['literal 102', 'literal 111', 'literal 111'])
+ # Debug output is output again even a second time (bypassing
+ # the cache -- issue #20426).
+ with captured_stdout() as out:
+ re.compile('foo', re.DEBUG)
+ self.assertEqual(out.getvalue().splitlines(),
+ ['literal 102', 'literal 111', 'literal 111'])
+
+ def test_keyword_parameters(self):
+ # Issue #20283: Accepting the string keyword parameter.
+ pat = re.compile(r'(ab)')
+ self.assertEqual(
+ pat.match(string='abracadabra', pos=7, endpos=10).span(), (7, 9))
+ self.assertEqual(
+ pat.search(string='abracadabra', pos=3, endpos=10).span(), (7, 9))
+ self.assertEqual(
+ pat.findall(string='abracadabra', pos=3, endpos=10), ['ab'])
+ self.assertEqual(
+ pat.split(string='abracadabra', maxsplit=1),
+ ['', 'ab', 'racadabra'])
+
+
def run_re_tests():
from test.re_tests import tests, SUCCEED, FAIL, SYNTAX_ERROR
if verbose:
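# Sketch of the behaviour locked in by test_keyword_parameters above (issue
# #20283): the compiled-pattern methods accept string, pos and endpos as
# keyword arguments.  pos/endpos restrict the searched region without slicing,
# so the reported indices stay relative to the whole string.
import re

word = re.compile(r'\w+')
text = 'eggs and spam'
m = word.search(string=text, pos=9, endpos=13)
assert m.group() == 'spam' and m.span() == (9, 13)
assert word.findall(string=text, pos=5) == ['and', 'spam']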
diff --git a/Lib/test/test_readline.py b/Lib/test/test_readline.py
index 11045c8..945c7f4 100644
--- a/Lib/test/test_readline.py
+++ b/Lib/test/test_readline.py
@@ -12,6 +12,10 @@ from test.test_support import run_unittest, import_module
readline = import_module('readline')
class TestHistoryManipulation (unittest.TestCase):
+
+ @unittest.skipIf(not hasattr(readline, 'clear_history'),
+ "The history update test cannot be run because the "
+ "clear_history method is not available.")
def testHistoryUpdates(self):
readline.clear_history()
diff --git a/Lib/test/test_repr.py b/Lib/test/test_repr.py
index 8b20acd..3e1e890 100644
--- a/Lib/test/test_repr.py
+++ b/Lib/test/test_repr.py
@@ -130,10 +130,10 @@ class ReprTests(unittest.TestCase):
def test_file(self):
fp = open(unittest.__file__)
self.assertTrue(repr(fp).startswith(
- "<open file '%s', mode 'r' at 0x" % unittest.__file__))
+ "<open file %r, mode 'r' at 0x" % unittest.__file__))
fp.close()
self.assertTrue(repr(fp).startswith(
- "<closed file '%s', mode 'r' at 0x" % unittest.__file__))
+ "<closed file %r, mode 'r' at 0x" % unittest.__file__))
def test_lambda(self):
self.assertTrue(repr(lambda x: x).startswith(
@@ -179,8 +179,15 @@ class ReprTests(unittest.TestCase):
self.assertTrue(repr(x).startswith('<read-only buffer for 0x'))
def test_cell(self):
- # XXX Hmm? How to get at a cell object?
- pass
+ def get_cell():
+ x = 42
+ def inner():
+ return x
+ return inner
+ x = get_cell().__closure__[0]
+ self.assertRegexpMatches(repr(x), r'<cell at 0x[0-9A-Fa-f]+: '
+ r'int object at 0x[0-9A-Fa-f]+>')
+ self.assertRegexpMatches(r(x), r'<cell at.*\.\.\..*>')
def test_descriptors(self):
eq = self.assertEqual
@@ -261,6 +268,7 @@ class foo(object):
eq(repr(foo.foo),
"<class '%s.foo'>" % foo.__name__)
+ @unittest.skip('need a suitable object')
def test_object(self):
# XXX Test the repr of a type with a really long tp_name but with no
# tp_repr. WIBNI we had ::Inline? :)
@@ -302,6 +310,7 @@ class aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
'<bound method aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.amethod of <%s.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa instance at 0x' \
% (qux.__name__,) ))
+ @unittest.skip('needs a built-in function with a really long name')
def test_builtin_function(self):
# XXX test built-in functions and methods with really long names
pass
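# Sketch of the trick the rewritten test_cell above uses to obtain a cell
# object: a variable captured by a closure shows up in the inner function's
# __closure__ tuple as a cell, and its repr names the contained object.
def make_cell():
    captured = 42
    def inner():
        return captured
    return inner

cell = make_cell().__closure__[0]
assert cell.cell_contents == 42
# The repr looks like "<cell at 0x...: int object at 0x...>".
assert repr(cell).startswith('<cell at ')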
diff --git a/Lib/test/test_resource.py b/Lib/test/test_resource.py
index 52692a7..de29d3b 100644
--- a/Lib/test/test_resource.py
+++ b/Lib/test/test_resource.py
@@ -18,62 +18,60 @@ class ResourceTest(unittest.TestCase):
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
- pass
- else:
- # RLIMIT_FSIZE should be RLIM_INFINITY, which will be a really big
- # number on a platform with large file support. On these platforms,
- # we need to test that the get/setrlimit functions properly convert
- # the number to a C long long and that the conversion doesn't raise
- # an error.
- self.assertEqual(resource.RLIM_INFINITY, max)
- resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
+ self.skipTest('RLIMIT_FSIZE not available')
+ # RLIMIT_FSIZE should be RLIM_INFINITY, which will be a really big
+ # number on a platform with large file support. On these platforms,
+ # we need to test that the get/setrlimit functions properly convert
+ # the number to a C long long and that the conversion doesn't raise
+ # an error.
+ self.assertEqual(resource.RLIM_INFINITY, max)
+ resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
def test_fsize_enforced(self):
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
- pass
- else:
- # Check to see what happens when the RLIMIT_FSIZE is small. Some
- # versions of Python were terminated by an uncaught SIGXFSZ, but
- # pythonrun.c has been fixed to ignore that exception. If so, the
- # write() should return EFBIG when the limit is exceeded.
+ self.skipTest('RLIMIT_FSIZE not available')
+ # Check to see what happens when the RLIMIT_FSIZE is small. Some
+ # versions of Python were terminated by an uncaught SIGXFSZ, but
+ # pythonrun.c has been fixed to ignore that exception. If so, the
+ # write() should return EFBIG when the limit is exceeded.
- # At least one platform has an unlimited RLIMIT_FSIZE and attempts
- # to change it raise ValueError instead.
+ # At least one platform has an unlimited RLIMIT_FSIZE and attempts
+ # to change it raise ValueError instead.
+ try:
try:
+ resource.setrlimit(resource.RLIMIT_FSIZE, (1024, max))
+ limit_set = True
+ except ValueError:
+ limit_set = False
+ f = open(test_support.TESTFN, "wb")
+ try:
+ f.write("X" * 1024)
try:
- resource.setrlimit(resource.RLIMIT_FSIZE, (1024, max))
- limit_set = True
- except ValueError:
- limit_set = False
- f = open(test_support.TESTFN, "wb")
- try:
- f.write("X" * 1024)
- try:
- f.write("Y")
+ f.write("Y")
+ f.flush()
+ # On some systems (e.g., Ubuntu on hppa) the flush()
+ # doesn't always cause the exception, but the close()
+ # does eventually. Try flushing several times in
+ # an attempt to ensure the file is really synced and
+ # the exception raised.
+ for i in range(5):
+ time.sleep(.1)
f.flush()
- # On some systems (e.g., Ubuntu on hppa) the flush()
- # doesn't always cause the exception, but the close()
- # does eventually. Try flushing several times in
- # an attempt to ensure the file is really synced and
- # the exception raised.
- for i in range(5):
- time.sleep(.1)
- f.flush()
- except IOError:
- if not limit_set:
- raise
- if limit_set:
- # Close will attempt to flush the byte we wrote
- # Restore limit first to avoid getting a spurious error
- resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
- finally:
- f.close()
- finally:
+ except IOError:
+ if not limit_set:
+ raise
if limit_set:
+ # Close will attempt to flush the byte we wrote
+ # Restore limit first to avoid getting a spurious error
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
- test_support.unlink(test_support.TESTFN)
+ finally:
+ f.close()
+ finally:
+ if limit_set:
+ resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
+ test_support.unlink(test_support.TESTFN)
def test_fsize_toobig(self):
# Be sure that setrlimit is checking for really large values
@@ -81,16 +79,15 @@ class ResourceTest(unittest.TestCase):
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
+ self.skipTest('RLIMIT_FSIZE not available')
+ try:
+ resource.setrlimit(resource.RLIMIT_FSIZE, (too_big, max))
+ except (OverflowError, ValueError):
+ pass
+ try:
+ resource.setrlimit(resource.RLIMIT_FSIZE, (max, too_big))
+ except (OverflowError, ValueError):
pass
- else:
- try:
- resource.setrlimit(resource.RLIMIT_FSIZE, (too_big, max))
- except (OverflowError, ValueError):
- pass
- try:
- resource.setrlimit(resource.RLIMIT_FSIZE, (max, too_big))
- except (OverflowError, ValueError):
- pass
def test_getrusage(self):
self.assertRaises(TypeError, resource.getrusage)
@@ -103,6 +100,22 @@ class ResourceTest(unittest.TestCase):
except (ValueError, AttributeError):
pass
+ # Issue 6083: Reference counting bug
+ def test_setrusage_refcount(self):
+ try:
+ limits = resource.getrlimit(resource.RLIMIT_CPU)
+ except AttributeError:
+ self.skipTest('RLIMIT_CPU not available')
+ class BadSequence:
+ def __len__(self):
+ return 2
+ def __getitem__(self, key):
+ if key in (0, 1):
+ return len(tuple(range(1000000)))
+ raise IndexError
+
+ resource.setrlimit(resource.RLIMIT_CPU, BadSequence())
+
def test_main(verbose=None):
test_support.run_unittest(ResourceTest)
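# Standalone sketch of the getrlimit/setrlimit pattern the rewritten
# ResourceTest above relies on: read the current (soft, hard) pair, and write
# it back afterwards so the process is left exactly as it was.  POSIX-only;
# the hasattr check mirrors the skipTest guards added above.
import resource

if hasattr(resource, 'RLIMIT_FSIZE'):
    soft, hard = resource.getrlimit(resource.RLIMIT_FSIZE)
    try:
        resource.setrlimit(resource.RLIMIT_FSIZE, (soft, hard))   # no-op set
    finally:
        resource.setrlimit(resource.RLIMIT_FSIZE, (soft, hard))   # restore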
diff --git a/Lib/test/test_robotparser.py b/Lib/test/test_robotparser.py
index b3d4a46..36ac941 100644
--- a/Lib/test/test_robotparser.py
+++ b/Lib/test/test_robotparser.py
@@ -2,6 +2,12 @@ import unittest, StringIO, robotparser
from test import test_support
from urllib2 import urlopen, HTTPError
+HAVE_HTTPS = True
+try:
+ from urllib2 import HTTPSHandler
+except ImportError:
+ HAVE_HTTPS = False
+
class RobotTestCase(unittest.TestCase):
def __init__(self, index, parser, url, good, agent):
unittest.TestCase.__init__(self)
@@ -228,6 +234,18 @@ bad = ['/some/path']
RobotTest(15, doc, good, bad)
+# 16. Empty query (issue #17403). Normalizing the url first.
+doc = """
+User-agent: *
+Allow: /some/path?
+Disallow: /another/path?
+"""
+
+good = ['/some/path?']
+bad = ['/another/path?']
+
+RobotTest(16, doc, good, bad)
+
class NetworkTestCase(unittest.TestCase):
@@ -257,6 +275,7 @@ class NetworkTestCase(unittest.TestCase):
self.skipTest('%s is unavailable' % url)
self.assertEqual(parser.can_fetch("*", robots_url), False)
+ @unittest.skipUnless(HAVE_HTTPS, 'need SSL support to download license')
def testPythonOrg(self):
test_support.requires('network')
with test_support.transient_internet('www.python.org'):
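# Standalone sketch of the new rule-16 case above (issue #17403): with the
# normalization fix, Allow/Disallow rules and fetched URLs that end in an
# empty query ("?") compare consistently.  The host below is only an example.
import robotparser

rp = robotparser.RobotFileParser()
rp.parse("""\
User-agent: *
Allow: /some/path?
Disallow: /another/path?
""".splitlines())
assert rp.can_fetch("*", "http://example.com/some/path?")
assert not rp.can_fetch("*", "http://example.com/another/path?")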
diff --git a/Lib/test/test_runpy.py b/Lib/test/test_runpy.py
index dbcb23d..76858d5 100644
--- a/Lib/test/test_runpy.py
+++ b/Lib/test/test_runpy.py
@@ -170,11 +170,12 @@ class RunModuleTest(unittest.TestCase):
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
- if verbose: print "Running from compiled:", mod_name
- d2 = run_module(mod_name) # Read from bytecode
- self.assertIn("x", d2)
- self.assertTrue(d2["x"] == 1)
- del d2 # Ensure __loader__ entry doesn't keep file open
+ if not sys.dont_write_bytecode:
+ if verbose: print "Running from compiled:", mod_name
+ d2 = run_module(mod_name) # Read from bytecode
+ self.assertIn("x", d2)
+ self.assertTrue(d2["x"] == 1)
+ del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
@@ -192,11 +193,12 @@ class RunModuleTest(unittest.TestCase):
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
- if verbose: print "Running from compiled:", pkg_name
- d2 = run_module(pkg_name) # Read from bytecode
- self.assertIn("x", d2)
- self.assertTrue(d2["x"] == 1)
- del d2 # Ensure __loader__ entry doesn't keep file open
+ if not sys.dont_write_bytecode:
+ if verbose: print "Running from compiled:", pkg_name
+ d2 = run_module(pkg_name) # Read from bytecode
+ self.assertIn("x", d2)
+ self.assertTrue(d2["x"] == 1)
+ del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, pkg_name)
if verbose: print "Package executed successfully"
@@ -246,13 +248,14 @@ from ..uncle.cousin import nephew
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
- if verbose: print "Running from compiled:", mod_name
- d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
- self.assertIn("__package__", d2)
- self.assertTrue(d2["__package__"] == pkg_name)
- self.assertIn("sibling", d2)
- self.assertIn("nephew", d2)
- del d2 # Ensure __loader__ entry doesn't keep file open
+ if not sys.dont_write_bytecode:
+ if verbose: print "Running from compiled:", mod_name
+ d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
+ self.assertIn("__package__", d2)
+ self.assertTrue(d2["__package__"] == pkg_name)
+ self.assertIn("sibling", d2)
+ self.assertIn("nephew", d2)
+ del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
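# The sys.dont_write_bytecode guards added above skip the "run from compiled"
# half of these tests when the interpreter will not write .pyc files (python
# -B, or PYTHONDONTWRITEBYTECODE set), since there is then no bytecode file to
# re-run.  Minimal sketch of the flag next to runpy; 'types' is just a handy
# side-effect-free module to execute.
import runpy
import sys

namespace = runpy.run_module('types')     # executing from source always works
assert 'FunctionType' in namespace
if not sys.dont_write_bytecode:
    pass   # only in this branch could a test re-run the module from its .pyc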
diff --git a/Lib/test/test_sax.py b/Lib/test/test_sax.py
index adb9305..86638a2 100644
--- a/Lib/test/test_sax.py
+++ b/Lib/test/test_sax.py
@@ -14,12 +14,28 @@ from xml.sax.expatreader import create_parser
from xml.sax.handler import feature_namespaces
from xml.sax.xmlreader import InputSource, AttributesImpl, AttributesNSImpl
from cStringIO import StringIO
+import io
+import os.path
+import shutil
+import test.test_support as support
from test.test_support import findfile, run_unittest
import unittest
TEST_XMLFILE = findfile("test.xml", subdir="xmltestdata")
TEST_XMLFILE_OUT = findfile("test.xml.out", subdir="xmltestdata")
+supports_unicode_filenames = True
+if not os.path.supports_unicode_filenames:
+ try:
+ support.TESTFN_UNICODE.encode(support.TESTFN_ENCODING)
+ except (AttributeError, UnicodeError, TypeError):
+ # Either the file system encoding is None, or the file name
+ # cannot be encoded in the file system encoding.
+ supports_unicode_filenames = False
+requires_unicode_filenames = unittest.skipUnless(
+ supports_unicode_filenames,
+ 'Requires unicode filenames support')
+
ns_uri = "http://www.python.org/xml-ns/saxtest/"
class XmlTestBase(unittest.TestCase):
@@ -155,9 +171,9 @@ class SaxutilsTest(unittest.TestCase):
start = '<?xml version="1.0" encoding="iso-8859-1"?>\n'
-class XmlgenTest(unittest.TestCase):
+class XmlgenTest:
def test_xmlgen_basic(self):
- result = StringIO()
+ result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
@@ -167,7 +183,7 @@ class XmlgenTest(unittest.TestCase):
self.assertEqual(result.getvalue(), start + "<doc></doc>")
def test_xmlgen_content(self):
- result = StringIO()
+ result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
@@ -179,7 +195,7 @@ class XmlgenTest(unittest.TestCase):
self.assertEqual(result.getvalue(), start + "<doc>huhei</doc>")
def test_xmlgen_pi(self):
- result = StringIO()
+ result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
@@ -191,7 +207,7 @@ class XmlgenTest(unittest.TestCase):
self.assertEqual(result.getvalue(), start + "<?test data?><doc></doc>")
def test_xmlgen_content_escape(self):
- result = StringIO()
+ result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
@@ -204,7 +220,7 @@ class XmlgenTest(unittest.TestCase):
start + "<doc>&lt;huhei&amp;</doc>")
def test_xmlgen_attr_escape(self):
- result = StringIO()
+ result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
@@ -223,8 +239,41 @@ class XmlgenTest(unittest.TestCase):
"<e a=\"'&quot;\"></e>"
"<e a=\"&#10;&#13;&#9;\"></e></doc>"))
+ def test_xmlgen_encoding(self):
+ encodings = ('iso-8859-15', 'utf-8',
+ 'utf-16be', 'utf-16le',
+ 'utf-32be', 'utf-32le')
+ for encoding in encodings:
+ result = self.ioclass()
+ gen = XMLGenerator(result, encoding=encoding)
+
+ gen.startDocument()
+ gen.startElement("doc", {"a": u'\u20ac'})
+ gen.characters(u"\u20ac")
+ gen.endElement("doc")
+ gen.endDocument()
+
+ self.assertEqual(result.getvalue(), (
+ u'<?xml version="1.0" encoding="%s"?>\n'
+ u'<doc a="\u20ac">\u20ac</doc>' % encoding
+ ).encode(encoding, 'xmlcharrefreplace'))
+
+ def test_xmlgen_unencodable(self):
+ result = self.ioclass()
+ gen = XMLGenerator(result, encoding='ascii')
+
+ gen.startDocument()
+ gen.startElement("doc", {"a": u'\u20ac'})
+ gen.characters(u"\u20ac")
+ gen.endElement("doc")
+ gen.endDocument()
+
+ self.assertEqual(result.getvalue(),
+ '<?xml version="1.0" encoding="ascii"?>\n'
+ '<doc a="&#8364;">&#8364;</doc>')
+
def test_xmlgen_ignorable(self):
- result = StringIO()
+ result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
@@ -235,8 +284,28 @@ class XmlgenTest(unittest.TestCase):
self.assertEqual(result.getvalue(), start + "<doc> </doc>")
+ def test_xmlgen_encoding_bytes(self):
+ encodings = ('iso-8859-15', 'utf-8',
+ 'utf-16be', 'utf-16le',
+ 'utf-32be', 'utf-32le')
+ for encoding in encodings:
+ result = self.ioclass()
+ gen = XMLGenerator(result, encoding=encoding)
+
+ gen.startDocument()
+ gen.startElement("doc", {"a": u'\u20ac'})
+ gen.characters(u"\u20ac".encode(encoding))
+ gen.ignorableWhitespace(" ".encode(encoding))
+ gen.endElement("doc")
+ gen.endDocument()
+
+ self.assertEqual(result.getvalue(), (
+ u'<?xml version="1.0" encoding="%s"?>\n'
+ u'<doc a="\u20ac">\u20ac </doc>' % encoding
+ ).encode(encoding, 'xmlcharrefreplace'))
+
def test_xmlgen_ns(self):
- result = StringIO()
+ result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
@@ -254,7 +323,7 @@ class XmlgenTest(unittest.TestCase):
ns_uri))
def test_1463026_1(self):
- result = StringIO()
+ result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
@@ -265,7 +334,7 @@ class XmlgenTest(unittest.TestCase):
self.assertEqual(result.getvalue(), start+'<a b="c"></a>')
def test_1463026_2(self):
- result = StringIO()
+ result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
@@ -278,7 +347,7 @@ class XmlgenTest(unittest.TestCase):
self.assertEqual(result.getvalue(), start+'<a xmlns="qux"></a>')
def test_1463026_3(self):
- result = StringIO()
+ result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
@@ -294,7 +363,7 @@ class XmlgenTest(unittest.TestCase):
def test_5027_1(self):
# The xml prefix (as in xml:lang below) is reserved and bound by
# definition to http://www.w3.org/XML/1998/namespace. XMLGenerator had
- # a bug whereby a KeyError is thrown because this namespace is missing
+ # a bug whereby a KeyError is raised because this namespace is missing
# from a dictionary.
#
# This test demonstrates the bug by parsing a document.
@@ -306,7 +375,7 @@ class XmlgenTest(unittest.TestCase):
parser = make_parser()
parser.setFeature(feature_namespaces, True)
- result = StringIO()
+ result = self.ioclass()
gen = XMLGenerator(result)
parser.setContentHandler(gen)
parser.parse(test_xml)
@@ -320,12 +389,12 @@ class XmlgenTest(unittest.TestCase):
def test_5027_2(self):
# The xml prefix (as in xml:lang below) is reserved and bound by
# definition to http://www.w3.org/XML/1998/namespace. XMLGenerator had
- # a bug whereby a KeyError is thrown because this namespace is missing
+ # a bug whereby a KeyError is raised because this namespace is missing
# from a dictionary.
#
# This test demonstrates the bug by direct manipulation of the
# XMLGenerator.
- result = StringIO()
+ result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
@@ -345,6 +414,44 @@ class XmlgenTest(unittest.TestCase):
'<a:g2 xml:lang="en">Hello</a:g2>'
'</a:g1>'))
+ def test_no_close_file(self):
+ result = self.ioclass()
+ def func(out):
+ gen = XMLGenerator(out)
+ gen.startDocument()
+ gen.startElement("doc", {})
+ func(result)
+ self.assertFalse(result.closed)
+
+ def test_xmlgen_fragment(self):
+ result = self.ioclass()
+ gen = XMLGenerator(result)
+
+ # Don't call gen.startDocument()
+ gen.startElement("foo", {"a": "1.0"})
+ gen.characters("Hello")
+ gen.endElement("foo")
+ gen.startElement("bar", {"b": "2.0"})
+ gen.endElement("bar")
+ # Don't call gen.endDocument()
+
+ self.assertEqual(result.getvalue(),
+ '<foo a="1.0">Hello</foo><bar b="2.0"></bar>')
+
+class StringXmlgenTest(XmlgenTest, unittest.TestCase):
+ ioclass = StringIO
+
+class BytesIOXmlgenTest(XmlgenTest, unittest.TestCase):
+ ioclass = io.BytesIO
+
+class WriterXmlgenTest(XmlgenTest, unittest.TestCase):
+ class ioclass(list):
+ write = list.append
+ closed = False
+
+ def getvalue(self):
+ return b''.join(self)
+
class XMLFilterBaseTest(unittest.TestCase):
def test_filter_basic(self):
@@ -384,6 +491,21 @@ class ExpatReaderTest(XmlTestBase):
self.assertEqual(result.getvalue(), xml_test_out)
+ @requires_unicode_filenames
+ def test_expat_file_unicode(self):
+ fname = support.TESTFN_UNICODE
+ shutil.copyfile(TEST_XMLFILE, fname)
+ self.addCleanup(support.unlink, fname)
+
+ parser = create_parser()
+ result = StringIO()
+ xmlgen = XMLGenerator(result)
+
+ parser.setContentHandler(xmlgen)
+ parser.parse(open(fname))
+
+ self.assertEqual(result.getvalue(), xml_test_out)
+
# ===== DTDHandler support
class TestDTDHandler:
@@ -523,6 +645,21 @@ class ExpatReaderTest(XmlTestBase):
self.assertEqual(result.getvalue(), xml_test_out)
+ @requires_unicode_filenames
+ def test_expat_inpsource_sysid_unicode(self):
+ fname = support.TESTFN_UNICODE
+ shutil.copyfile(TEST_XMLFILE, fname)
+ self.addCleanup(support.unlink, fname)
+
+ parser = create_parser()
+ result = StringIO()
+ xmlgen = XMLGenerator(result)
+
+ parser.setContentHandler(xmlgen)
+ parser.parse(InputSource(fname))
+
+ self.assertEqual(result.getvalue(), xml_test_out)
+
def test_expat_inpsource_stream(self):
parser = create_parser()
result = StringIO()
@@ -596,6 +733,21 @@ class ExpatReaderTest(XmlTestBase):
self.assertEqual(parser.getSystemId(), TEST_XMLFILE)
self.assertEqual(parser.getPublicId(), None)
+ @requires_unicode_filenames
+ def test_expat_locator_withinfo_unicode(self):
+ fname = support.TESTFN_UNICODE
+ shutil.copyfile(TEST_XMLFILE, fname)
+ self.addCleanup(support.unlink, fname)
+
+ result = StringIO()
+ xmlgen = XMLGenerator(result)
+ parser = create_parser()
+ parser.setContentHandler(xmlgen)
+ parser.parse(fname)
+
+ self.assertEqual(parser.getSystemId(), fname)
+ self.assertEqual(parser.getPublicId(), None)
+
# ===========================================================================
#
@@ -744,7 +896,9 @@ class XmlReaderTest(XmlTestBase):
def test_main():
run_unittest(MakeParserTest,
SaxutilsTest,
- XmlgenTest,
+ StringXmlgenTest,
+ BytesIOXmlgenTest,
+ WriterXmlgenTest,
ExpatReaderTest,
ErrorReportingTest,
XmlReaderTest)
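
The test_sax change above converts XmlgenTest into a mixin and runs it once per output class (cStringIO, io.BytesIO, and a small list-backed writer). A stripped-down sketch of that mixin parametrization pattern, with purely illustrative names, might look like this:

import io
import unittest
from cStringIO import StringIO

class WriteChecks(object):
    # Mixin: not a TestCase itself, so it is never collected on its own;
    # each concrete subclass supplies the ioclass to exercise.
    def test_write_roundtrip(self):
        out = self.ioclass()
        out.write(b"<doc/>")
        self.assertEqual(out.getvalue(), b"<doc/>")

class StringIOWriteChecks(WriteChecks, unittest.TestCase):
    ioclass = StringIO

class BytesIOWriteChecks(WriteChecks, unittest.TestCase):
    ioclass = io.BytesIO

if __name__ == "__main__":
    unittest.main()
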
diff --git a/Lib/test/test_select.py b/Lib/test/test_select.py
index 79b249b..175bbda 100644
--- a/Lib/test/test_select.py
+++ b/Lib/test/test_select.py
@@ -49,6 +49,15 @@ class SelectTestCase(unittest.TestCase):
self.fail('Unexpected return values from select():', rfd, wfd, xfd)
p.close()
+ # Issue 16230: Crash on select resized list
+ def test_select_mutated(self):
+ a = []
+ class F:
+ def fileno(self):
+ del a[-1]
+ return sys.__stdout__.fileno()
+ a[:] = [F()] * 10
+ self.assertEqual(select.select([], a, []), ([], a[:5], []))
def test_main():
test_support.run_unittest(SelectTestCase)
diff --git a/Lib/test/test_set.py b/Lib/test/test_set.py
index 8db26ed..610be7c 100644
--- a/Lib/test/test_set.py
+++ b/Lib/test/test_set.py
@@ -561,10 +561,10 @@ class TestSet(TestJointOps):
s = None
self.assertRaises(ReferenceError, str, p)
- # C API test only available in a debug build
- if hasattr(set, "test_c_api"):
- def test_c_api(self):
- self.assertEqual(set().test_c_api(), True)
+ @unittest.skipUnless(hasattr(set, "test_c_api"),
+ 'C API test only available in a debug build')
+ def test_c_api(self):
+ self.assertEqual(set().test_c_api(), True)
class SetSubclass(set):
pass
@@ -1017,8 +1017,6 @@ class TestBinaryOps(unittest.TestCase):
# without calling __cmp__.
self.assertEqual(cmp(a, a), 0)
- self.assertRaises(TypeError, cmp, a, 12)
- self.assertRaises(TypeError, cmp, "abc", a)
#==============================================================================
@@ -1269,17 +1267,6 @@ class TestOnlySetsInBinaryOps(unittest.TestCase):
self.assertEqual(self.other != self.set, True)
self.assertEqual(self.set != self.other, True)
- def test_ge_gt_le_lt(self):
- self.assertRaises(TypeError, lambda: self.set < self.other)
- self.assertRaises(TypeError, lambda: self.set <= self.other)
- self.assertRaises(TypeError, lambda: self.set > self.other)
- self.assertRaises(TypeError, lambda: self.set >= self.other)
-
- self.assertRaises(TypeError, lambda: self.other < self.set)
- self.assertRaises(TypeError, lambda: self.other <= self.set)
- self.assertRaises(TypeError, lambda: self.other > self.set)
- self.assertRaises(TypeError, lambda: self.other >= self.set)
-
def test_update_operator(self):
try:
self.set |= self.other
@@ -1392,18 +1379,6 @@ class TestOnlySetsDict(TestOnlySetsInBinaryOps):
#------------------------------------------------------------------------------
-class TestOnlySetsOperator(TestOnlySetsInBinaryOps):
- def setUp(self):
- self.set = set((1, 2, 3))
- self.other = operator.add
- self.otherIsIterable = False
-
- def test_ge_gt_le_lt(self):
- with test_support.check_py3k_warnings():
- super(TestOnlySetsOperator, self).test_ge_gt_le_lt()
-
-#------------------------------------------------------------------------------
-
class TestOnlySetsTuple(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
@@ -1615,7 +1590,7 @@ class TestVariousIteratorArgs(unittest.TestCase):
for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint):
for g in (G, I, Ig, L, R):
expected = meth(data)
- actual = meth(G(data))
+ actual = meth(g(data))
if isinstance(expected, bool):
self.assertEqual(actual, expected)
else:
@@ -1801,7 +1776,6 @@ def test_main(verbose=None):
TestSubsetNonOverlap,
TestOnlySetsNumeric,
TestOnlySetsDict,
- TestOnlySetsOperator,
TestOnlySetsTuple,
TestOnlySetsString,
TestOnlySetsGenerator,
diff --git a/Lib/test/test_sets.py b/Lib/test/test_sets.py
index 0754ed7..319bdcb 100644
--- a/Lib/test/test_sets.py
+++ b/Lib/test/test_sets.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
import unittest, operator, copy, pickle, random
from test import test_support
diff --git a/Lib/test/test_shutil.py b/Lib/test/test_shutil.py
index b4e5415..0e81145 100644
--- a/Lib/test/test_shutil.py
+++ b/Lib/test/test_shutil.py
@@ -7,6 +7,7 @@ import sys
import stat
import os
import os.path
+import errno
from os.path import splitdrive
from distutils.spawn import find_executable, spawn
from shutil import (_make_tarball, _make_zipfile, make_archive,
@@ -77,33 +78,34 @@ class TestShutil(unittest.TestCase):
filename = tempfile.mktemp()
self.assertRaises(OSError, shutil.rmtree, filename)
- # See bug #1071513 for why we don't run this on cygwin
- # and bug #1076467 for why we don't run this as root.
- if (hasattr(os, 'chmod') and sys.platform[:6] != 'cygwin'
- and not (hasattr(os, 'geteuid') and os.geteuid() == 0)):
- def test_on_error(self):
- self.errorState = 0
- os.mkdir(TESTFN)
- self.childpath = os.path.join(TESTFN, 'a')
- f = open(self.childpath, 'w')
- f.close()
- old_dir_mode = os.stat(TESTFN).st_mode
- old_child_mode = os.stat(self.childpath).st_mode
- # Make unwritable.
- os.chmod(self.childpath, stat.S_IREAD)
- os.chmod(TESTFN, stat.S_IREAD)
-
- shutil.rmtree(TESTFN, onerror=self.check_args_to_onerror)
- # Test whether onerror has actually been called.
- self.assertEqual(self.errorState, 2,
- "Expected call to onerror function did not happen.")
-
- # Make writable again.
- os.chmod(TESTFN, old_dir_mode)
- os.chmod(self.childpath, old_child_mode)
-
- # Clean up.
- shutil.rmtree(TESTFN)
+ @unittest.skipUnless(hasattr(os, 'chmod'), 'requires os.chmod()')
+ @unittest.skipIf(sys.platform[:6] == 'cygwin',
+ "This test can't be run on Cygwin (issue #1071513).")
+ @unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
+ "This test can't be run reliably as root (issue #1076467).")
+ def test_on_error(self):
+ self.errorState = 0
+ os.mkdir(TESTFN)
+ self.childpath = os.path.join(TESTFN, 'a')
+ f = open(self.childpath, 'w')
+ f.close()
+ old_dir_mode = os.stat(TESTFN).st_mode
+ old_child_mode = os.stat(self.childpath).st_mode
+ # Make unwritable.
+ os.chmod(self.childpath, stat.S_IREAD)
+ os.chmod(TESTFN, stat.S_IREAD)
+
+ shutil.rmtree(TESTFN, onerror=self.check_args_to_onerror)
+ # Test whether onerror has actually been called.
+ self.assertEqual(self.errorState, 2,
+ "Expected call to onerror function did not happen.")
+
+ # Make writable again.
+ os.chmod(TESTFN, old_dir_mode)
+ os.chmod(self.childpath, old_child_mode)
+
+ # Clean up.
+ shutil.rmtree(TESTFN)
def check_args_to_onerror(self, func, arg, exc):
# test_rmtree_errors deliberately runs rmtree
@@ -307,37 +309,67 @@ class TestShutil(unittest.TestCase):
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
- if hasattr(os, "mkfifo"):
- # Issue #3002: copyfile and copytree block indefinitely on named pipes
- def test_copyfile_named_pipe(self):
- os.mkfifo(TESTFN)
- try:
- self.assertRaises(shutil.SpecialFileError,
- shutil.copyfile, TESTFN, TESTFN2)
- self.assertRaises(shutil.SpecialFileError,
- shutil.copyfile, __file__, TESTFN)
- finally:
- os.remove(TESTFN)
+ # Issue #3002: copyfile and copytree block indefinitely on named pipes
+ @unittest.skipUnless(hasattr(os, "mkfifo"), 'requires os.mkfifo()')
+ def test_copyfile_named_pipe(self):
+ os.mkfifo(TESTFN)
+ try:
+ self.assertRaises(shutil.SpecialFileError,
+ shutil.copyfile, TESTFN, TESTFN2)
+ self.assertRaises(shutil.SpecialFileError,
+ shutil.copyfile, __file__, TESTFN)
+ finally:
+ os.remove(TESTFN)
- def test_copytree_named_pipe(self):
- os.mkdir(TESTFN)
+ @unittest.skipUnless(hasattr(os, "mkfifo"), 'requires os.mkfifo()')
+ def test_copytree_named_pipe(self):
+ os.mkdir(TESTFN)
+ try:
+ subdir = os.path.join(TESTFN, "subdir")
+ os.mkdir(subdir)
+ pipe = os.path.join(subdir, "mypipe")
+ os.mkfifo(pipe)
try:
- subdir = os.path.join(TESTFN, "subdir")
- os.mkdir(subdir)
- pipe = os.path.join(subdir, "mypipe")
- os.mkfifo(pipe)
- try:
- shutil.copytree(TESTFN, TESTFN2)
- except shutil.Error as e:
- errors = e.args[0]
- self.assertEqual(len(errors), 1)
- src, dst, error_msg = errors[0]
- self.assertEqual("`%s` is a named pipe" % pipe, error_msg)
- else:
- self.fail("shutil.Error should have been raised")
- finally:
- shutil.rmtree(TESTFN, ignore_errors=True)
- shutil.rmtree(TESTFN2, ignore_errors=True)
+ shutil.copytree(TESTFN, TESTFN2)
+ except shutil.Error as e:
+ errors = e.args[0]
+ self.assertEqual(len(errors), 1)
+ src, dst, error_msg = errors[0]
+ self.assertEqual("`%s` is a named pipe" % pipe, error_msg)
+ else:
+ self.fail("shutil.Error should have been raised")
+ finally:
+ shutil.rmtree(TESTFN, ignore_errors=True)
+ shutil.rmtree(TESTFN2, ignore_errors=True)
+
+ @unittest.skipUnless(hasattr(os, 'chflags') and
+ hasattr(errno, 'EOPNOTSUPP') and
+ hasattr(errno, 'ENOTSUP'),
+ "requires os.chflags, EOPNOTSUPP & ENOTSUP")
+ def test_copystat_handles_harmless_chflags_errors(self):
+ tmpdir = self.mkdtemp()
+ file1 = os.path.join(tmpdir, 'file1')
+ file2 = os.path.join(tmpdir, 'file2')
+ self.write_file(file1, 'xxx')
+ self.write_file(file2, 'xxx')
+
+ def make_chflags_raiser(err):
+ ex = OSError()
+
+ def _chflags_raiser(path, flags):
+ ex.errno = err
+ raise ex
+ return _chflags_raiser
+ old_chflags = os.chflags
+ try:
+ for err in errno.EOPNOTSUPP, errno.ENOTSUP:
+ os.chflags = make_chflags_raiser(err)
+ shutil.copystat(file1, file2)
+ # assert other errors break it
+ os.chflags = make_chflags_raiser(errno.EOPNOTSUPP + errno.ENOTSUP)
+ self.assertRaises(OSError, shutil.copystat, file1, file2)
+ finally:
+ os.chflags = old_chflags
@unittest.skipUnless(zlib, "requires zlib")
def test_make_tarball(self):
@@ -619,16 +651,14 @@ class TestMove(unittest.TestCase):
def test_move_file_other_fs(self):
# Move a file to an existing dir on another filesystem.
if not self.dir_other_fs:
- # skip
- return
+ self.skipTest('dir on other filesystem not available')
self._check_move_file(self.src_file, self.file_other_fs,
self.file_other_fs)
def test_move_file_to_dir_other_fs(self):
# Move a file to another location on another filesystem.
if not self.dir_other_fs:
- # skip
- return
+ self.skipTest('dir on other filesystem not available')
self._check_move_file(self.src_file, self.dir_other_fs,
self.file_other_fs)
@@ -646,8 +676,7 @@ class TestMove(unittest.TestCase):
def test_move_dir_other_fs(self):
# Move a dir to another location on another filesystem.
if not self.dir_other_fs:
- # skip
- return
+ self.skipTest('dir on other filesystem not available')
dst_dir = tempfile.mktemp(dir=self.dir_other_fs)
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
@@ -665,11 +694,19 @@ class TestMove(unittest.TestCase):
def test_move_dir_to_dir_other_fs(self):
# Move a dir inside an existing dir on another filesystem.
if not self.dir_other_fs:
- # skip
- return
+ self.skipTest('dir on other filesystem not available')
self._check_move_dir(self.src_dir, self.dir_other_fs,
os.path.join(self.dir_other_fs, os.path.basename(self.src_dir)))
+ def test_move_dir_sep_to_dir(self):
+ self._check_move_dir(self.src_dir + os.path.sep, self.dst_dir,
+ os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
+
+ @unittest.skipUnless(os.path.altsep, 'requires os.path.altsep')
+ def test_move_dir_altsep_to_dir(self):
+ self._check_move_dir(self.src_dir + os.path.altsep, self.dst_dir,
+ os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
+
def test_existing_file_inside_dest_dir(self):
# A file with the same name inside the destination dir already exists.
with open(self.dst_file, "wb"):
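
The test_copystat_handles_harmless_chflags_errors test above monkey-patches os.chflags to fail with particular errno values; the behaviour it asserts is roughly the following (a sketch of the idea under test, not the actual shutil.copystat code):

import errno
import os

def copy_flags_best_effort(src, dst):
    # Propagate BSD file flags, but swallow "operation not supported"
    # errors (EOPNOTSUPP/ENOTSUP) the way the test expects copystat() to.
    if not hasattr(os, 'chflags'):
        return
    flags = getattr(os.stat(src), 'st_flags', None)
    if flags is None:
        return
    harmless = tuple(getattr(errno, name) for name in ('EOPNOTSUPP', 'ENOTSUP')
                     if hasattr(errno, name))
    try:
        os.chflags(dst, flags)
    except OSError as why:
        if why.errno not in harmless:
            raise
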
diff --git a/Lib/test/test_signal.py b/Lib/test/test_signal.py
index 8feb145..7483f64 100644
--- a/Lib/test/test_signal.py
+++ b/Lib/test/test_signal.py
@@ -109,7 +109,7 @@ class InterProcessSignalTests(unittest.TestCase):
# This wait should be interrupted by the signal's exception.
self.wait(child)
time.sleep(1) # Give the signal time to be delivered.
- self.fail('HandlerBCalled exception not thrown')
+ self.fail('HandlerBCalled exception not raised')
except HandlerBCalled:
self.assertTrue(self.b_called)
self.assertFalse(self.a_called)
@@ -148,7 +148,7 @@ class InterProcessSignalTests(unittest.TestCase):
# test-running process from all the signals. It then
# communicates with that child process over a pipe and
# re-raises information about any exceptions the child
- # throws. The real work happens in self.run_test().
+ # raises. The real work happens in self.run_test().
os_done_r, os_done_w = os.pipe()
with closing(os.fdopen(os_done_r)) as done_r, \
closing(os.fdopen(os_done_w, 'w')) as done_w:
@@ -227,6 +227,13 @@ class WindowsSignalTests(unittest.TestCase):
signal.signal(7, handler)
+class WakeupFDTests(unittest.TestCase):
+
+ def test_invalid_fd(self):
+ fd = test_support.make_bad_fd()
+ self.assertRaises(ValueError, signal.set_wakeup_fd, fd)
+
+
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class WakeupSignalTests(unittest.TestCase):
TIMEOUT_FULL = 10
@@ -485,8 +492,9 @@ class ItimerTest(unittest.TestCase):
def test_main():
test_support.run_unittest(BasicSignalTests, InterProcessSignalTests,
- WakeupSignalTests, SiginterruptTest,
- ItimerTest, WindowsSignalTests)
+ WakeupFDTests, WakeupSignalTests,
+ SiginterruptTest, ItimerTest,
+ WindowsSignalTests)
if __name__ == "__main__":
diff --git a/Lib/test/test_site.py b/Lib/test/test_site.py
index f4b5fc6..0898449 100644
--- a/Lib/test/test_site.py
+++ b/Lib/test/test_site.py
@@ -343,6 +343,7 @@ class ImportSideEffectTests(unittest.TestCase):
self.assertNotIn(path, seen_paths)
seen_paths.add(path)
+ @unittest.skip('test not implemented')
def test_add_build_dir(self):
# Test that the build directory's Modules directory is used when it
# should be.
diff --git a/Lib/test/test_smtplib.py b/Lib/test/test_smtplib.py
index 81806c9..aa90eab 100644
--- a/Lib/test/test_smtplib.py
+++ b/Lib/test/test_smtplib.py
@@ -77,7 +77,7 @@ class GeneralTests(unittest.TestCase):
smtp.close()
def testTimeoutDefault(self):
- self.assertTrue(socket.getdefaulttimeout() is None)
+ self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port)
@@ -87,13 +87,13 @@ class GeneralTests(unittest.TestCase):
smtp.close()
def testTimeoutNone(self):
- self.assertTrue(socket.getdefaulttimeout() is None)
+ self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
- self.assertTrue(smtp.sock.gettimeout() is None)
+ self.assertIsNone(smtp.sock.gettimeout())
smtp.close()
def testTimeoutValue(self):
diff --git a/Lib/test/test_smtpnet.py b/Lib/test/test_smtpnet.py
index 2dc39eb..00ce9dd 100644
--- a/Lib/test/test_smtpnet.py
+++ b/Lib/test/test_smtpnet.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
import unittest
from test import test_support
import smtplib
diff --git a/Lib/test/test_socket.py b/Lib/test/test_socket.py
index 13d0eb4..5d5005b 100644
--- a/Lib/test/test_socket.py
+++ b/Lib/test/test_socket.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
import unittest
from test import test_support
@@ -330,28 +328,29 @@ class GeneralModuleTests(unittest.TestCase):
ip = socket.gethostbyname(hostname)
except socket.error:
# Probably name lookup wasn't set up right; skip this test
- return
+ self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except socket.error:
# Probably a similar problem as above; skip this test
- return
+ self.skipTest('address lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
+ @unittest.skipUnless(hasattr(sys, 'getrefcount'),
+ 'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
- if hasattr(sys, "getrefcount"):
- try:
- # On some versions, this loses a reference
- orig = sys.getrefcount(__name__)
- socket.getnameinfo(__name__,0)
- except TypeError:
- self.assertEqual(sys.getrefcount(__name__), orig,
- "socket.getnameinfo loses a reference")
+ try:
+ # On some versions, this loses a reference
+ orig = sys.getrefcount(__name__)
+ socket.getnameinfo(__name__,0)
+ except TypeError:
+ self.assertEqual(sys.getrefcount(__name__), orig,
+ "socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
@@ -414,7 +413,7 @@ class GeneralModuleTests(unittest.TestCase):
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
- # Try udp, but don't barf it it doesn't exist
+ # Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except socket.error:
@@ -458,17 +457,17 @@ class GeneralModuleTests(unittest.TestCase):
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
+ @unittest.skipUnless(hasattr(socket, 'inet_aton'),
+ 'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
- if not hasattr(socket, 'inet_aton'):
- return # No inet_aton, nothing to check
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual('\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual('\xff'*4, socket.inet_aton('255.255.255.255'))
+ @unittest.skipUnless(hasattr(socket, 'inet_pton'),
+ 'test needs socket.inet_pton()')
def testIPv4toString(self):
- if not hasattr(socket, 'inet_pton'):
- return # No inet_pton() on this platform
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
@@ -483,15 +482,15 @@ class GeneralModuleTests(unittest.TestCase):
self.assertEqual('\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual('\xff\xff\xff\xff', g('255.255.255.255'))
+ @unittest.skipUnless(hasattr(socket, 'inet_pton'),
+ 'test needs socket.inet_pton()')
def testIPv6toString(self):
- if not hasattr(socket, 'inet_pton'):
- return # No inet_pton() on this platform
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
- return
+ self.skipTest('IPv6 not available')
except ImportError:
- return
+ self.skipTest('could not import needed symbols from socket')
f = lambda a: inet_pton(AF_INET6, a)
self.assertEqual('\x00' * 16, f('::'))
@@ -502,9 +501,9 @@ class GeneralModuleTests(unittest.TestCase):
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
+ @unittest.skipUnless(hasattr(socket, 'inet_ntop'),
+ 'test needs socket.inet_ntop()')
def testStringToIPv4(self):
- if not hasattr(socket, 'inet_ntop'):
- return # No inet_ntop() on this platform
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
@@ -517,15 +516,15 @@ class GeneralModuleTests(unittest.TestCase):
self.assertEqual('170.85.170.85', g('\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g('\xff\xff\xff\xff'))
+ @unittest.skipUnless(hasattr(socket, 'inet_ntop'),
+ 'test needs socket.inet_ntop()')
def testStringToIPv6(self):
- if not hasattr(socket, 'inet_ntop'):
- return # No inet_ntop() on this platform
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
- return
+ self.skipTest('IPv6 not available')
except ImportError:
- return
+ self.skipTest('could not import needed symbols from socket')
f = lambda a: inet_ntop(AF_INET6, a)
self.assertEqual('::', f('\x00' * 16))
@@ -565,7 +564,7 @@ class GeneralModuleTests(unittest.TestCase):
my_ip_addr = socket.gethostbyname(socket.gethostname())
except socket.error:
# Probably name lookup wasn't set up right; skip this test
- return
+ self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
@@ -644,9 +643,10 @@ class GeneralModuleTests(unittest.TestCase):
if SUPPORTS_IPV6:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
- # port number or None
+ # port number (int or long), or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
+ socket.getaddrinfo(HOST, 80L)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, None, socket.AF_INET)
@@ -663,6 +663,15 @@ class GeneralModuleTests(unittest.TestCase):
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
+ # Issue 17269: test workaround for OS X platform bug segfault
+ if hasattr(socket, 'AI_NUMERICSERV'):
+ try:
+ # The arguments here are undefined and the call may succeed
+ # or fail. All we care about here is that it doesn't segfault.
+ socket.getaddrinfo("localhost", None, 0, 0, 0,
+ socket.AI_NUMERICSERV)
+ except socket.gaierror:
+ pass
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
@@ -683,11 +692,12 @@ class GeneralModuleTests(unittest.TestCase):
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
- c.sendall(b"x" * (1024**2))
+ c.sendall(b"x" * test_support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
- self.assertRaises(socket.timeout, c.sendall, b"x" * (1024**2))
+ self.assertRaises(socket.timeout, c.sendall,
+ b"x" * test_support.SOCK_MAX_SIZE)
finally:
signal.signal(signal.SIGALRM, old_alarm)
c.close()
@@ -699,11 +709,20 @@ class GeneralModuleTests(unittest.TestCase):
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
- def testListenBacklog0(self):
+ def test_listen_backlog(self):
+ for backlog in 0, -1:
+ srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ srv.bind((HOST, 0))
+ srv.listen(backlog)
+ srv.close()
+
+ @test_support.cpython_only
+ def test_listen_backlog_overflow(self):
+ # Issue 15989
+ import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
- # backlog = 0
- srv.listen(0)
+ self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(SUPPORTS_IPV6, 'IPv6 required for this test.')
@@ -773,10 +792,10 @@ class BasicTCPTest(SocketConnectedTest):
big_chunk = 'f' * 2048
self.serv_conn.sendall(big_chunk)
+ @unittest.skipUnless(hasattr(socket, 'fromfd'),
+ 'socket.fromfd not available')
def testFromFd(self):
# Testing fromfd()
- if not hasattr(socket, "fromfd"):
- return # On Windows, this doesn't exist
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
@@ -809,6 +828,19 @@ class BasicTCPTest(SocketConnectedTest):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
+ testShutdown_overflow = test_support.cpython_only(testShutdown)
+
+ @test_support.cpython_only
+ def _testShutdown_overflow(self):
+ import _testcapi
+ self.serv_conn.send(MSG)
+ # Issue 15989
+ self.assertRaises(OverflowError, self.serv_conn.shutdown,
+ _testcapi.INT_MAX + 1)
+ self.assertRaises(OverflowError, self.serv_conn.shutdown,
+ 2 + (_testcapi.UINT_MAX + 1))
+ self.serv_conn.shutdown(2)
+
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
@@ -854,6 +886,8 @@ class TCPCloserTest(ThreadedTCPSocketTest):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
+@unittest.skipUnless(hasattr(socket, 'socketpair'),
+ 'test needs socket.socketpair()')
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
@@ -882,7 +916,10 @@ class NonBlockingTCPTests(ThreadedTCPSocketTest):
def testSetBlocking(self):
# Testing whether set blocking works
- self.serv.setblocking(0)
+ self.serv.setblocking(True)
+ self.assertIsNone(self.serv.gettimeout())
+ self.serv.setblocking(False)
+ self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
@@ -894,6 +931,19 @@ class NonBlockingTCPTests(ThreadedTCPSocketTest):
def _testSetBlocking(self):
pass
+ @test_support.cpython_only
+ def testSetBlocking_overflow(self):
+ # Issue 15989
+ import _testcapi
+ if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
+ self.skipTest('needs UINT_MAX < ULONG_MAX')
+ self.serv.setblocking(False)
+ self.assertEqual(self.serv.gettimeout(), 0.0)
+ self.serv.setblocking(_testcapi.UINT_MAX + 1)
+ self.assertIsNone(self.serv.gettimeout())
+
+ _testSetBlocking_overflow = test_support.cpython_only(_testSetBlocking)
+
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
@@ -961,8 +1011,8 @@ class FileObjectClassTestCase(SocketConnectedTest):
def tearDown(self):
self.serv_file.close()
self.assertTrue(self.serv_file.closed)
- self.serv_file = None
SocketConnectedTest.tearDown(self)
+ self.serv_file = None
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
@@ -1150,6 +1200,64 @@ class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
+ class SocketMemo(object):
+ """A wrapper to keep track of sent data, needed to examine write behaviour"""
+ def __init__(self, sock):
+ self._sock = sock
+ self.sent = []
+
+ def send(self, data, flags=0):
+ n = self._sock.send(data, flags)
+ self.sent.append(data[:n])
+ return n
+
+ def sendall(self, data, flags=0):
+ self._sock.sendall(data, flags)
+ self.sent.append(data)
+
+ def __getattr__(self, attr):
+ return getattr(self._sock, attr)
+
+ def getsent(self):
+ return [e.tobytes() if isinstance(e, memoryview) else e for e in self.sent]
+
+ def setUp(self):
+ FileObjectClassTestCase.setUp(self)
+ self.serv_file._sock = self.SocketMemo(self.serv_file._sock)
+
+ def testLinebufferedWrite(self):
+ # Write two lines, in small chunks
+ msg = MSG.strip()
+ print >> self.serv_file, msg,
+ print >> self.serv_file, msg
+
+ # second line:
+ print >> self.serv_file, msg,
+ print >> self.serv_file, msg,
+ print >> self.serv_file, msg
+
+ # third line
+ print >> self.serv_file, ''
+
+ self.serv_file.flush()
+
+ msg1 = "%s %s\n"%(msg, msg)
+ msg2 = "%s %s %s\n"%(msg, msg, msg)
+ msg3 = "\n"
+ self.assertEqual(self.serv_file._sock.getsent(), [msg1, msg2, msg3])
+
+ def _testLinebufferedWrite(self):
+ msg = MSG.strip()
+ msg1 = "%s %s\n"%(msg, msg)
+ msg2 = "%s %s %s\n"%(msg, msg, msg)
+ msg3 = "\n"
+ l1 = self.cli_file.readline()
+ self.assertEqual(l1, msg1)
+ l2 = self.cli_file.readline()
+ self.assertEqual(l2, msg2)
+ l3 = self.cli_file.readline()
+ self.assertEqual(l3, msg3)
+
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
@@ -1197,7 +1305,26 @@ class NetworkConnectionNoServer(unittest.TestCase):
port = test_support.find_unused_port()
with self.assertRaises(socket.error) as cm:
socket.create_connection((HOST, port))
- self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
+
+ # Issue #16257: create_connection() calls getaddrinfo() against
+ # 'localhost'. This may result in an IPV6 addr being returned
+ # as well as an IPV4 one:
+ # >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
+ # >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
+ # (26, 2, 0, '', ('::1', 41230, 0, 0))]
+ #
+ # create_connection() enumerates through all the addresses returned
+ # and if it doesn't successfully bind to any of them, it propagates
+ # the last exception it encountered.
+ #
+ # On Solaris, ENETUNREACH is returned in this circumstance instead
+ # of ECONNREFUSED. So, if that errno exists, add it to our list of
+ # expected errnos.
+ expected_errnos = [ errno.ECONNREFUSED, ]
+ if hasattr(errno, 'ENETUNREACH'):
+ expected_errnos.append(errno.ENETUNREACH)
+
+ self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
@@ -1355,12 +1482,12 @@ class TCPTimeoutTest(SocketTCPTest):
if not ok:
self.fail("accept() returned success when we did not expect it")
+ @unittest.skipUnless(hasattr(signal, 'alarm'),
+ 'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
- if not hasattr(signal, "alarm"):
- return # can only test on *nix
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
@@ -1420,6 +1547,7 @@ class TestExceptions(unittest.TestCase):
self.assertTrue(issubclass(socket.gaierror, socket.error))
self.assertTrue(issubclass(socket.timeout, socket.error))
+@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
@@ -1515,6 +1643,23 @@ class BufferIOTest(SocketConnectedTest):
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
+ def testRecvFromIntoSmallBuffer(self):
+ # See issue #20246.
+ buf = bytearray(8)
+ self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
+
+ def _testRecvFromIntoSmallBuffer(self):
+ with test_support.check_py3k_warnings():
+ buf = buffer(MSG)
+ self.serv_conn.send(buf)
+
+ def testRecvFromIntoEmptyBuffer(self):
+ buf = bytearray()
+ self.cli_conn.recvfrom_into(buf)
+ self.cli_conn.recvfrom_into(buf, 0)
+
+ _testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
+
TIPC_STYPE = 2000
TIPC_LOWER = 200
@@ -1534,11 +1679,11 @@ def isTipcAvailable():
for line in f:
if line.startswith("tipc "):
return True
- if test_support.verbose:
- print "TIPC module is not loaded, please 'sudo modprobe tipc'"
return False
-class TIPCTest (unittest.TestCase):
+@unittest.skipUnless(isTipcAvailable(),
+ "TIPC module is not loaded, please 'sudo modprobe tipc'")
+class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
@@ -1558,7 +1703,9 @@ class TIPCTest (unittest.TestCase):
self.assertEqual(msg, MSG)
-class TIPCThreadableTest (unittest.TestCase, ThreadableTest):
+@unittest.skipUnless(isTipcAvailable(),
+ "TIPC module is not loaded, please 'sudo modprobe tipc'")
+class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
@@ -1611,13 +1758,9 @@ def test_main():
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
])
- if hasattr(socket, "socketpair"):
- tests.append(BasicSocketPairTest)
- if sys.platform == 'linux2':
- tests.append(TestLinuxAbstractNamespace)
- if isTipcAvailable():
- tests.append(TIPCTest)
- tests.append(TIPCThreadableTest)
+ tests.append(BasicSocketPairTest)
+ tests.append(TestLinuxAbstractNamespace)
+ tests.extend([TIPCTest, TIPCThreadableTest])
thread_info = test_support.threading_setup()
test_support.run_unittest(*tests)
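
The Issue #16257 comment above explains that create_connection() walks every address getaddrinfo() returns and, if all of them fail, propagates the last error it saw, which is why the test accepts ENETUNREACH as well as ECONNREFUSED. A simplified sketch of that enumeration (illustrative only, not the stdlib implementation):

import socket

def connect_any(host, port, timeout=None):
    # Try each (family, address) pair in turn; the errno the caller sees
    # is whatever the last failing address produced.
    last_err = None
    for family, socktype, proto, _canon, addr in socket.getaddrinfo(
            host, port, 0, socket.SOCK_STREAM):
        sock = None
        try:
            sock = socket.socket(family, socktype, proto)
            if timeout is not None:
                sock.settimeout(timeout)
            sock.connect(addr)
            return sock
        except socket.error as err:
            last_err = err
            if sock is not None:
                sock.close()
    if last_err is not None:
        raise last_err
    raise socket.error("getaddrinfo returned no addresses for %r" % (host,))
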
diff --git a/Lib/test/test_socketserver.py b/Lib/test/test_socketserver.py
index 08fb033..83f5e3f 100644
--- a/Lib/test/test_socketserver.py
+++ b/Lib/test/test_socketserver.py
@@ -8,6 +8,8 @@ import os
import select
import signal
import socket
+import select
+import errno
import tempfile
import unittest
import SocketServer
@@ -25,15 +27,21 @@ TEST_STR = "hello world\n"
HOST = test.test_support.HOST
HAVE_UNIX_SOCKETS = hasattr(socket, "AF_UNIX")
+requires_unix_sockets = unittest.skipUnless(HAVE_UNIX_SOCKETS,
+ 'requires Unix sockets')
HAVE_FORKING = hasattr(os, "fork") and os.name != "os2"
+requires_forking = unittest.skipUnless(HAVE_FORKING, 'requires forking')
def signal_alarm(n):
"""Call signal.alarm when it exists (i.e. not on Windows)."""
if hasattr(signal, 'alarm'):
signal.alarm(n)
+# Remember real select() to avoid interference with mocking
+_real_select = select.select
+
def receive(sock, n, timeout=20):
- r, w, x = select.select([sock], [], [], timeout)
+ r, w, x = _real_select([sock], [], [], timeout)
if sock in r:
return sock.recv(n)
else:
@@ -53,7 +61,7 @@ if HAVE_UNIX_SOCKETS:
def simple_subprocess(testcase):
pid = os.fork()
if pid == 0:
- # Don't throw an exception; it would be caught by the test harness.
+ # Don't raise an exception; it would be caught by the test harness.
os._exit(72)
yield None
pid2, status = os.waitpid(pid, 0)
@@ -183,31 +191,33 @@ class SocketServerTest(unittest.TestCase):
SocketServer.StreamRequestHandler,
self.stream_examine)
- if HAVE_FORKING:
- def test_ForkingTCPServer(self):
- with simple_subprocess(self):
- self.run_server(SocketServer.ForkingTCPServer,
- SocketServer.StreamRequestHandler,
- self.stream_examine)
-
- if HAVE_UNIX_SOCKETS:
- def test_UnixStreamServer(self):
- self.run_server(SocketServer.UnixStreamServer,
+ @requires_forking
+ def test_ForkingTCPServer(self):
+ with simple_subprocess(self):
+ self.run_server(SocketServer.ForkingTCPServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
- def test_ThreadingUnixStreamServer(self):
- self.run_server(SocketServer.ThreadingUnixStreamServer,
+ @requires_unix_sockets
+ def test_UnixStreamServer(self):
+ self.run_server(SocketServer.UnixStreamServer,
+ SocketServer.StreamRequestHandler,
+ self.stream_examine)
+
+ @requires_unix_sockets
+ def test_ThreadingUnixStreamServer(self):
+ self.run_server(SocketServer.ThreadingUnixStreamServer,
+ SocketServer.StreamRequestHandler,
+ self.stream_examine)
+
+ @requires_unix_sockets
+ @requires_forking
+ def test_ForkingUnixStreamServer(self):
+ with simple_subprocess(self):
+ self.run_server(ForkingUnixStreamServer,
SocketServer.StreamRequestHandler,
self.stream_examine)
- if HAVE_FORKING:
- def test_ForkingUnixStreamServer(self):
- with simple_subprocess(self):
- self.run_server(ForkingUnixStreamServer,
- SocketServer.StreamRequestHandler,
- self.stream_examine)
-
def test_UDPServer(self):
self.run_server(SocketServer.UDPServer,
SocketServer.DatagramRequestHandler,
@@ -218,32 +228,66 @@ class SocketServerTest(unittest.TestCase):
SocketServer.DatagramRequestHandler,
self.dgram_examine)
- if HAVE_FORKING:
- def test_ForkingUDPServer(self):
- with simple_subprocess(self):
- self.run_server(SocketServer.ForkingUDPServer,
- SocketServer.DatagramRequestHandler,
- self.dgram_examine)
+ @requires_forking
+ def test_ForkingUDPServer(self):
+ with simple_subprocess(self):
+ self.run_server(SocketServer.ForkingUDPServer,
+ SocketServer.DatagramRequestHandler,
+ self.dgram_examine)
+
+ @contextlib.contextmanager
+ def mocked_select_module(self):
+ """Mocks the select.select() call to raise EINTR for first call"""
+ old_select = select.select
+
+ class MockSelect:
+ def __init__(self):
+ self.called = 0
+
+ def __call__(self, *args):
+ self.called += 1
+ if self.called == 1:
+ # raise the exception on first call
+ raise select.error(errno.EINTR, os.strerror(errno.EINTR))
+ else:
+ # Return real select value for consecutive calls
+ return old_select(*args)
+
+ select.select = MockSelect()
+ try:
+ yield select.select
+ finally:
+ select.select = old_select
+
+ def test_InterruptServerSelectCall(self):
+ with self.mocked_select_module() as mock_select:
+ pid = self.run_server(SocketServer.TCPServer,
+ SocketServer.StreamRequestHandler,
+ self.stream_examine)
+ # Make sure select was called again:
+ self.assertGreater(mock_select.called, 1)
# Alas, on Linux (at least) recvfrom() doesn't return a meaningful
# client address so this cannot work:
- # if HAVE_UNIX_SOCKETS:
- # def test_UnixDatagramServer(self):
- # self.run_server(SocketServer.UnixDatagramServer,
- # SocketServer.DatagramRequestHandler,
- # self.dgram_examine)
+ # @requires_unix_sockets
+ # def test_UnixDatagramServer(self):
+ # self.run_server(SocketServer.UnixDatagramServer,
+ # SocketServer.DatagramRequestHandler,
+ # self.dgram_examine)
#
- # def test_ThreadingUnixDatagramServer(self):
- # self.run_server(SocketServer.ThreadingUnixDatagramServer,
- # SocketServer.DatagramRequestHandler,
- # self.dgram_examine)
+ # @requires_unix_sockets
+ # def test_ThreadingUnixDatagramServer(self):
+ # self.run_server(SocketServer.ThreadingUnixDatagramServer,
+ # SocketServer.DatagramRequestHandler,
+ # self.dgram_examine)
#
- # if HAVE_FORKING:
- # def test_ForkingUnixDatagramServer(self):
- # self.run_server(SocketServer.ForkingUnixDatagramServer,
- # SocketServer.DatagramRequestHandler,
- # self.dgram_examine)
+ # @requires_unix_sockets
+ # @requires_forking
+ # def test_ForkingUnixDatagramServer(self):
+ # self.run_server(SocketServer.ForkingUnixDatagramServer,
+ # SocketServer.DatagramRequestHandler,
+ # self.dgram_examine)
@reap_threads
def test_shutdown(self):
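
The mocked_select_module/test_InterruptServerSelectCall pair above forces one EINTR out of select.select() and then checks that the server retried; the retry loop being exercised looks roughly like this (a sketch, not the actual SocketServer source):

import errno
import select

def eintr_safe_select(rlist, wlist, xlist, timeout=None):
    # Keep calling select() until it completes without being interrupted
    # by a signal; any error other than EINTR is re-raised.
    while True:
        try:
            return select.select(rlist, wlist, xlist, timeout)
        except select.error as e:
            if e.args[0] != errno.EINTR:
                raise
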
diff --git a/Lib/test/test_spwd.py b/Lib/test/test_spwd.py
new file mode 100644
index 0000000..b637da2
--- /dev/null
+++ b/Lib/test/test_spwd.py
@@ -0,0 +1,62 @@
+import os
+import unittest
+from test import test_support
+
+spwd = test_support.import_module('spwd')
+
+
+@unittest.skipUnless(hasattr(os, 'geteuid') and os.geteuid() == 0,
+ 'root privileges required')
+class TestSpwdRoot(unittest.TestCase):
+
+ def test_getspall(self):
+ entries = spwd.getspall()
+ self.assertIsInstance(entries, list)
+ for entry in entries:
+ self.assertIsInstance(entry, spwd.struct_spwd)
+
+ def test_getspnam(self):
+ entries = spwd.getspall()
+ if not entries:
+ self.skipTest('empty shadow password database')
+ random_name = entries[0].sp_nam
+ entry = spwd.getspnam(random_name)
+ self.assertIsInstance(entry, spwd.struct_spwd)
+ self.assertEqual(entry.sp_nam, random_name)
+ self.assertEqual(entry.sp_nam, entry[0])
+ self.assertIsInstance(entry.sp_pwd, str)
+ self.assertEqual(entry.sp_pwd, entry[1])
+ self.assertIsInstance(entry.sp_lstchg, int)
+ self.assertEqual(entry.sp_lstchg, entry[2])
+ self.assertIsInstance(entry.sp_min, int)
+ self.assertEqual(entry.sp_min, entry[3])
+ self.assertIsInstance(entry.sp_max, int)
+ self.assertEqual(entry.sp_max, entry[4])
+ self.assertIsInstance(entry.sp_warn, int)
+ self.assertEqual(entry.sp_warn, entry[5])
+ self.assertIsInstance(entry.sp_inact, int)
+ self.assertEqual(entry.sp_inact, entry[6])
+ self.assertIsInstance(entry.sp_expire, int)
+ self.assertEqual(entry.sp_expire, entry[7])
+ self.assertIsInstance(entry.sp_flag, int)
+ self.assertEqual(entry.sp_flag, entry[8])
+ with self.assertRaises(KeyError) as cx:
+ spwd.getspnam('invalid user name')
+ self.assertEqual(str(cx.exception), "'getspnam(): name not found'")
+ self.assertRaises(TypeError, spwd.getspnam)
+ self.assertRaises(TypeError, spwd.getspnam, 0)
+ self.assertRaises(TypeError, spwd.getspnam, random_name, 0)
+ if test_support.have_unicode:
+ try:
+ unicode_name = unicode(random_name)
+ except UnicodeDecodeError:
+ pass
+ else:
+ self.assertEqual(spwd.getspnam(unicode_name), entry)
+
+
+def test_main():
+ test_support.run_unittest(TestSpwdRoot)
+
+if __name__ == "__main__":
+ test_main()
diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py
index 97552e8..37a0cdb 100644
--- a/Lib/test/test_ssl.py
+++ b/Lib/test/test_ssl.py
@@ -25,6 +25,7 @@ ssl = test_support.import_module("ssl")
HOST = test_support.HOST
CERTFILE = None
SVN_PYTHON_ORG_ROOT_CERT = None
+NULLBYTECERT = None
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
@@ -95,12 +96,8 @@ class BasicSocketTests(unittest.TestCase):
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
- try:
- ssl.RAND_egd(1)
- except TypeError:
- pass
- else:
- print "didn't raise TypeError"
+ self.assertRaises(TypeError, ssl.RAND_egd, 1)
+ self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
def test_parse_cert(self):
@@ -111,13 +108,12 @@ class BasicSocketTests(unittest.TestCase):
if test_support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subject'],
- ((('countryName', u'US'),),
- (('stateOrProvinceName', u'Delaware'),),
- (('localityName', u'Wilmington'),),
- (('organizationName', u'Python Software Foundation'),),
- (('organizationalUnitName', u'SSL'),),
- (('commonName', u'somemachine.python.org'),)),
+ ((('countryName', 'XY'),),
+ (('localityName', 'Castle Anthrax'),),
+ (('organizationName', 'Python Software Foundation'),),
+ (('commonName', 'localhost'),))
)
+ self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
@@ -128,6 +124,35 @@ class BasicSocketTests(unittest.TestCase):
('DNS', 'projects.forum.nokia.com'))
)
+ def test_parse_cert_CVE_2013_4238(self):
+ p = ssl._ssl._test_decode_cert(NULLBYTECERT)
+ if test_support.verbose:
+ sys.stdout.write("\n" + pprint.pformat(p) + "\n")
+ subject = ((('countryName', 'US'),),
+ (('stateOrProvinceName', 'Oregon'),),
+ (('localityName', 'Beaverton'),),
+ (('organizationName', 'Python Software Foundation'),),
+ (('organizationalUnitName', 'Python Core Development'),),
+ (('commonName', 'null.python.org\x00example.org'),),
+ (('emailAddress', 'python-dev@python.org'),))
+ self.assertEqual(p['subject'], subject)
+ self.assertEqual(p['issuer'], subject)
+ if ssl.OPENSSL_VERSION_INFO >= (0, 9, 8):
+ san = (('DNS', 'altnull.python.org\x00example.com'),
+ ('email', 'null@python.org\x00user@example.org'),
+ ('URI', 'http://null.python.org\x00http://example.org'),
+ ('IP Address', '192.0.2.1'),
+ ('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
+ else:
+ # OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
+ san = (('DNS', 'altnull.python.org\x00example.com'),
+ ('email', 'null@python.org\x00user@example.org'),
+ ('URI', 'http://null.python.org\x00http://example.org'),
+ ('IP Address', '192.0.2.1'),
+ ('IP Address', '<invalid>'))
+
+ self.assertEqual(p['subjectAltName'], san)
+
def test_DER_to_PEM(self):
with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
pem = f.read()
@@ -167,9 +192,8 @@ class BasicSocketTests(unittest.TestCase):
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t))
+ @test_support.requires_resource('network')
def test_ciphers(self):
- if not test_support.is_resource_enabled('network'):
- return
remote = ("svn.python.org", 443)
with test_support.transient_internet(remote[0]):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
@@ -208,6 +232,13 @@ class BasicSocketTests(unittest.TestCase):
self.assertRaises(socket.error, ss.send, b'x')
self.assertRaises(socket.error, ss.sendto, b'x', ('0.0.0.0', 0))
+ def test_unsupported_dtls(self):
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ self.addCleanup(s.close)
+ with self.assertRaises(NotImplementedError) as cx:
+ ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
+ self.assertEqual(str(cx.exception), "only stream sockets are supported")
+
class NetworkedTests(unittest.TestCase):
@@ -284,6 +315,34 @@ class NetworkedTests(unittest.TestCase):
finally:
s.close()
+ def test_timeout_connect_ex(self):
+ # Issue #12065: on a timeout, connect_ex() should return the original
+ # errno (mimicking the behaviour of non-SSL sockets).
+ with test_support.transient_internet("svn.python.org"):
+ s = ssl.wrap_socket(socket.socket(socket.AF_INET),
+ cert_reqs=ssl.CERT_REQUIRED,
+ ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
+ do_handshake_on_connect=False)
+ try:
+ s.settimeout(0.0000001)
+ rc = s.connect_ex(('svn.python.org', 443))
+ if rc == 0:
+ self.skipTest("svn.python.org responded too quickly")
+ self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
+ finally:
+ s.close()
+
+ def test_connect_ex_error(self):
+ with test_support.transient_internet("svn.python.org"):
+ s = ssl.wrap_socket(socket.socket(socket.AF_INET),
+ cert_reqs=ssl.CERT_REQUIRED,
+ ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
+ try:
+ self.assertEqual(errno.ECONNREFUSED,
+ s.connect_ex(("svn.python.org", 444)))
+ finally:
+ s.close()
+
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
@@ -331,19 +390,24 @@ class NetworkedTests(unittest.TestCase):
def test_get_server_certificate(self):
with test_support.transient_internet("svn.python.org"):
- pem = ssl.get_server_certificate(("svn.python.org", 443))
+ pem = ssl.get_server_certificate(("svn.python.org", 443),
+ ssl.PROTOCOL_SSLv23)
if not pem:
self.fail("No server certificate on svn.python.org:443!")
try:
- pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=CERTFILE)
+ pem = ssl.get_server_certificate(("svn.python.org", 443),
+ ssl.PROTOCOL_SSLv23,
+ ca_certs=CERTFILE)
except ssl.SSLError:
#should fail
pass
else:
self.fail("Got server certificate %s for svn.python.org!" % pem)
- pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
+ pem = ssl.get_server_certificate(("svn.python.org", 443),
+ ssl.PROTOCOL_SSLv23,
+ ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
if not pem:
self.fail("No server certificate on svn.python.org:443!")
if test_support.verbose:
@@ -355,7 +419,8 @@ class NetworkedTests(unittest.TestCase):
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
- # NOTE: https://sha256.tbs-internet.com is another possible test host
+ self.skipTest("remote host needs SNI, only available on Python 3.2+")
+ # NOTE: https://sha2.hboeck.de is another possible test host
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with test_support.transient_internet("sha256.tbs-internet.com"):
@@ -992,7 +1057,7 @@ else:
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
- try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True)
+ try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
@@ -1336,7 +1401,7 @@ else:
def test_main(verbose=False):
- global CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, NOKIACERT
+ global CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, NOKIACERT, NULLBYTECERT
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir,
"keycert.pem")
SVN_PYTHON_ORG_ROOT_CERT = os.path.join(
@@ -1344,10 +1409,13 @@ def test_main(verbose=False):
"https_svn_python_org_root.pem")
NOKIACERT = os.path.join(os.path.dirname(__file__) or os.curdir,
"nokia.pem")
+ NULLBYTECERT = os.path.join(os.path.dirname(__file__) or os.curdir,
+ "nullbytecert.pem")
if (not os.path.exists(CERTFILE) or
not os.path.exists(SVN_PYTHON_ORG_ROOT_CERT) or
- not os.path.exists(NOKIACERT)):
+ not os.path.exists(NOKIACERT) or
+ not os.path.exists(NULLBYTECERT)):
raise test_support.TestFailed("Can't read certificate files!")
tests = [BasicTests, BasicSocketTests]
diff --git a/Lib/test/test_stat.py b/Lib/test/test_stat.py
new file mode 100644
index 0000000..a71f599
--- /dev/null
+++ b/Lib/test/test_stat.py
@@ -0,0 +1,175 @@
+import unittest
+import os
+from test.test_support import TESTFN, run_unittest
+import stat
+
+class TestFilemode(unittest.TestCase):
+ file_flags = {'SF_APPEND', 'SF_ARCHIVED', 'SF_IMMUTABLE', 'SF_NOUNLINK',
+ 'SF_SNAPSHOT', 'UF_APPEND', 'UF_COMPRESSED', 'UF_HIDDEN',
+ 'UF_IMMUTABLE', 'UF_NODUMP', 'UF_NOUNLINK', 'UF_OPAQUE'}
+
+ formats = {'S_IFBLK', 'S_IFCHR', 'S_IFDIR', 'S_IFIFO', 'S_IFLNK',
+ 'S_IFREG', 'S_IFSOCK'}
+
+ format_funcs = {'S_ISBLK', 'S_ISCHR', 'S_ISDIR', 'S_ISFIFO', 'S_ISLNK',
+ 'S_ISREG', 'S_ISSOCK'}
+
+ stat_struct = {
+ 'ST_MODE': 0,
+ 'ST_INO': 1,
+ 'ST_DEV': 2,
+ 'ST_NLINK': 3,
+ 'ST_UID': 4,
+ 'ST_GID': 5,
+ 'ST_SIZE': 6,
+ 'ST_ATIME': 7,
+ 'ST_MTIME': 8,
+ 'ST_CTIME': 9}
+
+ # permission bit value are defined by POSIX
+ permission_bits = {
+ 'S_ISUID': 0o4000,
+ 'S_ISGID': 0o2000,
+ 'S_ENFMT': 0o2000,
+ 'S_ISVTX': 0o1000,
+ 'S_IRWXU': 0o700,
+ 'S_IRUSR': 0o400,
+ 'S_IREAD': 0o400,
+ 'S_IWUSR': 0o200,
+ 'S_IWRITE': 0o200,
+ 'S_IXUSR': 0o100,
+ 'S_IEXEC': 0o100,
+ 'S_IRWXG': 0o070,
+ 'S_IRGRP': 0o040,
+ 'S_IWGRP': 0o020,
+ 'S_IXGRP': 0o010,
+ 'S_IRWXO': 0o007,
+ 'S_IROTH': 0o004,
+ 'S_IWOTH': 0o002,
+ 'S_IXOTH': 0o001}
+
+ def setUp(self):
+ try:
+ os.remove(TESTFN)
+ except OSError:
+ try:
+ os.rmdir(TESTFN)
+ except OSError:
+ pass
+ tearDown = setUp
+
+ def get_mode(self, fname=TESTFN, lstat=True):
+ if lstat:
+ st_mode = os.lstat(fname).st_mode
+ else:
+ st_mode = os.stat(fname).st_mode
+ return st_mode
+
+ def assertS_IS(self, name, mode):
+ # test format, lstrip is for S_IFIFO
+ fmt = getattr(stat, "S_IF" + name.lstrip("F"))
+ self.assertEqual(stat.S_IFMT(mode), fmt)
+ # test that just one function returns true
+ testname = "S_IS" + name
+ for funcname in self.format_funcs:
+ func = getattr(stat, funcname, None)
+ if func is None:
+ if funcname == testname:
+ raise ValueError(funcname)
+ continue
+ if funcname == testname:
+ self.assertTrue(func(mode))
+ else:
+ self.assertFalse(func(mode))
+
+ def test_mode(self):
+ with open(TESTFN, 'w'):
+ pass
+ if os.name == 'posix':
+ os.chmod(TESTFN, 0o700)
+ st_mode = self.get_mode()
+ self.assertS_IS("REG", st_mode)
+ self.assertEqual(stat.S_IMODE(st_mode),
+ stat.S_IRWXU)
+
+ os.chmod(TESTFN, 0o070)
+ st_mode = self.get_mode()
+ self.assertS_IS("REG", st_mode)
+ self.assertEqual(stat.S_IMODE(st_mode),
+ stat.S_IRWXG)
+
+ os.chmod(TESTFN, 0o007)
+ st_mode = self.get_mode()
+ self.assertS_IS("REG", st_mode)
+ self.assertEqual(stat.S_IMODE(st_mode),
+ stat.S_IRWXO)
+
+ os.chmod(TESTFN, 0o444)
+ st_mode = self.get_mode()
+ self.assertS_IS("REG", st_mode)
+ self.assertEqual(stat.S_IMODE(st_mode), 0o444)
+ else:
+ os.chmod(TESTFN, 0o700)
+ st_mode = self.get_mode()
+ self.assertS_IS("REG", st_mode)
+ self.assertEqual(stat.S_IFMT(st_mode),
+ stat.S_IFREG)
+
+ def test_directory(self):
+ os.mkdir(TESTFN)
+ os.chmod(TESTFN, 0o700)
+ st_mode = self.get_mode()
+ self.assertS_IS("DIR", st_mode)
+
+ @unittest.skipUnless(hasattr(os, 'symlink'), 'os.symlink not available')
+ def test_link(self):
+ try:
+ os.symlink(os.getcwd(), TESTFN)
+ except (OSError, NotImplementedError) as err:
+ raise unittest.SkipTest(str(err))
+ else:
+ st_mode = self.get_mode()
+ self.assertS_IS("LNK", st_mode)
+
+ @unittest.skipUnless(hasattr(os, 'mkfifo'), 'os.mkfifo not available')
+ def test_fifo(self):
+ os.mkfifo(TESTFN, 0o700)
+ st_mode = self.get_mode()
+ self.assertS_IS("FIFO", st_mode)
+
+ @unittest.skipUnless(os.name == 'posix', 'requires Posix')
+ def test_devices(self):
+ if os.path.exists(os.devnull):
+ st_mode = self.get_mode(os.devnull, lstat=False)
+ self.assertS_IS("CHR", st_mode)
+ # Linux block devices; modern BSDs no longer have block devices
+ for blockdev in ("/dev/sda", "/dev/hda"):
+ if os.path.exists(blockdev):
+ st_mode = self.get_mode(blockdev, lstat=False)
+ self.assertS_IS("BLK", st_mode)
+ break
+
+ def test_module_attributes(self):
+ for key, value in self.stat_struct.items():
+ modvalue = getattr(stat, key)
+ self.assertEqual(value, modvalue, key)
+ for key, value in self.permission_bits.items():
+ modvalue = getattr(stat, key)
+ self.assertEqual(value, modvalue, key)
+ for key in self.file_flags:
+ modvalue = getattr(stat, key)
+ self.assertIsInstance(modvalue, int)
+ for key in self.formats:
+ modvalue = getattr(stat, key)
+ self.assertIsInstance(modvalue, int)
+ for key in self.format_funcs:
+ func = getattr(stat, key)
+ self.assertTrue(callable(func))
+ self.assertEqual(func(0), 0)
+
+
+def test_main():
+ run_unittest(TestFilemode)
+
+if __name__ == '__main__':
+ test_main()
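For reference, the helpers exercised by this new file split an st_mode into orthogonal pieces: S_IFMT() isolates the file-type bits, S_IMODE() isolates the permission bits, and exactly one S_IS*() predicate is true for a given mode. A minimal standalone sketch of that relationship (illustrative only, not part of the patch):

import os
import stat
import tempfile

# Create a throwaway file so the snippet stands alone.
fd, path = tempfile.mkstemp()
os.close(fd)
try:
    st_mode = os.lstat(path).st_mode
    assert stat.S_ISREG(st_mode)                  # only the REG predicate is true
    assert stat.S_IFMT(st_mode) == stat.S_IFREG   # the file-type bits
    if os.name == 'posix':                        # full permission bits are POSIX-only
        os.chmod(path, 0o640)
        assert stat.S_IMODE(os.lstat(path).st_mode) == 0o640
finally:
    os.unlink(path)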
diff --git a/Lib/test/test_str.py b/Lib/test/test_str.py
index 2ecf327..2cd7966 100644
--- a/Lib/test/test_str.py
+++ b/Lib/test/test_str.py
@@ -1,4 +1,4 @@
-
+import unittest
import struct
import sys
from test import test_support, string_tests
@@ -35,6 +35,18 @@ class StrTest(
string_tests.MixinStrUnicodeUserStringTest.test_formatting(self)
self.assertRaises(OverflowError, '%c'.__mod__, 0x1234)
+ @test_support.cpython_only
+ def test_formatting_huge_precision(self):
+ from _testcapi import INT_MAX
+ format_string = "%.{}f".format(INT_MAX + 1)
+ with self.assertRaises(ValueError):
+ result = format_string % 2.34
+
+ def test_formatting_huge_width(self):
+ format_string = "%{}f".format(sys.maxsize + 1)
+ with self.assertRaises(ValueError):
+ result = format_string % 2.34
+
def test_conversion(self):
# Make sure __str__() behaves properly
class Foo0:
@@ -98,12 +110,12 @@ class StrTest(
self.assertEqual(str(Foo9("foo")), "string")
self.assertEqual(unicode(Foo9("foo")), u"not unicode")
+ # This test only affects 32-bit platforms because expandtabs can only take
+ # an int as the max value, not a 64-bit C long. If expandtabs is changed
+ # to take a 64-bit long, this test should apply to all platforms.
+ @unittest.skipIf(sys.maxint > (1 << 32) or struct.calcsize('P') != 4,
+ 'only applies to 32-bit platforms')
def test_expandtabs_overflows_gracefully(self):
- # This test only affects 32-bit platforms because expandtabs can only take
- # an int as the max value, not a 64-bit C long. If expandtabs is changed
- # to take a 64-bit long, this test should apply to all platforms.
- if sys.maxint > (1 << 32) or struct.calcsize('P') != 4:
- return
self.assertRaises(OverflowError, 't\tt\t'.expandtabs, sys.maxint)
def test__format__(self):
@@ -371,6 +383,21 @@ class StrTest(
self.assertRaises(ValueError, format, "", "-")
self.assertRaises(ValueError, "{0:=s}".format, '')
+ def test_format_huge_precision(self):
+ format_string = ".{}f".format(sys.maxsize + 1)
+ with self.assertRaises(ValueError):
+ result = format(2.34, format_string)
+
+ def test_format_huge_width(self):
+ format_string = "{}f".format(sys.maxsize + 1)
+ with self.assertRaises(ValueError):
+ result = format(2.34, format_string)
+
+ def test_format_huge_item_number(self):
+ format_string = "{{{}:.6f}}".format(sys.maxsize + 1)
+ with self.assertRaises(ValueError):
+ result = format_string.format(2.34)
+
def test_format_auto_numbering(self):
class C:
def __init__(self, x=100):
diff --git a/Lib/test/test_strop.py b/Lib/test/test_strop.py
index 8ce29ef..45c90a6 100644
--- a/Lib/test/test_strop.py
+++ b/Lib/test/test_strop.py
@@ -4,6 +4,7 @@ warnings.filterwarnings("ignore", "strop functions are obsolete;",
r'test.test_strop|unittest')
import strop
import unittest
+import sys
from test import test_support
@@ -115,6 +116,11 @@ class StropFunctionTestCase(unittest.TestCase):
strop.uppercase
strop.whitespace
+ @unittest.skipUnless(sys.maxsize == 2147483647, "only for 32-bit")
+ def test_expandtabs_overflow(self):
+ s = '\t\n' * 0x10000 + 'A' * 0x1000000
+ self.assertRaises(OverflowError, strop.expandtabs, s, 0x10001)
+
@test_support.precisionbigmemtest(size=test_support._2G - 1, memuse=5)
def test_stropjoin_huge_list(self, size):
a = "A" * size
diff --git a/Lib/test/test_strptime.py b/Lib/test/test_strptime.py
index 63760c3..66b9ab3 100644
--- a/Lib/test/test_strptime.py
+++ b/Lib/test/test_strptime.py
@@ -38,9 +38,9 @@ class LocaleTime_Tests(unittest.TestCase):
comparison = testing[self.time_tuple[tuple_position]]
self.assertIn(strftime_output, testing,
"%s: not found in tuple" % error_msg)
- self.assertTrue(comparison == strftime_output,
- "%s: position within tuple incorrect; %s != %s" %
- (error_msg, comparison, strftime_output))
+ self.assertEqual(comparison, strftime_output,
+ "%s: position within tuple incorrect; %s != %s" %
+ (error_msg, comparison, strftime_output))
def test_weekday(self):
# Make sure that full and abbreviated weekday names are correct in
@@ -65,8 +65,8 @@ class LocaleTime_Tests(unittest.TestCase):
"AM/PM representation not in tuple")
if self.time_tuple[3] < 12: position = 0
else: position = 1
- self.assertTrue(strftime_output == self.LT_ins.am_pm[position],
- "AM/PM representation in the wrong position within the tuple")
+ self.assertEqual(self.LT_ins.am_pm[position], strftime_output,
+ "AM/PM representation in the wrong position within the tuple")
def test_timezone(self):
# Make sure timezone is correct
@@ -86,17 +86,14 @@ class LocaleTime_Tests(unittest.TestCase):
# output.
magic_date = (1999, 3, 17, 22, 44, 55, 2, 76, 0)
strftime_output = time.strftime("%c", magic_date)
- self.assertTrue(strftime_output == time.strftime(self.LT_ins.LC_date_time,
- magic_date),
- "LC_date_time incorrect")
+ self.assertEqual(time.strftime(self.LT_ins.LC_date_time, magic_date),
+ strftime_output, "LC_date_time incorrect")
strftime_output = time.strftime("%x", magic_date)
- self.assertTrue(strftime_output == time.strftime(self.LT_ins.LC_date,
- magic_date),
- "LC_date incorrect")
+ self.assertEqual(time.strftime(self.LT_ins.LC_date, magic_date),
+ strftime_output, "LC_date incorrect")
strftime_output = time.strftime("%X", magic_date)
- self.assertTrue(strftime_output == time.strftime(self.LT_ins.LC_time,
- magic_date),
- "LC_time incorrect")
+ self.assertEqual(time.strftime(self.LT_ins.LC_time, magic_date),
+ strftime_output, "LC_time incorrect")
LT = _strptime.LocaleTime()
LT.am_pm = ('', '')
self.assertTrue(LT.LC_time, "LocaleTime's LC directives cannot handle "
@@ -168,8 +165,8 @@ class TimeRETests(unittest.TestCase):
# Fixes bug #661354
test_locale = _strptime.LocaleTime()
test_locale.timezone = (frozenset(), frozenset())
- self.assertTrue(_strptime.TimeRE(test_locale).pattern("%Z") == '',
- "with timezone == ('',''), TimeRE().pattern('%Z') != ''")
+ self.assertEqual(_strptime.TimeRE(test_locale).pattern("%Z"), '',
+ "with timezone == ('',''), TimeRE().pattern('%Z') != ''")
def test_matching_with_escapes(self):
# Make sure a format that requires escaping of characters works
@@ -195,7 +192,7 @@ class TimeRETests(unittest.TestCase):
# so as not to allow subpatterns to end up next to each other and
# "steal" characters from each other.
pattern = self.time_re.pattern('%j %H')
- self.assertTrue(not re.match(pattern, "180"))
+ self.assertFalse(re.match(pattern, "180"))
self.assertTrue(re.match(pattern, "18 0"))
@@ -316,7 +313,7 @@ class StrptimeTests(unittest.TestCase):
# when time.tzname[0] == time.tzname[1] and time.daylight
tz_name = time.tzname[0]
if tz_name.upper() in ("UTC", "GMT"):
- return
+ self.skipTest('need non-UTC/GMT timezone')
try:
original_tzname = time.tzname
original_daylight = time.daylight
@@ -381,6 +378,14 @@ class StrptimeTests(unittest.TestCase):
need_escaping = ".^$*+?{}\[]|)("
self.assertTrue(_strptime._strptime_time(need_escaping, need_escaping))
+ def test_feb29_on_leap_year_without_year(self):
+ time.strptime("Feb 29", "%b %d")
+
+ def test_mar1_comes_after_feb29_even_when_omitting_the_year(self):
+ self.assertLess(
+ time.strptime("Feb 29", "%b %d"),
+ time.strptime("Mar 1", "%b %d"))
+
class Strptime12AMPMTests(unittest.TestCase):
"""Test a _strptime regression in '%I %p' at 12 noon (12 PM)"""
@@ -521,7 +526,7 @@ class CacheTests(unittest.TestCase):
try:
locale.setlocale(locale.LC_TIME, ('en_US', 'UTF8'))
except locale.Error:
- return
+ self.skipTest('test needs en_US.UTF8 locale')
try:
_strptime._strptime_time('10', '%d')
# Get id of current cache object.
@@ -538,7 +543,7 @@ class CacheTests(unittest.TestCase):
# If this is the case just suppress the exception and fall-through
# to the resetting to the original locale.
except locale.Error:
- pass
+ self.skipTest('test needs de_DE.UTF8 locale')
# Make sure we don't trample on the locale setting once we leave the
# test.
finally:
diff --git a/Lib/test/test_strtod.py b/Lib/test/test_strtod.py
index 7bc595d..8cc2377 100644
--- a/Lib/test/test_strtod.py
+++ b/Lib/test/test_strtod.py
@@ -249,6 +249,38 @@ class StrtodTests(unittest.TestCase):
else:
assert False, "expected ValueError"
+ @test_support.precisionbigmemtest(size=test_support._2G, memuse=3,
+ dry_run=False)
+ def test_oversized_digit_strings(self, maxsize):
+ # Input string whose length doesn't fit in an INT.
+ s = "1." + "1" * int(2.2e9)
+ with self.assertRaises(ValueError):
+ float(s)
+ del s
+
+ s = "0." + "0" * int(2.2e9) + "1"
+ with self.assertRaises(ValueError):
+ float(s)
+ del s
+
+ def test_large_exponents(self):
+ # Verify that the clipping of the exponent in strtod doesn't affect the
+ # output values.
+ def positive_exp(n):
+ """ Long string with value 1.0 and exponent n"""
+ return '0.{}1e+{}'.format('0'*(n-1), n)
+
+ def negative_exp(n):
+ """ Long string with value 1.0 and exponent -n"""
+ return '1{}e-{}'.format('0'*n, n)
+
+ self.assertEqual(float(positive_exp(10000)), 1.0)
+ self.assertEqual(float(positive_exp(20000)), 1.0)
+ self.assertEqual(float(positive_exp(30000)), 1.0)
+ self.assertEqual(float(negative_exp(10000)), 1.0)
+ self.assertEqual(float(negative_exp(20000)), 1.0)
+ self.assertEqual(float(negative_exp(30000)), 1.0)
+
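For a small n the helper strings above are easy to check by hand; the test itself only uses very large n so that the exponent is long enough to exercise the clipping path in strtod (worked example, not part of the patch):

# positive_exp(5) -> '0.00001e+5'  (mantissa 1e-5, exponent +5, value 1.0)
assert '0.{}1e+{}'.format('0' * 4, 5) == '0.00001e+5'
assert float('0.00001e+5') == 1.0
# negative_exp(5) -> '100000e-5'   (mantissa 1e5, exponent -5, value 1.0)
assert '1{}e-{}'.format('0' * 5, 5) == '100000e-5'
assert float('100000e-5') == 1.0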
def test_particular(self):
# inputs that produced crashes or incorrectly rounded results with
# previous versions of dtoa.c, for various reasons
diff --git a/Lib/test/test_struct.py b/Lib/test/test_struct.py
index 5a63135..4811974 100644
--- a/Lib/test/test_struct.py
+++ b/Lib/test/test_struct.py
@@ -3,7 +3,8 @@ import array
import unittest
import struct
import inspect
-from test.test_support import run_unittest, check_warnings, check_py3k_warnings
+from test import test_support as support
+from test.test_support import (check_warnings, check_py3k_warnings)
import sys
ISBIGENDIAN = sys.byteorder == "big"
@@ -495,6 +496,14 @@ class StructTest(unittest.TestCase):
self.test_unpack_from(cls=buffer)
+ def test_unpack_with_memoryview(self):
+ # Bug 10212: struct.unpack doesn't support new buffer protocol objects
+ data1 = memoryview('\x12\x34\x56\x78')
+ for data in [data1,]:
+ value, = struct.unpack('>I', data)
+ self.assertEqual(value, 0x12345678)
+ self.test_unpack_from(cls=memoryview)
+
def test_bool(self):
class ExplodingBool(object):
def __nonzero__(self):
@@ -544,8 +553,41 @@ class StructTest(unittest.TestCase):
hugecount2 = '{}b{}H'.format(sys.maxsize//2, sys.maxsize//2)
self.assertRaises(struct.error, struct.calcsize, hugecount2)
+ def check_sizeof(self, format_str, number_of_codes):
+ # The size of 'PyStructObject'
+ totalsize = support.calcobjsize('5P')
+ # The size taken up by the 'formatcode' dynamic array
+ totalsize += struct.calcsize('3P') * (number_of_codes + 1)
+ support.check_sizeof(self, struct.Struct(format_str), totalsize)
+
+ @support.cpython_only
+ def test__sizeof__(self):
+ for code in integer_codes:
+ self.check_sizeof(code, 1)
+ self.check_sizeof('BHILfdspP', 9)
+ self.check_sizeof('B' * 1234, 1234)
+ self.check_sizeof('fd', 2)
+ self.check_sizeof('xxxxxxxxxxxxxx', 0)
+ self.check_sizeof('100H', 100)
+ self.check_sizeof('187s', 1)
+ self.check_sizeof('20p', 1)
+ self.check_sizeof('0s', 1)
+ self.check_sizeof('0c', 0)
+
+ def test_unicode_format(self):
+ try:
+ unicode
+ except NameError:
+ self.skipTest('no unicode support')
+ # Issue #19099
+ s = struct.Struct(unichr(ord('I')))
+ self.assertEqual(s.format, 'I')
+ self.assertIs(type(s.format), str)
+ self.assertRaises(ValueError, struct.Struct, unichr(0x80))
+
+
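check_sizeof() above encodes the expected memory layout of a Struct object: a five-pointer fixed part plus one three-pointer formatcode entry per format code, with one extra sentinel entry. A rough sketch of the arithmetic, assuming a typical 64-bit non-debug build (the sizes are assumptions, not part of the patch):

import struct

POINTER = struct.calcsize('P')       # 8 on a typical 64-bit build
object_header = 2 * POINTER          # refcount + type pointer (non-debug build)
fixed_part = object_header + struct.calcsize('5P')   # PyStructObject body
per_code = struct.calcsize('3P')     # one formatcode entry

# Struct('fd') has 2 format codes plus the sentinel entry:
expected = fixed_part + per_code * (2 + 1)
print(expected)                      # 16 + 40 + 24 * 3 == 128 on such a build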
def test_main():
- run_unittest(StructTest)
+ support.run_unittest(StructTest)
if __name__ == '__main__':
test_main()
diff --git a/Lib/test/test_structmembers.py b/Lib/test/test_structmembers.py
index 0cc58c3..3e2b261 100644
--- a/Lib/test/test_structmembers.py
+++ b/Lib/test/test_structmembers.py
@@ -1,3 +1,8 @@
+import unittest
+from test import test_support
+
+# Skip this test if the _testcapi module isn't available.
+test_support.import_module('_testcapi')
from _testcapi import _test_structmembersType, \
CHAR_MAX, CHAR_MIN, UCHAR_MAX, \
SHRT_MAX, SHRT_MIN, USHRT_MAX, \
@@ -5,9 +10,6 @@ from _testcapi import _test_structmembersType, \
LONG_MAX, LONG_MIN, ULONG_MAX, \
LLONG_MAX, LLONG_MIN, ULLONG_MAX
-import unittest
-from test import test_support
-
ts=_test_structmembersType(False, 1, 2, 3, 4, 5, 6, 7, 8,
9.99999, 10.1010101010, "hi")
diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py
index b66356d..0efcdbf 100644
--- a/Lib/test/test_subprocess.py
+++ b/Lib/test/test_subprocess.py
@@ -14,6 +14,10 @@ try:
import resource
except ImportError:
resource = None
+try:
+ import threading
+except ImportError:
+ threading = None
mswindows = (sys.platform == "win32")
@@ -58,6 +62,18 @@ class BaseTestCase(unittest.TestCase):
self.assertEqual(actual, expected, msg)
+class PopenTestException(Exception):
+ pass
+
+
+class PopenExecuteChildRaises(subprocess.Popen):
+ """Popen subclass for testing cleanup of subprocess.PIPE filehandles when
+ _execute_child fails.
+ """
+ def _execute_child(self, *args, **kwargs):
+ raise PopenTestException("Forced Exception for Test")
+
+
class ProcessTestCase(BaseTestCase):
def test_call_seq(self):
@@ -138,16 +154,27 @@ class ProcessTestCase(BaseTestCase):
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
- # .stdout is None when not redirected
- p = subprocess.Popen([sys.executable, "-c",
- 'print " this bit of output is from a '
- 'test of stdout in a different '
- 'process ..."'],
- stdin=subprocess.PIPE, stderr=subprocess.PIPE)
- self.addCleanup(p.stdin.close)
+ # .stdout is None when not redirected, and the child's stdout will
+ # be inherited from the parent. In order to test this we run a
+ # subprocess in a subprocess:
+ # this_test
+ # \-- subprocess created by this test (parent)
+ # \-- subprocess created by the parent subprocess (child)
+ # The parent doesn't specify stdout, so the child will use the
+ # parent's stdout. This test checks that the message printed by the
+ # child goes to the parent stdout. The parent also checks that the
+ # child's stdout is None. See #11963.
+ code = ('import sys; from subprocess import Popen, PIPE;'
+ 'p = Popen([sys.executable, "-c", "print \'test_stdout_none\'"],'
+ ' stdin=PIPE, stderr=PIPE);'
+ 'p.wait(); assert p.stdout is None;')
+ p = subprocess.Popen([sys.executable, "-c", code],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
- p.wait()
- self.assertEqual(p.stdout, None)
+ out, err = p.communicate()
+ self.assertEqual(p.returncode, 0, err)
+ self.assertEqual(out.rstrip(), 'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
@@ -296,9 +323,22 @@ class ProcessTestCase(BaseTestCase):
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
- cmd = r"import sys, os; sys.exit(os.write(sys.stdout.fileno(), '.\n'))"
- rc = subprocess.call([sys.executable, "-c", cmd], stdout=1)
- self.assertEqual(rc, 2)
+ # To avoid printing the text on stdout, we do something similar to
+ # test_stdout_none (see above). The parent subprocess calls the child
+ # subprocess passing stdout=1, and this test uses stdout=PIPE in
+ # order to capture and check the output of the parent. See #11963.
+ code = ('import sys, subprocess; '
+ 'rc = subprocess.call([sys.executable, "-c", '
+ ' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
+ '\'test with stdout=1\'))"], stdout=1); '
+ 'assert rc == 18')
+ p = subprocess.Popen([sys.executable, "-c", code],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ self.addCleanup(p.stdout.close)
+ self.addCleanup(p.stderr.close)
+ out, err = p.communicate()
+ self.assertEqual(p.returncode, 0, err)
+ self.assertEqual(out.rstrip(), 'test with stdout=1')
def test_cwd(self):
tmpdir = tempfile.gettempdir()
@@ -526,6 +566,7 @@ class ProcessTestCase(BaseTestCase):
finally:
for h in handles:
os.close(h)
+ test_support.unlink(test_support.TESTFN)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
@@ -592,6 +633,36 @@ class ProcessTestCase(BaseTestCase):
if c.exception.errno not in (errno.ENOENT, errno.EACCES):
raise c.exception
+ @unittest.skipIf(threading is None, "threading required")
+ def test_double_close_on_error(self):
+ # Issue #18851
+ fds = []
+ def open_fds():
+ for i in range(20):
+ fds.extend(os.pipe())
+ time.sleep(0.001)
+ t = threading.Thread(target=open_fds)
+ t.start()
+ try:
+ with self.assertRaises(EnvironmentError):
+ subprocess.Popen(['nonexisting_i_hope'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ finally:
+ t.join()
+ exc = None
+ for fd in fds:
+ # If a double close occurred, some of those fds will
+ # already have been closed by mistake, and os.close()
+ # here will raise.
+ try:
+ os.close(fd)
+ except OSError as e:
+ exc = e
+ if exc is not None:
+ raise exc
+
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
@@ -631,6 +702,27 @@ class ProcessTestCase(BaseTestCase):
time.sleep(2)
p.communicate("x" * 2**20)
+ # This test is Linux-specific for simplicity, so that we at least have
+ # some coverage. It is not a platform-specific bug.
+ @unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
+ "Linux specific")
+ def test_failed_child_execute_fd_leak(self):
+ """Test for the fork() failure fd leak reported in issue16327."""
+ fd_directory = '/proc/%d/fd' % os.getpid()
+ fds_before_popen = os.listdir(fd_directory)
+ with self.assertRaises(PopenTestException):
+ PopenExecuteChildRaises(
+ [sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ # NOTE: This test doesn't verify that the real _execute_child
+ # refrains from closing the file descriptors itself on the way out of
+ # an exception; code inspection has confirmed that it does not.
+
+ fds_after_exception = os.listdir(fd_directory)
+ self.assertEqual(fds_before_popen, fds_after_exception)
+
+
# context manager
class _SuppressCoreFiles(object):
"""Try to prevent core files from being created."""
@@ -717,6 +809,53 @@ class POSIXProcessTestCase(BaseTestCase):
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), "apple")
+ class _TestExecuteChildPopen(subprocess.Popen):
+ """Used to test behavior at the end of _execute_child."""
+ def __init__(self, testcase, *args, **kwargs):
+ self._testcase = testcase
+ subprocess.Popen.__init__(self, *args, **kwargs)
+
+ def _execute_child(
+ self, args, executable, preexec_fn, close_fds, cwd, env,
+ universal_newlines, startupinfo, creationflags, shell, to_close,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite):
+ try:
+ subprocess.Popen._execute_child(
+ self, args, executable, preexec_fn, close_fds,
+ cwd, env, universal_newlines,
+ startupinfo, creationflags, shell, to_close,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+ finally:
+ # Open a bunch of file descriptors and verify that
+ # none of them are the same as the ones the Popen
+ # instance is using for stdin/stdout/stderr.
+ devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
+ for _ in range(8)]
+ try:
+ for fd in devzero_fds:
+ self._testcase.assertNotIn(
+ fd, (p2cwrite, c2pread, errread))
+ finally:
+ for fd in devzero_fds:
+ os.close(fd)
+
+ @unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
+ def test_preexec_errpipe_does_not_double_close_pipes(self):
+ """Issue16140: Don't double close pipes on preexec error."""
+
+ def raise_it():
+ raise RuntimeError("force the _execute_child() errpipe_data path.")
+
+ with self.assertRaises(RuntimeError):
+ self._TestExecuteChildPopen(
+ self, [sys.executable, "-c", "pass"],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, preexec_fn=raise_it)
+
def test_args_string(self):
# args is a string
f, fname = mkstemp()
@@ -812,6 +951,29 @@ class POSIXProcessTestCase(BaseTestCase):
getattr(p, method)(*args)
return p
+ @unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
+ "Due to known OS bug (issue #16762)")
+ def _kill_dead_process(self, method, *args):
+ # Do not inherit file handles from the parent; this avoids
+ # spurious failures on some platforms.
+ p = subprocess.Popen([sys.executable, "-c", """if 1:
+ import sys, time
+ sys.stdout.write('x\\n')
+ sys.stdout.flush()
+ """],
+ close_fds=True,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ # Wait for the interpreter to be completely initialized before
+ # sending any signal.
+ p.stdout.read(1)
+ # The process should end after this
+ time.sleep(1)
+ # This shouldn't raise even though the child is now dead
+ getattr(p, method)(*args)
+ p.communicate()
+
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
@@ -830,6 +992,18 @@ class POSIXProcessTestCase(BaseTestCase):
self.assertStderrEqual(stderr, '')
self.assertEqual(p.wait(), -signal.SIGTERM)
+ def test_send_signal_dead(self):
+ # Sending a signal to a dead process
+ self._kill_dead_process('send_signal', signal.SIGINT)
+
+ def test_kill_dead(self):
+ # Killing a dead process
+ self._kill_dead_process('kill')
+
+ def test_terminate_dead(self):
+ # Terminating a dead process
+ self._kill_dead_process('terminate')
+
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
@@ -1126,6 +1300,31 @@ class Win32ProcessTestCase(BaseTestCase):
returncode = p.wait()
self.assertNotEqual(returncode, 0)
+ def _kill_dead_process(self, method, *args):
+ p = subprocess.Popen([sys.executable, "-c", """if 1:
+ import sys, time
+ sys.stdout.write('x\\n')
+ sys.stdout.flush()
+ sys.exit(42)
+ """],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ self.addCleanup(p.stdout.close)
+ self.addCleanup(p.stderr.close)
+ self.addCleanup(p.stdin.close)
+ # Wait for the interpreter to be completely initialized before
+ # sending any signal.
+ p.stdout.read(1)
+ # The process should end after this
+ time.sleep(1)
+ # This shouldn't raise even though the child is now dead
+ getattr(p, method)(*args)
+ _, stderr = p.communicate()
+ self.assertStderrEqual(stderr, b'')
+ rc = p.wait()
+ self.assertEqual(rc, 42)
+
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
@@ -1135,6 +1334,15 @@ class Win32ProcessTestCase(BaseTestCase):
def test_terminate(self):
self._kill_process('terminate')
+ def test_send_signal_dead(self):
+ self._kill_dead_process('send_signal', signal.SIGTERM)
+
+ def test_kill_dead(self):
+ self._kill_dead_process('kill')
+
+ def test_terminate_dead(self):
+ self._kill_dead_process('terminate')
+
@unittest.skipUnless(getattr(subprocess, '_has_poll', False),
"poll system call not supported")
diff --git a/Lib/test/test_sunau.py b/Lib/test/test_sunau.py
new file mode 100644
index 0000000..f682868
--- /dev/null
+++ b/Lib/test/test_sunau.py
@@ -0,0 +1,100 @@
+from test.test_support import TESTFN, run_unittest
+import unittest
+from test import audiotests
+import sys
+import sunau
+
+
+class SunauTest(audiotests.AudioWriteTests,
+ audiotests.AudioTestsWithSourceFile):
+ module = sunau
+
+
+class SunauPCM8Test(SunauTest, unittest.TestCase):
+ sndfilename = 'pluck-pcm8.au'
+ sndfilenframes = 3307
+ nchannels = 2
+ sampwidth = 1
+ framerate = 11025
+ nframes = 48
+ comptype = 'NONE'
+ compname = 'not compressed'
+ frames = audiotests.fromhex("""\
+ 02FF 4B00 3104 8008 CB06 4803 BF01 03FE B8FA B4F3 29EB 1AE6 \
+ EDE4 C6E2 0EE0 EFE0 57E2 FBE8 13EF D8F7 97FB F5FC 08FB DFFB \
+ 11FA 3EFB BCFC 66FF CF04 4309 C10E 5112 EE17 8216 7F14 8012 \
+ 490E 520D EF0F CE0F E40C 630A 080A 2B0B 510E 8B11 B60E 440A \
+ """)
+
+
+class SunauPCM16Test(SunauTest, unittest.TestCase):
+ sndfilename = 'pluck-pcm16.au'
+ sndfilenframes = 3307
+ nchannels = 2
+ sampwidth = 2
+ framerate = 11025
+ nframes = 48
+ comptype = 'NONE'
+ compname = 'not compressed'
+ frames = audiotests.fromhex("""\
+ 022EFFEA 4B5C00F9 311404EF 80DB0844 CBE006B0 48AB03F3 BFE601B5 0367FE80 \
+ B853FA42 B4AFF351 2997EBCD 1A5AE6DC EDF9E492 C627E277 0E06E0B7 EF29E029 \
+ 5759E271 FB34E83F 1377EF85 D82CF727 978EFB79 F5F7FC12 0864FB9E DF30FB40 \
+ 1183FA30 3EEAFB59 BC78FCB4 66D5FF60 CF130415 431A097D C1BA0EC7 512312A0 \
+ EEE11754 82071666 7FFE1448 80001298 49990EB7 52B40DC1 EFAD0F65 CE3A0FBE \
+ E4B70CE6 63490A57 08CC0A1D 2BBC0B09 51480E46 8BCB113C B6F60EE9 44150A5A \
+ """)
+
+
+class SunauPCM32Test(SunauTest, unittest.TestCase):
+ sndfilename = 'pluck-pcm32.au'
+ sndfilenframes = 3307
+ nchannels = 2
+ sampwidth = 4
+ framerate = 11025
+ nframes = 48
+ comptype = 'NONE'
+ compname = 'not compressed'
+ frames = audiotests.fromhex("""\
+ 022D65BCFFEB9D92 4B5A0F8000FA549C 3113C34004EE2BC0 80DCD680084303E0 \
+ CBDEC0C006B26140 48A9980003F2F8FC BFE8248001B07D92 036BFB60FE7B5D34 \
+ B8575600FA3EC920 B4B05500F3502BC0 29983000EBCB6240 1A5CA7A0E6D99A60 \
+ EDFA3E80E491BD40 C625EB80E27884A0 0E05A9A0E0B6CFE0 EF292940E0292280 \
+ 5758D800E2706700 FB3557D8E83E1640 1377BF00EF840280 D82C5B80F7272A80 \
+ 978F1600FB774560 F5F86510FC101364 086635A0FB9C4E20 DF30FC40FB40EE28 \
+ 117FE0A0FA3438B0 3EE6B840FB5AC3F0 BC77A380FCB2F454 66D6DA80FF5F32B4 \
+ CF13B980041275B0 431D6980097A8C00 C1BB60000EC74E00 5120B98012A2BAA0 \
+ EEDF64C01754C060 820700001664B780 7FFFFFFF14453F40 800000001294E6E0 \
+ 499C1B000EB3B270 52B73E000DBCA020 EFB2B2E00F5FD880 CE3CDB400FBE1270 \
+ E4B49CC00CEA2D90 6344A8800A5A7CA0 08C8FE800A1FFEE0 2BB986C00B0A0E00 \
+ 51486F800E44E190 8BCC6480113B0580 B6F4EC000EEB3630 441317800A5B48A0 \
+ """)
+
+
+class SunauULAWTest(SunauTest, unittest.TestCase):
+ sndfilename = 'pluck-ulaw.au'
+ sndfilenframes = 3307
+ nchannels = 2
+ sampwidth = 2
+ framerate = 11025
+ nframes = 48
+ comptype = 'ULAW'
+ compname = 'CCITT G.711 u-law'
+ frames = audiotests.fromhex("""\
+ 022CFFE8 497C00F4 307C04DC 8284083C CB84069C 497C03DC BE8401AC 036CFE74 \
+ B684FA24 B684F344 2A7CEC04 19FCE704 EE04E504 C584E204 0E3CE104 EF04DF84 \
+ 557CE204 FB24E804 12FCEF04 D784F744 9684FB64 F5C4FC24 083CFBA4 DF84FB24 \
+ 11FCFA24 3E7CFB64 BA84FCB4 657CFF5C CF84041C 417C09BC C1840EBC 517C12FC \
+ EF0416FC 828415FC 7D7C13FC 828412FC 497C0EBC 517C0DBC F0040F3C CD840FFC \
+ E5040CBC 617C0A3C 08BC0A3C 2C7C0B3C 517C0E3C 8A8410FC B6840EBC 457C0A3C \
+ """)
+ if sys.byteorder != 'big':
+ frames = audiotests.byteswap2(frames)
+
+
+def test_main():
+ run_unittest(SunauPCM8Test, SunauPCM16Test,
+ SunauPCM32Test, SunauULAWTest)
+
+if __name__ == "__main__":
+ test_main()
diff --git a/Lib/test/test_sundry.py b/Lib/test/test_sundry.py
index 6e2b797..46cbab5 100644
--- a/Lib/test/test_sundry.py
+++ b/Lib/test/test_sundry.py
@@ -49,11 +49,9 @@ class TestUntestedModules(unittest.TestCase):
import getpass
import htmlentitydefs
import ihooks
- import imghdr
import imputil
import keyword
import linecache
- import macurl2path
import mailcap
import mimify
import nntplib
diff --git a/Lib/test/test_support.py b/Lib/test/test_support.py
index b0a2048..e1ee3f5 100644
--- a/Lib/test/test_support.py
+++ b/Lib/test/test_support.py
@@ -18,6 +18,8 @@ import importlib
import UserDict
import re
import time
+import struct
+import sysconfig
try:
import thread
except ImportError:
@@ -179,15 +181,79 @@ def unload(name):
except KeyError:
pass
+if sys.platform.startswith("win"):
+ def _waitfor(func, pathname, waitall=False):
+ # Perform the operation
+ func(pathname)
+ # Now setup the wait loop
+ if waitall:
+ dirname = pathname
+ else:
+ dirname, name = os.path.split(pathname)
+ dirname = dirname or '.'
+ # Wait for `pathname` to disappear from the filesystem.
+ # The exponential backoff of the timeout amounts to a total
+ # of ~1 second after which the deletion is probably an error
+ # anyway.
+ # Testing on an i7@4.3GHz shows that usually only 1 iteration is
+ # required when contention occurs.
+ timeout = 0.001
+ while timeout < 1.0:
+ # Note we are only testing for the existence of the file(s) in
+ # the contents of the directory regardless of any security or
+ # access rights. If we have made it this far, we have sufficient
+ # permissions to do that much using Python's equivalent of the
+ # Windows API FindFirstFile.
+ # Other Windows APIs can fail or give incorrect results when
+ # dealing with files that are pending deletion.
+ L = os.listdir(dirname)
+ if not (L if waitall else name in L):
+ return
+ # Increase the timeout and try again
+ time.sleep(timeout)
+ timeout *= 2
+ warnings.warn('tests may fail, delete still pending for ' + pathname,
+ RuntimeWarning, stacklevel=4)
+
+ def _unlink(filename):
+ _waitfor(os.unlink, filename)
+
+ def _rmdir(dirname):
+ _waitfor(os.rmdir, dirname)
+
+ def _rmtree(path):
+ def _rmtree_inner(path):
+ for name in os.listdir(path):
+ fullname = os.path.join(path, name)
+ if os.path.isdir(fullname):
+ _waitfor(_rmtree_inner, fullname, waitall=True)
+ os.rmdir(fullname)
+ else:
+ os.unlink(fullname)
+ _waitfor(_rmtree_inner, path, waitall=True)
+ _waitfor(os.rmdir, path)
+else:
+ _unlink = os.unlink
+ _rmdir = os.rmdir
+ _rmtree = shutil.rmtree
+
def unlink(filename):
try:
- os.unlink(filename)
+ _unlink(filename)
except OSError:
pass
+def rmdir(dirname):
+ try:
+ _rmdir(dirname)
+ except OSError as error:
+ # The directory need not exist.
+ if error.errno != errno.ENOENT:
+ raise
+
def rmtree(path):
try:
- shutil.rmtree(path)
+ _rmtree(path)
except OSError, e:
# Unix returns ENOENT, Windows returns ESRCH.
if e.errno not in (errno.ENOENT, errno.ESRCH):
@@ -204,26 +270,104 @@ def forget(modname):
# is exited) but there is a .pyo file.
unlink(os.path.join(dirname, modname + os.extsep + 'pyo'))
+# Check whether a gui is actually available
+def _is_gui_available():
+ if hasattr(_is_gui_available, 'result'):
+ return _is_gui_available.result
+ reason = None
+ if sys.platform.startswith('win'):
+ # if Python is running as a service (such as the buildbot service),
+ # gui interaction may be disallowed
+ import ctypes
+ import ctypes.wintypes
+ UOI_FLAGS = 1
+ WSF_VISIBLE = 0x0001
+ class USEROBJECTFLAGS(ctypes.Structure):
+ _fields_ = [("fInherit", ctypes.wintypes.BOOL),
+ ("fReserved", ctypes.wintypes.BOOL),
+ ("dwFlags", ctypes.wintypes.DWORD)]
+ dll = ctypes.windll.user32
+ h = dll.GetProcessWindowStation()
+ if not h:
+ raise ctypes.WinError()
+ uof = USEROBJECTFLAGS()
+ needed = ctypes.wintypes.DWORD()
+ res = dll.GetUserObjectInformationW(h,
+ UOI_FLAGS,
+ ctypes.byref(uof),
+ ctypes.sizeof(uof),
+ ctypes.byref(needed))
+ if not res:
+ raise ctypes.WinError()
+ if not bool(uof.dwFlags & WSF_VISIBLE):
+ reason = "gui not available (WSF_VISIBLE flag not set)"
+ elif sys.platform == 'darwin':
+ # The Aqua Tk implementations on OS X can abort the process if
+ # being called in an environment where a window server connection
+ # cannot be made, for instance when invoked by a buildbot or ssh
+ # process not running under the same user id as the current console
+ # user. To avoid that, raise an exception if the window manager
+ # connection is not available.
+ from ctypes import cdll, c_int, pointer, Structure
+ from ctypes.util import find_library
+
+ app_services = cdll.LoadLibrary(find_library("ApplicationServices"))
+
+ if app_services.CGMainDisplayID() == 0:
+ reason = "gui tests cannot run without OS X window manager"
+ else:
+ class ProcessSerialNumber(Structure):
+ _fields_ = [("highLongOfPSN", c_int),
+ ("lowLongOfPSN", c_int)]
+ psn = ProcessSerialNumber()
+ psn_p = pointer(psn)
+ if ( (app_services.GetCurrentProcess(psn_p) < 0) or
+ (app_services.SetFrontProcess(psn_p) < 0) ):
+ reason = "cannot run without OS X gui process"
+
+ # check on every platform whether tkinter can actually do anything
+ # but skip the test on OS X because it can cause segfaults in Cocoa Tk
+ # when running regrtest with the -j option (multiple threads/subprocesses)
+ if (not reason) and (sys.platform != 'darwin'):
+ try:
+ from Tkinter import Tk
+ root = Tk()
+ root.destroy()
+ except Exception as e:
+ err_string = str(e)
+ if len(err_string) > 50:
+ err_string = err_string[:50] + ' [...]'
+ reason = 'Tk unavailable due to {}: {}'.format(type(e).__name__,
+ err_string)
+
+ _is_gui_available.reason = reason
+ _is_gui_available.result = not reason
+
+ return _is_gui_available.result
+
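With _is_gui_available() in place, gui-dependent tests are expected to go through requires() or requires_resource() rather than probing Tk themselves. A hedged usage sketch (the test class is hypothetical):

import unittest
from test import test_support

class WidgetTest(unittest.TestCase):        # hypothetical test case
    def setUp(self):
        # Raises ResourceDenied (a unittest.SkipTest subclass) when no usable
        # window station / display / Tk is available, so the test is skipped.
        test_support.requires('gui')

    @test_support.requires_resource('gui')  # decorator form does the same check
    def test_draw(self):
        pass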
def is_resource_enabled(resource):
- """Test whether a resource is enabled. Known resources are set by
- regrtest.py."""
- return use_resources is not None and resource in use_resources
+ """Test whether a resource is enabled.
-def requires(resource, msg=None):
- """Raise ResourceDenied if the specified resource is not available.
+ Known resources are set by regrtest.py. If not running under regrtest.py,
+ all resources are assumed enabled unless use_resources has been set.
+ """
+ return use_resources is None or resource in use_resources
- If the caller's module is __main__ then automatically return True. The
- possibility of False being returned occurs when regrtest.py is executing."""
- # see if the caller's module is __main__ - if so, treat as if
- # the resource was set
- if sys._getframe(1).f_globals.get("__name__") == "__main__":
- return
+def requires(resource, msg=None):
+ """Raise ResourceDenied if the specified resource is not available."""
+ if resource == 'gui' and not _is_gui_available():
+ raise ResourceDenied(_is_gui_available.reason)
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the `%s' resource not enabled" % resource
raise ResourceDenied(msg)
-HOST = 'localhost'
+
+# Don't use "localhost", since resolving it uses the DNS under recent
+# Windows versions (see issue #18792).
+HOST = "127.0.0.1"
+HOSTv6 = "::1"
+
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
@@ -305,9 +449,15 @@ def bind_port(sock, host=HOST):
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
- if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
- raise TestFailed("tests should never set the SO_REUSEPORT " \
- "socket option on TCP/IP sockets!")
+ try:
+ if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
+ raise TestFailed("tests should never set the SO_REUSEPORT " \
+ "socket option on TCP/IP sockets!")
+ except EnvironmentError:
+ # Python's socket module was compiled using modern headers
+ # thus defining SO_REUSEPORT but this process is running
+ # under an older kernel that does not support SO_REUSEPORT.
+ pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
@@ -333,6 +483,21 @@ def fcmp(x, y): # fuzzy comparison function
return (len(x) > len(y)) - (len(x) < len(y))
return (x > y) - (x < y)
+
+# A constant likely larger than the underlying OS pipe buffer size, to
+# make writes blocking.
+# Windows limit seems to be around 512 B, and many Unix kernels have a
+# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure.
+# (see issue #17835 for a discussion of this number).
+PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1
+
+# A constant likely larger than the underlying OS socket buffer size, to make
+# writes blocking.
+# The socket buffer sizes can usually be tuned system-wide (e.g. through sysctl
+# on Linux), or on a per-socket basis (SO_SNDBUF/SO_RCVBUF). See issue #18643
+# for a discussion of this number).
+SOCK_MAX_SIZE = 16 * 1024 * 1024 + 1
+
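The point of these constants is to guarantee that a single write() cannot complete before the other end drains data. A minimal sketch of the intended pattern with a pipe (illustrative only; a real test would add assertions around the blocking write):

import os
import threading
from test import test_support

r, w = os.pipe()
def drain():
    # Keep reading until the writer closes its end.
    while os.read(r, 65536):
        pass
t = threading.Thread(target=drain)
t.start()
# Larger than any plausible kernel pipe buffer, so the write cannot finish
# until the reader above has started consuming.
os.write(w, b'x' * test_support.PIPE_MAX_SIZE)
os.close(w)     # lets the reader see EOF and exit
t.join()
os.close(r)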
try:
unicode
have_unicode = True
@@ -341,6 +506,52 @@ except NameError:
is_jython = sys.platform.startswith('java')
+# FS_NONASCII: non-ASCII Unicode character encodable by
+# sys.getfilesystemencoding(), or None if there is no such character.
+FS_NONASCII = None
+if have_unicode:
+ for character in (
+ # First try printable and common characters to have a readable filename.
+ # For each character, the encodings listed are just examples of encodings
+ # able to encode the character (the list is not exhaustive).
+
+ # U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
+ unichr(0x00E6),
+ # U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
+ unichr(0x0130),
+ # U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
+ unichr(0x0141),
+ # U+03C6 (Greek Small Letter Phi): cp1253
+ unichr(0x03C6),
+ # U+041A (Cyrillic Capital Letter Ka): cp1251
+ unichr(0x041A),
+ # U+05D0 (Hebrew Letter Alef): Encodable to cp424
+ unichr(0x05D0),
+ # U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
+ unichr(0x060C),
+ # U+062A (Arabic Letter Teh): cp720
+ unichr(0x062A),
+ # U+0E01 (Thai Character Ko Kai): cp874
+ unichr(0x0E01),
+
+ # Then try more "special" characters. "special" because they may be
+ # interpreted or displayed differently depending on the exact locale
+ # encoding and the font.
+
+ # U+00A0 (No-Break Space)
+ unichr(0x00A0),
+ # U+20AC (Euro Sign)
+ unichr(0x20AC),
+ ):
+ try:
+ character.encode(sys.getfilesystemencoding())\
+ .decode(sys.getfilesystemencoding())
+ except UnicodeError:
+ pass
+ else:
+ FS_NONASCII = character
+ break
+
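FS_NONASCII lets a test build a filename that is guaranteed to round-trip through the filesystem encoding, and to skip cleanly when no such character exists. A small sketch of the intended use (the test class is hypothetical):

import os
import unittest
from test import test_support

@unittest.skipUnless(test_support.FS_NONASCII,
                     'needs a non-ASCII character encodable to the FS encoding')
class NonAsciiFilenameTest(unittest.TestCase):   # hypothetical test case
    def test_roundtrip(self):
        fn = test_support.TESTFN + test_support.FS_NONASCII
        self.addCleanup(test_support.unlink, fn)
        with open(fn, 'w') as f:
            f.write('data')
        self.assertTrue(os.path.exists(fn))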
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
@@ -405,7 +616,7 @@ def temp_cwd(name='tempcwd', quiet=False):
the CWD, an error is raised. If it's True, only a warning is raised
and the original CWD is used.
"""
- if isinstance(name, unicode):
+ if have_unicode and isinstance(name, unicode):
try:
name = name.encode(sys.getfilesystemencoding() or 'ascii')
except UnicodeEncodeError:
@@ -767,6 +978,9 @@ def transient_internet(resource_name, timeout=30.0, errnos=()):
('EAI_FAIL', -4),
('EAI_NONAME', -2),
('EAI_NODATA', -5),
+ # Windows defines EAI_NODATA as 11001, but its getaddrinfo()
+ # implementation actually returns WSANO_DATA, i.e. 11004.
+ ('WSANO_DATA', 11004),
]
denied = ResourceDenied("Resource '%s' is not available" % resource_name)
@@ -858,6 +1072,33 @@ def gc_collect():
gc.collect()
+_header = '2P'
+if hasattr(sys, "gettotalrefcount"):
+ _header = '2P' + _header
+_vheader = _header + 'P'
+
+def calcobjsize(fmt):
+ return struct.calcsize(_header + fmt + '0P')
+
+def calcvobjsize(fmt):
+ return struct.calcsize(_vheader + fmt + '0P')
+
+
+_TPFLAGS_HAVE_GC = 1<<14
+_TPFLAGS_HEAPTYPE = 1<<9
+
+def check_sizeof(test, o, size):
+ import _testcapi
+ result = sys.getsizeof(o)
+ # add GC header size
+ if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
+ ((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
+ size += _testcapi.SIZEOF_PYGC_HEAD
+ msg = 'wrong size for %s: got %d, expected %d' \
+ % (type(o), result, size)
+ test.assertEqual(result, size, msg)
+
+
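calcobjsize()/calcvobjsize() prepend the PyObject/PyVarObject header (which is larger on a --with-pydebug build, hence the gettotalrefcount check), and check_sizeof() adds the GC head for GC-tracked types. A hedged sketch of how a test consumes them (needs the _testcapi extension; the test class is hypothetical):

import struct
import sys
import unittest
from test import test_support

class SizeofExample(unittest.TestCase):      # hypothetical test case
    def test_int_and_tuple(self):
        # Fixed-size object: PyObject header plus one C long payload.
        self.assertEqual(sys.getsizeof(1), test_support.calcobjsize('l'))
        # Variable-size object: PyVarObject header plus one pointer per item;
        # check_sizeof() adds the GC head automatically for tuples.
        test_support.check_sizeof(self, (1, 2, 3),
                                  test_support.calcvobjsize('') +
                                  3 * struct.calcsize('P'))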
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
@@ -966,7 +1207,7 @@ def bigmemtest(minsize, memuse, overhead=5*_1M):
return wrapper
return decorator
-def precisionbigmemtest(size, memuse, overhead=5*_1M):
+def precisionbigmemtest(size, memuse, overhead=5*_1M, dry_run=True):
def decorator(f):
def wrapper(self):
if not real_max_memuse:
@@ -974,11 +1215,12 @@ def precisionbigmemtest(size, memuse, overhead=5*_1M):
else:
maxsize = size
- if real_max_memuse and real_max_memuse < maxsize * memuse:
- if verbose:
- sys.stderr.write("Skipping %s because of memory "
- "constraint\n" % (f.__name__,))
- return
+ if ((real_max_memuse or not dry_run)
+ and real_max_memuse < maxsize * memuse):
+ if verbose:
+ sys.stderr.write("Skipping %s because of memory "
+ "constraint\n" % (f.__name__,))
+ return
return f(self, maxsize)
wrapper.size = size
@@ -1011,6 +1253,8 @@ def _id(obj):
return obj
def requires_resource(resource):
+ if resource == 'gui' and not _is_gui_available():
+ return unittest.skip(_is_gui_available.reason)
if is_resource_enabled(resource):
return _id
else:
@@ -1093,6 +1337,16 @@ def run_unittest(*classes):
suite.addTest(unittest.makeSuite(cls))
_run_suite(suite)
+#=======================================================================
+# Check for the presence of docstrings.
+
+HAVE_DOCSTRINGS = (check_impl_detail(cpython=False) or
+ sys.platform == 'win32' or
+ sysconfig.get_config_var('WITH_DOC_STRINGS'))
+
+requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS,
+ "test requires docstrings")
+
#=======================================================================
# doctest driver.
@@ -1192,6 +1446,33 @@ def reap_children():
except:
break
+@contextlib.contextmanager
+def swap_attr(obj, attr, new_val):
+ """Temporarily swap out an attribute with a new object.
+
+ Usage:
+ with swap_attr(obj, "attr", 5):
+ ...
+
+ This will set obj.attr to 5 for the duration of the with: block,
+ restoring the old value at the end of the block. If `attr` doesn't
+ exist on `obj`, it will be created and then deleted at the end of the
+ block.
+ """
+ if hasattr(obj, attr):
+ real_val = getattr(obj, attr)
+ setattr(obj, attr, new_val)
+ try:
+ yield
+ finally:
+ setattr(obj, attr, real_val)
+ else:
+ setattr(obj, attr, new_val)
+ try:
+ yield
+ finally:
+ delattr(obj, attr)
+
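A short usage example for swap_attr() (illustrative only):

import sys
from test import test_support

class Quiet(object):
    """Stand-in stdout that swallows everything written to it."""
    def write(self, text):
        pass

with test_support.swap_attr(sys, 'stdout', Quiet()):
    print 'swallowed'      # goes to the Quiet instance
# The original sys.stdout is restored here, even if the block raised.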
def py3k_bytes(b):
"""Emulate the py3k bytes() constructor.
@@ -1210,22 +1491,8 @@ def py3k_bytes(b):
def args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags."""
- flag_opt_map = {
- 'bytes_warning': 'b',
- 'dont_write_bytecode': 'B',
- 'ignore_environment': 'E',
- 'no_user_site': 's',
- 'no_site': 'S',
- 'optimize': 'O',
- 'py3k_warning': '3',
- 'verbose': 'v',
- }
- args = []
- for flag, opt in flag_opt_map.items():
- v = getattr(sys.flags, flag)
- if v > 0:
- args.append('-' + opt * v)
- return args
+ import subprocess
+ return subprocess._args_from_interpreter_flags()
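The typical consumer of this helper is a test that re-runs the interpreter and wants the child to inherit the parent's -B/-E/-O/-3 style flags. A minimal sketch (illustrative only):

import subprocess
import sys
from test import test_support

# Build a child command line that reproduces the current sys.flags settings.
args = ([sys.executable] + test_support.args_from_interpreter_flags() +
        ['-c', 'pass'])
subprocess.check_call(args)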
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
diff --git a/Lib/test/test_sys.py b/Lib/test/test_sys.py
index 12d4d31..745e4ad 100644
--- a/Lib/test/test_sys.py
+++ b/Lib/test/test_sys.py
@@ -1,5 +1,6 @@
# -*- coding: iso-8859-1 -*-
import unittest, test.test_support
+from test.script_helper import assert_python_ok, assert_python_failure
import sys, os, cStringIO
import struct
import operator
@@ -114,90 +115,69 @@ class SysModuleTest(unittest.TestCase):
clear_check(exc)
def test_exit(self):
+ # call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
- try:
- sys.exit(0)
- except SystemExit, exc:
- self.assertEqual(exc.code, 0)
- except:
- self.fail("wrong exception")
- else:
- self.fail("no exception")
+ with self.assertRaises(SystemExit) as cm:
+ sys.exit()
+ self.assertIsNone(cm.exception.code)
- # call with tuple argument with one entry
- # entry will be unpacked
- try:
- sys.exit(42)
- except SystemExit, exc:
- self.assertEqual(exc.code, 42)
- except:
- self.fail("wrong exception")
- else:
- self.fail("no exception")
+ rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
+ self.assertEqual(rc, 0)
+ self.assertEqual(out, b'')
+ self.assertEqual(err, b'')
# call with integer argument
- try:
+ with self.assertRaises(SystemExit) as cm:
+ sys.exit(42)
+ self.assertEqual(cm.exception.code, 42)
+
+ # call with tuple argument with one entry
+ # entry will be unpacked
+ with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
- except SystemExit, exc:
- self.assertEqual(exc.code, 42)
- except:
- self.fail("wrong exception")
- else:
- self.fail("no exception")
+ self.assertEqual(cm.exception.code, 42)
# call with string argument
- try:
+ with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
- except SystemExit, exc:
- self.assertEqual(exc.code, "exit")
- except:
- self.fail("wrong exception")
- else:
- self.fail("no exception")
+ self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
- try:
+ with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
- except SystemExit, exc:
- self.assertEqual(exc.code, (17, 23))
- except:
- self.fail("wrong exception")
- else:
- self.fail("no exception")
+ self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
- import subprocess
# both unnormalized...
- rc = subprocess.call([sys.executable, "-c",
- "raise SystemExit, 46"])
+ rc, out, err = assert_python_failure('-c', 'raise SystemExit, 46')
self.assertEqual(rc, 46)
+ self.assertEqual(out, b'')
+ self.assertEqual(err, b'')
# ... and normalized
- rc = subprocess.call([sys.executable, "-c",
- "raise SystemExit(47)"])
+ rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
+ self.assertEqual(out, b'')
+ self.assertEqual(err, b'')
- def check_exit_message(code, expected, env=None):
- process = subprocess.Popen([sys.executable, "-c", code],
- stderr=subprocess.PIPE, env=env)
- stdout, stderr = process.communicate()
- self.assertEqual(process.returncode, 1)
- self.assertTrue(stderr.startswith(expected),
- "%s doesn't start with %s" % (repr(stderr), repr(expected)))
+ def check_exit_message(code, expected, **env_vars):
+ rc, out, err = assert_python_failure('-c', code, **env_vars)
+ self.assertEqual(rc, 1)
+ self.assertEqual(out, b'')
+ self.assertTrue(err.startswith(expected),
+ "%s doesn't start with %s" % (repr(err), repr(expected)))
- # test that stderr buffer if flushed before the exit message is written
+ # test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the unicode message is encoded to the stderr encoding
- env = os.environ.copy()
- env['PYTHONIOENCODING'] = 'latin-1'
check_exit_message(
r'import sys; sys.exit(u"h\xe9")',
- b"h\xe9", env=env)
+ b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
if test.test_support.have_unicode:
@@ -266,15 +246,16 @@ class SysModuleTest(unittest.TestCase):
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
+ @unittest.skipUnless(hasattr(sys, "setdlopenflags"),
+ 'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
- if hasattr(sys, "setdlopenflags"):
- self.assertTrue(hasattr(sys, "getdlopenflags"))
- self.assertRaises(TypeError, sys.getdlopenflags, 42)
- oldflags = sys.getdlopenflags()
- self.assertRaises(TypeError, sys.setdlopenflags)
- sys.setdlopenflags(oldflags+1)
- self.assertEqual(sys.getdlopenflags(), oldflags+1)
- sys.setdlopenflags(oldflags)
+ self.assertTrue(hasattr(sys, "getdlopenflags"))
+ self.assertRaises(TypeError, sys.getdlopenflags, 42)
+ oldflags = sys.getdlopenflags()
+ self.assertRaises(TypeError, sys.setdlopenflags)
+ sys.setdlopenflags(oldflags+1)
+ self.assertEqual(sys.getdlopenflags(), oldflags+1)
+ sys.setdlopenflags(oldflags)
def test_refcount(self):
# n here must be a global in order for this test to pass while
@@ -488,24 +469,11 @@ class SysModuleTest(unittest.TestCase):
p.wait()
self.assertIn(executable, ["''", repr(sys.executable)])
+@test.test_support.cpython_only
class SizeofTest(unittest.TestCase):
- TPFLAGS_HAVE_GC = 1<<14
- TPFLAGS_HEAPTYPE = 1L<<9
-
def setUp(self):
- self.c = len(struct.pack('c', ' '))
- self.H = len(struct.pack('H', 0))
- self.i = len(struct.pack('i', 0))
- self.l = len(struct.pack('l', 0))
- self.P = len(struct.pack('P', 0))
- # due to missing size_t information from struct, it is assumed that
- # sizeof(Py_ssize_t) = sizeof(void*)
- self.header = 'PP'
- self.vheader = self.header + 'P'
- if hasattr(sys, "gettotalrefcount"):
- self.header += '2P'
- self.vheader += '2P'
+ self.P = struct.calcsize('P')
self.longdigit = sys.long_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
@@ -515,128 +483,109 @@ class SizeofTest(unittest.TestCase):
self.file.close()
test.test_support.unlink(test.test_support.TESTFN)
- def check_sizeof(self, o, size):
- result = sys.getsizeof(o)
- if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\
- ((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))):
- size += self.gc_headsize
- msg = 'wrong size for %s: got %d, expected %d' \
- % (type(o), result, size)
- self.assertEqual(result, size, msg)
-
- def calcsize(self, fmt):
- """Wrapper around struct.calcsize which enforces the alignment of the
- end of a structure to the alignment requirement of pointer.
-
- Note: This wrapper should only be used if a pointer member is included
- and no member with a size larger than a pointer exists.
- """
- return struct.calcsize(fmt + '0P')
+ check_sizeof = test.test_support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
- h = self.header
- size = self.calcsize
+ size = test.test_support.calcobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
- self.assertEqual(sys.getsizeof(True), size(h + 'l'))
+ self.assertEqual(sys.getsizeof(True), size('l'))
# but lists are
- self.assertEqual(sys.getsizeof([]), size(h + 'P PP') + gc_header_size)
+ self.assertEqual(sys.getsizeof([]), size('P PP') + gc_header_size)
def test_default(self):
- h = self.header
- size = self.calcsize
- self.assertEqual(sys.getsizeof(True, -1), size(h + 'l'))
+ size = test.test_support.calcobjsize
+ self.assertEqual(sys.getsizeof(True, -1), size('l'))
def test_objecttypes(self):
# check all types defined in Objects/
- h = self.header
- vh = self.vheader
- size = self.calcsize
+ size = test.test_support.calcobjsize
+ vsize = test.test_support.calcvobjsize
check = self.check_sizeof
# bool
- check(True, size(h + 'l'))
+ check(True, size('l'))
# buffer
with test.test_support.check_py3k_warnings():
- check(buffer(''), size(h + '2P2Pil'))
+ check(buffer(''), size('2P2Pil'))
# builtin_function_or_method
- check(len, size(h + '3P'))
+ check(len, size('3P'))
# bytearray
samples = ['', 'u'*100000]
for sample in samples:
x = bytearray(sample)
- check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
+ check(x, vsize('iPP') + x.__alloc__())
# bytearray_iterator
- check(iter(bytearray()), size(h + 'PP'))
+ check(iter(bytearray()), size('PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
- check(get_cell().func_closure[0], size(h + 'P'))
+ check(get_cell().func_closure[0], size('P'))
# classobj (old-style class)
class class_oldstyle():
def method():
pass
- check(class_oldstyle, size(h + '7P'))
+ check(class_oldstyle, size('7P'))
# instance (old-style class)
- check(class_oldstyle(), size(h + '3P'))
+ check(class_oldstyle(), size('3P'))
# instancemethod (old-style class)
- check(class_oldstyle().method, size(h + '4P'))
+ check(class_oldstyle().method, size('4P'))
# complex
- check(complex(0,1), size(h + '2d'))
+ check(complex(0,1), size('2d'))
# code
- check(get_cell().func_code, size(h + '4i8Pi3P'))
+ check(get_cell().func_code, size('4i8Pi3P'))
# BaseException
- check(BaseException(), size(h + '3P'))
+ check(BaseException(), size('3P'))
# UnicodeEncodeError
- check(UnicodeEncodeError("", u"", 0, 0, ""), size(h + '5P2PP'))
+ check(UnicodeEncodeError("", u"", 0, 0, ""), size('5P2PP'))
# UnicodeDecodeError
- check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
+ check(UnicodeDecodeError("", "", 0, 0, ""), size('5P2PP'))
# UnicodeTranslateError
- check(UnicodeTranslateError(u"", 0, 1, ""), size(h + '5P2PP'))
+ check(UnicodeTranslateError(u"", 0, 1, ""), size('5P2PP'))
# method_descriptor (descriptor object)
- check(str.lower, size(h + '2PP'))
+ check(str.lower, size('2PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
- check(datetime.timedelta.days, size(h + '2PP'))
+ check(datetime.timedelta.days, size('2PP'))
# getset_descriptor (descriptor object)
import __builtin__
- check(__builtin__.file.closed, size(h + '2PP'))
+ check(__builtin__.file.closed, size('2PP'))
# wrapper_descriptor (descriptor object)
- check(int.__add__, size(h + '2P2P'))
+ check(int.__add__, size('2P2P'))
# dictproxy
class C(object): pass
- check(C.__dict__, size(h + 'P'))
+ check(C.__dict__, size('P'))
# method-wrapper (descriptor object)
- check({}.__iter__, size(h + '2P'))
+ check({}.__iter__, size('2P'))
# dict
- check({}, size(h + '3P2P' + 8*'P2P'))
+ check({}, size('3P2P' + 8*'P2P'))
x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
- check(x, size(h + '3P2P' + 8*'P2P') + 16*size('P2P'))
+ check(x, size('3P2P' + 8*'P2P') + 16*struct.calcsize('P2P'))
# dictionary-keyiterator
- check({}.iterkeys(), size(h + 'P2PPP'))
+ check({}.iterkeys(), size('P2PPP'))
# dictionary-valueiterator
- check({}.itervalues(), size(h + 'P2PPP'))
+ check({}.itervalues(), size('P2PPP'))
# dictionary-itemiterator
- check({}.iteritems(), size(h + 'P2PPP'))
+ check({}.iteritems(), size('P2PPP'))
# ellipses
- check(Ellipsis, size(h + ''))
+ check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
- check(x, size(h + '32B2iB'))
+ check(x, size('32B2iB'))
# enumerate
- check(enumerate([]), size(h + 'l3P'))
+ check(enumerate([]), size('l3P'))
# file
- check(self.file, size(h + '4P2i4P3i3P3i'))
+ check(self.file, size('4P2i4P3i3P3i'))
# float
- check(float(0), size(h + 'd'))
+ check(float(0), size('d'))
# sys.floatinfo
- check(sys.float_info, size(vh) + self.P * len(sys.float_info))
+ check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
@@ -645,10 +594,10 @@ class SizeofTest(unittest.TestCase):
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
- check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
+ check(x, vsize('12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
- check(func, size(h + '9P'))
+ check(func, size('9P'))
class c():
@staticmethod
def foo():
@@ -657,65 +606,65 @@ class SizeofTest(unittest.TestCase):
def bar(cls):
pass
# staticmethod
- check(foo, size(h + 'P'))
+ check(foo, size('P'))
# classmethod
- check(bar, size(h + 'P'))
+ check(bar, size('P'))
# generator
def get_gen(): yield 1
- check(get_gen(), size(h + 'Pi2P'))
+ check(get_gen(), size('Pi2P'))
# integer
- check(1, size(h + 'l'))
- check(100, size(h + 'l'))
+ check(1, size('l'))
+ check(100, size('l'))
# iterator
- check(iter('abc'), size(h + 'lP'))
+ check(iter('abc'), size('lP'))
# callable-iterator
import re
- check(re.finditer('',''), size(h + '2P'))
+ check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
- check(sample, size(vh + 'PP') + len(sample)*self.P)
+ check(sample, vsize('PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
- check(iter([]), size(h + 'lP'))
+ check(iter([]), size('lP'))
# listreverseiterator (list)
- check(reversed([]), size(h + 'lP'))
+ check(reversed([]), size('lP'))
# long
- check(0L, size(vh))
- check(1L, size(vh) + self.longdigit)
- check(-1L, size(vh) + self.longdigit)
+ check(0L, vsize(''))
+ check(1L, vsize('') + self.longdigit)
+ check(-1L, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.long_info.bits_per_digit
- check(long(PyLong_BASE), size(vh) + 2*self.longdigit)
- check(long(PyLong_BASE**2-1), size(vh) + 2*self.longdigit)
- check(long(PyLong_BASE**2), size(vh) + 3*self.longdigit)
+ check(long(PyLong_BASE), vsize('') + 2*self.longdigit)
+ check(long(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
+ check(long(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
- check(unittest, size(h + 'P'))
+ check(unittest, size('P'))
# None
- check(None, size(h + ''))
+ check(None, size(''))
# object
- check(object(), size(h + ''))
+ check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
- check(x, size(h + '4Pi'))
+ check(x, size('4Pi'))
# PyCObject
# PyCapsule
# XXX
# rangeiterator
- check(iter(xrange(1)), size(h + '4l'))
+ check(iter(xrange(1)), size('4l'))
# reverse
- check(reversed(''), size(h + 'PP'))
+ check(reversed(''), size('PP'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
- s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP')
+ s = size('3P2P' + PySet_MINSIZE*'lP' + 'lP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
@@ -732,23 +681,24 @@ class SizeofTest(unittest.TestCase):
check(set(sample), s + newsize*struct.calcsize('lP'))
check(frozenset(sample), s + newsize*struct.calcsize('lP'))
# setiterator
- check(iter(set()), size(h + 'P3P'))
+ check(iter(set()), size('P3P'))
# slice
- check(slice(1), size(h + '3P'))
+ check(slice(1), size('3P'))
# str
- check('', struct.calcsize(vh + 'li') + 1)
- check('abc', struct.calcsize(vh + 'li') + 1 + 3*self.c)
+ vh = test.test_support._vheader
+ check('', struct.calcsize(vh + 'lic'))
+ check('abc', struct.calcsize(vh + 'lic') + 3)
# super
- check(super(int), size(h + '3P'))
+ check(super(int), size('3P'))
# tuple
- check((), size(vh))
- check((1,2,3), size(vh) + 3*self.P)
+ check((), vsize(''))
+ check((1,2,3), vsize('') + 3*self.P)
# tupleiterator
- check(iter(()), size(h + 'lP'))
+ check(iter(()), size('lP'))
# type
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs)
- s = size(vh + 'P2P15Pl4PP9PP11PI') + size('41P 10P 3P 6P')
+ s = vsize('P2P15Pl4PP9PP11PI') + struct.calcsize('41P 10P 3P 6P')
class newstyleclass(object):
pass
check(newstyleclass, s)
@@ -763,41 +713,40 @@ class SizeofTest(unittest.TestCase):
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
- check(s, size(h + 'PPlP') + usize * (len(s) + 1))
+ check(s, size('PPlP') + usize * (len(s) + 1))
# weakref
import weakref
- check(weakref.ref(int), size(h + '2Pl2P'))
+ check(weakref.ref(int), size('2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
- check(weakref.proxy(int), size(h + '2Pl2P'))
+ check(weakref.proxy(int), size('2Pl2P'))
# xrange
- check(xrange(1), size(h + '3l'))
- check(xrange(66000), size(h + '3l'))
+ check(xrange(1), size('3l'))
+ check(xrange(66000), size('3l'))
def test_pythontypes(self):
# check all types defined in Python/
- h = self.header
- vh = self.vheader
- size = self.calcsize
+ size = test.test_support.calcobjsize
+ vsize = test.test_support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
- check(_ast.AST(), size(h + ''))
+ check(_ast.AST(), size(''))
# imp.NullImporter
import imp
- check(imp.NullImporter(self.file.name), size(h + ''))
+ check(imp.NullImporter(self.file.name), size(''))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb != None:
- check(tb, size(h + '2P2i'))
+ check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
- check(sys.flags, size(vh) + self.P * len(sys.flags))
+ check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_main():
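The rewrite above drops the per-test header bookkeeping in favour of the shared
test_support.calcobjsize()/calcvobjsize() helpers. A minimal sketch of what such
helpers are assumed to do (the header format strings below are illustrative, not
the exact test_support values): prepend the fixed-size or variable-size object
header to a struct format and measure it with struct.calcsize().

    import struct

    _header = '2P'             # assumed PyObject HEAD: refcount + type pointer
    _vheader = _header + 'P'   # assumed PyVarObject HEAD: adds ob_size

    def calcobjsize(fmt):
        # size of a fixed-size object whose fields follow the object header;
        # the trailing '0P' pads the total out to pointer alignment
        return struct.calcsize(_header + fmt + '0P')

    def calcvobjsize(fmt):
        # size of a variable-size object (str, tuple, long, ...)
        return struct.calcsize(_vheader + fmt + '0P')

    print calcobjsize('d')     # e.g. what check(float(0), size('d')) compares
    print calcvobjsize('')     # e.g. the empty-tuple baseline used above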
diff --git a/Lib/test/test_sys_settrace.py b/Lib/test/test_sys_settrace.py
index 1f77f9b..9c7bcef 100644
--- a/Lib/test/test_sys_settrace.py
+++ b/Lib/test/test_sys_settrace.py
@@ -417,7 +417,7 @@ class RaisingTraceFuncTestCase(unittest.TestCase):
except ValueError:
pass
else:
- self.fail("exception not thrown!")
+ self.fail("exception not raised!")
except RuntimeError:
self.fail("recursion counter not reset")
@@ -670,6 +670,14 @@ def no_jump_to_non_integers(output):
no_jump_to_non_integers.jump = (2, "Spam")
no_jump_to_non_integers.output = [True]
+def jump_across_with(output):
+ with open(test_support.TESTFN, "wb") as fp:
+ pass
+ with open(test_support.TESTFN, "wb") as fp:
+ pass
+jump_across_with.jump = (1, 3)
+jump_across_with.output = []
+
# This verifies that you can't set f_lineno via _getframe or similar
# trickery.
def no_jump_without_trace_function():
@@ -739,6 +747,9 @@ class JumpTestCase(unittest.TestCase):
self.run_test(no_jump_to_non_integers)
def test_19_no_jump_without_trace_function(self):
no_jump_without_trace_function()
+ def test_jump_across_with(self):
+ self.addCleanup(test_support.unlink, test_support.TESTFN)
+ self.run_test(jump_across_with)
def test_20_large_function(self):
d = {}
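The new jump_across_with case is driven, like the other jump_* functions, by a
tracer that assigns frame.f_lineno. A self-contained sketch of that mechanism
(not taken from the test harness; it relies only on documented sys.settrace()
behaviour -- f_lineno may only be assigned from a trace function handling a
'line' event, and only between compatible statements):

    import sys

    def jump_tracer(frame, event, arg):
        # Count 'line' events inside func(); on the second one, move
        # f_lineno one line forward so the statement about to run is skipped.
        if event == 'line' and frame.f_code.co_name == 'func':
            jump_tracer.lines += 1
            if jump_tracer.lines == 2:
                frame.f_lineno = frame.f_lineno + 1
        return jump_tracer
    jump_tracer.lines = 0

    def func():
        output = []
        output.append(1)
        output.append(2)
        return output

    sys.settrace(jump_tracer)
    result = func()
    sys.settrace(None)
    print result          # [2] -- the first append() was jumped over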
diff --git a/Lib/test/test_sysconfig.py b/Lib/test/test_sysconfig.py
index 716fa5e..755f35f 100644
--- a/Lib/test/test_sysconfig.py
+++ b/Lib/test/test_sysconfig.py
@@ -14,6 +14,7 @@ from sysconfig import (get_paths, get_platform, get_config_vars,
get_path, get_path_names, _INSTALL_SCHEMES,
_get_default_scheme, _expand_vars,
get_scheme_names, get_config_var)
+import _osx_support
class TestSysConfig(unittest.TestCase):
@@ -137,6 +138,7 @@ class TestSysConfig(unittest.TestCase):
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'PowerPC'))
+ _osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
@@ -156,6 +158,7 @@ class TestSysConfig(unittest.TestCase):
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))
+ _osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
@@ -171,6 +174,7 @@ class TestSysConfig(unittest.TestCase):
sys.maxint = maxint
# macbook with fat binaries (fat, universal or fat64)
+ _osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4'
get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
@@ -179,6 +183,7 @@ class TestSysConfig(unittest.TestCase):
self.assertEqual(get_platform(), 'macosx-10.4-fat')
+ _osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
@@ -186,18 +191,21 @@ class TestSysConfig(unittest.TestCase):
self.assertEqual(get_platform(), 'macosx-10.4-intel')
+ _osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat3')
+ _osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-universal')
+ _osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
@@ -206,6 +214,7 @@ class TestSysConfig(unittest.TestCase):
self.assertEqual(get_platform(), 'macosx-10.4-fat64')
for arch in ('ppc', 'i386', 'x86_64', 'ppc64'):
+ _osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
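Each block above resets the cached configuration with
_osx_support._remove_original_values() before overriding CFLAGS, so that
get_platform() re-derives the architecture suffix from the new flags instead of
reusing saved originals. The mapping the assertions expect can be illustrated
with a small stand-alone helper (inferred from the expected values, not the
sysconfig implementation):

    import re

    _SUFFIXES = {
        ('i386', 'ppc'): 'fat',
        ('i386', 'x86_64'): 'intel',
        ('i386', 'ppc', 'x86_64'): 'fat3',
        ('ppc64', 'x86_64'): 'fat64',
        ('i386', 'ppc', 'ppc64', 'x86_64'): 'universal',
    }

    def macosx_arch_suffix(cflags):
        # collect the -arch flags and map the combination to a suffix
        archs = tuple(sorted(set(re.findall(r'-arch\s+(\S+)', cflags))))
        if len(archs) == 1:
            return archs[0]
        return _SUFFIXES.get(archs, 'fat')

    print macosx_arch_suffix('-arch x86_64 -arch i386 -O3')    # intel
    print macosx_arch_suffix('-arch ppc -arch i386 -O3')       # fat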
diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py
index d5b864e..ff3265f 100644
--- a/Lib/test/test_tarfile.py
+++ b/Lib/test/test_tarfile.py
@@ -154,6 +154,83 @@ class UstarReadTest(ReadTest):
def test_fileobj_symlink2(self):
self._test_fileobj_link("./ustar/linktest2/symtype", "ustar/linktest1/regtype")
+ def test_issue14160(self):
+ self._test_fileobj_link("symtype2", "ustar/regtype")
+
+
+class ListTest(ReadTest, unittest.TestCase):
+
+ # Override setUp to use default encoding (UTF-8)
+ def setUp(self):
+ self.tar = tarfile.open(self.tarname, mode=self.mode)
+
+ def test_list(self):
+ with test_support.captured_stdout() as t:
+ self.tar.list(verbose=False)
+ out = t.getvalue()
+ self.assertIn('ustar/conttype', out)
+ self.assertIn('ustar/regtype', out)
+ self.assertIn('ustar/lnktype', out)
+ self.assertIn('ustar' + ('/12345' * 40) + '67/longname', out)
+ self.assertIn('./ustar/linktest2/symtype', out)
+ self.assertIn('./ustar/linktest2/lnktype', out)
+ # Make sure it puts a trailing slash after directory names
+ self.assertIn('ustar/dirtype/', out)
+ self.assertIn('ustar/dirtype-with-size/', out)
+ # Make sure it is able to print non-ASCII characters
+ self.assertIn('ustar/umlauts-'
+ '\xc4\xd6\xdc\xe4\xf6\xfc\xdf', out)
+ self.assertIn('misc/regtype-hpux-signed-chksum-'
+ '\xc4\xd6\xdc\xe4\xf6\xfc\xdf', out)
+ self.assertIn('misc/regtype-old-v7-signed-chksum-'
+ '\xc4\xd6\xdc\xe4\xf6\xfc\xdf', out)
+ # Make sure it prints files separated by one newline without any
+ # 'ls -l'-like accessories if the verbose flag is not used
+ # ...
+ # ustar/conttype
+ # ustar/regtype
+ # ...
+ self.assertRegexpMatches(out, r'ustar/conttype ?\r?\n'
+ r'ustar/regtype ?\r?\n')
+ # Make sure it does not print the source of a link when the verbose flag is not used
+ self.assertNotIn('link to', out)
+ self.assertNotIn('->', out)
+
+ def test_list_verbose(self):
+ with test_support.captured_stdout() as t:
+ self.tar.list(verbose=True)
+ out = t.getvalue()
+ # Make sure it prints files separated by one newline with 'ls -l'-like
+ # accessories if the verbose flag is used
+ # ...
+ # ?rw-r--r-- tarfile/tarfile 7011 2003-01-06 07:19:43 ustar/conttype
+ # ?rw-r--r-- tarfile/tarfile 7011 2003-01-06 07:19:43 ustar/regtype
+ # ...
+ self.assertRegexpMatches(out, (r'-rw-r--r-- tarfile/tarfile\s+7011 '
+ r'\d{4}-\d\d-\d\d\s+\d\d:\d\d:\d\d '
+ r'ustar/\w+type ?\r?\n') * 2)
+ # Make sure it prints the source of a link when the verbose flag is used
+ self.assertIn('ustar/symtype -> regtype', out)
+ self.assertIn('./ustar/linktest2/symtype -> ../linktest1/regtype', out)
+ self.assertIn('./ustar/linktest2/lnktype link to '
+ './ustar/linktest1/regtype', out)
+ self.assertIn('gnu' + ('/123' * 125) + '/longlink link to gnu' +
+ ('/123' * 125) + '/longname', out)
+ self.assertIn('pax' + ('/123' * 125) + '/longlink link to pax' +
+ ('/123' * 125) + '/longname', out)
+
+
+class GzipListTest(ListTest):
+ tarname = gzipname
+ mode = "r:gz"
+ taropen = tarfile.TarFile.gzopen
+
+
+class Bz2ListTest(ListTest):
+ tarname = bz2name
+ mode = "r:bz2"
+ taropen = tarfile.TarFile.bz2open
+
class CommonReadTest(ReadTest):
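The new ListTest cases depend on test_support.captured_stdout(), which swaps
sys.stdout for a StringIO for the duration of the block so that output-only
APIs such as TarFile.list() can be asserted on. A condensed usage sketch (the
archive path is a placeholder):

    import tarfile
    from test import test_support

    def archive_listing(path):
        tar = tarfile.open(path)
        try:
            with test_support.captured_stdout() as stream:
                tar.list(verbose=False)
            return stream.getvalue().splitlines()
        finally:
            tar.close()

    # names = archive_listing('example.tar')
    # assert 'some/member' in names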
@@ -178,6 +255,14 @@ class CommonReadTest(ReadTest):
self.assertRaises(tarfile.ReadError, tarfile.open, tmpname, self.mode)
self.assertRaises(tarfile.ReadError, tarfile.open, tmpname)
+ def test_non_existent_tarfile(self):
+ # Test for issue #11513: prevent non-existent gzipped tarfiles from
+ # raising multiple exceptions.
+ exctype = OSError if '|' in self.mode else IOError
+ with self.assertRaisesRegexp(exctype, "xxx") as ex:
+ tarfile.open("xxx", self.mode)
+ self.assertEqual(ex.exception.errno, errno.ENOENT)
+
def test_ignore_zeros(self):
# Test TarFile's ignore_zeros option.
if self.mode.endswith(":gz"):
@@ -202,6 +287,7 @@ class CommonReadTest(ReadTest):
class MiscReadTest(CommonReadTest):
+ taropen = tarfile.TarFile.taropen
def test_no_name_argument(self):
fobj = open(self.tarname, "rb")
@@ -222,6 +308,17 @@ class MiscReadTest(CommonReadTest):
tar = tarfile.open(fileobj=fobj, mode=self.mode)
self.assertEqual(tar.name, None)
+ def test_illegal_mode_arg(self):
+ with open(tmpname, 'wb'):
+ pass
+ self.addCleanup(os.unlink, tmpname)
+ with self.assertRaisesRegexp(ValueError, 'mode must be '):
+ tar = self.taropen(tmpname, 'q')
+ with self.assertRaisesRegexp(ValueError, 'mode must be '):
+ tar = self.taropen(tmpname, 'rw')
+ with self.assertRaisesRegexp(ValueError, 'mode must be '):
+ tar = self.taropen(tmpname, '')
+
def test_fileobj_with_offset(self):
# Skip the first member and store values from the second member
# of the testtar.
@@ -257,7 +354,7 @@ class MiscReadTest(CommonReadTest):
def test_fail_comp(self):
# For Gzip and Bz2 Tests: fail with a ReadError on an uncompressed file.
if self.mode == "r:":
- return
+ self.skipTest('needs a gz or bz2 mode')
self.assertRaises(tarfile.ReadError, tarfile.open, tarname, self.mode)
fobj = open(tarname, "rb")
self.assertRaises(tarfile.ReadError, tarfile.open, fileobj=fobj, mode=self.mode)
@@ -294,26 +391,21 @@ class MiscReadTest(CommonReadTest):
def test_extract_hardlink(self):
# Test hardlink extraction (e.g. bug #857297).
- tar = tarfile.open(tarname, errorlevel=1, encoding="iso8859-1")
+ with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar:
+ tar.extract("ustar/regtype", TEMPDIR)
+ self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype"))
- tar.extract("ustar/regtype", TEMPDIR)
- try:
tar.extract("ustar/lnktype", TEMPDIR)
- except EnvironmentError, e:
- if e.errno == errno.ENOENT:
- self.fail("hardlink not extracted properly")
+ self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype"))
+ with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f:
+ data = f.read()
+ self.assertEqual(md5sum(data), md5_regtype)
- data = open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb").read()
- self.assertEqual(md5sum(data), md5_regtype)
-
- try:
tar.extract("ustar/symtype", TEMPDIR)
- except EnvironmentError, e:
- if e.errno == errno.ENOENT:
- self.fail("symlink not extracted properly")
-
- data = open(os.path.join(TEMPDIR, "ustar/symtype"), "rb").read()
- self.assertEqual(md5sum(data), md5_regtype)
+ self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype"))
+ with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f:
+ data = f.read()
+ self.assertEqual(md5sum(data), md5_regtype)
def test_extractall(self):
# Test if extractall() correctly restores directory permissions
@@ -347,6 +439,14 @@ class MiscReadTest(CommonReadTest):
finally:
os.remove(empty)
+ def test_parallel_iteration(self):
+ # Issue #16601: restarting iteration over a tarfile used to continue
+ # from where the previous iteration left off.
+ with tarfile.open(self.tarname) as tar:
+ for m1, m2 in zip(tar, tar):
+ self.assertEqual(m1.offset, m2.offset)
+ self.assertEqual(m1.name, m2.name)
+
class StreamReadTest(CommonReadTest):
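test_parallel_iteration pins down the issue #16601 contract: two iterators over
the same TarFile must each start at the first member rather than one resuming
where the other stopped. The same check as a stand-alone function (the path is
a placeholder):

    import tarfile

    def check_parallel_iteration(path):
        tar = tarfile.open(path)
        try:
            # both iterators must visit the members in lockstep
            for m1, m2 in zip(tar, tar):
                assert m1.name == m2.name and m1.offset == m2.offset
        finally:
            tar.close()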
@@ -440,14 +540,12 @@ class DetectReadTest(unittest.TestCase):
def test_detect_fileobj(self):
self._test_modes(self._testfunc_fileobj)
+ @unittest.skipUnless(bz2, 'requires bz2')
def test_detect_stream_bz2(self):
# Originally, tarfile's stream detection looked for the string
# "BZh91" at the start of the file. This is incorrect because
# the '9' represents the blocksize (900kB). If the file was
# compressed using another blocksize autodetection fails.
- if not bz2:
- return
-
with open(tarname, "rb") as fobj:
data = fobj.read()
@@ -633,6 +731,12 @@ class WriteTestBase(unittest.TestCase):
tar.addfile(tarfile.TarInfo("foo"))
tar.close()
self.assertTrue(fobj.closed is False, "external fileobjs must never closed")
+ # Issue #20238: Incomplete gzip output with mode="w:gz"
+ data = fobj.getvalue()
+ del tar
+ test_support.gc_collect()
+ self.assertFalse(fobj.closed)
+ self.assertEqual(data, fobj.getvalue())
class WriteTest(WriteTestBase):
@@ -855,7 +959,7 @@ class WriteTest(WriteTestBase):
tar = tarfile.open(tmpname, "r")
for t in tar:
- self.assert_(t.name == "." or t.name.startswith("./"))
+ self.assertTrue(t.name == "." or t.name.startswith("./"))
tar.close()
finally:
os.chdir(cwd)
@@ -949,6 +1053,22 @@ class WriteTest(WriteTestBase):
os.unlink(temparchive)
shutil.rmtree(tempdir)
+ def test_open_nonwritable_fileobj(self):
+ for exctype in IOError, EOFError, RuntimeError:
+ class BadFile(StringIO.StringIO):
+ first = True
+ def write(self, data):
+ if self.first:
+ self.first = False
+ raise exctype
+
+ f = BadFile()
+ with self.assertRaises(exctype):
+ tar = tarfile.open(tmpname, self.mode, fileobj=f,
+ format=tarfile.PAX_FORMAT,
+ pax_headers={'non': 'empty'})
+ self.assertFalse(f.closed)
+
class StreamWriteTest(WriteTestBase):
mode = "w|"
@@ -976,12 +1096,11 @@ class StreamWriteTest(WriteTestBase):
self.assertTrue(data.count("\0") == tarfile.RECORDSIZE,
"incorrect zero padding")
+ @unittest.skipIf(sys.platform == 'win32', 'not appropriate for Windows')
+ @unittest.skipUnless(hasattr(os, 'umask'), 'requires os.umask')
def test_file_mode(self):
# Test for issue #8464: Create files with correct
# permissions.
- if sys.platform == "win32" or not hasattr(os, "umask"):
- return
-
if os.path.exists(tmpname):
os.remove(tmpname)
@@ -1354,15 +1473,13 @@ class AppendTest(unittest.TestCase):
self._add_testfile()
self._test(names=["foo", "bar"])
+ @unittest.skipUnless(gzip, 'requires gzip')
def test_append_gz(self):
- if gzip is None:
- return
self._create_testtar("w:gz")
self.assertRaises(tarfile.ReadError, tarfile.open, tmpname, "a")
+ @unittest.skipUnless(bz2, 'requires bz2')
def test_append_bz2(self):
- if bz2 is None:
- return
self._create_testtar("w:bz2")
self.assertRaises(tarfile.ReadError, tarfile.open, tmpname, "a")
@@ -1534,6 +1651,7 @@ class LinkEmulationTest(ReadTest):
class GzipMiscReadTest(MiscReadTest):
tarname = gzipname
mode = "r:gz"
+ taropen = tarfile.TarFile.gzopen
class GzipUstarReadTest(UstarReadTest):
tarname = gzipname
mode = "r:gz"
@@ -1549,6 +1667,7 @@ class GzipStreamWriteTest(StreamWriteTest):
class Bz2MiscReadTest(MiscReadTest):
tarname = bz2name
mode = "r:bz2"
+ taropen = tarfile.TarFile.bz2open
class Bz2UstarReadTest(UstarReadTest):
tarname = bz2name
mode = "r:bz2"
@@ -1601,6 +1720,7 @@ def test_main():
MemberReadTest,
GNUReadTest,
PaxReadTest,
+ ListTest,
WriteTest,
StreamWriteTest,
GNUWriteTest,
@@ -1632,6 +1752,7 @@ def test_main():
GzipMiscReadTest,
GzipUstarReadTest,
GzipStreamReadTest,
+ GzipListTest,
GzipWriteTest,
GzipStreamWriteTest,
]
@@ -1646,6 +1767,7 @@ def test_main():
Bz2MiscReadTest,
Bz2UstarReadTest,
Bz2StreamReadTest,
+ Bz2ListTest,
Bz2WriteTest,
Bz2StreamWriteTest,
Bz2PartialReadTest,
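Several hunks in this file follow one pattern: a silent "return" that used to
hide an unmet precondition becomes an explicit unittest skip, so the test shows
up as skipped instead of silently counting as a pass. Both idioms used above,
in a generic sketch:

    import unittest

    try:
        import bz2
    except ImportError:
        bz2 = None

    class ExampleSkips(unittest.TestCase):

        @unittest.skipUnless(bz2, 'requires bz2')
        def test_decorator_style(self):
            self.assertIsNotNone(bz2)

        def test_imperative_style(self):
            if bz2 is None:
                self.skipTest('requires bz2')
            self.assertIsNotNone(bz2)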
diff --git a/Lib/test/test_tcl.py b/Lib/test/test_tcl.py
index e8ba58f..91fb28f 100644
--- a/Lib/test/test_tcl.py
+++ b/Lib/test/test_tcl.py
@@ -1,8 +1,8 @@
-#!/usr/bin/env python
-
import unittest
+import sys
import os
from test import test_support
+from subprocess import Popen, PIPE
# Skip this test if the _tkinter module wasn't built.
_tkinter = test_support.import_module('_tkinter')
@@ -10,6 +10,34 @@ _tkinter = test_support.import_module('_tkinter')
from Tkinter import Tcl
from _tkinter import TclError
+try:
+ from _testcapi import INT_MAX, PY_SSIZE_T_MAX
+except ImportError:
+ INT_MAX = PY_SSIZE_T_MAX = sys.maxsize
+
+tcl_version = _tkinter.TCL_VERSION.split('.')
+try:
+ for i in range(len(tcl_version)):
+ tcl_version[i] = int(tcl_version[i])
+except ValueError:
+ pass
+tcl_version = tuple(tcl_version)
+
+_tk_patchlevel = None
+def get_tk_patchlevel():
+ global _tk_patchlevel
+ if _tk_patchlevel is None:
+ tcl = Tcl()
+ patchlevel = []
+ for x in tcl.call('info', 'patchlevel').split('.'):
+ try:
+ x = int(x, 10)
+ except ValueError:
+ x = -1
+ patchlevel.append(x)
+ _tk_patchlevel = tuple(patchlevel)
+ return _tk_patchlevel
+
class TkinterTest(unittest.TestCase):
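The module-level code added above converts TCL_VERSION and 'info patchlevel'
into tuples of ints before comparing. A short illustration of why string
comparison is not enough for version gates such as tcl_version >= (8, 5):

    def version_tuple(text):
        parts = []
        for item in text.split('.'):
            try:
                parts.append(int(item, 10))
            except ValueError:
                parts.append(-1)    # e.g. 'b1' in a pre-release patchlevel
        return tuple(parts)

    assert '8.10' < '8.5'                                  # lexicographic order
    assert version_tuple('8.10') > version_tuple('8.5')    # numeric order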
@@ -22,6 +50,7 @@ class TclTest(unittest.TestCase):
def setUp(self):
self.interp = Tcl()
+ self.wantobjects = self.interp.tk.wantobjects()
def testEval(self):
tcl = self.interp
@@ -98,6 +127,53 @@ class TclTest(unittest.TestCase):
tcl = self.interp
self.assertRaises(TclError,tcl.unsetvar,'a')
+ def test_getint(self):
+ tcl = self.interp.tk
+ self.assertEqual(tcl.getint(' 42 '), 42)
+ self.assertEqual(tcl.getint(42), 42)
+ self.assertRaises(TypeError, tcl.getint)
+ self.assertRaises(TypeError, tcl.getint, '42', '10')
+ self.assertRaises(TypeError, tcl.getint, 42.0)
+ self.assertRaises(TclError, tcl.getint, 'a')
+ self.assertRaises((TypeError, ValueError, TclError),
+ tcl.getint, '42\0')
+ if test_support.have_unicode:
+ self.assertEqual(tcl.getint(unicode('42')), 42)
+ self.assertRaises((UnicodeEncodeError, ValueError, TclError),
+ tcl.getint, '42' + unichr(0xd800))
+
+ def test_getdouble(self):
+ tcl = self.interp.tk
+ self.assertEqual(tcl.getdouble(' 42 '), 42.0)
+ self.assertEqual(tcl.getdouble(' 42.5 '), 42.5)
+ self.assertEqual(tcl.getdouble(42.5), 42.5)
+ self.assertRaises(TypeError, tcl.getdouble)
+ self.assertRaises(TypeError, tcl.getdouble, '42.5', '10')
+ self.assertRaises(TypeError, tcl.getdouble, 42)
+ self.assertRaises(TclError, tcl.getdouble, 'a')
+ self.assertRaises((TypeError, ValueError, TclError),
+ tcl.getdouble, '42.5\0')
+ if test_support.have_unicode:
+ self.assertEqual(tcl.getdouble(unicode('42.5')), 42.5)
+ self.assertRaises((UnicodeEncodeError, ValueError, TclError),
+ tcl.getdouble, '42.5' + unichr(0xd800))
+
+ def test_getboolean(self):
+ tcl = self.interp.tk
+ self.assertIs(tcl.getboolean('on'), True)
+ self.assertIs(tcl.getboolean('1'), True)
+ self.assertEqual(tcl.getboolean(42), 42)
+ self.assertRaises(TypeError, tcl.getboolean)
+ self.assertRaises(TypeError, tcl.getboolean, 'on', '1')
+ self.assertRaises(TypeError, tcl.getboolean, 1.0)
+ self.assertRaises(TclError, tcl.getboolean, 'a')
+ self.assertRaises((TypeError, ValueError, TclError),
+ tcl.getboolean, 'on\0')
+ if test_support.have_unicode:
+ self.assertIs(tcl.getboolean(unicode('on')), True)
+ self.assertRaises((UnicodeEncodeError, ValueError, TclError),
+ tcl.getboolean, 'on' + unichr(0xd800))
+
def testEvalFile(self):
tcl = self.interp
filename = "testEvalFile.tcl"
@@ -114,6 +190,18 @@ class TclTest(unittest.TestCase):
self.assertEqual(tcl.eval('set b'),'2')
self.assertEqual(tcl.eval('set c'),'3')
+ def test_evalfile_null_in_result(self):
+ tcl = self.interp
+ with open(test_support.TESTFN, 'wb') as f:
+ self.addCleanup(test_support.unlink, test_support.TESTFN)
+ f.write("""
+ set a "a\0b"
+ set b "a\\0b"
+ """)
+ tcl.evalfile(test_support.TESTFN)
+ self.assertEqual(tcl.eval('set a'), 'a\xc0\x80b')
+ self.assertEqual(tcl.eval('set b'), 'a\xc0\x80b')
+
def testEvalFileException(self):
tcl = self.interp
filename = "doesnotexists"
@@ -127,34 +215,432 @@ class TclTest(unittest.TestCase):
tcl = self.interp
self.assertRaises(TclError,tcl.eval,'package require DNE')
+ @unittest.skipUnless(sys.platform == 'win32', "only applies to Windows")
def testLoadWithUNC(self):
- import sys
- if sys.platform != 'win32':
- return
-
# Build a UNC path from the regular path.
# Something like
# \\%COMPUTERNAME%\c$\python27\python.exe
fullname = os.path.abspath(sys.executable)
if fullname[1] != ':':
- return
+ self.skipTest('unusable path: %r' % fullname)
unc_name = r'\\%s\%s$\%s' % (os.environ['COMPUTERNAME'],
fullname[0],
fullname[3:])
with test_support.EnvironmentVarGuard() as env:
env.unset("TCL_LIBRARY")
- f = os.popen('%s -c "import Tkinter; print Tkinter"' % (unc_name,))
+ cmd = '%s -c "import Tkinter; print Tkinter"' % (unc_name,)
+
+ try:
+ p = Popen(cmd, stdout=PIPE, stderr=PIPE)
+ except WindowsError as e:
+ if e.winerror == 5:
+ self.skipTest('Not permitted to start the child process')
+ else:
+ raise
+
+ out_data, err_data = p.communicate()
+
+ msg = '\n\n'.join(['"Tkinter.py" not in output',
+ 'Command:', cmd,
+ 'stdout:', out_data,
+ 'stderr:', err_data])
+
+ self.assertIn('Tkinter.py', out_data, msg)
- self.assertTrue('Tkinter.py' in f.read())
- # exit code must be zero
- self.assertEqual(f.close(), None)
+ self.assertEqual(p.wait(), 0, 'Non-zero exit code')
+
+
+ def test_exprstring(self):
+ tcl = self.interp
+ tcl.call('set', 'a', 3)
+ tcl.call('set', 'b', 6)
+ def check(expr, expected):
+ result = tcl.exprstring(expr)
+ self.assertEqual(result, expected)
+ self.assertIsInstance(result, str)
+
+ self.assertRaises(TypeError, tcl.exprstring)
+ self.assertRaises(TypeError, tcl.exprstring, '8.2', '+6')
+ self.assertRaises(TclError, tcl.exprstring, 'spam')
+ check('', '0')
+ check('8.2 + 6', '14.2')
+ check('3.1 + $a', '6.1')
+ check('2 + "$a.$b"', '5.6')
+ check('4*[llength "6 2"]', '8')
+ check('{word one} < "word $a"', '0')
+ check('4*2 < 7', '0')
+ check('hypot($a, 4)', '5.0')
+ check('5 / 4', '1')
+ check('5 / 4.0', '1.25')
+ check('5 / ( [string length "abcd"] + 0.0 )', '1.25')
+ check('20.0/5.0', '4.0')
+ check('"0x03" > "2"', '1')
+ check('[string length "a\xc2\xbd\xe2\x82\xac"]', '3')
+ check(r'[string length "a\xbd\u20ac"]', '3')
+ check('"abc"', 'abc')
+ check('"a\xc2\xbd\xe2\x82\xac"', 'a\xc2\xbd\xe2\x82\xac')
+ check(r'"a\xbd\u20ac"', 'a\xc2\xbd\xe2\x82\xac')
+ check(r'"a\0b"', 'a\xc0\x80b')
+ if tcl_version >= (8, 5):
+ check('2**64', str(2**64))
+
+ def test_exprdouble(self):
+ tcl = self.interp
+ tcl.call('set', 'a', 3)
+ tcl.call('set', 'b', 6)
+ def check(expr, expected):
+ result = tcl.exprdouble(expr)
+ self.assertEqual(result, expected)
+ self.assertIsInstance(result, float)
+
+ self.assertRaises(TypeError, tcl.exprdouble)
+ self.assertRaises(TypeError, tcl.exprdouble, '8.2', '+6')
+ self.assertRaises(TclError, tcl.exprdouble, 'spam')
+ check('', 0.0)
+ check('8.2 + 6', 14.2)
+ check('3.1 + $a', 6.1)
+ check('2 + "$a.$b"', 5.6)
+ check('4*[llength "6 2"]', 8.0)
+ check('{word one} < "word $a"', 0.0)
+ check('4*2 < 7', 0.0)
+ check('hypot($a, 4)', 5.0)
+ check('5 / 4', 1.0)
+ check('5 / 4.0', 1.25)
+ check('5 / ( [string length "abcd"] + 0.0 )', 1.25)
+ check('20.0/5.0', 4.0)
+ check('"0x03" > "2"', 1.0)
+ check('[string length "a\xc2\xbd\xe2\x82\xac"]', 3.0)
+ check(r'[string length "a\xbd\u20ac"]', 3.0)
+ self.assertRaises(TclError, tcl.exprdouble, '"abc"')
+ if tcl_version >= (8, 5):
+ check('2**64', float(2**64))
+
+ def test_exprlong(self):
+ tcl = self.interp
+ tcl.call('set', 'a', 3)
+ tcl.call('set', 'b', 6)
+ def check(expr, expected):
+ result = tcl.exprlong(expr)
+ self.assertEqual(result, expected)
+ self.assertIsInstance(result, int)
+
+ self.assertRaises(TypeError, tcl.exprlong)
+ self.assertRaises(TypeError, tcl.exprlong, '8.2', '+6')
+ self.assertRaises(TclError, tcl.exprlong, 'spam')
+ check('', 0)
+ check('8.2 + 6', 14)
+ check('3.1 + $a', 6)
+ check('2 + "$a.$b"', 5)
+ check('4*[llength "6 2"]', 8)
+ check('{word one} < "word $a"', 0)
+ check('4*2 < 7', 0)
+ check('hypot($a, 4)', 5)
+ check('5 / 4', 1)
+ check('5 / 4.0', 1)
+ check('5 / ( [string length "abcd"] + 0.0 )', 1)
+ check('20.0/5.0', 4)
+ check('"0x03" > "2"', 1)
+ check('[string length "a\xc2\xbd\xe2\x82\xac"]', 3)
+ check(r'[string length "a\xbd\u20ac"]', 3)
+ self.assertRaises(TclError, tcl.exprlong, '"abc"')
+ if tcl_version >= (8, 5):
+ self.assertRaises(TclError, tcl.exprlong, '2**64')
+
+ def test_exprboolean(self):
+ tcl = self.interp
+ tcl.call('set', 'a', 3)
+ tcl.call('set', 'b', 6)
+ def check(expr, expected):
+ result = tcl.exprboolean(expr)
+ self.assertEqual(result, expected)
+ self.assertIsInstance(result, int)
+ self.assertNotIsInstance(result, bool)
+
+ self.assertRaises(TypeError, tcl.exprboolean)
+ self.assertRaises(TypeError, tcl.exprboolean, '8.2', '+6')
+ self.assertRaises(TclError, tcl.exprboolean, 'spam')
+ check('', False)
+ for value in ('0', 'false', 'no', 'off'):
+ check(value, False)
+ check('"%s"' % value, False)
+ check('{%s}' % value, False)
+ for value in ('1', 'true', 'yes', 'on'):
+ check(value, True)
+ check('"%s"' % value, True)
+ check('{%s}' % value, True)
+ check('8.2 + 6', True)
+ check('3.1 + $a', True)
+ check('2 + "$a.$b"', True)
+ check('4*[llength "6 2"]', True)
+ check('{word one} < "word $a"', False)
+ check('4*2 < 7', False)
+ check('hypot($a, 4)', True)
+ check('5 / 4', True)
+ check('5 / 4.0', True)
+ check('5 / ( [string length "abcd"] + 0.0 )', True)
+ check('20.0/5.0', True)
+ check('"0x03" > "2"', True)
+ check('[string length "a\xc2\xbd\xe2\x82\xac"]', True)
+ check(r'[string length "a\xbd\u20ac"]', True)
+ self.assertRaises(TclError, tcl.exprboolean, '"abc"')
+ if tcl_version >= (8, 5):
+ check('2**64', True)
+
+ def test_passing_values(self):
+ def passValue(value):
+ return self.interp.call('set', '_', value)
+
+ self.assertEqual(passValue(True), True if self.wantobjects else '1')
+ self.assertEqual(passValue(False), False if self.wantobjects else '0')
+ self.assertEqual(passValue('string'), 'string')
+ self.assertEqual(passValue('string\xbd'), 'string\xbd')
+ self.assertEqual(passValue('string\xe2\x82\xac'), u'string\u20ac')
+ self.assertEqual(passValue(u'string'), u'string')
+ self.assertEqual(passValue(u'string\xbd'), u'string\xbd')
+ self.assertEqual(passValue(u'string\u20ac'), u'string\u20ac')
+ self.assertEqual(passValue('str\x00ing'), 'str\x00ing')
+ self.assertEqual(passValue('str\xc0\x80ing'), 'str\x00ing')
+ self.assertEqual(passValue(u'str\x00ing'), u'str\x00ing')
+ self.assertEqual(passValue(u'str\x00ing\xbd'), u'str\x00ing\xbd')
+ self.assertEqual(passValue(u'str\x00ing\u20ac'), u'str\x00ing\u20ac')
+ for i in (0, 1, -1, int(2**31-1), int(-2**31)):
+ self.assertEqual(passValue(i), i if self.wantobjects else str(i))
+ for f in (0.0, 1.0, -1.0, 1//3, 1/3.0,
+ sys.float_info.min, sys.float_info.max,
+ -sys.float_info.min, -sys.float_info.max):
+ if self.wantobjects:
+ self.assertEqual(passValue(f), f)
+ else:
+ self.assertEqual(float(passValue(f)), f)
+ if self.wantobjects:
+ f = passValue(float('nan'))
+ self.assertNotEqual(f, f)
+ self.assertEqual(passValue(float('inf')), float('inf'))
+ self.assertEqual(passValue(-float('inf')), -float('inf'))
+ else:
+ f = float(passValue(float('nan')))
+ self.assertNotEqual(f, f)
+ self.assertEqual(float(passValue(float('inf'))), float('inf'))
+ self.assertEqual(float(passValue(-float('inf'))), -float('inf'))
+ self.assertEqual(passValue((1, '2', (3.4,))),
+ (1, '2', (3.4,)) if self.wantobjects else '1 2 3.4')
+
+ def test_user_command(self):
+ result = []
+ def testfunc(arg):
+ result.append(arg)
+ return arg
+ self.interp.createcommand('testfunc', testfunc)
+ self.addCleanup(self.interp.tk.deletecommand, 'testfunc')
+ def check(value, expected, expected2=None, eq=self.assertEqual):
+ if expected2 is None:
+ expected2 = expected
+ del result[:]
+ r = self.interp.call('testfunc', value)
+ self.assertEqual(len(result), 1)
+ self.assertIsInstance(result[0], (str, unicode))
+ eq(result[0], expected2)
+ self.assertIsInstance(r, (str, unicode))
+ eq(r, expected2)
+ def float_eq(actual, expected):
+ expected = float(expected)
+ self.assertAlmostEqual(float(actual), expected,
+ delta=abs(expected) * 1e-10)
+ def nan_eq(actual, expected):
+ actual = float(actual)
+ self.assertNotEqual(actual, actual)
+
+ check(True, '1')
+ check(False, '0')
+ check('string', 'string')
+ check('string\xbd', 'string\xbd')
+ check('string\xe2\x82\xac', 'string\xe2\x82\xac', u'string\u20ac')
+ check(u'string', u'string')
+ check(u'string\xbd', 'string\xc2\xbd', u'string\xbd')
+ check(u'string\u20ac', 'string\xe2\x82\xac', u'string\u20ac')
+ check('str\xc0\x80ing', 'str\xc0\x80ing', u'str\x00ing')
+ check('str\xc0\x80ing\xe2\x82\xac', 'str\xc0\x80ing\xe2\x82\xac', u'str\x00ing\u20ac')
+ check(u'str\x00ing', 'str\xc0\x80ing', u'str\x00ing')
+ check(u'str\x00ing\xbd', 'str\xc0\x80ing\xc2\xbd', u'str\x00ing\xbd')
+ check(u'str\x00ing\u20ac', 'str\xc0\x80ing\xe2\x82\xac', u'str\x00ing\u20ac')
+ for i in (0, 1, -1, 2**31-1, -2**31):
+ check(i, str(i))
+ for f in (0.0, 1.0, -1.0):
+ check(f, repr(f))
+ for f in (1/3.0, sys.float_info.min, sys.float_info.max,
+ -sys.float_info.min, -sys.float_info.max):
+ check(f, f, eq=float_eq)
+ check(float('inf'), 'Inf', eq=float_eq)
+ check(-float('inf'), '-Inf', eq=float_eq)
+ check(float('nan'), 'NaN', eq=nan_eq)
+ check((), '')
+ check((1, (2,), (3, 4), '5 6', ()), '1 2 {3 4} {5 6} {}')
+
+ def test_splitlist(self):
+ splitlist = self.interp.tk.splitlist
+ call = self.interp.tk.call
+ self.assertRaises(TypeError, splitlist)
+ self.assertRaises(TypeError, splitlist, 'a', 'b')
+ self.assertRaises(TypeError, splitlist, 2)
+ testcases = [
+ ('2', ('2',)),
+ ('', ()),
+ ('{}', ('',)),
+ ('""', ('',)),
+ ('a\n b\t\r c\n ', ('a', 'b', 'c')),
+ (u'a\n b\t\r c\n ', ('a', 'b', 'c')),
+ ('a \xe2\x82\xac', ('a', '\xe2\x82\xac')),
+ (u'a \u20ac', ('a', '\xe2\x82\xac')),
+ ('a\xc0\x80b c\xc0\x80d', ('a\xc0\x80b', 'c\xc0\x80d')),
+ ('a {b c}', ('a', 'b c')),
+ (r'a b\ c', ('a', 'b c')),
+ (('a', 'b c'), ('a', 'b c')),
+ ('a 2', ('a', '2')),
+ (('a', 2), ('a', 2)),
+ ('a 3.4', ('a', '3.4')),
+ (('a', 3.4), ('a', 3.4)),
+ ((), ()),
+ (call('list', 1, '2', (3.4,)),
+ (1, '2', (3.4,)) if self.wantobjects else
+ ('1', '2', '3.4')),
+ ]
+ if tcl_version >= (8, 5):
+ if not self.wantobjects:
+ expected = ('12', '\xe2\x82\xac', '\xe2\x82\xac', '3.4')
+ elif get_tk_patchlevel() < (8, 5, 5):
+ # Before 8.5.5 dicts were converted to lists through string
+ expected = ('12', u'\u20ac', u'\u20ac', '3.4')
+ else:
+ expected = (12, u'\u20ac', u'\u20ac', (3.4,))
+ testcases += [
+ (call('dict', 'create', 12, u'\u20ac', '\xe2\x82\xac', (3.4,)),
+ expected),
+ ]
+ for arg, res in testcases:
+ self.assertEqual(splitlist(arg), res)
+ self.assertRaises(TclError, splitlist, '{')
+
+ def test_split(self):
+ split = self.interp.tk.split
+ call = self.interp.tk.call
+ self.assertRaises(TypeError, split)
+ self.assertRaises(TypeError, split, 'a', 'b')
+ self.assertRaises(TypeError, split, 2)
+ testcases = [
+ ('2', '2'),
+ ('', ''),
+ ('{}', ''),
+ ('""', ''),
+ ('{', '{'),
+ ('a\n b\t\r c\n ', ('a', 'b', 'c')),
+ (u'a\n b\t\r c\n ', ('a', 'b', 'c')),
+ ('a \xe2\x82\xac', ('a', '\xe2\x82\xac')),
+ (u'a \u20ac', ('a', '\xe2\x82\xac')),
+ ('a\xc0\x80b', 'a\xc0\x80b'),
+ ('a\xc0\x80b c\xc0\x80d', ('a\xc0\x80b', 'c\xc0\x80d')),
+ ('a {b c}', ('a', ('b', 'c'))),
+ (r'a b\ c', ('a', ('b', 'c'))),
+ (('a', 'b c'), ('a', ('b', 'c'))),
+ (('a', u'b c'), ('a', ('b', 'c'))),
+ ('a 2', ('a', '2')),
+ (('a', 2), ('a', 2)),
+ ('a 3.4', ('a', '3.4')),
+ (('a', 3.4), ('a', 3.4)),
+ (('a', (2, 3.4)), ('a', (2, 3.4))),
+ ((), ()),
+ (call('list', 1, '2', (3.4,)),
+ (1, '2', (3.4,)) if self.wantobjects else
+ ('1', '2', '3.4')),
+ ]
+ if tcl_version >= (8, 5):
+ if not self.wantobjects:
+ expected = ('12', '\xe2\x82\xac', '\xe2\x82\xac', '3.4')
+ elif get_tk_patchlevel() < (8, 5, 5):
+ # Before 8.5.5 dicts were converted to lists through string
+ expected = ('12', u'\u20ac', u'\u20ac', '3.4')
+ else:
+ expected = (12, u'\u20ac', u'\u20ac', (3.4,))
+ testcases += [
+ (call('dict', 'create', 12, u'\u20ac', '\xe2\x82\xac', (3.4,)),
+ expected),
+ ]
+ for arg, res in testcases:
+ self.assertEqual(split(arg), res)
+
+character_size = 4 if sys.maxunicode > 0xFFFF else 2
+
+class BigmemTclTest(unittest.TestCase):
+
+ def setUp(self):
+ self.interp = Tcl()
+ @test_support.cpython_only
+ @unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
+ @test_support.precisionbigmemtest(size=INT_MAX + 1, memuse=5, dry_run=False)
+ def test_huge_string_call(self, size):
+ value = ' ' * size
+ self.assertRaises(OverflowError, self.interp.call, 'set', '_', value)
+
+ @test_support.cpython_only
+ @unittest.skipUnless(test_support.have_unicode, 'requires unicode support')
+ @unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
+ @test_support.precisionbigmemtest(size=INT_MAX + 1,
+ memuse=2*character_size + 2,
+ dry_run=False)
+ def test_huge_unicode_call(self, size):
+ value = unicode(' ') * size
+ self.assertRaises(OverflowError, self.interp.call, 'set', '_', value)
+
+
+ @test_support.cpython_only
+ @unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
+ @test_support.precisionbigmemtest(size=INT_MAX + 1, memuse=9, dry_run=False)
+ def test_huge_string_builtins(self, size):
+ value = '1' + ' ' * size
+ self.check_huge_string_builtins(value)
+
+ @test_support.cpython_only
+ @unittest.skipUnless(test_support.have_unicode, 'requires unicode support')
+ @unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
+ @test_support.precisionbigmemtest(size=INT_MAX + 1,
+ memuse=2*character_size + 7,
+ dry_run=False)
+ def test_huge_unicode_builtins(self, size):
+ value = unicode('1' + ' ' * size)
+ self.check_huge_string_builtins(value)
+
+ def check_huge_string_builtins(self, value):
+ self.assertRaises(OverflowError, self.interp.tk.getint, value)
+ self.assertRaises(OverflowError, self.interp.tk.getdouble, value)
+ self.assertRaises(OverflowError, self.interp.tk.getboolean, value)
+ self.assertRaises(OverflowError, self.interp.eval, value)
+ self.assertRaises(OverflowError, self.interp.evalfile, value)
+ self.assertRaises(OverflowError, self.interp.record, value)
+ self.assertRaises(OverflowError, self.interp.adderrorinfo, value)
+ self.assertRaises(OverflowError, self.interp.setvar, value, 'x', 'a')
+ self.assertRaises(OverflowError, self.interp.setvar, 'x', value, 'a')
+ self.assertRaises(OverflowError, self.interp.unsetvar, value)
+ self.assertRaises(OverflowError, self.interp.unsetvar, 'x', value)
+ self.assertRaises(OverflowError, self.interp.adderrorinfo, value)
+ self.assertRaises(OverflowError, self.interp.exprstring, value)
+ self.assertRaises(OverflowError, self.interp.exprlong, value)
+ self.assertRaises(OverflowError, self.interp.exprboolean, value)
+ self.assertRaises(OverflowError, self.interp.splitlist, value)
+ self.assertRaises(OverflowError, self.interp.split, value)
+ self.assertRaises(OverflowError, self.interp.createcommand, value, max)
+ self.assertRaises(OverflowError, self.interp.deletecommand, value)
+
+
+def setUpModule():
+ if test_support.verbose:
+ tcl = Tcl()
+ print 'patchlevel =', tcl.call('info', 'patchlevel')
def test_main():
- test_support.run_unittest(TclTest, TkinterTest)
+ test_support.run_unittest(TclTest, TkinterTest, BigmemTclTest)
if __name__ == "__main__":
test_main()
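The BigmemTclTest additions all assert one contract: values longer than a C int
can describe must make the bindings raise OverflowError instead of silently
truncating when handed to Tcl. A generic sketch of the guard being tested
(illustrative only, not the _tkinter source; the 32-bit fallback for INT_MAX is
an assumption):

    try:
        from _testcapi import INT_MAX
    except ImportError:
        INT_MAX = 2**31 - 1          # assumed 32-bit C int

    def as_tcl_length_checked(value):
        # Tcl APIs take int lengths, so anything longer is rejected up front.
        if len(value) > INT_MAX:
            raise OverflowError('value is longer than a C int can describe')
        return value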
diff --git a/Lib/test/test_telnetlib.py b/Lib/test/test_telnetlib.py
index 5a179f0..6c122d7 100644
--- a/Lib/test/test_telnetlib.py
+++ b/Lib/test/test_telnetlib.py
@@ -3,6 +3,7 @@ import telnetlib
import time
import Queue
+import unittest
from unittest import TestCase
from test import test_support
threading = test_support.import_module('threading')
@@ -91,6 +92,14 @@ class GeneralTests(TestCase):
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
+ def testGetters(self):
+ # Test telnet getter methods
+ telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
+ t_sock = telnet.sock
+ self.assertEqual(telnet.get_socket(), t_sock)
+ self.assertEqual(telnet.fileno(), t_sock.fileno())
+ telnet.sock.close()
+
def _read_setUp(self):
self.evt = threading.Event()
self.dataq = Queue.Queue()
@@ -135,6 +144,28 @@ class ReadTests(TestCase):
self.assertEqual(data, want[0])
self.assertEqual(telnet.read_all(), 'not seen')
+ def test_read_until_with_poll(self):
+ """Use select.poll() to implement telnet.read_until()."""
+ want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
+ self.dataq.put(want)
+ telnet = telnetlib.Telnet(HOST, self.port)
+ if not telnet._has_poll:
+ raise unittest.SkipTest('select.poll() is required')
+ telnet._has_poll = True
+ self.dataq.join()
+ data = telnet.read_until('match')
+ self.assertEqual(data, ''.join(want[:-2]))
+
+ def test_read_until_with_select(self):
+ """Use select.select() to implement telnet.read_until()."""
+ want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
+ self.dataq.put(want)
+ telnet = telnetlib.Telnet(HOST, self.port)
+ telnet._has_poll = False
+ self.dataq.join()
+ data = telnet.read_until('match')
+ self.assertEqual(data, ''.join(want[:-2]))
+
def test_read_all_A(self):
"""
read_all()
@@ -146,7 +177,6 @@ class ReadTests(TestCase):
self.dataq.join()
data = telnet.read_all()
self.assertEqual(data, ''.join(want[:-1]))
- return
def _test_blocking(self, func):
self.dataq.put([self.block_long, EOF_sigil])
@@ -357,8 +387,75 @@ class OptionTests(TestCase):
self.assertEqual('', telnet.read_sb_data())
nego.sb_getter = None # break the nego => telnet cycle
+
+class ExpectTests(TestCase):
+ def setUp(self):
+ self.evt = threading.Event()
+ self.dataq = Queue.Queue()
+ self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.sock.settimeout(10)
+ self.port = test_support.bind_port(self.sock)
+ self.thread = threading.Thread(target=server, args=(self.evt,self.sock,
+ self.dataq))
+ self.thread.start()
+ self.evt.wait()
+
+ def tearDown(self):
+ self.thread.join()
+
+ # Use an approach to testing timeouts similar to that of test_timeout.py.
+ # These will never pass 100% of the time, but the fuzz is big enough that
+ # failures are rare.
+ block_long = 0.6
+ block_short = 0.3
+ def test_expect_A(self):
+ """
+ expect(expected, [timeout])
+ Read until the expected string has been seen, or a timeout is
+ hit (default is no timeout); may block.
+ """
+ want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
+ self.dataq.put(want)
+ telnet = telnetlib.Telnet(HOST, self.port)
+ self.dataq.join()
+ (_,_,data) = telnet.expect(['match'])
+ self.assertEqual(data, ''.join(want[:-2]))
+
+ def test_expect_B(self):
+ # test the timeout - it does NOT raise socket.timeout
+ want = ['hello', self.block_long, 'not seen', EOF_sigil]
+ self.dataq.put(want)
+ telnet = telnetlib.Telnet(HOST, self.port)
+ self.dataq.join()
+ (_,_,data) = telnet.expect(['not seen'], self.block_short)
+ self.assertEqual(data, want[0])
+ self.assertEqual(telnet.read_all(), 'not seen')
+
+ def test_expect_with_poll(self):
+ """Use select.poll() to implement telnet.expect()."""
+ want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
+ self.dataq.put(want)
+ telnet = telnetlib.Telnet(HOST, self.port)
+ if not telnet._has_poll:
+ raise unittest.SkipTest('select.poll() is required')
+ telnet._has_poll = True
+ self.dataq.join()
+ (_,_,data) = telnet.expect(['match'])
+ self.assertEqual(data, ''.join(want[:-2]))
+
+ def test_expect_with_select(self):
+ """Use select.select() to implement telnet.expect()."""
+ want = ['x' * 10, 'match', 'y' * 10, EOF_sigil]
+ self.dataq.put(want)
+ telnet = telnetlib.Telnet(HOST, self.port)
+ telnet._has_poll = False
+ self.dataq.join()
+ (_,_,data) = telnet.expect(['match'])
+ self.assertEqual(data, ''.join(want[:-2]))
+
+
def test_main(verbose=None):
- test_support.run_unittest(GeneralTests, ReadTests, OptionTests)
+ test_support.run_unittest(GeneralTests, ReadTests, OptionTests,
+ ExpectTests)
if __name__ == '__main__':
test_main()
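The *_with_poll/*_with_select test pairs work by flipping the private
Telnet._has_poll flag, so both implementations of read_until() and expect() are
exercised regardless of platform. A condensed sketch of the same trick (host,
port and token are placeholders; _has_poll is an internal attribute that only
test code should touch):

    import telnetlib

    def read_until_with(impl, host, port, token):
        conn = telnetlib.Telnet(host, port)
        try:
            conn._has_poll = (impl == 'poll')   # private flag, test use only
            return conn.read_until(token, 5)
        finally:
            conn.close()

    # data = read_until_with('select', 'localhost', 8023, 'match')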
diff --git a/Lib/test/test_tempfile.py b/Lib/test/test_tempfile.py
index 2ddc04c..465bcda 100644
--- a/Lib/test/test_tempfile.py
+++ b/Lib/test/test_tempfile.py
@@ -1,13 +1,17 @@
# tempfile.py unit tests.
import tempfile
+import errno
+import io
import os
import signal
+import shutil
import sys
import re
import warnings
+import contextlib
import unittest
-from test import test_support
+from test import test_support as support
warnings.filterwarnings("ignore",
category=RuntimeWarning,
@@ -177,7 +181,7 @@ class test__candidate_tempdir_list(TC):
# _candidate_tempdir_list contains the expected directories
# Make sure the interesting environment variables are all set.
- with test_support.EnvironmentVarGuard() as env:
+ with support.EnvironmentVarGuard() as env:
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname:
@@ -202,8 +206,51 @@ class test__candidate_tempdir_list(TC):
test_classes.append(test__candidate_tempdir_list)
+# We test _get_default_tempdir some more by testing gettempdir.
-# We test _get_default_tempdir by testing gettempdir.
+class TestGetDefaultTempdir(TC):
+ """Test _get_default_tempdir()."""
+
+ def test_no_files_left_behind(self):
+ # use a private empty directory
+ our_temp_directory = tempfile.mkdtemp()
+ try:
+ # force _get_default_tempdir() to consider our empty directory
+ def our_candidate_list():
+ return [our_temp_directory]
+
+ with support.swap_attr(tempfile, "_candidate_tempdir_list",
+ our_candidate_list):
+ # verify our directory is empty after _get_default_tempdir()
+ tempfile._get_default_tempdir()
+ self.assertEqual(os.listdir(our_temp_directory), [])
+
+ def raise_OSError(*args, **kwargs):
+ raise OSError(-1)
+
+ with support.swap_attr(io, "open", raise_OSError):
+ # test again with failing io.open()
+ with self.assertRaises(IOError) as cm:
+ tempfile._get_default_tempdir()
+ self.assertEqual(cm.exception.errno, errno.ENOENT)
+ self.assertEqual(os.listdir(our_temp_directory), [])
+
+ open = io.open
+ def bad_writer(*args, **kwargs):
+ fp = open(*args, **kwargs)
+ fp.write = raise_OSError
+ return fp
+
+ with support.swap_attr(io, "open", bad_writer):
+ # test again with failing write()
+ with self.assertRaises(IOError) as cm:
+ tempfile._get_default_tempdir()
+ self.assertEqual(cm.exception.errno, errno.ENOENT)
+ self.assertEqual(os.listdir(our_temp_directory), [])
+ finally:
+ shutil.rmtree(our_temp_directory)
+
+test_classes.append(TestGetDefaultTempdir)
class test__get_candidate_names(TC):
@@ -224,6 +271,22 @@ class test__get_candidate_names(TC):
test_classes.append(test__get_candidate_names)
+@contextlib.contextmanager
+def _inside_empty_temp_dir():
+ dir = tempfile.mkdtemp()
+ try:
+ with support.swap_attr(tempfile, 'tempdir', dir):
+ yield
+ finally:
+ support.rmtree(dir)
+
+
+def _mock_candidate_names(*names):
+ return support.swap_attr(tempfile,
+ '_get_candidate_names',
+ lambda: iter(names))
+
+
class test__mkstemp_inner(TC):
"""Test the internal function _mkstemp_inner."""
@@ -279,10 +342,9 @@ class test__mkstemp_inner(TC):
finally:
os.rmdir(dir)
+ @unittest.skipUnless(has_stat, 'os.stat not available')
def test_file_mode(self):
# _mkstemp_inner creates files with the proper mode
- if not has_stat:
- return # ugh, can't use SkipTest.
file = self.do_create()
mode = stat.S_IMODE(os.stat(file.name).st_mode)
@@ -294,12 +356,11 @@ class test__mkstemp_inner(TC):
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
+ @unittest.skipUnless(has_spawnl, 'os.spawnl not available')
def test_noinherit(self):
# _mkstemp_inner file handles are not inherited by child processes
- if not has_spawnl:
- return # ugh, can't use SkipTest.
- if test_support.verbose:
+ if support.verbose:
v="v"
else:
v="q"
@@ -332,14 +393,44 @@ class test__mkstemp_inner(TC):
"child process caught fatal signal %d" % -retval)
self.assertFalse(retval > 0, "child process reports failure %d"%retval)
+ @unittest.skipUnless(has_textmode, "text mode not available")
def test_textmode(self):
# _mkstemp_inner can create files in text mode
- if not has_textmode:
- return # ugh, can't use SkipTest.
self.do_create(bin=0).write("blat\n")
# XXX should test that the file really is a text file
+ def default_mkstemp_inner(self):
+ return tempfile._mkstemp_inner(tempfile.gettempdir(),
+ tempfile.template,
+ '',
+ tempfile._bin_openflags)
+
+ def test_collision_with_existing_file(self):
+ # _mkstemp_inner tries another name when a file with
+ # the chosen name already exists
+ with _inside_empty_temp_dir(), \
+ _mock_candidate_names('aaa', 'aaa', 'bbb'):
+ (fd1, name1) = self.default_mkstemp_inner()
+ os.close(fd1)
+ self.assertTrue(name1.endswith('aaa'))
+
+ (fd2, name2) = self.default_mkstemp_inner()
+ os.close(fd2)
+ self.assertTrue(name2.endswith('bbb'))
+
+ def test_collision_with_existing_directory(self):
+ # _mkstemp_inner tries another name when a directory with
+ # the chosen name already exists
+ with _inside_empty_temp_dir(), \
+ _mock_candidate_names('aaa', 'aaa', 'bbb'):
+ dir = tempfile.mkdtemp()
+ self.assertTrue(dir.endswith('aaa'))
+
+ (fd, name) = self.default_mkstemp_inner()
+ os.close(fd)
+ self.assertTrue(name.endswith('bbb'))
+
test_classes.append(test__mkstemp_inner)
@@ -496,10 +587,9 @@ class test_mkdtemp(TC):
finally:
os.rmdir(dir)
+ @unittest.skipUnless(has_stat, 'os.stat not available')
def test_mode(self):
# mkdtemp creates directories with the proper mode
- if not has_stat:
- return # ugh, can't use SkipTest.
dir = self.do_create()
try:
@@ -515,6 +605,27 @@ class test_mkdtemp(TC):
finally:
os.rmdir(dir)
+ def test_collision_with_existing_file(self):
+ # mkdtemp tries another name when a file with
+ # the chosen name already exists
+ with _inside_empty_temp_dir(), \
+ _mock_candidate_names('aaa', 'aaa', 'bbb'):
+ file = tempfile.NamedTemporaryFile(delete=False)
+ file.close()
+ self.assertTrue(file.name.endswith('aaa'))
+ dir = tempfile.mkdtemp()
+ self.assertTrue(dir.endswith('bbb'))
+
+ def test_collision_with_existing_directory(self):
+ # mkdtemp tries another name when a directory with
+ # the chosen name already exists
+ with _inside_empty_temp_dir(), \
+ _mock_candidate_names('aaa', 'aaa', 'bbb'):
+ dir1 = tempfile.mkdtemp()
+ self.assertTrue(dir1.endswith('aaa'))
+ dir2 = tempfile.mkdtemp()
+ self.assertTrue(dir2.endswith('bbb'))
+
test_classes.append(test_mkdtemp)
@@ -660,6 +771,24 @@ class test_NamedTemporaryFile(TC):
pass
self.assertRaises(ValueError, use_closed)
+ def test_no_leak_fd(self):
+ # Issue #21058: don't leak file descriptor when fdopen() fails
+ old_close = os.close
+ old_fdopen = os.fdopen
+ closed = []
+ def close(fd):
+ closed.append(fd)
+ def fdopen(*args):
+ raise ValueError()
+ os.close = close
+ os.fdopen = fdopen
+ try:
+ self.assertRaises(ValueError, tempfile.NamedTemporaryFile)
+ self.assertEqual(len(closed), 1)
+ finally:
+ os.close = old_close
+ os.fdopen = old_fdopen
+
# How to test the mode and bufsize parameters?
test_classes.append(test_NamedTemporaryFile)
@@ -738,6 +867,17 @@ class test_SpooledTemporaryFile(TC):
f.write(b'x')
self.assertTrue(f._rolled)
+ def test_xreadlines(self):
+ f = self.do_create(max_size=20)
+ f.write(b'abc\n' * 5)
+ f.seek(0)
+ self.assertFalse(f._rolled)
+ self.assertEqual(list(f.xreadlines()), [b'abc\n'] * 5)
+ f.write(b'x\ny')
+ self.assertTrue(f._rolled)
+ f.seek(0)
+ self.assertEqual(list(f.xreadlines()), [b'abc\n'] * 5 + [b'x\n', b'y'])
+
def test_sparse(self):
# A SpooledTemporaryFile that is written late in the file will extend
# when that occurs
@@ -793,6 +933,26 @@ class test_SpooledTemporaryFile(TC):
seek(0, 0)
self.assertTrue(read(70) == 'a'*35 + 'b'*35)
+ def test_properties(self):
+ f = tempfile.SpooledTemporaryFile(max_size=10)
+ f.write(b'x' * 10)
+ self.assertFalse(f._rolled)
+ self.assertEqual(f.mode, 'w+b')
+ self.assertIsNone(f.name)
+ with self.assertRaises(AttributeError):
+ f.newlines
+ with self.assertRaises(AttributeError):
+ f.encoding
+
+ f.write(b'x')
+ self.assertTrue(f._rolled)
+ self.assertEqual(f.mode, 'w+b')
+ self.assertIsNotNone(f.name)
+ with self.assertRaises(AttributeError):
+ f.newlines
+ with self.assertRaises(AttributeError):
+ f.encoding
+
def test_context_manager_before_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
@@ -882,7 +1042,7 @@ if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
test_classes.append(test_TemporaryFile)
def test_main():
- test_support.run_unittest(*test_classes)
+ support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
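test_no_leak_fd above uses plain monkey-patching (the stdlib test suite avoids
external mocking libraries): record calls to one os function while forcing
another to fail, and restore both in a finally block. The same pattern in
isolation; it returns True when the descriptor opened by mkstemp() is closed
exactly once after fdopen() fails:

    import os
    import tempfile

    def fd_closed_when_fdopen_fails():
        closed = []
        old_close, old_fdopen = os.close, os.fdopen
        def fake_close(fd):
            closed.append(fd)
        def fake_fdopen(*args, **kwargs):
            raise ValueError('simulated fdopen failure')
        os.close, os.fdopen = fake_close, fake_fdopen
        try:
            try:
                tempfile.NamedTemporaryFile()
            except ValueError:
                pass
            # the file created by mkstemp() is left behind, as in the test
            return len(closed) == 1
        finally:
            os.close, os.fdopen = old_close, old_fdopen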
diff --git a/Lib/test/test_textwrap.py b/Lib/test/test_textwrap.py
index 63a54d9..7b72672 100644
--- a/Lib/test/test_textwrap.py
+++ b/Lib/test/test_textwrap.py
@@ -66,6 +66,15 @@ class WrapTestCase(BaseTestCase):
"I'm glad to hear it!"])
self.check_wrap(text, 80, [text])
+ def test_empty_string(self):
+ # Check that wrapping the empty string returns an empty list.
+ self.check_wrap("", 6, [])
+ self.check_wrap("", 6, [], drop_whitespace=False)
+
+ def test_empty_string_with_initial_indent(self):
+ # Check that the empty string is not indented.
+ self.check_wrap("", 6, [], initial_indent="++")
+ self.check_wrap("", 6, [], initial_indent="++", drop_whitespace=False)
def test_whitespace(self):
# Whitespace munging and end-of-sentence detection
@@ -323,7 +332,32 @@ What a mess!
["blah", " ", "(ding", " ", "dong),",
" ", "wubba"])
- def test_initial_whitespace(self):
+ def test_drop_whitespace_false(self):
+ # Check that drop_whitespace=False preserves whitespace.
+ # SF patch #1581073
+ text = " This is a sentence with much whitespace."
+ self.check_wrap(text, 10,
+ [" This is a", " ", "sentence ",
+ "with ", "much white", "space."],
+ drop_whitespace=False)
+
+ def test_drop_whitespace_false_whitespace_only(self):
+ # Check that drop_whitespace=False preserves a whitespace-only string.
+ self.check_wrap(" ", 6, [" "], drop_whitespace=False)
+
+ def test_drop_whitespace_false_whitespace_only_with_indent(self):
+ # Check that a whitespace-only string gets indented (when
+ # drop_whitespace is False).
+ self.check_wrap(" ", 6, [" "], drop_whitespace=False,
+ initial_indent=" ")
+
+ def test_drop_whitespace_whitespace_only(self):
+ # Check drop_whitespace on a whitespace-only string.
+ self.check_wrap(" ", 6, [])
+
+ def test_drop_whitespace_leading_whitespace(self):
+ # Check that drop_whitespace does not drop leading whitespace (if
+ # followed by non-whitespace).
# SF bug #622849 reported inconsistent handling of leading
# whitespace; let's test that a bit, shall we?
text = " This is a sentence with leading whitespace."
@@ -332,13 +366,27 @@ What a mess!
self.check_wrap(text, 30,
[" This is a sentence with", "leading whitespace."])
- def test_no_drop_whitespace(self):
- # SF patch #1581073
- text = " This is a sentence with much whitespace."
- self.check_wrap(text, 10,
- [" This is a", " ", "sentence ",
- "with ", "much white", "space."],
+ def test_drop_whitespace_whitespace_line(self):
+ # Check that drop_whitespace skips the whole line if a non-leading
+ # line consists only of whitespace.
+ text = "abcd efgh"
+ # Include the result for drop_whitespace=False for comparison.
+ self.check_wrap(text, 6, ["abcd", " ", "efgh"],
drop_whitespace=False)
+ self.check_wrap(text, 6, ["abcd", "efgh"])
+
+ def test_drop_whitespace_whitespace_only_with_indent(self):
+ # Check that initial_indent is not applied to a whitespace-only
+ # string. This checks a special case of the fact that dropping
+ # whitespace occurs before indenting.
+ self.check_wrap(" ", 6, [], initial_indent="++")
+
+ def test_drop_whitespace_whitespace_indent(self):
+ # Check that drop_whitespace does not drop whitespace indents.
+ # This checks a special case of the fact that dropping whitespace
+ # occurs before indenting.
+ self.check_wrap("abcd efgh", 6, [" abcd", " efgh"],
+ initial_indent=" ", subsequent_indent=" ")
if test_support.have_unicode:
def test_unicode(self):
diff --git a/Lib/test/test_thread.py b/Lib/test/test_thread.py
index 544e70d..b056039 100644
--- a/Lib/test/test_thread.py
+++ b/Lib/test/test_thread.py
@@ -70,39 +70,35 @@ class ThreadRunningTests(BasicThreadTest):
thread.stack_size(0)
self.assertEqual(thread.stack_size(), 0, "stack_size not reset to default")
- if os.name not in ("nt", "os2", "posix"):
- return
-
- tss_supported = True
+ @unittest.skipIf(os.name not in ("nt", "os2", "posix"), 'test meant for nt, os2, and posix')
+ def test_nt_and_posix_stack_size(self):
try:
thread.stack_size(4096)
except ValueError:
verbose_print("caught expected ValueError setting "
"stack_size(4096)")
except thread.error:
- tss_supported = False
- verbose_print("platform does not support changing thread stack "
- "size")
-
- if tss_supported:
- fail_msg = "stack_size(%d) failed - should succeed"
- for tss in (262144, 0x100000, 0):
- thread.stack_size(tss)
- self.assertEqual(thread.stack_size(), tss, fail_msg % tss)
- verbose_print("successfully set stack_size(%d)" % tss)
-
- for tss in (262144, 0x100000):
- verbose_print("trying stack_size = (%d)" % tss)
- self.next_ident = 0
- self.created = 0
- for i in range(NUMTASKS):
- self.newtask()
-
- verbose_print("waiting for all tasks to complete")
- self.done_mutex.acquire()
- verbose_print("all tasks done")
-
- thread.stack_size(0)
+ self.skipTest("platform does not support changing thread stack "
+ "size")
+
+ fail_msg = "stack_size(%d) failed - should succeed"
+ for tss in (262144, 0x100000, 0):
+ thread.stack_size(tss)
+ self.assertEqual(thread.stack_size(), tss, fail_msg % tss)
+ verbose_print("successfully set stack_size(%d)" % tss)
+
+ for tss in (262144, 0x100000):
+ verbose_print("trying stack_size = (%d)" % tss)
+ self.next_ident = 0
+ self.created = 0
+ for i in range(NUMTASKS):
+ self.newtask()
+
+ verbose_print("waiting for all tasks to complete")
+ self.done_mutex.acquire()
+ verbose_print("all tasks done")
+
+ thread.stack_size(0)
def test__count(self):
# Test the _count() function.
@@ -130,6 +126,29 @@ class ThreadRunningTests(BasicThreadTest):
time.sleep(0.01)
self.assertEqual(thread._count(), orig)
+ def test_save_exception_state_on_error(self):
+ # See issue #14474
+ def task():
+ started.release()
+ raise SyntaxError
+ def mywrite(self, *args):
+ try:
+ raise ValueError
+ except ValueError:
+ pass
+ real_write(self, *args)
+ c = thread._count()
+ started = thread.allocate_lock()
+ with test_support.captured_output("stderr") as stderr:
+ real_write = stderr.write
+ stderr.write = mywrite
+ started.acquire()
+ thread.start_new_thread(task, ())
+ started.acquire()
+ while thread._count() > c:
+ time.sleep(0.01)
+ self.assertIn("Traceback", stderr.getvalue())
+
class Barrier:
def __init__(self, num_threads):
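The recurring change in this hunk, and in most of the files below, is mechanical: tests that used to bail out with an early return now report a proper skip, either declaratively with a decorator or imperatively with self.skipTest(). A minimal sketch of the pattern, independent of the thread-specific details (the class and method names here are invented for illustration):

    import os
    import unittest

    class ExampleTests(unittest.TestCase):

        @unittest.skipIf(os.name not in ("nt", "posix"),
                         "test meant for nt and posix")
        def test_declarative_skip(self):
            self.assertTrue(True)

        def test_imperative_skip(self):
            try:
                import ctypes   # stand-in for an optional dependency
            except ImportError:
                # Reported as a skip in the summary, not a silent pass.
                self.skipTest("requires ctypes")
            self.assertTrue(hasattr(ctypes, "sizeof"))

    if __name__ == "__main__":
        unittest.main()
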
diff --git a/Lib/test/test_threading.py b/Lib/test/test_threading.py
index 1a02ef2..86c953f 100644
--- a/Lib/test/test_threading.py
+++ b/Lib/test/test_threading.py
@@ -1,7 +1,9 @@
# Very rudimentary test of threading module
import test.test_support
-from test.test_support import verbose
+from test.test_support import verbose, cpython_only
+from test.script_helper import assert_python_ok
+
import random
import re
import sys
@@ -12,6 +14,10 @@ import unittest
import weakref
import os
import subprocess
+try:
+ import _testcapi
+except ImportError:
+ _testcapi = None
from test import lock_tests
@@ -123,9 +129,7 @@ class ThreadTests(BaseTestCase):
try:
threading.stack_size(262144)
except thread.error:
- if verbose:
- print 'platform does not support changing thread stack size'
- return
+ self.skipTest('platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
@@ -136,9 +140,7 @@ class ThreadTests(BaseTestCase):
try:
threading.stack_size(0x100000)
except thread.error:
- if verbose:
- print 'platform does not support changing thread stack size'
- return
+ self.skipTest('platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
@@ -165,9 +167,7 @@ class ThreadTests(BaseTestCase):
try:
import ctypes
except ImportError:
- if verbose:
- print "test_PyThreadState_SetAsyncExc can't import ctypes"
- return # can't do anything
+ self.skipTest('requires ctypes')
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
@@ -273,9 +273,7 @@ class ThreadTests(BaseTestCase):
try:
import ctypes
except ImportError:
- if verbose:
- print("test_finalize_with_runnning_thread can't import ctypes")
- return # can't do anything
+ self.skipTest('requires ctypes')
rc = subprocess.call([sys.executable, "-c", """if 1:
import ctypes, sys, time, thread
@@ -414,6 +412,73 @@ class ThreadTests(BaseTestCase):
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
+ @unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
+ def test_dummy_thread_after_fork(self):
+ # Issue #14308: a dummy thread in the active list doesn't mess up
+ # the after-fork mechanism.
+ code = """if 1:
+ import thread, threading, os, time
+
+ def background_thread(evt):
+ # Creates and registers the _DummyThread instance
+ threading.current_thread()
+ evt.set()
+ time.sleep(10)
+
+ evt = threading.Event()
+ thread.start_new_thread(background_thread, (evt,))
+ evt.wait()
+ assert threading.active_count() == 2, threading.active_count()
+ if os.fork() == 0:
+ assert threading.active_count() == 1, threading.active_count()
+ os._exit(0)
+ else:
+ os.wait()
+ """
+ _, out, err = assert_python_ok("-c", code)
+ self.assertEqual(out, '')
+ self.assertEqual(err, '')
+
+ @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
+ def test_is_alive_after_fork(self):
+ # Try hard to trigger #18418: is_alive() could sometimes be True on
+ # threads that vanished after a fork.
+ old_interval = sys.getcheckinterval()
+
+ # Make the bug more likely to manifest.
+ sys.setcheckinterval(10)
+
+ try:
+ for i in range(20):
+ t = threading.Thread(target=lambda: None)
+ t.start()
+ pid = os.fork()
+ if pid == 0:
+ os._exit(1 if t.is_alive() else 0)
+ else:
+ t.join()
+ pid, status = os.waitpid(pid, 0)
+ self.assertEqual(0, status)
+ finally:
+ sys.setcheckinterval(old_interval)
+
+ def test_BoundedSemaphore_limit(self):
+ # BoundedSemaphore should raise ValueError if released too often.
+ for limit in range(1, 10):
+ bs = threading.BoundedSemaphore(limit)
+ threads = [threading.Thread(target=bs.acquire)
+ for _ in range(limit)]
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+ threads = [threading.Thread(target=bs.release)
+ for _ in range(limit)]
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
+ self.assertRaises(ValueError, bs.release)
class ThreadJoinOnShutdown(BaseTestCase):
@@ -635,6 +700,49 @@ class ThreadJoinOnShutdown(BaseTestCase):
output = "end of worker thread\nend of main thread\n"
self.assertScriptHasOutput(script, output)
+ @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
+ def test_6_daemon_threads(self):
+ # Check that a daemon thread cannot crash the interpreter on shutdown
+ # by manipulating internal structures that are being disposed of in
+ # the main thread.
+ script = """if True:
+ import os
+ import random
+ import sys
+ import time
+ import threading
+
+ thread_has_run = set()
+
+ def random_io():
+ '''Loop for a while sleeping random tiny amounts and doing some I/O.'''
+ while True:
+ in_f = open(os.__file__, 'rb')
+ stuff = in_f.read(200)
+ null_f = open(os.devnull, 'wb')
+ null_f.write(stuff)
+ time.sleep(random.random() / 1995)
+ null_f.close()
+ in_f.close()
+ thread_has_run.add(threading.current_thread())
+
+ def main():
+ count = 0
+ for _ in range(40):
+ new_thread = threading.Thread(target=random_io)
+ new_thread.daemon = True
+ new_thread.start()
+ count += 1
+ while len(thread_has_run) < count:
+ time.sleep(0.001)
+ # Trigger process shutdown
+ sys.exit(0)
+
+ main()
+ """
+ rc, out, err = assert_python_ok('-c', script)
+ self.assertFalse(err)
+
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
@@ -659,6 +767,46 @@ class ThreadJoinOnShutdown(BaseTestCase):
for t in threads:
t.join()
+ @cpython_only
+ @unittest.skipIf(_testcapi is None, "need _testcapi module")
+ def test_frame_tstate_tracing(self):
+ # Issue #14432: Crash when a generator is created in a C thread that is
+ # destroyed while the generator is still used. The issue was that a
+ # generator contains a frame, and the frame kept a reference to the
+ # Python state of the destroyed C thread. The crash occurs when a trace
+ # function is set up.
+
+ def noop_trace(frame, event, arg):
+ # no operation
+ return noop_trace
+
+ def generator():
+ while 1:
+ yield "generator"
+
+ def callback():
+ if callback.gen is None:
+ callback.gen = generator()
+ return next(callback.gen)
+ callback.gen = None
+
+ old_trace = sys.gettrace()
+ sys.settrace(noop_trace)
+ try:
+ # Install a trace function
+ threading.settrace(noop_trace)
+
+ # Create a generator in a C thread which exits after the call
+ _testcapi.call_in_temporary_c_thread(callback)
+
+ # Call the generator in a different Python thread, check that the
+ # generator didn't keep a reference to the destroyed thread state
+ for test in range(3):
+ # The trace function is still called here
+ callback()
+ finally:
+ sys.settrace(old_trace)
+
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
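The new test_BoundedSemaphore_limit case pins down documented behaviour: a BoundedSemaphore refuses to be released above its initial value. A standalone sketch of the same invariant, without the helper threads the test uses:

    import threading

    bs = threading.BoundedSemaphore(2)
    bs.acquire()
    bs.acquire()
    bs.release()
    bs.release()          # back at the initial value of 2
    try:
        bs.release()      # one release too many
    except ValueError:
        print("BoundedSemaphore raised ValueError as expected")
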
diff --git a/Lib/test/test_time.py b/Lib/test/test_time.py
index 28917ca..4571c10 100644
--- a/Lib/test/test_time.py
+++ b/Lib/test/test_time.py
@@ -106,7 +106,7 @@ class TimeTestCase(unittest.TestCase):
def test_strptime(self):
# Should be able to go round-trip from strftime to strptime without
- # throwing an exception.
+ # raising an exception.
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
diff --git a/Lib/test/test_timeout.py b/Lib/test/test_timeout.py
index 4bc125e..bb9252d 100644
--- a/Lib/test/test_timeout.py
+++ b/Lib/test/test_timeout.py
@@ -178,16 +178,19 @@ class TimeoutTestCase(unittest.TestCase):
"timeout (%g) is %g seconds more than expected (%g)"
%(_delta, self.fuzz, _timeout))
+ @unittest.skip('test not implemented')
def testSend(self):
# Test send() timeout
# couldn't figure out how to test it
pass
+ @unittest.skip('test not implemented')
def testSendto(self):
# Test sendto() timeout
# couldn't figure out how to test it
pass
+ @unittest.skip('test not implemented')
def testSendall(self):
# Test sendall() timeout
# couldn't figure out how to test it
diff --git a/Lib/test/test_tk.py b/Lib/test/test_tk.py
index 8625db2..f3e264b 100644
--- a/Lib/test/test_tk.py
+++ b/Lib/test/test_tk.py
@@ -1,8 +1,9 @@
import os
from test import test_support
-# Skip test if _tkinter wasn't built.
+# Skip test if _tkinter wasn't built or gui resource is not available.
test_support.import_module('_tkinter')
+test_support.requires('gui')
this_dir = os.path.dirname(os.path.abspath(__file__))
lib_tk_test = os.path.abspath(os.path.join(this_dir, os.path.pardir,
@@ -11,19 +12,10 @@ lib_tk_test = os.path.abspath(os.path.join(this_dir, os.path.pardir,
with test_support.DirsOnSysPath(lib_tk_test):
import runtktests
-# Skip test if tk cannot be initialized.
-runtktests.check_tk_availability()
-
-def test_main(enable_gui=False):
- if enable_gui:
- if test_support.use_resources is None:
- test_support.use_resources = ['gui']
- elif 'gui' not in test_support.use_resources:
- test_support.use_resources.append('gui')
-
+def test_main():
with test_support.DirsOnSysPath(lib_tk_test):
test_support.run_unittest(
*runtktests.get_tests(text=False, packages=['test_tkinter']))
if __name__ == '__main__':
- test_main(enable_gui=True)
+ test_main()
diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 38da106..850aa9c 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -4,7 +4,7 @@ Tests for the tokenize module.
>>> import glob, random, sys
The tests can be really simple. Given a small fragment of source
-code, print out a table with tokens. The ENDMARK is omitted for
+code, print out a table with tokens. The ENDMARKER is omitted for
brevity.
>>> dump_tokens("1 + 1")
@@ -278,6 +278,31 @@ String literals
OP '+' (1, 32) (1, 33)
STRING 'UR"ABC"' (1, 34) (1, 41)
+ >>> dump_tokens("b'abc' + B'abc'")
+ STRING "b'abc'" (1, 0) (1, 6)
+ OP '+' (1, 7) (1, 8)
+ STRING "B'abc'" (1, 9) (1, 15)
+ >>> dump_tokens('b"abc" + B"abc"')
+ STRING 'b"abc"' (1, 0) (1, 6)
+ OP '+' (1, 7) (1, 8)
+ STRING 'B"abc"' (1, 9) (1, 15)
+ >>> dump_tokens("br'abc' + bR'abc' + Br'abc' + BR'abc'")
+ STRING "br'abc'" (1, 0) (1, 7)
+ OP '+' (1, 8) (1, 9)
+ STRING "bR'abc'" (1, 10) (1, 17)
+ OP '+' (1, 18) (1, 19)
+ STRING "Br'abc'" (1, 20) (1, 27)
+ OP '+' (1, 28) (1, 29)
+ STRING "BR'abc'" (1, 30) (1, 37)
+ >>> dump_tokens('br"abc" + bR"abc" + Br"abc" + BR"abc"')
+ STRING 'br"abc"' (1, 0) (1, 7)
+ OP '+' (1, 8) (1, 9)
+ STRING 'bR"abc"' (1, 10) (1, 17)
+ OP '+' (1, 18) (1, 19)
+ STRING 'Br"abc"' (1, 20) (1, 27)
+ OP '+' (1, 28) (1, 29)
+ STRING 'BR"abc"' (1, 30) (1, 37)
+
Operators
>>> dump_tokens("def d22(a, b, c=2, d=2, *k): pass")
@@ -525,14 +550,19 @@ Evil tabs
NAME 'pass' (3, 9) (3, 13)
DEDENT '' (4, 0) (4, 0)
DEDENT '' (4, 0) (4, 0)
+
+Pathological whitespace (http://bugs.python.org/issue16152)
+ >>> dump_tokens("@ ")
+ OP '@' (1, 0) (1, 1)
"""
from test import test_support
from tokenize import (untokenize, generate_tokens, NUMBER, NAME, OP,
- STRING, ENDMARKER, tok_name)
+ STRING, ENDMARKER, tok_name, Untokenizer)
from StringIO import StringIO
import os
+from unittest import TestCase
def dump_tokens(s):
"""Print out the tokens in s in a table format.
@@ -585,12 +615,47 @@ def decistmt(s):
return untokenize(result)
-__test__ = {"doctests" : doctests, 'decistmt': decistmt}
+class UntokenizeTest(TestCase):
+
+ def test_bad_input_order(self):
+ # raise if previous row
+ u = Untokenizer()
+ u.prev_row = 2
+ u.prev_col = 2
+ with self.assertRaises(ValueError) as cm:
+ u.add_whitespace((1,3))
+ self.assertEqual(cm.exception.args[0],
+ 'start (1,3) precedes previous end (2,2)')
+ # raise if previous column in row
+ self.assertRaises(ValueError, u.add_whitespace, (2,1))
+
+ def test_backslash_continuation(self):
+ # The problem is that <whitespace>\<newline> leaves no token
+ u = Untokenizer()
+ u.prev_row = 1
+ u.prev_col = 1
+ u.tokens = []
+ u.add_whitespace((2, 0))
+ self.assertEqual(u.tokens, ['\\\n'])
+ u.prev_row = 2
+ u.add_whitespace((4, 4))
+ self.assertEqual(u.tokens, ['\\\n', '\\\n\\\n', ' '])
+
+ def test_iter_compat(self):
+ u = Untokenizer()
+ token = (NAME, 'Hello')
+ u.compat(token, iter([]))
+ self.assertEqual(u.tokens, ["Hello "])
+ u = Untokenizer()
+ self.assertEqual(u.untokenize(iter([token])), 'Hello ')
+__test__ = {"doctests" : doctests, 'decistmt': decistmt}
+
def test_main():
from test import test_tokenize
test_support.run_doctest(test_tokenize, True)
+ test_support.run_unittest(UntokenizeTest)
if __name__ == "__main__":
test_main()
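The UntokenizeTest cases poke at Untokenizer internals (the prev_row/prev_col bookkeeping and the compat fallback). For readers unfamiliar with the module, the public round trip those internals support looks roughly like this; a small illustration, not part of the patch:

    from StringIO import StringIO
    from tokenize import generate_tokens, untokenize

    source = "x = 1 + 2\n"
    tokens = list(generate_tokens(StringIO(source).readline))
    # With full 5-tuples, untokenize() restores the original column
    # positions via add_whitespace(), so simple sources round-trip exactly.
    rebuilt = untokenize(tokens)
    assert rebuilt == source, rebuilt
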
diff --git a/Lib/test/test_tools.py b/Lib/test/test_tools.py
new file mode 100644
index 0000000..f60b4c7
--- /dev/null
+++ b/Lib/test/test_tools.py
@@ -0,0 +1,368 @@
+"""Tests for scripts in the Tools directory.
+
+This file contains regression tests for some of the scripts found in the
+Tools directory of a Python checkout or tarball, such as reindent.py.
+"""
+
+import os
+import sys
+import unittest
+import shutil
+import subprocess
+import sysconfig
+import tempfile
+import textwrap
+from test import test_support
+from test.script_helper import assert_python_ok, temp_dir
+
+if not sysconfig.is_python_build():
+ # XXX some installers do contain the tools, should we detect that
+ # and run the tests in that case too?
+ raise unittest.SkipTest('test irrelevant for an installed Python')
+
+basepath = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
+ 'Tools')
+scriptsdir = os.path.join(basepath, 'scripts')
+
+
+class ReindentTests(unittest.TestCase):
+ script = os.path.join(scriptsdir, 'reindent.py')
+
+ def test_noargs(self):
+ assert_python_ok(self.script)
+
+ def test_help(self):
+ rc, out, err = assert_python_ok(self.script, '-h')
+ self.assertEqual(out, b'')
+ self.assertGreater(err, b'')
+
+
+class PindentTests(unittest.TestCase):
+ script = os.path.join(scriptsdir, 'pindent.py')
+
+ def assertFileEqual(self, fn1, fn2):
+ with open(fn1) as f1, open(fn2) as f2:
+ self.assertEqual(f1.readlines(), f2.readlines())
+
+ def pindent(self, source, *args):
+ proc = subprocess.Popen(
+ (sys.executable, self.script) + args,
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ universal_newlines=True)
+ out, err = proc.communicate(source)
+ self.assertIsNone(err)
+ return out
+
+ def lstriplines(self, data):
+ return '\n'.join(line.lstrip() for line in data.splitlines()) + '\n'
+
+ def test_selftest(self):
+ self.maxDiff = None
+ with temp_dir() as directory:
+ data_path = os.path.join(directory, '_test.py')
+ with open(self.script) as f:
+ closed = f.read()
+ with open(data_path, 'w') as f:
+ f.write(closed)
+
+ rc, out, err = assert_python_ok(self.script, '-d', data_path)
+ self.assertEqual(out, b'')
+ self.assertEqual(err, b'')
+ backup = data_path + '~'
+ self.assertTrue(os.path.exists(backup))
+ with open(backup) as f:
+ self.assertEqual(f.read(), closed)
+ with open(data_path) as f:
+ clean = f.read()
+ compile(clean, '_test.py', 'exec')
+ self.assertEqual(self.pindent(clean, '-c'), closed)
+ self.assertEqual(self.pindent(closed, '-d'), clean)
+
+ rc, out, err = assert_python_ok(self.script, '-c', data_path)
+ self.assertEqual(out, b'')
+ self.assertEqual(err, b'')
+ with open(backup) as f:
+ self.assertEqual(f.read(), clean)
+ with open(data_path) as f:
+ self.assertEqual(f.read(), closed)
+
+ broken = self.lstriplines(closed)
+ with open(data_path, 'w') as f:
+ f.write(broken)
+ rc, out, err = assert_python_ok(self.script, '-r', data_path)
+ self.assertEqual(out, b'')
+ self.assertEqual(err, b'')
+ with open(backup) as f:
+ self.assertEqual(f.read(), broken)
+ with open(data_path) as f:
+ indented = f.read()
+ compile(indented, '_test.py', 'exec')
+ self.assertEqual(self.pindent(broken, '-r'), indented)
+
+ def pindent_test(self, clean, closed):
+ self.assertEqual(self.pindent(clean, '-c'), closed)
+ self.assertEqual(self.pindent(closed, '-d'), clean)
+ broken = self.lstriplines(closed)
+ self.assertEqual(self.pindent(broken, '-r', '-e', '-s', '4'), closed)
+
+ def test_statements(self):
+ clean = textwrap.dedent("""\
+ if a:
+ pass
+
+ if a:
+ pass
+ else:
+ pass
+
+ if a:
+ pass
+ elif:
+ pass
+ else:
+ pass
+
+ while a:
+ break
+
+ while a:
+ break
+ else:
+ pass
+
+ for i in a:
+ break
+
+ for i in a:
+ break
+ else:
+ pass
+
+ try:
+ pass
+ finally:
+ pass
+
+ try:
+ pass
+ except TypeError:
+ pass
+ except ValueError:
+ pass
+ else:
+ pass
+
+ try:
+ pass
+ except TypeError:
+ pass
+ except ValueError:
+ pass
+ finally:
+ pass
+
+ with a:
+ pass
+
+ class A:
+ pass
+
+ def f():
+ pass
+ """)
+
+ closed = textwrap.dedent("""\
+ if a:
+ pass
+ # end if
+
+ if a:
+ pass
+ else:
+ pass
+ # end if
+
+ if a:
+ pass
+ elif:
+ pass
+ else:
+ pass
+ # end if
+
+ while a:
+ break
+ # end while
+
+ while a:
+ break
+ else:
+ pass
+ # end while
+
+ for i in a:
+ break
+ # end for
+
+ for i in a:
+ break
+ else:
+ pass
+ # end for
+
+ try:
+ pass
+ finally:
+ pass
+ # end try
+
+ try:
+ pass
+ except TypeError:
+ pass
+ except ValueError:
+ pass
+ else:
+ pass
+ # end try
+
+ try:
+ pass
+ except TypeError:
+ pass
+ except ValueError:
+ pass
+ finally:
+ pass
+ # end try
+
+ with a:
+ pass
+ # end with
+
+ class A:
+ pass
+ # end class A
+
+ def f():
+ pass
+ # end def f
+ """)
+ self.pindent_test(clean, closed)
+
+ def test_multilevel(self):
+ clean = textwrap.dedent("""\
+ def foobar(a, b):
+ if a == b:
+ a = a+1
+ elif a < b:
+ b = b-1
+ if b > a: a = a-1
+ else:
+ print 'oops!'
+ """)
+ closed = textwrap.dedent("""\
+ def foobar(a, b):
+ if a == b:
+ a = a+1
+ elif a < b:
+ b = b-1
+ if b > a: a = a-1
+ # end if
+ else:
+ print 'oops!'
+ # end if
+ # end def foobar
+ """)
+ self.pindent_test(clean, closed)
+
+ def test_preserve_indents(self):
+ clean = textwrap.dedent("""\
+ if a:
+ if b:
+ pass
+ """)
+ closed = textwrap.dedent("""\
+ if a:
+ if b:
+ pass
+ # end if
+ # end if
+ """)
+ self.assertEqual(self.pindent(clean, '-c'), closed)
+ self.assertEqual(self.pindent(closed, '-d'), clean)
+ broken = self.lstriplines(closed)
+ self.assertEqual(self.pindent(broken, '-r', '-e', '-s', '9'), closed)
+ clean = textwrap.dedent("""\
+ if a:
+ \tif b:
+ \t\tpass
+ """)
+ closed = textwrap.dedent("""\
+ if a:
+ \tif b:
+ \t\tpass
+ \t# end if
+ # end if
+ """)
+ self.assertEqual(self.pindent(clean, '-c'), closed)
+ self.assertEqual(self.pindent(closed, '-d'), clean)
+ broken = self.lstriplines(closed)
+ self.assertEqual(self.pindent(broken, '-r'), closed)
+
+ def test_escaped_newline(self):
+ clean = textwrap.dedent("""\
+ class\\
+ \\
+ A:
+ def\
+ \\
+ f:
+ pass
+ """)
+ closed = textwrap.dedent("""\
+ class\\
+ \\
+ A:
+ def\
+ \\
+ f:
+ pass
+ # end def f
+ # end class A
+ """)
+ self.assertEqual(self.pindent(clean, '-c'), closed)
+ self.assertEqual(self.pindent(closed, '-d'), clean)
+
+ def test_empty_line(self):
+ clean = textwrap.dedent("""\
+ if a:
+
+ pass
+ """)
+ closed = textwrap.dedent("""\
+ if a:
+
+ pass
+ # end if
+ """)
+ self.pindent_test(clean, closed)
+
+ def test_oneline(self):
+ clean = textwrap.dedent("""\
+ if a: pass
+ """)
+ closed = textwrap.dedent("""\
+ if a: pass
+ # end if
+ """)
+ self.pindent_test(clean, closed)
+
+
+def test_main():
+ test_support.run_unittest(*[obj for obj in globals().values()
+ if isinstance(obj, type)])
+
+
+if __name__ == '__main__':
+ unittest.main()
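test_tools.py leans on test.script_helper.assert_python_ok, which runs the interpreter in a subprocess and fails unless it exits with status 0. Conceptually it is a thin wrapper over subprocess; an illustrative approximation (the real helper also handles environment isolation and related flags, and the function name below is invented):

    import subprocess
    import sys

    def run_python_ok(*args):
        # Rough equivalent of assert_python_ok(): run the interpreter with
        # the given arguments and return (returncode, stdout, stderr).
        proc = subprocess.Popen((sys.executable,) + args,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        assert proc.returncode == 0, err
        return proc.returncode, out, err

    rc, out, err = run_python_ok('-c', 'print("hello")')
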
diff --git a/Lib/test/test_traceback.py b/Lib/test/test_traceback.py
index 8076be8..8b0322b 100644
--- a/Lib/test/test_traceback.py
+++ b/Lib/test/test_traceback.py
@@ -1,11 +1,10 @@
"""Test cases for traceback module"""
-from _testcapi import traceback_print
from StringIO import StringIO
import sys
import unittest
from imp import reload
-from test.test_support import run_unittest, is_jython, Error
+from test.test_support import run_unittest, is_jython, Error, cpython_only
import traceback
@@ -35,6 +34,9 @@ class TracebackCases(unittest.TestCase):
def syntax_error_bad_indentation(self):
compile("def spam():\n print 1\n print 2", "?", "exec")
+ def syntax_error_bad_indentation2(self):
+ compile(" print(2)", "?", "exec")
+
def test_caret(self):
err = self.get_exception_format(self.syntax_error_with_caret,
SyntaxError)
@@ -111,6 +113,13 @@ def test():
os.unlink(os.path.join(testdir, f))
os.rmdir(testdir)
+ err = self.get_exception_format(self.syntax_error_bad_indentation2,
+ IndentationError)
+ self.assertEqual(len(err), 4)
+ self.assertEqual(err[1].strip(), "print(2)")
+ self.assertIn("^", err[2])
+ self.assertEqual(err[1].find("p"), err[2].find("^"))
+
def test_base_exception(self):
# Test that exceptions derived from BaseException are formatted right
e = KeyboardInterrupt()
@@ -171,7 +180,9 @@ def test():
class TracebackFormatTests(unittest.TestCase):
+ @cpython_only
def test_traceback_format(self):
+ from _testcapi import traceback_print
try:
raise KeyError('blah')
except KeyError:
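Moving the _testcapi import from module level into the decorated test keeps test_traceback importable on implementations that lack that extension, while cpython_only turns the method into a skip elsewhere. The shape of the pattern, sketched with an invented class name:

    import unittest
    from test.test_support import cpython_only

    class FormatTests(unittest.TestCase):

        @cpython_only
        def test_needs_capi(self):
            # Imported lazily so that merely importing this module does not
            # require the CPython-only _testcapi extension.
            from _testcapi import traceback_print
            self.assertTrue(callable(traceback_print))
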
diff --git a/Lib/test/test_ttk_guionly.py b/Lib/test/test_ttk_guionly.py
index e0368be..caa6930 100644
--- a/Lib/test/test_ttk_guionly.py
+++ b/Lib/test/test_ttk_guionly.py
@@ -2,8 +2,9 @@ import os
import unittest
from test import test_support
-# Skip this test if _tkinter wasn't built.
+# Skip this test if _tkinter wasn't built or gui resource is not available.
test_support.import_module('_tkinter')
+test_support.requires('gui')
this_dir = os.path.dirname(os.path.abspath(__file__))
lib_tk_test = os.path.abspath(os.path.join(this_dir, os.path.pardir,
@@ -12,9 +13,6 @@ lib_tk_test = os.path.abspath(os.path.join(this_dir, os.path.pardir,
with test_support.DirsOnSysPath(lib_tk_test):
import runtktests
-# Skip test if tk cannot be initialized.
-runtktests.check_tk_availability()
-
import ttk
from _tkinter import TclError
@@ -24,13 +22,7 @@ except TclError, msg:
# assuming ttk is not available
raise unittest.SkipTest("ttk not available: %s" % msg)
-def test_main(enable_gui=False):
- if enable_gui:
- if test_support.use_resources is None:
- test_support.use_resources = ['gui']
- elif 'gui' not in test_support.use_resources:
- test_support.use_resources.append('gui')
-
+def test_main():
with test_support.DirsOnSysPath(lib_tk_test):
from test_ttk.support import get_tk_root
try:
@@ -40,4 +32,4 @@ def test_main(enable_gui=False):
get_tk_root().destroy()
if __name__ == '__main__':
- test_main(enable_gui=True)
+ test_main()
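The test_tk/test_ttk_guionly simplification moves GUI gating from ad-hoc enable_gui plumbing to the standard resource mechanism: test_support.requires('gui') raises a skip-style exception at import time unless the gui resource is enabled. Roughly, enabling it looks like this (the regrtest invocation is the typical one; the in-code form mirrors the removed lines above):

    # From the command line, typically:
    #   python -m test.regrtest -ugui test_tk test_ttk_guionly
    # Or, equivalently in code, before importing the test module:
    from test import test_support
    if test_support.use_resources is None:
        test_support.use_resources = ['gui']
    elif 'gui' not in test_support.use_resources:
        test_support.use_resources.append('gui')
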
diff --git a/Lib/test/test_ucn.py b/Lib/test/test_ucn.py
index 775044b..4409deb 100644
--- a/Lib/test/test_ucn.py
+++ b/Lib/test/test_ucn.py
@@ -8,9 +8,15 @@ Modified for Python 2.0 by Fredrik Lundh (fredrik@pythonware.com)
"""#"
import unittest
+import sys
from test import test_support
+try:
+ from _testcapi import INT_MAX, PY_SSIZE_T_MAX, UINT_MAX
+except ImportError:
+ INT_MAX = PY_SSIZE_T_MAX = UINT_MAX = 2**64 - 1
+
class UnicodeNamesTest(unittest.TestCase):
def checkletter(self, name, code):
@@ -137,6 +143,25 @@ class UnicodeNamesTest(unittest.TestCase):
unicode, "\\NSPACE", 'unicode-escape', 'strict'
)
+ @test_support.cpython_only
+ @unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
+ @unittest.skipUnless(UINT_MAX < sys.maxint, "needs UINT_MAX < sys.maxint")
+ @test_support.bigmemtest(minsize=UINT_MAX + 1,
+ memuse=2 + 4 // len(u'\U00010000'))
+ def test_issue16335(self, size):
+ func = self.test_issue16335
+ if size < func.minsize:
+ raise unittest.SkipTest("not enough memory: %.1fG minimum needed" %
+ (func.minsize * func.memuse / float(1024**3),))
+ # very very long bogus character name
+ x = b'\\N{SPACE' + b'x' * int(UINT_MAX + 1) + b'}'
+ self.assertEqual(len(x), len(b'\\N{SPACE}') + (UINT_MAX + 1))
+ self.assertRaisesRegexp(UnicodeError,
+ 'unknown Unicode character name',
+ x.decode, 'unicode-escape'
+ )
+
+
def test_main():
test_support.run_unittest(UnicodeNamesTest)
diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py
index fda44da..f068ca6 100644
--- a/Lib/test/test_unicode.py
+++ b/Lib/test/test_unicode.py
@@ -644,6 +644,23 @@ class UnicodeTest(
return u'\u1234'
self.assertEqual('%s' % Wrapper(), u'\u1234')
+ def test_formatting_huge_precision(self):
+ format_string = u"%.{}f".format(sys.maxsize + 1)
+ with self.assertRaises(ValueError):
+ result = format_string % 2.34
+
+ @test_support.cpython_only
+ def test_formatting_huge_precision_c_limits(self):
+ from _testcapi import INT_MAX
+ format_string = u"%.{}f".format(INT_MAX + 1)
+ with self.assertRaises(ValueError):
+ result = format_string % 2.34
+
+ def test_formatting_huge_width(self):
+ format_string = u"%{}f".format(sys.maxsize + 1)
+ with self.assertRaises(ValueError):
+ result = format_string % 2.34
+
def test_startswith_endswith_errors(self):
for meth in (u'foo'.startswith, u'foo'.endswith):
with self.assertRaises(UnicodeDecodeError):
@@ -1266,12 +1283,12 @@ class UnicodeTest(
self.assertEqual(repr(s1()), '\\n')
self.assertEqual(repr(s2()), '\\n')
+ # This test only affects 32-bit platforms because expandtabs can only take
+ # an int as the max value, not a 64-bit C long. If expandtabs is changed
+ # to take a 64-bit long, this test should apply to all platforms.
+ @unittest.skipIf(sys.maxint > (1 << 32) or struct.calcsize('P') != 4,
+ 'only applies to 32-bit platforms')
def test_expandtabs_overflows_gracefully(self):
- # This test only affects 32-bit platforms because expandtabs can only take
- # an int as the max value, not a 64-bit C long. If expandtabs is changed
- # to take a 64-bit long, this test should apply to all platforms.
- if sys.maxint > (1 << 32) or struct.calcsize('P') != 4:
- return
self.assertRaises(OverflowError, u't\tt\t'.expandtabs, sys.maxint)
def test__format__(self):
@@ -1446,6 +1463,27 @@ class UnicodeTest(
self.assertEqual(u'{0:10000}'.format(u''), u' ' * 10000)
self.assertEqual(u'{0:10000000}'.format(u''), u' ' * 10000000)
+ # issue 12546: use \x00 as a fill character
+ self.assertEqual('{0:\x00<6s}'.format('foo'), 'foo\x00\x00\x00')
+ self.assertEqual('{0:\x01<6s}'.format('foo'), 'foo\x01\x01\x01')
+ self.assertEqual('{0:\x00^6s}'.format('foo'), '\x00foo\x00\x00')
+ self.assertEqual('{0:^6s}'.format('foo'), ' foo ')
+
+ self.assertEqual('{0:\x00<6}'.format(3), '3\x00\x00\x00\x00\x00')
+ self.assertEqual('{0:\x01<6}'.format(3), '3\x01\x01\x01\x01\x01')
+ self.assertEqual('{0:\x00^6}'.format(3), '\x00\x003\x00\x00\x00')
+ self.assertEqual('{0:<6}'.format(3), '3 ')
+
+ self.assertEqual('{0:\x00<6}'.format(3.14), '3.14\x00\x00')
+ self.assertEqual('{0:\x01<6}'.format(3.14), '3.14\x01\x01')
+ self.assertEqual('{0:\x00^6}'.format(3.14), '\x003.14\x00')
+ self.assertEqual('{0:^6}'.format(3.14), ' 3.14 ')
+
+ self.assertEqual('{0:\x00<12}'.format(3+2.0j), '(3+2j)\x00\x00\x00\x00\x00\x00')
+ self.assertEqual('{0:\x01<12}'.format(3+2.0j), '(3+2j)\x01\x01\x01\x01\x01\x01')
+ self.assertEqual('{0:\x00^12}'.format(3+2.0j), '\x00\x00\x00(3+2j)\x00\x00\x00')
+ self.assertEqual('{0:^12}'.format(3+2.0j), ' (3+2j) ')
+
# format specifiers for user defined type
self.assertEqual(u'{0:abc}'.format(C()), u'abc')
@@ -1556,6 +1594,21 @@ class UnicodeTest(
# will fail
self.assertRaises(UnicodeEncodeError, "foo{0}".format, u'\u1000bar')
+ def test_format_huge_precision(self):
+ format_string = u".{}f".format(sys.maxsize + 1)
+ with self.assertRaises(ValueError):
+ result = format(2.34, format_string)
+
+ def test_format_huge_width(self):
+ format_string = u"{}f".format(sys.maxsize + 1)
+ with self.assertRaises(ValueError):
+ result = format(2.34, format_string)
+
+ def test_format_huge_item_number(self):
+ format_string = u"{{{}:.6f}}".format(sys.maxsize + 1)
+ with self.assertRaises(ValueError):
+ result = format_string.format(2.34)
+
def test_format_auto_numbering(self):
class C:
def __init__(self, x=100):
@@ -1606,6 +1659,7 @@ class UnicodeTest(
self.assertEqual("%s" % u, u'__unicode__ overridden')
self.assertEqual("{}".format(u), '__unicode__ overridden')
+ @test_support.cpython_only
def test_encode_decimal(self):
from _testcapi import unicode_encodedecimal
self.assertEqual(unicode_encodedecimal(u'123'),
@@ -1631,6 +1685,19 @@ class UnicodeTest(
self.assertEqual(unicode_encodedecimal(u"123\u20ac\u0660", "replace"),
b'123?0')
+ @test_support.cpython_only
+ def test_encode_decimal_with_surrogates(self):
+ from _testcapi import unicode_encodedecimal
+ tests = [(u'\U0001f49d', '&#128157;'),
+ (u'\ud83d', '&#55357;'),
+ (u'\udc9d', '&#56477;'),
+ ]
+ if u'\ud83d\udc9d' != u'\U0001f49d':
+ tests += [(u'\ud83d\udc9d', '&#55357;&#56477;')]
+ for s, exp in tests:
+ self.assertEqual(
+ unicode_encodedecimal(u"123" + s, "xmlcharrefreplace"),
+ '123' + exp)
def test_main():
test_support.run_unittest(__name__)
diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py
index 91aeb2f..0032865 100644
--- a/Lib/test/test_urllib.py
+++ b/Lib/test/test_urllib.py
@@ -222,6 +222,27 @@ Content-Type: text/html; charset=iso-8859-1
finally:
self.unfakehttp()
+ def test_missing_localfile(self):
+ self.assertRaises(IOError, urllib.urlopen,
+ 'file://localhost/a/missing/file.py')
+ fd, tmp_file = tempfile.mkstemp()
+ tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
+ self.assertTrue(os.path.exists(tmp_file))
+ try:
+ fp = urllib.urlopen(tmp_fileurl)
+ fp.close()
+ finally:
+ os.close(fd)
+ os.unlink(tmp_file)
+
+ self.assertFalse(os.path.exists(tmp_file))
+ self.assertRaises(IOError, urllib.urlopen, tmp_fileurl)
+
+ def test_ftp_nonexisting(self):
+ self.assertRaises(IOError, urllib.urlopen,
+ 'ftp://localhost/not/existing/file.py')
+
+
def test_userpass_inurl(self):
self.fakehttp('Hello!')
try:
@@ -768,6 +789,26 @@ class Utility_Tests(unittest.TestCase):
self.assertEqual(('user 2', 'ab'),urllib.splitpasswd('user 2:ab'))
self.assertEqual(('user+1', 'a+b'),urllib.splitpasswd('user+1:a+b'))
+ def test_splitport(self):
+ splitport = urllib.splitport
+ self.assertEqual(splitport('parrot:88'), ('parrot', '88'))
+ self.assertEqual(splitport('parrot'), ('parrot', None))
+ self.assertEqual(splitport('parrot:'), ('parrot', None))
+ self.assertEqual(splitport('127.0.0.1'), ('127.0.0.1', None))
+ self.assertEqual(splitport('parrot:cheese'), ('parrot:cheese', None))
+
+ def test_splitnport(self):
+ splitnport = urllib.splitnport
+ self.assertEqual(splitnport('parrot:88'), ('parrot', 88))
+ self.assertEqual(splitnport('parrot'), ('parrot', -1))
+ self.assertEqual(splitnport('parrot', 55), ('parrot', 55))
+ self.assertEqual(splitnport('parrot:'), ('parrot', -1))
+ self.assertEqual(splitnport('parrot:', 55), ('parrot', 55))
+ self.assertEqual(splitnport('127.0.0.1'), ('127.0.0.1', -1))
+ self.assertEqual(splitnport('127.0.0.1', 55), ('127.0.0.1', 55))
+ self.assertEqual(splitnport('parrot:cheese'), ('parrot', None))
+ self.assertEqual(splitnport('parrot:cheese', 55), ('parrot', None))
+
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
@@ -791,7 +832,7 @@ class URLopener_Tests(unittest.TestCase):
# Everywhere else they work ok, but on those machines, sometimes
# fail in one of the tests, sometimes in other. I have a linux, and
# the tests go ok.
-# If anybody has one of the problematic enviroments, please help!
+# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
@@ -837,7 +878,7 @@ class URLopener_Tests(unittest.TestCase):
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
-# self.assertTrue(socket.getdefaulttimeout() is None)
+# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
@@ -849,7 +890,7 @@ class URLopener_Tests(unittest.TestCase):
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
-# self.assertTrue(socket.getdefaulttimeout() is None)
+# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
diff --git a/Lib/test/test_urllib2.py b/Lib/test/test_urllib2.py
index 7f230e2..39a0132 100644
--- a/Lib/test/test_urllib2.py
+++ b/Lib/test/test_urllib2.py
@@ -591,8 +591,8 @@ class OpenerDirectorTests(unittest.TestCase):
self.assertIsInstance(args[0], Request)
# response from opener.open is None, because there's no
# handler that defines http_open to handle it
- self.assertTrue(args[1] is None or
- isinstance(args[1], MockResponse))
+ if args[1] is not None:
+ self.assertIsInstance(args[1], MockResponse)
def sanepathname2url(path):
@@ -924,7 +924,8 @@ class HandlerTests(unittest.TestCase):
MockHeaders({"location": to_url}))
except urllib2.HTTPError:
# 307 in response to POST requires user OK
- self.assertTrue(code == 307 and data is not None)
+ self.assertEqual(code, 307)
+ self.assertIsNotNone(data)
self.assertEqual(o.req.get_full_url(), to_url)
try:
self.assertEqual(o.req.get_method(), "GET")
@@ -1106,12 +1107,30 @@ class HandlerTests(unittest.TestCase):
self._test_basic_auth(opener, auth_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
- "http://acme.example.com/protected",
- )
+ "http://acme.example.com/protected"
+ )
def test_basic_auth_with_single_quoted_realm(self):
self.test_basic_auth(quote_char="'")
+ def test_basic_auth_with_unquoted_realm(self):
+ opener = OpenerDirector()
+ password_manager = MockPasswordManager()
+ auth_handler = urllib2.HTTPBasicAuthHandler(password_manager)
+ realm = "ACME Widget Store"
+ http_handler = MockHTTPHandler(
+ 401, 'WWW-Authenticate: Basic realm=%s\r\n\r\n' % realm)
+ opener.add_handler(auth_handler)
+ opener.add_handler(http_handler)
+ msg = "Basic Auth Realm was unquoted"
+ with test_support.check_warnings((msg, UserWarning)):
+ self._test_basic_auth(opener, auth_handler, "Authorization",
+ realm, http_handler, password_manager,
+ "http://acme.example.com/protected",
+ "http://acme.example.com/protected"
+ )
+
+
def test_proxy_basic_auth(self):
opener = OpenerDirector()
ph = urllib2.ProxyHandler(dict(http="proxy.example.com:3128"))
@@ -1130,7 +1149,7 @@ class HandlerTests(unittest.TestCase):
)
def test_basic_and_digest_auth_handlers(self):
- # HTTPDigestAuthHandler threw an exception if it couldn't handle a 40*
+ # HTTPDigestAuthHandler raised an exception if it couldn't handle a 40*
# response (http://python.org/sf/1479302), where it should instead
# return None to allow another handler (especially
# HTTPBasicAuthHandler) to handle the response.
@@ -1318,16 +1337,32 @@ class RequestTests(unittest.TestCase):
req = Request(url)
self.assertEqual(req.get_full_url(), url)
-def test_HTTPError_interface():
- """
- Issue 13211 reveals that HTTPError didn't implement the URLError
- interface even though HTTPError is a subclass of URLError.
-
- >>> err = urllib2.HTTPError(msg='something bad happened', url=None, code=None, hdrs=None, fp=None)
- >>> assert hasattr(err, 'reason')
- >>> err.reason
- 'something bad happened'
- """
+ def test_HTTPError_interface(self):
+ """
+ Issue 13211 reveals that HTTPError didn't implement the URLError
+ interface even though HTTPError is a subclass of URLError.
+
+ >>> err = urllib2.HTTPError(msg='something bad happened', url=None, code=None, hdrs=None, fp=None)
+ >>> assert hasattr(err, 'reason')
+ >>> err.reason
+ 'something bad happened'
+ """
+
+ def test_HTTPError_interface_call(self):
+ """
+ Issue 15701 - HTTPError interface has info method available from URLError.
+ """
+ err = urllib2.HTTPError(msg='something bad happened', url=None,
+ code=None, hdrs='Content-Length:42', fp=None)
+ self.assertTrue(hasattr(err, 'reason'))
+ assert hasattr(err, 'reason')
+ assert hasattr(err, 'info')
+ assert callable(err.info)
+ try:
+ err.info()
+ except AttributeError:
+ self.fail("err.info() failed")
+ self.assertEqual(err.info(), "Content-Length:42")
def test_main(verbose=None):
from test import test_urllib2
diff --git a/Lib/test/test_urllib2_localnet.py b/Lib/test/test_urllib2_localnet.py
index 0fda770..2e87f11 100644
--- a/Lib/test/test_urllib2_localnet.py
+++ b/Lib/test/test_urllib2_localnet.py
@@ -1,11 +1,11 @@
-#!/usr/bin/env python
-
import urlparse
import urllib2
import BaseHTTPServer
import unittest
import hashlib
+
from test import test_support
+
mimetools = test_support.import_module('mimetools', deprecated=True)
threading = test_support.import_module('threading')
@@ -346,6 +346,12 @@ class TestUrlopen(BaseTestCase):
for transparent redirection have been written.
"""
+ def setUp(self):
+ proxy_handler = urllib2.ProxyHandler({})
+ opener = urllib2.build_opener(proxy_handler)
+ urllib2.install_opener(opener)
+ super(TestUrlopen, self).setUp()
+
def start_server(self, responses):
handler = GetRequestHandler(responses)
@@ -481,6 +487,11 @@ class TestUrlopen(BaseTestCase):
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
+
+ # as indicated by the comment below, this might fail with some ISP,
+ # so we run the test only when -unetwork/-uall is specified to
+ # mitigate the problem a bit (see #17564)
+ test_support.requires('network')
self.assertRaises(IOError,
# Given that both VeriSign and various ISPs have in
# the past or are presently hijacking various invalid
diff --git a/Lib/test/test_urllib2net.py b/Lib/test/test_urllib2net.py
index dcdbfe8..91a0743 100644
--- a/Lib/test/test_urllib2net.py
+++ b/Lib/test/test_urllib2net.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
import unittest
from test import test_support
from test.test_urllib2 import sanepathname2url
@@ -80,13 +78,13 @@ class CloseSocketTest(unittest.TestCase):
# underlying socket
# delve deep into response to fetch socket._socketobject
- response = _urlopen_with_retry("http://www.python.org/")
+ response = _urlopen_with_retry("http://www.example.com/")
abused_fileobject = response.fp
- self.assertTrue(abused_fileobject.__class__ is socket._fileobject)
+ self.assertIs(abused_fileobject.__class__, socket._fileobject)
httpresponse = abused_fileobject._sock
- self.assertTrue(httpresponse.__class__ is httplib.HTTPResponse)
+ self.assertIs(httpresponse.__class__, httplib.HTTPResponse)
fileobject = httpresponse.fp
- self.assertTrue(fileobject.__class__ is socket._fileobject)
+ self.assertIs(fileobject.__class__, socket._fileobject)
self.assertTrue(not fileobject.closed)
response.close()
@@ -157,15 +155,15 @@ class OtherNetworkTests(unittest.TestCase):
## self._test_urls(urls, self._extra_handlers()+[bauth, dauth])
def test_urlwithfrag(self):
- urlwith_frag = "http://docs.python.org/glossary.html#glossary"
+ urlwith_frag = "https://docs.python.org/2/glossary.html#glossary"
with test_support.transient_internet(urlwith_frag):
req = urllib2.Request(urlwith_frag)
res = urllib2.urlopen(req)
self.assertEqual(res.geturl(),
- "http://docs.python.org/glossary.html#glossary")
+ "https://docs.python.org/2/glossary.html#glossary")
def test_fileno(self):
- req = urllib2.Request("http://www.python.org")
+ req = urllib2.Request("http://www.example.com")
opener = urllib2.build_opener()
res = opener.open(req)
try:
@@ -252,15 +250,15 @@ class OtherNetworkTests(unittest.TestCase):
class TimeoutTest(unittest.TestCase):
def test_http_basic(self):
- self.assertTrue(socket.getdefaulttimeout() is None)
- url = "http://www.python.org"
+ self.assertIsNone(socket.getdefaulttimeout())
+ url = "http://www.example.com"
with test_support.transient_internet(url, timeout=None):
u = _urlopen_with_retry(url)
- self.assertTrue(u.fp._sock.fp._sock.gettimeout() is None)
+ self.assertIsNone(u.fp._sock.fp._sock.gettimeout())
def test_http_default_timeout(self):
- self.assertTrue(socket.getdefaulttimeout() is None)
- url = "http://www.python.org"
+ self.assertIsNone(socket.getdefaulttimeout())
+ url = "http://www.example.com"
with test_support.transient_internet(url):
socket.setdefaulttimeout(60)
try:
@@ -270,18 +268,18 @@ class TimeoutTest(unittest.TestCase):
self.assertEqual(u.fp._sock.fp._sock.gettimeout(), 60)
def test_http_no_timeout(self):
- self.assertTrue(socket.getdefaulttimeout() is None)
- url = "http://www.python.org"
+ self.assertIsNone(socket.getdefaulttimeout())
+ url = "http://www.example.com"
with test_support.transient_internet(url):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(url, timeout=None)
finally:
socket.setdefaulttimeout(None)
- self.assertTrue(u.fp._sock.fp._sock.gettimeout() is None)
+ self.assertIsNone(u.fp._sock.fp._sock.gettimeout())
def test_http_timeout(self):
- url = "http://www.python.org"
+ url = "http://www.example.com"
with test_support.transient_internet(url):
u = _urlopen_with_retry(url, timeout=120)
self.assertEqual(u.fp._sock.fp._sock.gettimeout(), 120)
@@ -289,13 +287,13 @@ class TimeoutTest(unittest.TestCase):
FTP_HOST = "ftp://ftp.mirror.nl/pub/gnu/"
def test_ftp_basic(self):
- self.assertTrue(socket.getdefaulttimeout() is None)
+ self.assertIsNone(socket.getdefaulttimeout())
with test_support.transient_internet(self.FTP_HOST, timeout=None):
u = _urlopen_with_retry(self.FTP_HOST)
- self.assertTrue(u.fp.fp._sock.gettimeout() is None)
+ self.assertIsNone(u.fp.fp._sock.gettimeout())
def test_ftp_default_timeout(self):
- self.assertTrue(socket.getdefaulttimeout() is None)
+ self.assertIsNone(socket.getdefaulttimeout())
with test_support.transient_internet(self.FTP_HOST):
socket.setdefaulttimeout(60)
try:
@@ -305,14 +303,14 @@ class TimeoutTest(unittest.TestCase):
self.assertEqual(u.fp.fp._sock.gettimeout(), 60)
def test_ftp_no_timeout(self):
- self.assertTrue(socket.getdefaulttimeout() is None)
+ self.assertIsNone(socket.getdefaulttimeout(),)
with test_support.transient_internet(self.FTP_HOST):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(self.FTP_HOST, timeout=None)
finally:
socket.setdefaulttimeout(None)
- self.assertTrue(u.fp.fp._sock.gettimeout() is None)
+ self.assertIsNone(u.fp.fp._sock.gettimeout())
def test_ftp_timeout(self):
with test_support.transient_internet(self.FTP_HOST):
diff --git a/Lib/test/test_urllibnet.py b/Lib/test/test_urllibnet.py
index 1d88331..9f24b7a 100644
--- a/Lib/test/test_urllibnet.py
+++ b/Lib/test/test_urllibnet.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
import unittest
from test import test_support
@@ -36,7 +34,7 @@ class URLTimeoutTest(unittest.TestCase):
socket.setdefaulttimeout(None)
def testURLread(self):
- f = _open_with_retry(urllib.urlopen, "http://www.python.org/")
+ f = _open_with_retry(urllib.urlopen, "http://www.example.com/")
x = f.read()
class urlopenNetworkTests(unittest.TestCase):
@@ -48,7 +46,7 @@ class urlopenNetworkTests(unittest.TestCase):
for transparent redirection have been written.
setUp is not used for always constructing a connection to
- http://www.python.org/ since there a few tests that don't use that address
+ http://www.example.com/ since there are a few tests that don't use that address
and making a connection is expensive enough to warrant minimizing unneeded
connections.
@@ -59,7 +57,7 @@ class urlopenNetworkTests(unittest.TestCase):
def test_basic(self):
# Simple test expected to pass.
- open_url = self.urlopen("http://www.python.org/")
+ open_url = self.urlopen("http://www.example.com/")
for attr in ("read", "readline", "readlines", "fileno", "close",
"info", "geturl"):
self.assertTrue(hasattr(open_url, attr), "object returned from "
@@ -71,7 +69,7 @@ class urlopenNetworkTests(unittest.TestCase):
def test_readlines(self):
# Test both readline and readlines.
- open_url = self.urlopen("http://www.python.org/")
+ open_url = self.urlopen("http://www.example.com/")
try:
self.assertIsInstance(open_url.readline(), basestring,
"readline did not return a string")
@@ -82,7 +80,7 @@ class urlopenNetworkTests(unittest.TestCase):
def test_info(self):
# Test 'info'.
- open_url = self.urlopen("http://www.python.org/")
+ open_url = self.urlopen("http://www.example.com/")
try:
info_obj = open_url.info()
finally:
@@ -94,7 +92,7 @@ class urlopenNetworkTests(unittest.TestCase):
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
- URL = "http://www.python.org/"
+ URL = "http://www.example.com/"
open_url = self.urlopen(URL)
try:
gotten_url = open_url.geturl()
@@ -104,7 +102,7 @@ class urlopenNetworkTests(unittest.TestCase):
def test_getcode(self):
# test getcode() with the fancy opener to get 404 error codes
- URL = "http://www.python.org/XXXinvalidXXX"
+ URL = "http://www.example.com/XXXinvalidXXX"
open_url = urllib.FancyURLopener().open(URL)
try:
code = open_url.getcode()
@@ -112,14 +110,11 @@ class urlopenNetworkTests(unittest.TestCase):
open_url.close()
self.assertEqual(code, 404)
+ @unittest.skipIf(sys.platform in ('win32',), 'not appropriate for Windows')
+ @unittest.skipUnless(hasattr(os, 'fdopen'), 'os.fdopen not available')
def test_fileno(self):
- if (sys.platform in ('win32',) or
- not hasattr(os, 'fdopen')):
- # On Windows, socket handles are not file descriptors; this
- # test can't pass on Windows.
- return
# Make sure fd returned by fileno is valid.
- open_url = self.urlopen("http://www.python.org/")
+ open_url = self.urlopen("http://www.example.com/")
fd = open_url.fileno()
FILE = os.fdopen(fd)
try:
@@ -157,7 +152,7 @@ class urlretrieveNetworkTests(unittest.TestCase):
def test_basic(self):
# Test basic functionality.
- file_location,info = self.urlretrieve("http://www.python.org/")
+ file_location,info = self.urlretrieve("http://www.example.com/")
self.assertTrue(os.path.exists(file_location), "file location returned by"
" urlretrieve is not a valid path")
FILE = file(file_location)
@@ -170,7 +165,7 @@ class urlretrieveNetworkTests(unittest.TestCase):
def test_specified_path(self):
# Make sure that specifying the location of the file to write to works.
- file_location,info = self.urlretrieve("http://www.python.org/",
+ file_location,info = self.urlretrieve("http://www.example.com/",
test_support.TESTFN)
self.assertEqual(file_location, test_support.TESTFN)
self.assertTrue(os.path.exists(file_location))
@@ -183,13 +178,13 @@ class urlretrieveNetworkTests(unittest.TestCase):
def test_header(self):
# Make sure header returned as 2nd value from urlretrieve is good.
- file_location, header = self.urlretrieve("http://www.python.org/")
+ file_location, header = self.urlretrieve("http://www.example.com/")
os.unlink(file_location)
self.assertIsInstance(header, mimetools.Message,
"header is not an instance of mimetools.Message")
def test_data_header(self):
- logo = "http://www.python.org/community/logos/python-logo-master-v3-TM.png"
+ logo = "http://www.example.com/"
file_location, fileheaders = self.urlretrieve(logo)
os.unlink(file_location)
datevalue = fileheaders.getheader('Date')
diff --git a/Lib/test/test_urlparse.py b/Lib/test/test_urlparse.py
index 39a897a..b3ad7cd 100644
--- a/Lib/test/test_urlparse.py
+++ b/Lib/test/test_urlparse.py
@@ -1,5 +1,3 @@
-#! /usr/bin/env python
-
from test import test_support
import unittest
import urlparse
@@ -364,6 +362,16 @@ class UrlParseTestCase(unittest.TestCase):
('http://[::12.34.56.78]/foo/', '::12.34.56.78', None),
('http://[::ffff:12.34.56.78]/foo/',
'::ffff:12.34.56.78', None),
+ ('http://Test.python.org:/foo/', 'test.python.org', None),
+ ('http://12.34.56.78:/foo/', '12.34.56.78', None),
+ ('http://[::1]:/foo/', '::1', None),
+ ('http://[dead:beef::1]:/foo/', 'dead:beef::1', None),
+ ('http://[dead:beef::]:/foo/', 'dead:beef::', None),
+ ('http://[dead:beef:cafe:5417:affe:8FA3:deaf:feed]:/foo/',
+ 'dead:beef:cafe:5417:affe:8fa3:deaf:feed', None),
+ ('http://[::12.34.56.78]:/foo/', '::12.34.56.78', None),
+ ('http://[::ffff:12.34.56.78]:/foo/',
+ '::ffff:12.34.56.78', None),
]:
urlparsed = urlparse.urlparse(url)
self.assertEqual((urlparsed.hostname, urlparsed.port) , (hostname, port))
@@ -437,6 +445,51 @@ class UrlParseTestCase(unittest.TestCase):
self.assertEqual(p.port, 80)
self.assertEqual(p.geturl(), url)
+ # Verify an illegal port of value greater than 65535 is set as None
+ url = "http://www.python.org:65536"
+ p = urlparse.urlsplit(url)
+ self.assertEqual(p.port, None)
+
+ def test_issue14072(self):
+ p1 = urlparse.urlsplit('tel:+31-641044153')
+ self.assertEqual(p1.scheme, 'tel')
+ self.assertEqual(p1.path, '+31-641044153')
+
+ p2 = urlparse.urlsplit('tel:+31641044153')
+ self.assertEqual(p2.scheme, 'tel')
+ self.assertEqual(p2.path, '+31641044153')
+
+ # Assert for urlparse
+ p1 = urlparse.urlparse('tel:+31-641044153')
+ self.assertEqual(p1.scheme, 'tel')
+ self.assertEqual(p1.path, '+31-641044153')
+
+ p2 = urlparse.urlparse('tel:+31641044153')
+ self.assertEqual(p2.scheme, 'tel')
+ self.assertEqual(p2.path, '+31641044153')
+
+
+ def test_telurl_params(self):
+ p1 = urlparse.urlparse('tel:123-4;phone-context=+1-650-516')
+ self.assertEqual(p1.scheme, 'tel')
+ self.assertEqual(p1.path, '123-4')
+ self.assertEqual(p1.params, 'phone-context=+1-650-516')
+
+ p1 = urlparse.urlparse('tel:+1-201-555-0123')
+ self.assertEqual(p1.scheme, 'tel')
+ self.assertEqual(p1.path, '+1-201-555-0123')
+ self.assertEqual(p1.params, '')
+
+ p1 = urlparse.urlparse('tel:7042;phone-context=example.com')
+ self.assertEqual(p1.scheme, 'tel')
+ self.assertEqual(p1.path, '7042')
+ self.assertEqual(p1.params, 'phone-context=example.com')
+
+ p1 = urlparse.urlparse('tel:863-1234;phone-context=+1-914-555')
+ self.assertEqual(p1.scheme, 'tel')
+ self.assertEqual(p1.path, '863-1234')
+ self.assertEqual(p1.params, 'phone-context=+1-914-555')
+
def test_attributes_bad_port(self):
"""Check handling of non-integer ports."""
@@ -493,6 +546,10 @@ class UrlParseTestCase(unittest.TestCase):
('s3','foo.com','/stuff','','',''))
self.assertEqual(urlparse.urlparse("x-newscheme://foo.com/stuff"),
('x-newscheme','foo.com','/stuff','','',''))
+ self.assertEqual(urlparse.urlparse("x-newscheme://foo.com/stuff?query#fragment"),
+ ('x-newscheme','foo.com','/stuff','','query','fragment'))
+ self.assertEqual(urlparse.urlparse("x-newscheme://foo.com/stuff?query"),
+ ('x-newscheme','foo.com','/stuff','','query',''))
def test_withoutscheme(self):
# Test urlparse without scheme
diff --git a/Lib/test/test_userstring.py b/Lib/test/test_userstring.py
index 2625985..51d8e8b 100755..100644
--- a/Lib/test/test_userstring.py
+++ b/Lib/test/test_userstring.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# UserString is a wrapper around the native builtin string type.
# UserString instances should behave similar to builtin string objects.
diff --git a/Lib/test/test_uu.py b/Lib/test/test_uu.py
index 95c9552..31046b0 100644
--- a/Lib/test/test_uu.py
+++ b/Lib/test/test_uu.py
@@ -48,7 +48,7 @@ class UUTest(unittest.TestCase):
out = cStringIO.StringIO()
try:
uu.decode(inp, out)
- self.fail("No exception thrown")
+ self.fail("No exception raised")
except uu.Error, e:
self.assertEqual(str(e), "Truncated input file")
@@ -57,7 +57,7 @@ class UUTest(unittest.TestCase):
out = cStringIO.StringIO()
try:
uu.decode(inp, out)
- self.fail("No exception thrown")
+ self.fail("No exception raised")
except uu.Error, e:
self.assertEqual(str(e), "No valid begin line found in input file")
diff --git a/Lib/test/test_uuid.py b/Lib/test/test_uuid.py
index e503084..5ba6845 100644
--- a/Lib/test/test_uuid.py
+++ b/Lib/test/test_uuid.py
@@ -1,5 +1,7 @@
-from unittest import TestCase
+import unittest
from test import test_support
+import io
+import os
import uuid
def importable(name):
@@ -9,7 +11,7 @@ def importable(name):
except:
return False
-class TestUUID(TestCase):
+class TestUUID(unittest.TestCase):
last_node = None
source2node = {}
@@ -299,24 +301,22 @@ class TestUUID(TestCase):
else:
TestUUID.last_node = node
+ @unittest.skipUnless(os.name == 'posix', 'requires Posix')
def test_ifconfig_getnode(self):
- import sys
- import os
- if os.name == 'posix':
- node = uuid._ifconfig_getnode()
- if node is not None:
- self.check_node(node, 'ifconfig')
+ node = uuid._ifconfig_getnode()
+ if node is not None:
+ self.check_node(node, 'ifconfig')
+ @unittest.skipUnless(os.name == 'nt', 'requires Windows')
def test_ipconfig_getnode(self):
- import os
- if os.name == 'nt':
- node = uuid._ipconfig_getnode()
- if node is not None:
- self.check_node(node, 'ipconfig')
+ node = uuid._ipconfig_getnode()
+ if node is not None:
+ self.check_node(node, 'ipconfig')
+ @unittest.skipUnless(importable('win32wnet'), 'requires win32wnet')
+ @unittest.skipUnless(importable('netbios'), 'requires netbios')
def test_netbios_getnode(self):
- if importable('win32wnet') and importable('netbios'):
- self.check_node(uuid._netbios_getnode(), 'netbios')
+ self.check_node(uuid._netbios_getnode(), 'netbios')
def test_random_getnode(self):
node = uuid._random_getnode()
@@ -324,22 +324,20 @@ class TestUUID(TestCase):
self.assertTrue(node & 0x010000000000)
self.assertTrue(node < (1L << 48))
+ @unittest.skipUnless(os.name == 'posix', 'requires Posix')
+ @unittest.skipUnless(importable('ctypes'), 'requires ctypes')
def test_unixdll_getnode(self):
- import sys
- import os
- if importable('ctypes') and os.name == 'posix':
- try: # Issues 1481, 3581: _uuid_generate_time() might be None.
- self.check_node(uuid._unixdll_getnode(), 'unixdll')
- except TypeError:
- pass
+ try: # Issues 1481, 3581: _uuid_generate_time() might be None.
+ self.check_node(uuid._unixdll_getnode(), 'unixdll')
+ except TypeError:
+ pass
+ @unittest.skipUnless(os.name == 'nt', 'requires Windows')
+ @unittest.skipUnless(importable('ctypes'), 'requires ctypes')
def test_windll_getnode(self):
- import os
- if importable('ctypes') and os.name == 'nt':
- self.check_node(uuid._windll_getnode(), 'windll')
+ self.check_node(uuid._windll_getnode(), 'windll')
def test_getnode(self):
- import sys
node1 = uuid.getnode()
self.check_node(node1, "getnode1")
@@ -349,13 +347,39 @@ class TestUUID(TestCase):
self.assertEqual(node1, node2)
+ @unittest.skipUnless(os.name == 'posix', 'requires Posix')
+ def test_find_mac(self):
+ data = '''\
+
+fake hwaddr
+cscotun0 Link encap:UNSPEC HWaddr 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00
+eth0 Link encap:Ethernet HWaddr 12:34:56:78:90:ab
+'''
+ def mock_popen(cmd):
+ return io.BytesIO(data)
+
+ path = os.environ.get("PATH", os.defpath).split(os.pathsep)
+ path.extend(('/sbin', '/usr/sbin'))
+ for dir in path:
+ executable = os.path.join(dir, 'ifconfig')
+ if (os.path.exists(executable) and
+ os.access(executable, os.F_OK | os.X_OK) and
+ not os.path.isdir(executable)):
+ break
+ else:
+ self.skipTest('requires ifconfig')
+
+ with test_support.swap_attr(os, 'popen', mock_popen):
+ mac = uuid._find_mac(
+ command='ifconfig',
+ args='',
+ hw_identifiers=['hwaddr'],
+ get_index=lambda x: x + 1,
+ )
+ self.assertEqual(mac, 0x1234567890ab)
+
+ @unittest.skipUnless(importable('ctypes'), 'requires ctypes')
def test_uuid1(self):
- # uuid1 requires ctypes.
- try:
- import ctypes
- except ImportError:
- return
-
equal = self.assertEqual
# Make sure uuid1() generates UUIDs that are actually version 1.
@@ -408,13 +432,8 @@ class TestUUID(TestCase):
equal(u, uuid.UUID(v))
equal(str(u), v)
+ @unittest.skipUnless(importable('ctypes'), 'requires ctypes')
def test_uuid4(self):
- # uuid4 requires ctypes.
- try:
- import ctypes
- except ImportError:
- return
-
equal = self.assertEqual
# Make sure uuid4() generates UUIDs that are actually version 4.
@@ -446,12 +465,8 @@ class TestUUID(TestCase):
equal(u, uuid.UUID(v))
equal(str(u), v)
+ @unittest.skipUnless(os.name == 'posix', 'requires Posix')
def testIssue8621(self):
- import os
- import sys
- if os.name != 'posix':
- return
-
# On at least some versions of OSX uuid.uuid4 generates
# the same sequence of UUIDs in the parent and any
# children started using fork.
@@ -465,6 +480,7 @@ class TestUUID(TestCase):
else:
os.close(fds[1])
+ self.addCleanup(os.close, fds[0])
parent_value = uuid.uuid4().hex
os.waitpid(pid, 0)
child_value = os.read(fds[0], 100)
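The new test_find_mac case substitutes os.popen using test_support.swap_attr, a small context manager that replaces an attribute for the duration of a with-block and restores it afterwards. Roughly equivalent to the following re-implementation (illustrative, not the test_support source):

    import contextlib

    @contextlib.contextmanager
    def swap_attr(obj, attr, new_val):
        # Set obj.attr to new_val, then put the original value back
        # (or delete the attribute) when the block exits.
        if hasattr(obj, attr):
            real_val = getattr(obj, attr)
            setattr(obj, attr, new_val)
            try:
                yield
            finally:
                setattr(obj, attr, real_val)
        else:
            setattr(obj, attr, new_val)
            try:
                yield
            finally:
                delattr(obj, attr)
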
diff --git a/Lib/test/test_wait4.py b/Lib/test/test_wait4.py
index d04a11b..54580a9 100644
--- a/Lib/test/test_wait4.py
+++ b/Lib/test/test_wait4.py
@@ -3,6 +3,7 @@
import os
import time
+import sys
from test.fork_wait import ForkWait
from test.test_support import run_unittest, reap_children, get_attribute
@@ -13,10 +14,15 @@ get_attribute(os, 'wait4')
class Wait4Test(ForkWait):
def wait_impl(self, cpid):
+ option = os.WNOHANG
+ if sys.platform.startswith('aix'):
+ # Issue #11185: wait4 is broken on AIX and will always return 0
+ # with WNOHANG.
+ option = 0
for i in range(10):
# wait4() shouldn't hang, but some of the buildbots seem to hang
# in the forking tests. This is an attempt to fix the problem.
- spid, status, rusage = os.wait4(cpid, os.WNOHANG)
+ spid, status, rusage = os.wait4(cpid, option)
if spid == cpid:
break
time.sleep(1.0)
diff --git a/Lib/test/test_warnings.py b/Lib/test/test_warnings.py
index e502ed8..a770ba2 100644
--- a/Lib/test/test_warnings.py
+++ b/Lib/test/test_warnings.py
@@ -259,11 +259,10 @@ class WarnTests(unittest.TestCase):
finally:
warning_tests.__file__ = filename
+ @unittest.skipUnless(hasattr(sys, 'argv'), 'test needs sys.argv')
def test_missing_filename_main_with_argv(self):
# If __file__ is not specified and the caller is __main__ and sys.argv
# exists, then use sys.argv[0] as the file.
- if not hasattr(sys, 'argv'):
- return
filename = warning_tests.__file__
module_name = warning_tests.__name__
try:
@@ -669,7 +668,7 @@ class CatchWarningTests(BaseTest):
# Explicit tests for the test_support convenience wrapper
wmod = self.module
if wmod is not sys.modules['warnings']:
- return
+ self.skipTest('module to test is not loaded warnings module')
with test_support.check_warnings(quiet=False) as w:
self.assertEqual(w.warnings, [])
wmod.simplefilter("always")
diff --git a/Lib/test/test_wave.py b/Lib/test/test_wave.py
index 02f5fae..9513df4 100644
--- a/Lib/test/test_wave.py
+++ b/Lib/test/test_wave.py
@@ -1,45 +1,123 @@
from test.test_support import TESTFN, run_unittest
-import os
-import wave
import unittest
+from test import audiotests
+import sys
+import wave
+
+
+class WaveTest(audiotests.AudioWriteTests,
+ audiotests.AudioTestsWithSourceFile):
+ module = wave
+ test_unseekable_write = None
+ test_unseekable_overflowed_write = None
+ test_unseekable_incompleted_write = None
+
+
+class WavePCM8Test(WaveTest, unittest.TestCase):
+ sndfilename = 'pluck-pcm8.wav'
+ sndfilenframes = 3307
+ nchannels = 2
+ sampwidth = 1
+ framerate = 11025
+ nframes = 48
+ comptype = 'NONE'
+ compname = 'not compressed'
+ frames = audiotests.fromhex("""\
+ 827F CB80 B184 0088 4B86 C883 3F81 837E 387A 3473 A96B 9A66 \
+ 6D64 4662 8E60 6F60 D762 7B68 936F 5877 177B 757C 887B 5F7B \
+ 917A BE7B 3C7C E67F 4F84 C389 418E D192 6E97 0296 FF94 0092 \
+ C98E D28D 6F8F 4E8F 648C E38A 888A AB8B D18E 0B91 368E C48A \
+ """)
+
+
+class WavePCM16Test(WaveTest, unittest.TestCase):
+ sndfilename = 'pluck-pcm16.wav'
+ sndfilenframes = 3307
+ nchannels = 2
+ sampwidth = 2
+ framerate = 11025
+ nframes = 48
+ comptype = 'NONE'
+ compname = 'not compressed'
+ frames = audiotests.fromhex("""\
+ 022EFFEA 4B5C00F9 311404EF 80DC0843 CBDF06B2 48AA03F3 BFE701B2 036BFE7C \
+ B857FA3E B4B2F34F 2999EBCA 1A5FE6D7 EDFCE491 C626E279 0E05E0B8 EF27E02D \
+ 5754E275 FB31E843 1373EF89 D827F72C 978BFB7A F5F7FC11 0866FB9C DF30FB42 \
+ 117FFA36 3EE4FB5D BC75FCB6 66D5FF5F CF16040E 43220978 C1BC0EC8 511F12A4 \
+ EEDF1755 82061666 7FFF1446 80001296 499C0EB2 52BA0DB9 EFB70F5C CE400FBC \
+ E4B50CEB 63440A5A 08CA0A1F 2BBA0B0B 51460E47 8BCB113C B6F50EEA 44150A59 \
+ """)
+ if sys.byteorder != 'big':
+ frames = audiotests.byteswap2(frames)
+
+ if sys.byteorder == 'big':
+ @unittest.expectedFailure
+ def test_unseekable_incompleted_write(self):
+ super().test_unseekable_incompleted_write()
+
+
+
+class WavePCM24Test(WaveTest, unittest.TestCase):
+ sndfilename = 'pluck-pcm24.wav'
+ sndfilenframes = 3307
+ nchannels = 2
+ sampwidth = 3
+ framerate = 11025
+ nframes = 48
+ comptype = 'NONE'
+ compname = 'not compressed'
+ frames = audiotests.fromhex("""\
+ 022D65FFEB9D 4B5A0F00FA54 3113C304EE2B 80DCD6084303 \
+ CBDEC006B261 48A99803F2F8 BFE82401B07D 036BFBFE7B5D \
+ B85756FA3EC9 B4B055F3502B 299830EBCB62 1A5CA7E6D99A \
+ EDFA3EE491BD C625EBE27884 0E05A9E0B6CF EF2929E02922 \
+ 5758D8E27067 FB3557E83E16 1377BFEF8402 D82C5BF7272A \
+ 978F16FB7745 F5F865FC1013 086635FB9C4E DF30FCFB40EE \
+ 117FE0FA3438 3EE6B8FB5AC3 BC77A3FCB2F4 66D6DAFF5F32 \
+ CF13B9041275 431D69097A8C C1BB600EC74E 5120B912A2BA \
+ EEDF641754C0 8207001664B7 7FFFFF14453F 8000001294E6 \
+ 499C1B0EB3B2 52B73E0DBCA0 EFB2B20F5FD8 CE3CDB0FBE12 \
+ E4B49C0CEA2D 6344A80A5A7C 08C8FE0A1FFE 2BB9860B0A0E \
+ 51486F0E44E1 8BCC64113B05 B6F4EC0EEB36 4413170A5B48 \
+ """)
+ if sys.byteorder != 'big':
+ frames = audiotests.byteswap3(frames)
+
+
+class WavePCM32Test(WaveTest, unittest.TestCase):
+ sndfilename = 'pluck-pcm32.wav'
+ sndfilenframes = 3307
+ nchannels = 2
+ sampwidth = 4
+ framerate = 11025
+ nframes = 48
+ comptype = 'NONE'
+ compname = 'not compressed'
+ frames = audiotests.fromhex("""\
+ 022D65BCFFEB9D92 4B5A0F8000FA549C 3113C34004EE2BC0 80DCD680084303E0 \
+ CBDEC0C006B26140 48A9980003F2F8FC BFE8248001B07D92 036BFB60FE7B5D34 \
+ B8575600FA3EC920 B4B05500F3502BC0 29983000EBCB6240 1A5CA7A0E6D99A60 \
+ EDFA3E80E491BD40 C625EB80E27884A0 0E05A9A0E0B6CFE0 EF292940E0292280 \
+ 5758D800E2706700 FB3557D8E83E1640 1377BF00EF840280 D82C5B80F7272A80 \
+ 978F1600FB774560 F5F86510FC101364 086635A0FB9C4E20 DF30FC40FB40EE28 \
+ 117FE0A0FA3438B0 3EE6B840FB5AC3F0 BC77A380FCB2F454 66D6DA80FF5F32B4 \
+ CF13B980041275B0 431D6980097A8C00 C1BB60000EC74E00 5120B98012A2BAA0 \
+ EEDF64C01754C060 820700001664B780 7FFFFFFF14453F40 800000001294E6E0 \
+ 499C1B000EB3B270 52B73E000DBCA020 EFB2B2E00F5FD880 CE3CDB400FBE1270 \
+ E4B49CC00CEA2D90 6344A8800A5A7CA0 08C8FE800A1FFEE0 2BB986C00B0A0E00 \
+ 51486F800E44E190 8BCC6480113B0580 B6F4EC000EEB3630 441317800A5B48A0 \
+ """)
+ if sys.byteorder != 'big':
+ frames = audiotests.byteswap4(frames)
+
+ if sys.byteorder == 'big':
+ @unittest.expectedFailure
+ def test_unseekable_incompleted_write(self):
+ super().test_unseekable_incompleted_write()
-nchannels = 2
-sampwidth = 2
-framerate = 8000
-nframes = 100
-
-class TestWave(unittest.TestCase):
-
- def setUp(self):
- self.f = None
-
- def tearDown(self):
- if self.f is not None:
- self.f.close()
- try:
- os.remove(TESTFN)
- except OSError:
- pass
-
- def test_it(self):
- self.f = wave.open(TESTFN, 'wb')
- self.f.setnchannels(nchannels)
- self.f.setsampwidth(sampwidth)
- self.f.setframerate(framerate)
- self.f.setnframes(nframes)
- output = '\0' * nframes * nchannels * sampwidth
- self.f.writeframes(output)
- self.f.close()
-
- self.f = wave.open(TESTFN, 'rb')
- self.assertEqual(nchannels, self.f.getnchannels())
- self.assertEqual(sampwidth, self.f.getsampwidth())
- self.assertEqual(framerate, self.f.getframerate())
- self.assertEqual(nframes, self.f.getnframes())
- self.assertEqual(self.f.readframes(nframes), output)
def test_main():
- run_unittest(TestWave)
+ run_unittest(WavePCM8Test, WavePCM16Test, WavePCM24Test, WavePCM32Test)
if __name__ == '__main__':
test_main()
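The frame data in the new wave tests is a hex dump of little-endian samples, so on big-endian hosts it is byte-swapped before comparison. The helpers below show roughly what audiotests.fromhex and audiotests.byteswap2 are assumed to do; the real Lib/test/audiotests.py may differ in detail:

import array

def fromhex(s):
    # Collapse the whitespace-separated hex dump back into raw bytes.
    return ''.join(s.split()).decode('hex')      # Python 2 spelling

def byteswap2(data):
    # Reinterpret the bytes as 16-bit samples and swap their byte order,
    # converting little-endian reference frames for a big-endian host.
    a = array.array('h')
    a.fromstring(data)
    a.byteswap()
    return a.tostring()

assert byteswap2(fromhex('022E FFEA')) == fromhex('2E02 EAFF')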
diff --git a/Lib/test/test_weakref.py b/Lib/test/test_weakref.py
index bc2982f..40e1473 100644
--- a/Lib/test/test_weakref.py
+++ b/Lib/test/test_weakref.py
@@ -4,6 +4,8 @@ import unittest
import UserList
import weakref
import operator
+import contextlib
+import copy
from test import test_support
@@ -33,6 +35,27 @@ def create_unbound_method():
return C.method
+class Object:
+ def __init__(self, arg):
+ self.arg = arg
+ def __repr__(self):
+ return "<Object %r>" % self.arg
+ def __eq__(self, other):
+ if isinstance(other, Object):
+ return self.arg == other.arg
+ return NotImplemented
+ def __ne__(self, other):
+ if isinstance(other, Object):
+ return self.arg != other.arg
+ return NotImplemented
+ def __hash__(self):
+ return hash(self.arg)
+
+class RefCycle:
+ def __init__(self):
+ self.cycle = self
+
+
class TestBase(unittest.TestCase):
def setUp(self):
@@ -70,11 +93,9 @@ class ReferencesTestCase(TestBase):
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del o
- self.assertTrue(ref1() is None,
- "expected reference to be invalidated")
- self.assertTrue(ref2() is None,
- "expected reference to be invalidated")
- self.assertTrue(self.cbcalled == 2,
+ self.assertIsNone(ref1(), "expected reference to be invalidated")
+ self.assertIsNone(ref2(), "expected reference to be invalidated")
+ self.assertEqual(self.cbcalled, 2,
"callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
@@ -108,15 +129,15 @@ class ReferencesTestCase(TestBase):
self.assertRaises(weakref.ReferenceError, check, ref1)
self.assertRaises(weakref.ReferenceError, check, ref2)
self.assertRaises(weakref.ReferenceError, bool, weakref.proxy(C()))
- self.assertTrue(self.cbcalled == 2)
+ self.assertEqual(self.cbcalled, 2)
def check_basic_ref(self, factory):
o = factory()
ref = weakref.ref(o)
- self.assertTrue(ref() is not None,
+ self.assertIsNotNone(ref(),
"weak reference to live object should be live")
o2 = ref()
- self.assertTrue(o is o2,
+ self.assertIs(o, o2,
"<ref>() should return original object if live")
def check_basic_callback(self, factory):
@@ -124,9 +145,9 @@ class ReferencesTestCase(TestBase):
o = factory()
ref = weakref.ref(o, self.callback)
del o
- self.assertTrue(self.cbcalled == 1,
+ self.assertEqual(self.cbcalled, 1,
"callback did not properly set 'cbcalled'")
- self.assertTrue(ref() is None,
+ self.assertIsNone(ref(),
"ref2 should be dead after deleting object reference")
def test_ref_reuse(self):
@@ -136,19 +157,19 @@ class ReferencesTestCase(TestBase):
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
- self.assertTrue(ref1 is ref2,
+ self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
- self.assertTrue(ref1 is ref2,
+ self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
- self.assertTrue(weakref.getweakrefcount(o) == 2,
+ self.assertEqual(weakref.getweakrefcount(o), 2,
"wrong weak ref count for object")
del proxy
- self.assertTrue(weakref.getweakrefcount(o) == 1,
+ self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong weak ref count for object after deleting proxy")
def test_proxy_reuse(self):
@@ -156,7 +177,7 @@ class ReferencesTestCase(TestBase):
proxy1 = weakref.proxy(o)
ref = weakref.ref(o)
proxy2 = weakref.proxy(o)
- self.assertTrue(proxy1 is proxy2,
+ self.assertIs(proxy1, proxy2,
"proxy object w/out callback should have been re-used")
def test_basic_proxy(self):
@@ -238,19 +259,19 @@ class ReferencesTestCase(TestBase):
o = Object(1)
p1 = makeref(o, None)
p2 = makeref(o, None)
- self.assertTrue(p1 is p2, "both callbacks were None in the C API")
+ self.assertIs(p1, p2, "both callbacks were None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o, None)
- self.assertTrue(p1 is p2, "callbacks were NULL, None in the C API")
+ self.assertIs(p1, p2, "callbacks were NULL, None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o)
- self.assertTrue(p1 is p2, "both callbacks were NULL in the C API")
+ self.assertIs(p1, p2, "both callbacks were NULL in the C API")
del p1, p2
p1 = makeref(o, None)
p2 = makeref(o)
- self.assertTrue(p1 is p2, "callbacks were None, NULL in the C API")
+ self.assertIs(p1, p2, "callbacks were None, NULL in the C API")
def test_callable_proxy(self):
o = Callable()
@@ -258,13 +279,13 @@ class ReferencesTestCase(TestBase):
self.check_proxy(o, ref1)
- self.assertTrue(type(ref1) is weakref.CallableProxyType,
+ self.assertIs(type(ref1), weakref.CallableProxyType,
"proxy is not of callable type")
ref1('twinkies!')
- self.assertTrue(o.bar == 'twinkies!',
+ self.assertEqual(o.bar, 'twinkies!',
"call through proxy not passed through to original")
ref1(x='Splat.')
- self.assertTrue(o.bar == 'Splat.',
+ self.assertEqual(o.bar, 'Splat.',
"call through proxy not passed through to original")
# expect due to too few args
@@ -275,24 +296,23 @@ class ReferencesTestCase(TestBase):
def check_proxy(self, o, proxy):
o.foo = 1
- self.assertTrue(proxy.foo == 1,
+ self.assertEqual(proxy.foo, 1,
"proxy does not reflect attribute addition")
o.foo = 2
- self.assertTrue(proxy.foo == 2,
+ self.assertEqual(proxy.foo, 2,
"proxy does not reflect attribute modification")
del o.foo
- self.assertTrue(not hasattr(proxy, 'foo'),
+ self.assertFalse(hasattr(proxy, 'foo'),
"proxy does not reflect attribute removal")
proxy.foo = 1
- self.assertTrue(o.foo == 1,
+ self.assertEqual(o.foo, 1,
"object does not reflect attribute addition via proxy")
proxy.foo = 2
- self.assertTrue(
- o.foo == 2,
+ self.assertEqual(o.foo, 2,
"object does not reflect attribute modification via proxy")
del proxy.foo
- self.assertTrue(not hasattr(o, 'foo'),
+ self.assertFalse(hasattr(o, 'foo'),
"object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
@@ -316,21 +336,21 @@ class ReferencesTestCase(TestBase):
o = C()
ref1 = weakref.ref(o)
ref2 = weakref.ref(o, self.callback)
- self.assertTrue(weakref.getweakrefcount(o) == 2,
+ self.assertEqual(weakref.getweakrefcount(o), 2,
"got wrong number of weak reference objects")
proxy1 = weakref.proxy(o)
proxy2 = weakref.proxy(o, self.callback)
- self.assertTrue(weakref.getweakrefcount(o) == 4,
+ self.assertEqual(weakref.getweakrefcount(o), 4,
"got wrong number of weak reference objects")
del ref1, ref2, proxy1, proxy2
- self.assertTrue(weakref.getweakrefcount(o) == 0,
+ self.assertEqual(weakref.getweakrefcount(o), 0,
"weak reference objects not unlinked from"
" referent when discarded.")
# assumes ints do not support weakrefs
- self.assertTrue(weakref.getweakrefcount(1) == 0,
+ self.assertEqual(weakref.getweakrefcount(1), 0,
"got wrong number of weak reference objects for int")
def test_getweakrefs(self):
@@ -338,22 +358,22 @@ class ReferencesTestCase(TestBase):
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref1
- self.assertTrue(weakref.getweakrefs(o) == [ref2],
+ self.assertEqual(weakref.getweakrefs(o), [ref2],
"list of refs does not match")
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref2
- self.assertTrue(weakref.getweakrefs(o) == [ref1],
+ self.assertEqual(weakref.getweakrefs(o), [ref1],
"list of refs does not match")
del ref1
- self.assertTrue(weakref.getweakrefs(o) == [],
+ self.assertEqual(weakref.getweakrefs(o), [],
"list of refs not cleared")
# assumes ints do not support weakrefs
- self.assertTrue(weakref.getweakrefs(1) == [],
+ self.assertEqual(weakref.getweakrefs(1), [],
"list of refs does not match for int")
def test_newstyle_number_ops(self):
@@ -361,8 +381,8 @@ class ReferencesTestCase(TestBase):
pass
f = F(2.0)
p = weakref.proxy(f)
- self.assertTrue(p + 1.0 == 3.0)
- self.assertTrue(1.0 + p == 3.0) # this used to SEGV
+ self.assertEqual(p + 1.0, 3.0)
+ self.assertEqual(1.0 + p, 3.0) # this used to SEGV
def test_callbacks_protected(self):
# Callbacks protected from already-set exceptions?
@@ -615,7 +635,7 @@ class ReferencesTestCase(TestBase):
c.wr = weakref.ref(d, callback) # this won't trigger
d.wr = weakref.ref(callback, d.cb) # ditto
external_wr = weakref.ref(callback, safe_callback) # but this will
- self.assertTrue(external_wr() is callback)
+ self.assertIs(external_wr(), callback)
# The weakrefs attached to c and d should get cleared, so that
# C.cb is never called. But external_wr isn't part of the cyclic
@@ -705,6 +725,75 @@ class ReferencesTestCase(TestBase):
self.assertEqual(b(), None)
self.assertEqual(l, [a, b])
+ def test_equality(self):
+ # Alive weakrefs defer equality testing to their underlying object.
+ x = Object(1)
+ y = Object(1)
+ z = Object(2)
+ a = weakref.ref(x)
+ b = weakref.ref(y)
+ c = weakref.ref(z)
+ d = weakref.ref(x)
+ # Note how we directly test the operators here, to stress both
+ # __eq__ and __ne__.
+ self.assertTrue(a == b)
+ self.assertFalse(a != b)
+ self.assertFalse(a == c)
+ self.assertTrue(a != c)
+ self.assertTrue(a == d)
+ self.assertFalse(a != d)
+ del x, y, z
+ gc.collect()
+ for r in a, b, c:
+ # Sanity check
+ self.assertIs(r(), None)
+ # Dead weakrefs compare by identity: whether `a` and `d` are the
+ # same weakref object is an implementation detail, since they pointed
+ # to the same original object and didn't have a callback.
+ # (see issue #16453).
+ self.assertFalse(a == b)
+ self.assertTrue(a != b)
+ self.assertFalse(a == c)
+ self.assertTrue(a != c)
+ self.assertEqual(a == d, a is d)
+ self.assertEqual(a != d, a is not d)
+
+ def test_hashing(self):
+ # Alive weakrefs hash the same as the underlying object
+ x = Object(42)
+ y = Object(42)
+ a = weakref.ref(x)
+ b = weakref.ref(y)
+ self.assertEqual(hash(a), hash(42))
+ del x, y
+ gc.collect()
+ # Dead weakrefs:
+ # - retain their hash if they were hashed when alive;
+ # - otherwise, cannot be hashed.
+ self.assertEqual(hash(a), hash(42))
+ self.assertRaises(TypeError, hash, b)
+
+ def test_trashcan_16602(self):
+ # Issue #16602: when a weakref's target was part of a long
+ # deallocation chain, the trashcan mechanism could delay clearing
+ # of the weakref and make the target object visible from outside
+ # code even though its refcount had dropped to 0. A crash ensued.
+ class C(object):
+ def __init__(self, parent):
+ if not parent:
+ return
+ wself = weakref.ref(self)
+ def cb(wparent):
+ o = wself()
+ self.wparent = weakref.ref(parent, cb)
+
+ d = weakref.WeakKeyDictionary()
+ root = c = C(None)
+ for n in range(100):
+ d[c] = c = C(c)
+ del root
+ gc.collect()
+
class SubclassableWeakrefTestCase(TestBase):
@@ -718,11 +807,11 @@ class SubclassableWeakrefTestCase(TestBase):
return super(MyRef, self).__call__()
o = Object("foo")
mr = MyRef(o, value=24)
- self.assertTrue(mr() is o)
+ self.assertIs(mr(), o)
self.assertTrue(mr.called)
self.assertEqual(mr.value, 24)
del o
- self.assertTrue(mr() is None)
+ self.assertIsNone(mr())
self.assertTrue(mr.called)
def test_subclass_refs_dont_replace_standard_refs(self):
@@ -731,14 +820,14 @@ class SubclassableWeakrefTestCase(TestBase):
o = Object(42)
r1 = MyRef(o)
r2 = weakref.ref(o)
- self.assertTrue(r1 is not r2)
+ self.assertIsNot(r1, r2)
self.assertEqual(weakref.getweakrefs(o), [r2, r1])
self.assertEqual(weakref.getweakrefcount(o), 2)
r3 = MyRef(o)
self.assertEqual(weakref.getweakrefcount(o), 3)
refs = weakref.getweakrefs(o)
self.assertEqual(len(refs), 3)
- self.assertTrue(r2 is refs[0])
+ self.assertIs(r2, refs[0])
self.assertIn(r1, refs[1:])
self.assertIn(r3, refs[1:])
@@ -748,7 +837,7 @@ class SubclassableWeakrefTestCase(TestBase):
o = Object(42)
r1 = MyRef(o, id)
r2 = MyRef(o, str)
- self.assertTrue(r1 is not r2)
+ self.assertIsNot(r1, r2)
refs = weakref.getweakrefs(o)
self.assertIn(r1, refs)
self.assertIn(r2, refs)
@@ -809,40 +898,94 @@ class SubclassableWeakrefTestCase(TestBase):
self.assertEqual(self.cbcalled, 0)
-class Object:
- def __init__(self, arg):
- self.arg = arg
- def __repr__(self):
- return "<Object %r>" % self.arg
-
-
class MappingTestCase(TestBase):
COUNT = 10
+ def check_len_cycles(self, dict_type, cons):
+ N = 20
+ items = [RefCycle() for i in range(N)]
+ dct = dict_type(cons(i, o) for i, o in enumerate(items))
+ # Keep an iterator alive
+ it = dct.iteritems()
+ try:
+ next(it)
+ except StopIteration:
+ pass
+ del items
+ gc.collect()
+ n1 = len(dct)
+ list(it)
+ del it
+ gc.collect()
+ n2 = len(dct)
+ # iteration should prevent garbage collection here
+ # Note that this is a test on an implementation detail. The requirement
+ # is only to provide stable iteration, not that the size of the container
+ # stay fixed.
+ self.assertEqual(n1, 20)
+ #self.assertIn(n1, (0, 1))
+ self.assertEqual(n2, 0)
+
+ def test_weak_keyed_len_cycles(self):
+ self.check_len_cycles(weakref.WeakKeyDictionary, lambda n, k: (k, n))
+
+ def test_weak_valued_len_cycles(self):
+ self.check_len_cycles(weakref.WeakValueDictionary, lambda n, k: (n, k))
+
+ def check_len_race(self, dict_type, cons):
+ # Extended sanity checks for len() in the face of cyclic collection
+ self.addCleanup(gc.set_threshold, *gc.get_threshold())
+ for th in range(1, 100):
+ N = 20
+ gc.collect(0)
+ gc.set_threshold(th, th, th)
+ items = [RefCycle() for i in range(N)]
+ dct = dict_type(cons(o) for o in items)
+ del items
+ # All items will be collected at next garbage collection pass
+ it = dct.iteritems()
+ try:
+ next(it)
+ except StopIteration:
+ pass
+ n1 = len(dct)
+ del it
+ n2 = len(dct)
+ self.assertGreaterEqual(n1, 0)
+ self.assertLessEqual(n1, N)
+ self.assertGreaterEqual(n2, 0)
+ self.assertLessEqual(n2, n1)
+
+ def test_weak_keyed_len_race(self):
+ self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
+
+ def test_weak_valued_len_race(self):
+ self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
+
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
- self.assertTrue(weakref.getweakrefcount(o) == 1,
+ self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
- self.assertTrue(o is dict[o.arg],
+ self.assertIs(o, dict[o.arg],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
items1.sort()
items2.sort()
- self.assertTrue(items1 == items2,
+ self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
- self.assertTrue(len(dict) == self.COUNT)
+ self.assertEqual(len(dict), self.COUNT)
del objects[0]
- self.assertTrue(len(dict) == (self.COUNT - 1),
+ self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
- self.assertTrue(len(dict) == 0,
+ self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
@@ -857,21 +1000,21 @@ class MappingTestCase(TestBase):
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
- self.assertTrue(weakref.getweakrefcount(o) == 1,
+ self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
- self.assertTrue(o.arg is dict[o],
+ self.assertIs(o.arg, dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
- self.assertTrue(set(items1) == set(items2),
+ self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
- self.assertTrue(len(dict) == self.COUNT)
+ self.assertEqual(len(dict), self.COUNT)
del objects[0]
- self.assertTrue(len(dict) == (self.COUNT - 1),
+ self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
- self.assertTrue(len(dict) == 0,
+ self.assertEqual(len(dict), 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
@@ -933,37 +1076,117 @@ class MappingTestCase(TestBase):
items = dict.items()
for item in dict.iteritems():
items.remove(item)
- self.assertTrue(len(items) == 0, "iteritems() did not touch all items")
+ self.assertEqual(len(items), 0, "iteritems() did not touch all items")
# key iterator, via __iter__():
keys = dict.keys()
for k in dict:
keys.remove(k)
- self.assertTrue(len(keys) == 0, "__iter__() did not touch all keys")
+ self.assertEqual(len(keys), 0, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = dict.keys()
for k in dict.iterkeys():
keys.remove(k)
- self.assertTrue(len(keys) == 0, "iterkeys() did not touch all keys")
+ self.assertEqual(len(keys), 0, "iterkeys() did not touch all keys")
# value iterator:
values = dict.values()
for v in dict.itervalues():
values.remove(v)
- self.assertTrue(len(values) == 0,
+ self.assertEqual(len(values), 0,
"itervalues() did not touch all values")
+ def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
+ n = len(dict)
+ it = iter(getattr(dict, iter_name)())
+ next(it) # Trigger internal iteration
+ # Destroy an object
+ del objects[-1]
+ gc.collect() # just in case
+ # We have removed either the first consumed object, or another one
+ self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
+ del it
+ # The removal has been committed
+ self.assertEqual(len(dict), n - 1)
+
+ def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
+ # Check that we can explicitly mutate the weak dict without
+ # interfering with delayed removal.
+ # `testcontext` should create an iterator, destroy one of the
+ # weakref'ed objects and then return a new key/value pair corresponding
+ # to the destroyed object.
+ with testcontext() as (k, v):
+ self.assertFalse(k in dict)
+ with testcontext() as (k, v):
+ self.assertRaises(KeyError, dict.__delitem__, k)
+ self.assertFalse(k in dict)
+ with testcontext() as (k, v):
+ self.assertRaises(KeyError, dict.pop, k)
+ self.assertFalse(k in dict)
+ with testcontext() as (k, v):
+ dict[k] = v
+ self.assertEqual(dict[k], v)
+ ddict = copy.copy(dict)
+ with testcontext() as (k, v):
+ dict.update(ddict)
+ self.assertEqual(dict, ddict)
+ with testcontext() as (k, v):
+ dict.clear()
+ self.assertEqual(len(dict), 0)
+
+ def test_weak_keys_destroy_while_iterating(self):
+ # Issue #7105: iterators shouldn't crash when a key is implicitly removed
+ dict, objects = self.make_weak_keyed_dict()
+ self.check_weak_destroy_while_iterating(dict, objects, 'iterkeys')
+ self.check_weak_destroy_while_iterating(dict, objects, 'iteritems')
+ self.check_weak_destroy_while_iterating(dict, objects, 'itervalues')
+ self.check_weak_destroy_while_iterating(dict, objects, 'iterkeyrefs')
+ dict, objects = self.make_weak_keyed_dict()
+ @contextlib.contextmanager
+ def testcontext():
+ try:
+ it = iter(dict.iteritems())
+ next(it)
+ # Schedule a key/value for removal and recreate it
+ v = objects.pop().arg
+ gc.collect() # just in case
+ yield Object(v), v
+ finally:
+ it = None # should commit all removals
+ self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
+
+ def test_weak_values_destroy_while_iterating(self):
+ # Issue #7105: iterators shouldn't crash when a key is implicitly removed
+ dict, objects = self.make_weak_valued_dict()
+ self.check_weak_destroy_while_iterating(dict, objects, 'iterkeys')
+ self.check_weak_destroy_while_iterating(dict, objects, 'iteritems')
+ self.check_weak_destroy_while_iterating(dict, objects, 'itervalues')
+ self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
+ dict, objects = self.make_weak_valued_dict()
+ @contextlib.contextmanager
+ def testcontext():
+ try:
+ it = iter(dict.iteritems())
+ next(it)
+ # Schedule a key/value for removal and recreate it
+ k = objects.pop().arg
+ gc.collect() # just in case
+ yield k, Object(k)
+ finally:
+ it = None # should commit all removals
+ self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
+
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
- self.assertTrue(dict[o] == 364)
+ self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
- self.assertTrue(dict[o] == 364)
+ self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
@@ -983,19 +1206,19 @@ class MappingTestCase(TestBase):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
- self.assertTrue(len(weakdict) == 2)
+ self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
- self.assertTrue(len(weakdict) == 1)
+ self.assertEqual(len(weakdict), 1)
if k is key1:
- self.assertTrue(v is value1)
+ self.assertIs(v, value1)
else:
- self.assertTrue(v is value2)
+ self.assertIs(v, value2)
k, v = weakdict.popitem()
- self.assertTrue(len(weakdict) == 0)
+ self.assertEqual(len(weakdict), 0)
if k is key1:
- self.assertTrue(v is value1)
+ self.assertIs(v, value1)
else:
- self.assertTrue(v is value2)
+ self.assertIs(v, value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
@@ -1006,7 +1229,7 @@ class MappingTestCase(TestBase):
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
- self.assertTrue(value1 is not value2,
+ self.assertIsNot(value1, value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
@@ -1065,10 +1288,10 @@ class MappingTestCase(TestBase):
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
- self.assertTrue(len(d) == 2)
+ self.assertEqual(len(d), 2)
del d[o1]
- self.assertTrue(len(d) == 1)
- self.assertTrue(d.keys() == [o2])
+ self.assertEqual(len(d), 1)
+ self.assertEqual(d.keys(), [o2])
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
@@ -1076,10 +1299,10 @@ class MappingTestCase(TestBase):
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
- self.assertTrue(len(d) == 2)
+ self.assertEqual(len(d), 2)
del d['something']
- self.assertTrue(len(d) == 1)
- self.assertTrue(d.items() == [('something else', o2)])
+ self.assertEqual(len(d), 1)
+ self.assertEqual(d.items(), [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
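The new test_equality and test_hashing cases encode a small contract: live weakrefs delegate == and hash() to their referent, while dead weakrefs compare by identity and keep a hash only if it was computed while the referent was still alive. A compact illustration, relying on CPython's immediate refcount-driven collection:

import weakref

class Obj(object):
    def __init__(self, v):
        self.v = v
    def __eq__(self, other):
        return isinstance(other, Obj) and self.v == other.v
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        return hash(self.v)

x, y = Obj(1), Obj(1)
a, b = weakref.ref(x), weakref.ref(y)
assert a == b                   # live refs compare via their referents
h = hash(a)                     # hashing now caches the referent's hash
del x, y                        # CPython drops both objects immediately
assert a() is None and b() is None
assert (a == b) == (a is b)     # dead refs fall back to identity
assert hash(a) == h == hash(1)  # the cached hash outlives the referent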
diff --git a/Lib/test/test_weakset.py b/Lib/test/test_weakset.py
index 89c2822..d735d9c 100644
--- a/Lib/test/test_weakset.py
+++ b/Lib/test/test_weakset.py
@@ -11,6 +11,7 @@ import warnings
import collections
import gc
import contextlib
+from UserString import UserString as ustr
class Foo:
@@ -30,6 +31,10 @@ class SomeClass(object):
def __hash__(self):
return hash((SomeClass, self.value))
+class RefCycle(object):
+ def __init__(self):
+ self.cycle = self
+
class TestWeakSet(unittest.TestCase):
def setUp(self):
@@ -37,6 +42,12 @@ class TestWeakSet(unittest.TestCase):
self.items = [SomeClass(c) for c in ('a', 'b', 'c')]
self.items2 = [SomeClass(c) for c in ('x', 'y', 'z')]
self.letters = [SomeClass(c) for c in string.ascii_letters]
+ self.ab_items = [SomeClass(c) for c in 'ab']
+ self.abcde_items = [SomeClass(c) for c in 'abcde']
+ self.def_items = [SomeClass(c) for c in 'def']
+ self.ab_weakset = WeakSet(self.ab_items)
+ self.abcde_weakset = WeakSet(self.abcde_items)
+ self.def_weakset = WeakSet(self.def_items)
self.s = WeakSet(self.items)
self.d = dict.fromkeys(self.items)
self.obj = SomeClass('F')
@@ -79,6 +90,11 @@ class TestWeakSet(unittest.TestCase):
x = WeakSet(self.items + self.items2)
c = C(self.items2)
self.assertEqual(self.s.union(c), x)
+ del c
+ self.assertEqual(len(u), len(self.items) + len(self.items2))
+ self.items2.pop()
+ gc.collect()
+ self.assertEqual(len(u), len(self.items) + len(self.items2))
def test_or(self):
i = self.s.union(self.items2)
@@ -86,14 +102,19 @@ class TestWeakSet(unittest.TestCase):
self.assertEqual(self.s | frozenset(self.items2), i)
def test_intersection(self):
- i = self.s.intersection(self.items2)
+ s = WeakSet(self.letters)
+ i = s.intersection(self.items2)
for c in self.letters:
- self.assertEqual(c in i, c in self.d and c in self.items2)
- self.assertEqual(self.s, WeakSet(self.items))
+ self.assertEqual(c in i, c in self.items2 and c in self.letters)
+ self.assertEqual(s, WeakSet(self.letters))
self.assertEqual(type(i), WeakSet)
for C in set, frozenset, dict.fromkeys, list, tuple:
x = WeakSet([])
- self.assertEqual(self.s.intersection(C(self.items2)), x)
+ self.assertEqual(i.intersection(C(self.items)), x)
+ self.assertEqual(len(i), len(self.items2))
+ self.items2.pop()
+ gc.collect()
+ self.assertEqual(len(i), len(self.items2))
def test_isdisjoint(self):
self.assertTrue(self.s.isdisjoint(WeakSet(self.items2)))
@@ -124,6 +145,10 @@ class TestWeakSet(unittest.TestCase):
self.assertEqual(self.s, WeakSet(self.items))
self.assertEqual(type(i), WeakSet)
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
+ self.assertEqual(len(i), len(self.items) + len(self.items2))
+ self.items2.pop()
+ gc.collect()
+ self.assertEqual(len(i), len(self.items) + len(self.items2))
def test_xor(self):
i = self.s.symmetric_difference(self.items2)
@@ -131,22 +156,28 @@ class TestWeakSet(unittest.TestCase):
self.assertEqual(self.s ^ frozenset(self.items2), i)
def test_sub_and_super(self):
- pl, ql, rl = map(lambda s: [SomeClass(c) for c in s], ['ab', 'abcde', 'def'])
- p, q, r = map(WeakSet, (pl, ql, rl))
- self.assertTrue(p < q)
- self.assertTrue(p <= q)
- self.assertTrue(q <= q)
- self.assertTrue(q > p)
- self.assertTrue(q >= p)
- self.assertFalse(q < r)
- self.assertFalse(q <= r)
- self.assertFalse(q > r)
- self.assertFalse(q >= r)
+ self.assertTrue(self.ab_weakset <= self.abcde_weakset)
+ self.assertTrue(self.abcde_weakset <= self.abcde_weakset)
+ self.assertTrue(self.abcde_weakset >= self.ab_weakset)
+ self.assertFalse(self.abcde_weakset <= self.def_weakset)
+ self.assertFalse(self.abcde_weakset >= self.def_weakset)
self.assertTrue(set('a').issubset('abc'))
self.assertTrue(set('abc').issuperset('a'))
self.assertFalse(set('a').issubset('cbs'))
self.assertFalse(set('cbs').issuperset('a'))
+ def test_lt(self):
+ self.assertTrue(self.ab_weakset < self.abcde_weakset)
+ self.assertFalse(self.abcde_weakset < self.def_weakset)
+ self.assertFalse(self.ab_weakset < self.ab_weakset)
+ self.assertFalse(WeakSet() < WeakSet())
+
+ def test_gt(self):
+ self.assertTrue(self.abcde_weakset > self.ab_weakset)
+ self.assertFalse(self.abcde_weakset > self.def_weakset)
+ self.assertFalse(self.ab_weakset > self.ab_weakset)
+ self.assertFalse(WeakSet() > WeakSet())
+
def test_gc(self):
# Create a nest of cycles to exercise overall ref count check
s = WeakSet(Foo() for i in range(1000))
@@ -321,6 +352,12 @@ class TestWeakSet(unittest.TestCase):
self.assertFalse(self.s == tuple(self.items))
self.assertFalse(self.s == 1)
+ def test_ne(self):
+ self.assertTrue(self.s != set(self.items))
+ s1 = WeakSet()
+ s2 = WeakSet()
+ self.assertFalse(s1 != s2)
+
def test_weak_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
# Create new items to be sure no-one else holds a reference
@@ -369,6 +406,102 @@ class TestWeakSet(unittest.TestCase):
s.clear()
self.assertEqual(len(s), 0)
+ def test_len_cycles(self):
+ N = 20
+ items = [RefCycle() for i in range(N)]
+ s = WeakSet(items)
+ del items
+ it = iter(s)
+ try:
+ next(it)
+ except StopIteration:
+ pass
+ gc.collect()
+ n1 = len(s)
+ del it
+ gc.collect()
+ n2 = len(s)
+ # one item may be kept alive inside the iterator
+ self.assertIn(n1, (0, 1))
+ self.assertEqual(n2, 0)
+
+ def test_len_race(self):
+ # Extended sanity checks for len() in the face of cyclic collection
+ self.addCleanup(gc.set_threshold, *gc.get_threshold())
+ for th in range(1, 100):
+ N = 20
+ gc.collect(0)
+ gc.set_threshold(th, th, th)
+ items = [RefCycle() for i in range(N)]
+ s = WeakSet(items)
+ del items
+ # All items will be collected at next garbage collection pass
+ it = iter(s)
+ try:
+ next(it)
+ except StopIteration:
+ pass
+ n1 = len(s)
+ del it
+ n2 = len(s)
+ self.assertGreaterEqual(n1, 0)
+ self.assertLessEqual(n1, N)
+ self.assertGreaterEqual(n2, 0)
+ self.assertLessEqual(n2, n1)
+
+ def test_weak_destroy_while_iterating(self):
+ # Issue #7105: iterators shouldn't crash when a key is implicitly removed
+ # Create new items to be sure no-one else holds a reference
+ items = [ustr(c) for c in ('a', 'b', 'c')]
+ s = WeakSet(items)
+ it = iter(s)
+ next(it) # Trigger internal iteration
+ # Destroy an item
+ del items[-1]
+ gc.collect() # just in case
+ # We have removed either the first consumed item, or another one
+ self.assertIn(len(list(it)), [len(items), len(items) - 1])
+ del it
+ # The removal has been committed
+ self.assertEqual(len(s), len(items))
+
+ def test_weak_destroy_and_mutate_while_iterating(self):
+ # Issue #7105: iterators shouldn't crash when a key is implicitly removed
+ items = [ustr(c) for c in string.ascii_letters]
+ s = WeakSet(items)
+ @contextlib.contextmanager
+ def testcontext():
+ try:
+ it = iter(s)
+ # Start iterator
+ yielded = ustr(str(next(it)))
+ # Schedule an item for removal and recreate it
+ u = ustr(str(items.pop()))
+ if yielded == u:
+ # The iterator still has a reference to the removed item,
+ # advance it (issue #20006).
+ next(it)
+ gc.collect() # just in case
+ yield u
+ finally:
+ it = None # should commit all removals
+
+ with testcontext() as u:
+ self.assertFalse(u in s)
+ with testcontext() as u:
+ self.assertRaises(KeyError, s.remove, u)
+ self.assertFalse(u in s)
+ with testcontext() as u:
+ s.add(u)
+ self.assertTrue(u in s)
+ t = s.copy()
+ with testcontext() as u:
+ s.update(t)
+ self.assertEqual(len(s), len(t))
+ with testcontext() as u:
+ s.clear()
+ self.assertEqual(len(s), 0)
+
def test_main(verbose=None):
test_support.run_unittest(TestWeakSet)
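Several of the new WeakSet and weak-dictionary cases lean on the same implementation detail: while an iterator is alive, removals triggered by garbage collection are deferred, so the size reported during iteration is not guaranteed; the only promise is stable iteration and that the container settles once the iterators are gone. A small demonstration of that window, assuming CPython's cyclic collector:

import gc, weakref

class RefCycle(object):
    def __init__(self):
        self.cycle = self          # only the cyclic collector can reclaim it

s = weakref.WeakSet(RefCycle() for _ in range(5))
it = iter(s)
try:
    next(it)                       # start iterating: removals are now deferred
except StopIteration:
    pass
gc.collect()                       # every element is collectable garbage
size_during_iteration = len(s)     # implementation-defined at this point
del it                             # dropping the iterator commits removals
gc.collect()
assert 0 <= size_during_iteration <= 5
assert len(s) == 0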
diff --git a/Lib/test/test_whichdb.py b/Lib/test/test_whichdb.py
index 427e323..91ab7f9 100644
--- a/Lib/test/test_whichdb.py
+++ b/Lib/test/test_whichdb.py
@@ -1,4 +1,3 @@
-#! /usr/bin/env python
"""Test script for the whichdb module
based on test_anydbm.py
"""
diff --git a/Lib/test/test_winreg.py b/Lib/test/test_winreg.py
index 12ab3ba..260c224 100644
--- a/Lib/test/test_winreg.py
+++ b/Lib/test/test_winreg.py
@@ -1,7 +1,7 @@
# Test the windows specific win32reg module.
# Only win32reg functions not hit here: FlushKey, LoadKey and SaveKey
-import os, sys
+import os, sys, errno
import unittest
from test import test_support
threading = test_support.import_module("threading")
@@ -28,9 +28,12 @@ WIN64_MACHINE = True if machine() == "AMD64" else False
# tests are only valid up until 6.1
HAS_REFLECTION = True if WIN_VER < (6, 1) else False
-test_key_name = "SOFTWARE\\Python Registry Test Key - Delete Me"
+# Use a per-process key to prevent concurrent test runs (buildbot!) from
+# stomping on each other.
+test_key_base = "Python Test Key [%d] - Delete Me" % (os.getpid(),)
+test_key_name = "SOFTWARE\\" + test_key_base
# On OS'es that support reflection we should test with a reflected key
-test_reflect_key_name = "SOFTWARE\\Classes\\Python Test Key - Delete Me"
+test_reflect_key_name = "SOFTWARE\\Classes\\" + test_key_base
test_data = [
("Int Value", 45, REG_DWORD),
@@ -234,7 +237,7 @@ class LocalWinregTests(BaseWinregTests):
def test_changing_value(self):
# Issue2810: A race condition in 2.6 and 3.1 may cause
- # EnumValue or QueryValue to throw "WindowsError: More data is
+ # EnumValue or QueryValue to raise "WindowsError: More data is
# available"
done = False
@@ -267,7 +270,7 @@ class LocalWinregTests(BaseWinregTests):
def test_long_key(self):
# Issue2810, in 2.6 and 3.1 when the key name was exactly 256
- # characters, EnumKey threw "WindowsError: More data is
+ # characters, EnumKey raised "WindowsError: More data is
# available"
name = 'x'*256
try:
@@ -282,8 +285,14 @@ class LocalWinregTests(BaseWinregTests):
def test_dynamic_key(self):
# Issue2810, when the value is dynamically generated, these
- # throw "WindowsError: More data is available" in 2.6 and 3.1
- EnumValue(HKEY_PERFORMANCE_DATA, 0)
+ # raise "WindowsError: More data is available" in 2.6 and 3.1
+ try:
+ EnumValue(HKEY_PERFORMANCE_DATA, 0)
+ except OSError as e:
+ if e.errno in (errno.EPERM, errno.EACCES):
+ self.skipTest("access denied to registry key "
+ "(are you running in a non-interactive session?)")
+ raise
QueryValueEx(HKEY_PERFORMANCE_DATA, None)
# Reflection requires XP x64/Vista at a minimum. XP doesn't have this stuff
@@ -308,6 +317,44 @@ class LocalWinregTests(BaseWinregTests):
finally:
DeleteKey(HKEY_CURRENT_USER, test_key_name)
+ def test_setvalueex_value_range(self):
+ # Test for Issue #14420, accept proper ranges for SetValueEx.
+ # Py2Reg, which gets called by SetValueEx, was using PyLong_AsLong,
+ # thus raising OverflowError. The implementation now uses
+ # PyLong_AsUnsignedLong to match DWORD's size.
+ try:
+ with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck:
+ self.assertNotEqual(ck.handle, 0)
+ SetValueEx(ck, "test_name", None, REG_DWORD, 0x80000000)
+ finally:
+ DeleteKey(HKEY_CURRENT_USER, test_key_name)
+
+ def test_setvalueex_with_memoryview(self):
+ try:
+ with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck:
+ self.assertNotEqual(ck.handle, 0)
+ with self.assertRaises(TypeError):
+ SetValueEx(ck, "test_name", None, REG_BINARY, memoryview('val'))
+ finally:
+ DeleteKey(HKEY_CURRENT_USER, test_key_name)
+
+ def test_queryvalueex_return_value(self):
+ # Test for Issue #16759, return unsigned int from QueryValueEx.
+ # Reg2Py, which gets called by QueryValueEx, was returning a value
+ # generated by PyLong_FromLong. The implementation now uses
+ # PyLong_FromUnsignedLong to match DWORD's size.
+ try:
+ with CreateKey(HKEY_CURRENT_USER, test_key_name) as ck:
+ self.assertNotEqual(ck.handle, 0)
+ test_val = 0x80000000
+ SetValueEx(ck, "test_name", None, REG_DWORD, test_val)
+ ret_val, ret_type = QueryValueEx(ck, "test_name")
+ self.assertEqual(ret_type, REG_DWORD)
+ self.assertEqual(ret_val, test_val)
+ finally:
+ DeleteKey(HKEY_CURRENT_USER, test_key_name)
+
+
@unittest.skipUnless(REMOTE_NAME, "Skipping remote registry tests")
class RemoteWinregTests(BaseWinregTests):
@@ -404,6 +451,11 @@ class Win64WinregTests(BaseWinregTests):
DeleteKeyEx(HKEY_CURRENT_USER, test_reflect_key_name,
KEY_WOW64_32KEY, 0)
+ def test_exception_numbers(self):
+ with self.assertRaises(WindowsError) as ctx:
+ QueryValue(HKEY_CLASSES_ROOT, 'some_value_that_does_not_exist')
+
+ self.assertEqual(ctx.exception.errno, 2)
def test_main():
test_support.run_unittest(LocalWinregTests, RemoteWinregTests,
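The SetValueEx/QueryValueEx range tests above come down to DWORD being an unsigned 32-bit quantity: 0x80000000 is a perfectly good registry value, yet it overflows the signed conversion (PyLong_AsLong) that Py2Reg previously used on 32-bit longs. The arithmetic at the boundary, with no registry access needed:

test_val = 0x80000000
assert test_val == 2 ** 31          # smallest value outside the signed 32-bit range
assert test_val > 2 ** 31 - 1       # overflows a signed conversion on 32-bit longs
assert test_val <= 2 ** 32 - 1      # but still fits an unsigned DWORD
# The same bit pattern read back as signed is -2147483648, which is the
# kind of sign confusion the unsigned conversions avoid.
assert test_val - 2 ** 32 == -2147483648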
diff --git a/Lib/test/test_winsound.py b/Lib/test/test_winsound.py
index 821d684..4208fe7 100644
--- a/Lib/test/test_winsound.py
+++ b/Lib/test/test_winsound.py
@@ -2,6 +2,7 @@
import unittest
from test import test_support
+test_support.requires('audio')
import time
import os
import subprocess
@@ -158,18 +159,15 @@ class PlaySoundTest(unittest.TestCase):
)
def test_alias_fallback(self):
- # This test can't be expected to work on all systems. The MS
- # PlaySound() docs say:
- #
- # If it cannot find the specified sound, PlaySound uses the
- # default system event sound entry instead. If the function
- # can find neither the system default entry nor the default
- # sound, it makes no sound and returns FALSE.
- #
- # It's known to return FALSE on some real systems.
-
- # winsound.PlaySound('!"$%&/(#+*', winsound.SND_ALIAS)
- return
+ # In the absence of the ability to tell if a sound was actually
+ # played, this test has two acceptable outcomes: success (no error,
+ # sound was theoretically played; although as issue #19987 shows
+ # a box without a soundcard can "succeed") or RuntimeError. Any
+ # other error is a failure.
+ try:
+ winsound.PlaySound('!"$%&/(#+*', winsound.SND_ALIAS)
+ except RuntimeError:
+ pass
def test_alias_nofallback(self):
if _have_soundcard():
diff --git a/Lib/test/test_with.py b/Lib/test/test_with.py
index 4964518..e7dd001 100644
--- a/Lib/test/test_with.py
+++ b/Lib/test/test_with.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
"""Unit tests for the with statement specified in PEP 343."""
diff --git a/Lib/test/test_wsgiref.py b/Lib/test/test_wsgiref.py
index 45ca620..401d784 100644
--- a/Lib/test/test_wsgiref.py
+++ b/Lib/test/test_wsgiref.py
@@ -39,9 +39,6 @@ class MockHandler(WSGIRequestHandler):
pass
-
-
-
def hello_app(environ,start_response):
start_response("200 OK", [
('Content-Type','text/plain'),
@@ -62,27 +59,6 @@ def run_amock(app=hello_app, data="GET / HTTP/1.0\n\n"):
return out.getvalue(), err.getvalue()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
def compare_generic_iter(make_it,match):
"""Utility to compare a generic 2.1/2.2+ iterator with an iterable
@@ -120,10 +96,6 @@ def compare_generic_iter(make_it,match):
raise AssertionError("Too many items from .next()",it)
-
-
-
-
class IntegrationTests(TestCase):
def check_hello(self, out, has_length=True):
@@ -161,10 +133,6 @@ class IntegrationTests(TestCase):
)
-
-
-
-
class UtilityTests(TestCase):
def checkShift(self,sn_in,pi_in,part,sn_out,pi_out):
@@ -187,7 +155,7 @@ class UtilityTests(TestCase):
# Check existing value
env = {key:alt}
util.setup_testing_defaults(env)
- self.assertTrue(env[key] is alt)
+ self.assertIs(env[key], alt)
def checkCrossDefault(self,key,value,**kw):
util.setup_testing_defaults(kw)
@@ -201,11 +169,6 @@ class UtilityTests(TestCase):
util.setup_testing_defaults(kw)
self.assertEqual(util.request_uri(kw,query),uri)
-
-
-
-
-
def checkFW(self,text,size,match):
def make_it(text=text,size=size):
@@ -224,7 +187,6 @@ class UtilityTests(TestCase):
it.close()
self.assertTrue(it.filelike.closed)
-
def testSimpleShifts(self):
self.checkShift('','/', '', '/', '')
self.checkShift('','/x', 'x', '/x', '')
@@ -232,7 +194,6 @@ class UtilityTests(TestCase):
self.checkShift('/a','/x/y', 'x', '/a/x', '/y')
self.checkShift('/a','/x/', 'x', '/a/x', '/')
-
def testNormalizedShifts(self):
self.checkShift('/a/b', '/../y', '..', '/a', '/y')
self.checkShift('', '/../y', '..', '', '/y')
@@ -246,7 +207,6 @@ class UtilityTests(TestCase):
self.checkShift('/a/b', '/x//', 'x', '/a/b/x', '/')
self.checkShift('/a/b', '/.', None, '/a/b', '')
-
def testDefaults(self):
for key, value in [
('SERVER_NAME','127.0.0.1'),
@@ -266,7 +226,6 @@ class UtilityTests(TestCase):
]:
self.checkDefault(key,value)
-
def testCrossDefaults(self):
self.checkCrossDefault('HTTP_HOST',"foo.bar",SERVER_NAME="foo.bar")
self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="on")
@@ -276,7 +235,6 @@ class UtilityTests(TestCase):
self.checkCrossDefault('SERVER_PORT',"80",HTTPS="foo")
self.checkCrossDefault('SERVER_PORT',"443",HTTPS="on")
-
def testGuessScheme(self):
self.assertEqual(util.guess_scheme({}), "http")
self.assertEqual(util.guess_scheme({'HTTPS':"foo"}), "http")
@@ -284,13 +242,10 @@ class UtilityTests(TestCase):
self.assertEqual(util.guess_scheme({'HTTPS':"yes"}), "https")
self.assertEqual(util.guess_scheme({'HTTPS':"1"}), "https")
-
-
-
-
def testAppURIs(self):
self.checkAppURI("http://127.0.0.1/")
self.checkAppURI("http://127.0.0.1/spam", SCRIPT_NAME="/spam")
+ self.checkAppURI("http://127.0.0.1/sp%E4m", SCRIPT_NAME="/sp\xe4m")
self.checkAppURI("http://spam.example.com:2071/",
HTTP_HOST="spam.example.com:2071", SERVER_PORT="2071")
self.checkAppURI("http://spam.example.com/",
@@ -304,14 +259,19 @@ class UtilityTests(TestCase):
def testReqURIs(self):
self.checkReqURI("http://127.0.0.1/")
self.checkReqURI("http://127.0.0.1/spam", SCRIPT_NAME="/spam")
+ self.checkReqURI("http://127.0.0.1/sp%E4m", SCRIPT_NAME="/sp\xe4m")
self.checkReqURI("http://127.0.0.1/spammity/spam",
SCRIPT_NAME="/spammity", PATH_INFO="/spam")
+ self.checkReqURI("http://127.0.0.1/spammity/sp%E4m",
+ SCRIPT_NAME="/spammity", PATH_INFO="/sp\xe4m")
self.checkReqURI("http://127.0.0.1/spammity/spam;ham",
SCRIPT_NAME="/spammity", PATH_INFO="/spam;ham")
self.checkReqURI("http://127.0.0.1/spammity/spam;cookie=1234,5678",
SCRIPT_NAME="/spammity", PATH_INFO="/spam;cookie=1234,5678")
self.checkReqURI("http://127.0.0.1/spammity/spam?say=ni",
SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="say=ni")
+ self.checkReqURI("http://127.0.0.1/spammity/spam?s%E4y=ni",
+ SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="s%E4y=ni")
self.checkReqURI("http://127.0.0.1/spammity/spam", 0,
SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="say=ni")
@@ -342,7 +302,7 @@ class HeaderTests(TestCase):
self.assertEqual(Headers(test[:]).keys(), ['x'])
self.assertEqual(Headers(test[:]).values(), ['y'])
self.assertEqual(Headers(test[:]).items(), test)
- self.assertFalse(Headers(test).items() is test) # must be copy!
+ self.assertIsNot(Headers(test).items(), test) # must be copy!
h=Headers([])
del h['foo'] # should not raise an error
@@ -411,15 +371,6 @@ class TestHandler(ErrorHandler):
raise # for testing, we want to see what's happening
-
-
-
-
-
-
-
-
-
class HandlerTests(TestCase):
def checkEnvironAttrs(self, handler):
@@ -460,7 +411,6 @@ class HandlerTests(TestCase):
h=TestHandler(); h.setup_environ()
self.assertEqual(h.environ['wsgi.url_scheme'],'http')
-
def testAbstractMethods(self):
h = BaseHandler()
for name in [
@@ -469,7 +419,6 @@ class HandlerTests(TestCase):
self.assertRaises(NotImplementedError, getattr(h,name))
self.assertRaises(NotImplementedError, h._write, "test")
-
def testContentLength(self):
# Demo one reason iteration is better than write()... ;)
@@ -549,7 +498,6 @@ class HandlerTests(TestCase):
"\r\n"+MSG)
self.assertNotEqual(h.stderr.getvalue().find("AssertionError"), -1)
-
def testHeaderFormats(self):
def non_error_app(e,s):
@@ -591,40 +539,28 @@ class HandlerTests(TestCase):
(stdpat%(version,sw), h.stdout.getvalue())
)
-# This epilogue is needed for compatibility with the Python 2.5 regrtest module
+ def testCloseOnError(self):
+ side_effects = {'close_called': False}
+ MSG = b"Some output has been sent"
+ def error_app(e,s):
+ s("200 OK",[])(MSG)
+ class CrashyIterable(object):
+ def __iter__(self):
+ while True:
+ yield b'blah'
+ raise AssertionError("This should be caught by handler")
+
+ def close(self):
+ side_effects['close_called'] = True
+ return CrashyIterable()
+
+ h = ErrorHandler()
+ h.run(error_app)
+ self.assertEqual(side_effects['close_called'], True)
+
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-# the above lines intentionally left blank
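The new testCloseOnError exercises the PEP 333 rule that a server must call close() on whatever iterable the application returned, even when iterating it raises. A minimal sketch of that server-side obligation (consume_wsgi_result is illustrative, not the actual wsgiref handler code):

def consume_wsgi_result(result, write):
    # Iterate the application's return value; close() must run whether
    # or not iteration completes (PEP 333, "Specification Details").
    try:
        for chunk in result:
            write(chunk)
    finally:
        if hasattr(result, 'close'):
            result.close()

class CrashyIterable(object):
    def __init__(self):
        self.closed = False
    def __iter__(self):
        yield b'blah'
        raise AssertionError('boom')
    def close(self):
        self.closed = True

result = CrashyIterable()
try:
    consume_wsgi_result(result, lambda data: None)
except AssertionError:
    pass
assert result.closed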
diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py
index f1a774a..9fb6c99 100644
--- a/Lib/test/test_xml_etree.py
+++ b/Lib/test/test_xml_etree.py
@@ -713,14 +713,21 @@ def iterparse():
end {namespace}root
end-ns None
+ >>> import StringIO
+
+ >>> events = ('start-ns', 'end-ns')
+ >>> context = ET.iterparse(StringIO.StringIO(r"<root xmlns=''/>"), events)
+ >>> for action, elem in context:
+ ... print action, elem
+ start-ns ('', '')
+ end-ns None
+
>>> events = ("start", "end", "bogus")
>>> with open(SIMPLE_XMLFILE, "rb") as f:
... iterparse(f, events)
Traceback (most recent call last):
ValueError: unknown event 'bogus'
- >>> import StringIO
-
>>> source = StringIO.StringIO(
... "<?xml version='1.0' encoding='iso-8859-1'?>\\n"
... "<body xmlns='http://&#233;ffbot.org/ns'\\n"
@@ -883,6 +890,12 @@ def check_encoding(encoding):
>>> check_encoding("iso-8859-15")
>>> check_encoding("cp437")
>>> check_encoding("mac-roman")
+ >>> check_encoding("gbk")
+ Traceback (most recent call last):
+ ValueError: multi-byte encodings are not supported
+ >>> check_encoding("cp037")
+ Traceback (most recent call last):
+ ParseError: unknown encoding: line 1, column 30
"""
ET.XML("<?xml version='1.0' encoding='%s'?><xml />" % encoding)
@@ -1769,6 +1782,16 @@ def bug_200709_iter_comment():
"""
+def bug_18347():
+ """
+
+ >>> e = ET.XML('<html><CamelCase>text</CamelCase></html>')
+ >>> serialize(e)
+ '<html><CamelCase>text</CamelCase></html>'
+ >>> serialize(e, method="html")
+ '<html><CamelCase>text</CamelCase></html>'
+ """
+
# --------------------------------------------------------------------
# reported on bugs.python.org
@@ -1822,6 +1845,26 @@ def check_issue6565():
"""
+def check_html_empty_elems_serialization(self):
+ # issue 15970
+ # from http://www.w3.org/TR/html401/index/elements.html
+ """
+
+ >>> empty_elems = ['AREA', 'BASE', 'BASEFONT', 'BR', 'COL', 'FRAME', 'HR',
+ ... 'IMG', 'INPUT', 'ISINDEX', 'LINK', 'META', 'PARAM']
+ >>> elems = ''.join('<%s />' % elem for elem in empty_elems)
+ >>> serialize(ET.XML('<html>%s</html>' % elems), method='html')
+ '<html><AREA><BASE><BASEFONT><BR><COL><FRAME><HR><IMG><INPUT><ISINDEX><LINK><META><PARAM></html>'
+ >>> serialize(ET.XML('<html>%s</html>' % elems.lower()), method='html')
+ '<html><area><base><basefont><br><col><frame><hr><img><input><isindex><link><meta><param></html>'
+ >>> elems = ''.join('<%s></%s>' % (elem, elem) for elem in empty_elems)
+ >>> serialize(ET.XML('<html>%s</html>' % elems), method='html')
+ '<html><AREA><BASE><BASEFONT><BR><COL><FRAME><HR><IMG><INPUT><ISINDEX><LINK><META><PARAM></html>'
+ >>> serialize(ET.XML('<html>%s</html>' % elems.lower()), method='html')
+ '<html><area><base><basefont><br><col><frame><hr><img><input><isindex><link><meta><param></html>'
+
+ """
+
# --------------------------------------------------------------------
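The bug_18347 and check_html_empty_elems_serialization doctests pin down how the HTML output method treats tag case: void elements such as <br> are recognised case-insensitively and emitted without end tags, while other tag names keep both their case and their end tags. The same behaviour should be visible through ET.tostring (the doctests use a test-local serialize() helper instead):

import xml.etree.ElementTree as ET

# A non-void element keeps its case and its closing tag under method="html".
e = ET.XML('<html><CamelCase>text</CamelCase></html>')
assert ET.tostring(e, method='html') == '<html><CamelCase>text</CamelCase></html>'

# A void element loses its end tag under method="html", whatever its case.
assert ET.tostring(ET.XML('<html><br /></html>'), method='html') == '<html><br></html>'
assert ET.tostring(ET.XML('<html><BR /></html>'), method='html') == '<html><BR></html>'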
diff --git a/Lib/test/test_xmlrpc.py b/Lib/test/test_xmlrpc.py
index 8e65fde..09235fd 100644
--- a/Lib/test/test_xmlrpc.py
+++ b/Lib/test/test_xmlrpc.py
@@ -19,6 +19,11 @@ except ImportError:
threading = None
try:
+ import gzip
+except ImportError:
+ gzip = None
+
+try:
unicode
except NameError:
have_unicode = False
@@ -681,6 +686,7 @@ class KeepaliveServerTestCase2(BaseKeepaliveServerTestCase):
#A test case that verifies that gzip encoding works in both directions
#(for a request and the response)
+@unittest.skipUnless(gzip, 'gzip not available')
class GzipServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
@@ -1011,11 +1017,7 @@ def test_main():
xmlrpc_tests.append(SimpleServerTestCase)
xmlrpc_tests.append(KeepaliveServerTestCase1)
xmlrpc_tests.append(KeepaliveServerTestCase2)
- try:
- import gzip
- xmlrpc_tests.append(GzipServerTestCase)
- except ImportError:
- pass #gzip not supported in this build
+ xmlrpc_tests.append(GzipServerTestCase)
xmlrpc_tests.append(MultiPathServerTestCase)
xmlrpc_tests.append(ServerProxyTestCase)
xmlrpc_tests.append(FailingServerTestCase)
diff --git a/Lib/test/test_xrange.py b/Lib/test/test_xrange.py
index e3c799e..83c0e41 100644
--- a/Lib/test/test_xrange.py
+++ b/Lib/test/test_xrange.py
@@ -46,6 +46,28 @@ class XrangeTest(unittest.TestCase):
self.fail('{}: wrong element at position {};'
'expected {}, got {}'.format(test_id, i, y, x))
+ def assert_xranges_equivalent(self, x, y):
+ # Check that two xrange objects are equivalent, in the sense of the
+ # associated sequences being the same. We want to use this for large
+ # xrange objects, so instead of converting to lists and comparing
+ # directly we do a number of indirect checks.
+ if len(x) != len(y):
+ self.fail('{} and {} have different '
+ 'lengths: {} and {} '.format(x, y, len(x), len(y)))
+ if len(x) >= 1:
+ if x[0] != y[0]:
+ self.fail('{} and {} have different initial '
+ 'elements: {} and {} '.format(x, y, x[0], y[0]))
+ if x[-1] != y[-1]:
+ self.fail('{} and {} have different final '
+ 'elements: {} and {} '.format(x, y, x[-1], y[-1]))
+ if len(x) >= 2:
+ x_step = x[1] - x[0]
+ y_step = y[1] - y[0]
+ if x_step != y_step:
+ self.fail('{} and {} have different step: '
+ '{} and {} '.format(x, y, x_step, y_step))
+
def test_xrange(self):
self.assertEqual(list(xrange(3)), [0, 1, 2])
self.assertEqual(list(xrange(1, 5)), [1, 2, 3, 4])
@@ -104,6 +126,59 @@ class XrangeTest(unittest.TestCase):
self.assertEqual(list(pickle.loads(pickle.dumps(r, proto))),
list(r))
+ M = min(sys.maxint, sys.maxsize)
+ large_testcases = testcases + [
+ (0, M, 1),
+ (M, 0, -1),
+ (0, M, M - 1),
+ (M // 2, M, 1),
+ (0, -M, -1),
+ (0, -M, 1 - M),
+ (-M, M, 2),
+ (-M, M, 1024),
+ (-M, M, 10585),
+ (M, -M, -2),
+ (M, -M, -1024),
+ (M, -M, -10585),
+ ]
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ for t in large_testcases:
+ r = xrange(*t)
+ r_out = pickle.loads(pickle.dumps(r, proto))
+ self.assert_xranges_equivalent(r_out, r)
+
+ def test_repr(self):
+ # Check that repr of an xrange is a valid representation
+ # of that xrange.
+
+ # Valid xranges have at most min(sys.maxint, sys.maxsize) elements.
+ M = min(sys.maxint, sys.maxsize)
+
+ testcases = [
+ (13,),
+ (0, 11),
+ (-22, 10),
+ (20, 3, -1),
+ (13, 21, 3),
+ (-2, 2, 2),
+ (0, M, 1),
+ (M, 0, -1),
+ (0, M, M - 1),
+ (M // 2, M, 1),
+ (0, -M, -1),
+ (0, -M, 1 - M),
+ (-M, M, 2),
+ (-M, M, 1024),
+ (-M, M, 10585),
+ (M, -M, -2),
+ (M, -M, -1024),
+ (M, -M, -10585),
+ ]
+ for t in testcases:
+ r = xrange(*t)
+ r_out = eval(repr(r))
+ self.assert_xranges_equivalent(r, r_out)
+
def test_range_iterators(self):
# see issue 7298
limits = [base + jiggle
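assert_xranges_equivalent deliberately avoids list()-ing huge xrange objects and compares them through length, endpoints and step only. The same idea as a standalone predicate (xranges_equivalent is a hypothetical helper written for illustration):

import sys

def xranges_equivalent(x, y):
    # Two xranges describe the same sequence iff they agree on length,
    # on their first and last elements, and (when len >= 2) on the step.
    if len(x) != len(y):
        return False
    if len(x) == 0:
        return True
    if x[0] != y[0] or x[-1] != y[-1]:
        return False
    return len(x) < 2 or x[1] - x[0] == y[1] - y[0]

M = min(sys.maxint, sys.maxsize)
assert xranges_equivalent(xrange(M // 2, M, 1), xrange(M // 2, M, 1))
assert not xranges_equivalent(xrange(0, 10, 2), xrange(0, 10, 3))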
diff --git a/Lib/test/test_zipfile.py b/Lib/test/test_zipfile.py
index 7ebb663..0bacc1c 100644
--- a/Lib/test/test_zipfile.py
+++ b/Lib/test/test_zipfile.py
@@ -8,7 +8,6 @@ import os
import io
import sys
import time
-import shutil
import struct
import zipfile
import unittest
@@ -18,7 +17,14 @@ from tempfile import TemporaryFile
from random import randint, random
from unittest import skipUnless
-from test.test_support import TESTFN, run_unittest, findfile, unlink
+from test.test_support import TESTFN, TESTFN_UNICODE, TESTFN_ENCODING, \
+ run_unittest, findfile, unlink, rmtree, check_warnings
+try:
+ TESTFN_UNICODE.encode(TESTFN_ENCODING)
+except (UnicodeError, TypeError):
+ # Either the file system encoding is None, or the file name
+ # cannot be encoded in the file system encoding.
+ TESTFN_UNICODE = None
TESTFN2 = TESTFN + "2"
TESTFNDIR = TESTFN + "d"
@@ -26,7 +32,7 @@ FIXEDTEST_SIZE = 1000
SMALL_TEST_DATA = [('_ziptest1', '1q2w3e4r5t'),
('ziptest2dir/_ziptest2', 'qawsedrftg'),
- ('/ziptest2dir/ziptest3dir/_ziptest3', 'azsxdcfvgb'),
+ ('ziptest2dir/ziptest3dir/_ziptest3', 'azsxdcfvgb'),
('ziptest2dir/ziptest3dir/ziptest4dir/_ziptest3', '6y7u8i9o0p')]
@@ -141,7 +147,9 @@ class TestsWithSourceFile(unittest.TestCase):
# Create the ZIP archive
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.writestr("name", "foo")
- zipfp.writestr("name", "bar")
+ with check_warnings(('', UserWarning)):
+ zipfp.writestr("name", "bar")
+ self.assertEqual(zipfp.namelist(), ["name"] * 2)
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
infos = zipfp.infolist()
@@ -358,7 +366,8 @@ class TestsWithSourceFile(unittest.TestCase):
produces the expected result."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
zipfp.write(TESTFN)
- self.assertEqual(zipfp.read(TESTFN), open(TESTFN).read())
+ with open(TESTFN,'r') as fid:
+ self.assertEqual(zipfp.read(TESTFN), fid.read())
@skipUnless(zlib, "requires zlib")
def test_per_file_compression(self):
@@ -391,20 +400,18 @@ class TestsWithSourceFile(unittest.TestCase):
writtenfile = zipfp.extract(fpath)
# make sure it was written to the right place
- if os.path.isabs(fpath):
- correctfile = os.path.join(os.getcwd(), fpath[1:])
- else:
- correctfile = os.path.join(os.getcwd(), fpath)
+ correctfile = os.path.join(os.getcwd(), fpath)
correctfile = os.path.normpath(correctfile)
self.assertEqual(writtenfile, correctfile)
# make sure correct data is in correct file
- self.assertEqual(fdata, open(writtenfile, "rb").read())
+ with open(writtenfile, "rb") as fid:
+ self.assertEqual(fdata, fid.read())
os.remove(writtenfile)
# remove the test file subdirectories
- shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
+ rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
def test_extract_all(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
@@ -414,16 +421,120 @@ class TestsWithSourceFile(unittest.TestCase):
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
zipfp.extractall()
for fpath, fdata in SMALL_TEST_DATA:
- if os.path.isabs(fpath):
- outfile = os.path.join(os.getcwd(), fpath[1:])
- else:
- outfile = os.path.join(os.getcwd(), fpath)
+ outfile = os.path.join(os.getcwd(), fpath)
- self.assertEqual(fdata, open(outfile, "rb").read())
+ with open(outfile, "rb") as fid:
+ self.assertEqual(fdata, fid.read())
os.remove(outfile)
# remove the test file subdirectories
- shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
+ rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
+
+ def check_file(self, filename, content):
+ self.assertTrue(os.path.isfile(filename))
+ with open(filename, 'rb') as f:
+ self.assertEqual(f.read(), content)
+
+ @skipUnless(TESTFN_UNICODE, "No Unicode filesystem semantics on this platform.")
+ def test_extract_unicode_filenames(self):
+ fnames = [u'foo.txt', os.path.basename(TESTFN_UNICODE)]
+ content = 'Test for unicode filename'
+ with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
+ for fname in fnames:
+ zipfp.writestr(fname, content)
+
+ with zipfile.ZipFile(TESTFN2, "r") as zipfp:
+ for fname in fnames:
+ writtenfile = zipfp.extract(fname)
+
+ # make sure it was written to the right place
+ correctfile = os.path.join(os.getcwd(), fname)
+ correctfile = os.path.normpath(correctfile)
+ self.assertEqual(writtenfile, correctfile)
+
+ self.check_file(writtenfile, content)
+ os.remove(writtenfile)
+
+ def test_extract_hackers_arcnames(self):
+ hacknames = [
+ ('../foo/bar', 'foo/bar'),
+ ('foo/../bar', 'foo/bar'),
+ ('foo/../../bar', 'foo/bar'),
+ ('foo/bar/..', 'foo/bar'),
+ ('./../foo/bar', 'foo/bar'),
+ ('/foo/bar', 'foo/bar'),
+ ('/foo/../bar', 'foo/bar'),
+ ('/foo/../../bar', 'foo/bar'),
+ ]
+ if os.path.sep == '\\':
+ hacknames.extend([
+ (r'..\foo\bar', 'foo/bar'),
+ (r'..\/foo\/bar', 'foo/bar'),
+ (r'foo/\..\/bar', 'foo/bar'),
+ (r'foo\/../\bar', 'foo/bar'),
+ (r'C:foo/bar', 'foo/bar'),
+ (r'C:/foo/bar', 'foo/bar'),
+ (r'C://foo/bar', 'foo/bar'),
+ (r'C:\foo\bar', 'foo/bar'),
+ (r'//conky/mountpoint/foo/bar', 'foo/bar'),
+ (r'\\conky\mountpoint\foo\bar', 'foo/bar'),
+ (r'///conky/mountpoint/foo/bar', 'conky/mountpoint/foo/bar'),
+ (r'\\\conky\mountpoint\foo\bar', 'conky/mountpoint/foo/bar'),
+ (r'//conky//mountpoint/foo/bar', 'conky/mountpoint/foo/bar'),
+ (r'\\conky\\mountpoint\foo\bar', 'conky/mountpoint/foo/bar'),
+ (r'//?/C:/foo/bar', 'foo/bar'),
+ (r'\\?\C:\foo\bar', 'foo/bar'),
+ (r'C:/../C:/foo/bar', 'C_/foo/bar'),
+ (r'a:b\c<d>e|f"g?h*i', 'b/c_d_e_f_g_h_i'),
+ ('../../foo../../ba..r', 'foo/ba..r'),
+ ])
+ else: # Unix
+ hacknames.extend([
+ ('//foo/bar', 'foo/bar'),
+ ('../../foo../../ba..r', 'foo../ba..r'),
+ (r'foo/..\bar', r'foo/..\bar'),
+ ])
+
+ for arcname, fixedname in hacknames:
+ content = b'foobar' + arcname.encode()
+ with zipfile.ZipFile(TESTFN2, 'w', zipfile.ZIP_STORED) as zipfp:
+ zinfo = zipfile.ZipInfo()
+ # preserve backslashes
+ zinfo.filename = arcname
+ zinfo.external_attr = 0o600 << 16
+ zipfp.writestr(zinfo, content)
+
+ arcname = arcname.replace(os.sep, "/")
+ targetpath = os.path.join('target', 'subdir', 'subsub')
+ correctfile = os.path.join(targetpath, *fixedname.split('/'))
+
+ with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
+ writtenfile = zipfp.extract(arcname, targetpath)
+ self.assertEqual(writtenfile, correctfile,
+ msg="extract %r" % arcname)
+ self.check_file(correctfile, content)
+ rmtree('target')
+
+ with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
+ zipfp.extractall(targetpath)
+ self.check_file(correctfile, content)
+ rmtree('target')
+
+ correctfile = os.path.join(os.getcwd(), *fixedname.split('/'))
+
+ with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
+ writtenfile = zipfp.extract(arcname)
+ self.assertEqual(writtenfile, correctfile,
+ msg="extract %r" % arcname)
+ self.check_file(correctfile, content)
+ rmtree(fixedname.split('/')[0])
+
+ with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
+ zipfp.extractall()
+ self.check_file(correctfile, content)
+ rmtree(fixedname.split('/')[0])
+
+ os.remove(TESTFN2)
def test_writestr_compression(self):
zipfp = zipfile.ZipFile(TESTFN2, "w")
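The test_extract_hackers_arcnames table above documents how extract() now maps hostile member names onto safe relative paths. A minimal sketch of that behaviour, outside the patch, assuming a throwaway in-memory archive and a scratch directory named 'sandbox':

import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w', zipfile.ZIP_STORED) as zf:
    info = zipfile.ZipInfo()
    info.filename = '../../etc/passwd'      # hostile arcname stored verbatim
    info.external_attr = 0o600 << 16
    zf.writestr(info, 'payload')

with zipfile.ZipFile(buf, 'r') as zf:
    written = zf.extract('../../etc/passwd', 'sandbox')
    # The leading "../" components are stripped, so the member lands inside
    # the target directory (e.g. sandbox/etc/passwd) instead of escaping it.
    print(written)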
@@ -666,11 +777,12 @@ class PyZipFileTests(unittest.TestCase):
self.assertNotIn('mod2.txt', names)
finally:
- shutil.rmtree(TESTFN2)
+ rmtree(TESTFN2)
def test_write_non_pyfile(self):
with zipfile.PyZipFile(TemporaryFile(), "w") as zipfp:
- open(TESTFN, 'w').write('most definitely not a python file')
+ with open(TESTFN, 'w') as fid:
+ fid.write('most definitely not a python file')
self.assertRaises(RuntimeError, zipfp.writepy, TESTFN)
os.remove(TESTFN)
@@ -760,6 +872,20 @@ class OtherTests(unittest.TestCase):
chk = zipfile.is_zipfile(fp)
self.assertTrue(not chk)
+ def test_damaged_zipfile(self):
+ """Check that zipfiles with missing bytes at the end raise BadZipFile."""
+ # - Create a valid zip file
+ fp = io.BytesIO()
+ with zipfile.ZipFile(fp, mode="w") as zipf:
+ zipf.writestr("foo.txt", b"O, for a Muse of Fire!")
+ zipfiledata = fp.getvalue()
+
+ # - Now create copies of it missing the last N bytes and make sure
+ # a BadZipFile exception is raised when we try to open it
+ for N in range(len(zipfiledata)):
+ fp = io.BytesIO(zipfiledata[:N])
+ self.assertRaises(zipfile.BadZipfile, zipfile.ZipFile, fp)
+
def test_is_zip_valid_file(self):
"""Check that is_zipfile() correctly identifies zip files."""
# - passing a filename
@@ -811,7 +937,7 @@ class OtherTests(unittest.TestCase):
with zipfile.ZipFile(data, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
- # This is correct; calling .read on a closed ZipFile should throw
+ # This is correct; calling .read on a closed ZipFile should raise
# a RuntimeError, and so should calling .testzip. An earlier
# version of .testzip would swallow this exception (and any other)
# and report that the first file in the archive was corrupt.
@@ -819,8 +945,9 @@ class OtherTests(unittest.TestCase):
self.assertRaises(RuntimeError, zipf.open, "foo.txt")
self.assertRaises(RuntimeError, zipf.testzip)
self.assertRaises(RuntimeError, zipf.writestr, "bogus.txt", "bogus")
- open(TESTFN, 'w').write('zipfile test data')
- self.assertRaises(RuntimeError, zipf.write, TESTFN)
+ with open(TESTFN, 'w') as fid:
+ fid.write('zipfile test data')
+ self.assertRaises(RuntimeError, zipf.write, TESTFN)
def test_bad_constructor_mode(self):
"""Check that bad modes passed to ZipFile constructor are caught."""
@@ -859,6 +986,17 @@ class OtherTests(unittest.TestCase):
caught."""
self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, "w", -1)
+ def test_unsupported_compression(self):
+ # data is declared as shrunk, but actually deflated
+ data = (b'PK\x03\x04.\x00\x00\x00\x01\x00\xe4C\xa1@\x00\x00\x00'
+ b'\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00x\x03\x00PK\x01'
+ b'\x02.\x03.\x00\x00\x00\x01\x00\xe4C\xa1@\x00\x00\x00\x00\x02\x00\x00'
+ b'\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x80\x01\x00\x00\x00\x00xPK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x00'
+ b'/\x00\x00\x00!\x00\x00\x00\x00\x00')
+ with zipfile.ZipFile(io.BytesIO(data), 'r') as zipf:
+ self.assertRaises(NotImplementedError, zipf.open, 'x')
+
def test_null_byte_in_filename(self):
"""Check that a filename containing a null byte is properly
terminated."""
@@ -903,11 +1041,28 @@ class OtherTests(unittest.TestCase):
# check a comment that is too long is truncated
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
- zipf.comment = comment2 + 'oops'
+ with check_warnings(('', UserWarning)):
+ zipf.comment = comment2 + 'oops'
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipf:
self.assertEqual(zipf.comment, comment2)
+ def test_change_comment_in_empty_archive(self):
+ with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as zipf:
+ self.assertFalse(zipf.filelist)
+ zipf.comment = b"this is a comment"
+ with zipfile.ZipFile(TESTFN, "r") as zipf:
+ self.assertEqual(zipf.comment, b"this is a comment")
+
+ def test_change_comment_in_nonempty_archive(self):
+ with zipfile.ZipFile(TESTFN, "w", zipfile.ZIP_STORED) as zipf:
+ zipf.writestr("foo.txt", "O, for a Muse of Fire!")
+ with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as zipf:
+ self.assertTrue(zipf.filelist)
+ zipf.comment = b"this is a comment"
+ with zipfile.ZipFile(TESTFN, "r") as zipf:
+ self.assertEqual(zipf.comment, b"this is a comment")
+
def check_testzip_with_bad_crc(self, compression):
"""Tests that files with bad CRCs return their name from testzip."""
zipdata = self.zips_with_bad_crc[compression]
@@ -978,6 +1133,7 @@ class OtherTests(unittest.TestCase):
pass
try:
zipf = zipfile.ZipFile(TESTFN, mode="r")
+ zipf.close()
except zipfile.BadZipfile:
self.fail("Unable to create empty ZIP file in 'w' mode")
@@ -985,6 +1141,7 @@ class OtherTests(unittest.TestCase):
pass
try:
zipf = zipfile.ZipFile(TESTFN, mode="r")
+ zipf.close()
except:
self.fail("Unable to create empty ZIP file in 'a' mode")
@@ -1000,6 +1157,21 @@ class OtherTests(unittest.TestCase):
self.assertRaises(ValueError,
zipfile.ZipInfo, 'seventies', (1979, 1, 1, 0, 0, 0))
+ def test_zipfile_with_short_extra_field(self):
+ """If an extra field in the header is less than 4 bytes, skip it."""
+ zipdata = (
+ b'PK\x03\x04\x14\x00\x00\x00\x00\x00\x93\x9b\xad@\x8b\x9e'
+ b'\xd9\xd3\x01\x00\x00\x00\x01\x00\x00\x00\x03\x00\x03\x00ab'
+ b'c\x00\x00\x00APK\x01\x02\x14\x03\x14\x00\x00\x00\x00'
+ b'\x00\x93\x9b\xad@\x8b\x9e\xd9\xd3\x01\x00\x00\x00\x01\x00\x00'
+ b'\x00\x03\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x81\x00'
+ b'\x00\x00\x00abc\x00\x00PK\x05\x06\x00\x00\x00\x00'
+ b'\x01\x00\x01\x003\x00\x00\x00%\x00\x00\x00\x00\x00'
+ )
+ with zipfile.ZipFile(io.BytesIO(zipdata), 'r') as zipf:
+ # testzip returns the name of the first corrupt file, or None
+ self.assertIsNone(zipf.testzip())
+
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
@@ -1181,12 +1353,11 @@ class TestsWithMultipleOpens(unittest.TestCase):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
with zipfile.ZipFile(TESTFN2, mode="r") as zipf:
- zopen1 = zipf.open('ones')
- zopen2 = zipf.open('ones')
- data1 = zopen1.read(500)
- data2 = zopen2.read(500)
- data1 += zopen1.read(500)
- data2 += zopen2.read(500)
+ with zipf.open('ones') as zopen1, zipf.open('ones') as zopen2:
+ data1 = zopen1.read(500)
+ data2 = zopen2.read(500)
+ data1 += zopen1.read(500)
+ data2 += zopen2.read(500)
self.assertEqual(data1, data2)
def test_different_file(self):
@@ -1213,6 +1384,17 @@ class TestsWithMultipleOpens(unittest.TestCase):
self.assertEqual(data1, '1'*FIXEDTEST_SIZE)
self.assertEqual(data2, '2'*FIXEDTEST_SIZE)
+ def test_many_opens(self):
+ # Verify that read() and open() promptly close the file descriptor,
+ # and don't rely on the garbage collector to free resources.
+ with zipfile.ZipFile(TESTFN2, mode="r") as zipf:
+ for x in range(100):
+ zipf.read('ones')
+ with zipf.open('ones') as zopen1:
+ pass
+ with open(os.devnull) as f:
+ self.assertLess(f.fileno(), 100)
+
def tearDown(self):
unlink(TESTFN2)
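test_many_opens above checks that read() and open() close their file descriptors promptly, and the reworked test_same_file uses two readers over one member as context managers. A small illustration, separate from the tests, using an assumed scratch archive name 'multi_demo.zip' so ZipFile creates an independent file object per reader:

import os
import zipfile

with zipfile.ZipFile('multi_demo.zip', 'w') as zf:
    zf.writestr('ones', '1' * 1000)

with zipfile.ZipFile('multi_demo.zip', 'r') as zf:
    with zf.open('ones') as first, zf.open('ones') as second:
        # Two independent readers over the same member; each handle is
        # closed by its with-block instead of waiting for the GC.
        assert first.read(500) + first.read(500) == second.read()

os.remove('multi_demo.zip')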
@@ -1235,12 +1417,12 @@ class TestWithDirectory(unittest.TestCase):
def test_store_dir(self):
os.mkdir(os.path.join(TESTFN2, "x"))
- zipf = zipfile.ZipFile(TESTFN, "w")
- zipf.write(os.path.join(TESTFN2, "x"), "x")
- self.assertTrue(zipf.filelist[0].filename.endswith("x/"))
+ with zipfile.ZipFile(TESTFN, "w") as zipf:
+ zipf.write(os.path.join(TESTFN2, "x"), "x")
+ self.assertTrue(zipf.filelist[0].filename.endswith("x/"))
def tearDown(self):
- shutil.rmtree(TESTFN2)
+ rmtree(TESTFN2)
if os.path.exists(TESTFN):
unlink(TESTFN)
@@ -1254,7 +1436,8 @@ class UniversalNewlineTests(unittest.TestCase):
for n, s in enumerate(self.seps):
self.arcdata[s] = s.join(self.line_gen) + s
self.arcfiles[s] = '%s-%d' % (TESTFN, n)
- open(self.arcfiles[s], "wb").write(self.arcdata[s])
+ with open(self.arcfiles[s], "wb") as fid:
+ fid.write(self.arcdata[s])
def make_test_archive(self, f, compression):
# Create the ZIP archive
@@ -1323,8 +1506,9 @@ class UniversalNewlineTests(unittest.TestCase):
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for sep, fn in self.arcfiles.items():
- for line, zipline in zip(self.line_gen, zipfp.open(fn, "rU")):
- self.assertEqual(zipline, line + '\n')
+ with zipfp.open(fn, "rU") as fid:
+ for line, zipline in zip(self.line_gen, fid):
+ self.assertEqual(zipline, line + '\n')
def test_read_stored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
diff --git a/Lib/test/test_zipimport_support.py b/Lib/test/test_zipimport_support.py
index 4f41518..1b617ab 100644
--- a/Lib/test/test_zipimport_support.py
+++ b/Lib/test/test_zipimport_support.py
@@ -29,7 +29,8 @@ verbose = test.test_support.verbose
# test_cmd_line_script (covers the zipimport support in runpy)
# Retrieve some helpers from other test cases
-from test import test_doctest, sample_doctest
+from test import (test_doctest, sample_doctest, sample_doctest_no_doctests,
+ sample_doctest_no_docstrings)
from test.test_importhooks import ImportHooksBaseTestCase
@@ -99,16 +100,26 @@ class ZipSupportTests(ImportHooksBaseTestCase):
"test_zipped_doctest")
test_src = test_src.replace("test.sample_doctest",
"sample_zipped_doctest")
- sample_src = inspect.getsource(sample_doctest)
- sample_src = sample_src.replace("test.test_doctest",
- "test_zipped_doctest")
+ # The sample doctest files rewritten to include in the zipped version.
+ sample_sources = {}
+ for mod in [sample_doctest, sample_doctest_no_doctests,
+ sample_doctest_no_docstrings]:
+ src = inspect.getsource(mod)
+ src = src.replace("test.test_doctest", "test_zipped_doctest")
+ # Rewrite the module name so that, for example,
+ # "test.sample_doctest" becomes "sample_zipped_doctest".
+ mod_name = mod.__name__.split(".")[-1]
+ mod_name = mod_name.replace("sample_", "sample_zipped_")
+ sample_sources[mod_name] = src
+
with temp_dir() as d:
script_name = make_script(d, 'test_zipped_doctest',
test_src)
zip_name, run_name = make_zip_script(d, 'test_zip',
script_name)
z = zipfile.ZipFile(zip_name, 'a')
- z.writestr("sample_zipped_doctest.py", sample_src)
+ for mod_name, src in sample_sources.items():
+ z.writestr(mod_name + ".py", src)
z.close()
if verbose:
zip_file = zipfile.ZipFile(zip_name, 'r')
@@ -168,9 +179,10 @@ class ZipSupportTests(ImportHooksBaseTestCase):
test_zipped_doctest.test_unittest_reportflags,
]
# Needed for test_DocTestParser and test_debug
- deprecations = [
+ deprecations = []
+ if __debug__:
# Ignore all warnings about the use of class Tester in this module.
- ("class Tester is deprecated", DeprecationWarning)]
+ deprecations.append(("class Tester is deprecated", DeprecationWarning))
if sys.py3kwarning:
deprecations += [
("backquote not supported", SyntaxWarning),
diff --git a/Lib/test/test_zlib.py b/Lib/test/test_zlib.py
index 7f63143..fb62081 100644
--- a/Lib/test/test_zlib.py
+++ b/Lib/test/test_zlib.py
@@ -12,6 +12,13 @@ except ImportError:
zlib = import_module('zlib')
+requires_Compress_copy = unittest.skipUnless(
+ hasattr(zlib.compressobj(), "copy"),
+ 'requires Compress.copy()')
+requires_Decompress_copy = unittest.skipUnless(
+ hasattr(zlib.decompressobj(), "copy"),
+ 'requires Decompress.copy()')
+
class ChecksumTestCase(unittest.TestCase):
# checksum test cases
@@ -339,39 +346,39 @@ class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
"mode=%i, level=%i") % (sync, level))
del obj
+ @unittest.skipUnless(hasattr(zlib, 'Z_SYNC_FLUSH'),
+ 'requires zlib.Z_SYNC_FLUSH')
def test_odd_flush(self):
# Test for odd flushing bugs noted in 2.0, and hopefully fixed in 2.1
import random
+ # Testing on 17K of "random" data
- if hasattr(zlib, 'Z_SYNC_FLUSH'):
- # Testing on 17K of "random" data
-
- # Create compressor and decompressor objects
- co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
- dco = zlib.decompressobj()
+ # Create compressor and decompressor objects
+ co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
+ dco = zlib.decompressobj()
- # Try 17K of data
- # generate random data stream
+ # Try 17K of data
+ # generate random data stream
+ try:
+ # In 2.3 and later, WichmannHill is the RNG of the bug report
+ gen = random.WichmannHill()
+ except AttributeError:
try:
- # In 2.3 and later, WichmannHill is the RNG of the bug report
- gen = random.WichmannHill()
+ # 2.2 called it Random
+ gen = random.Random()
except AttributeError:
- try:
- # 2.2 called it Random
- gen = random.Random()
- except AttributeError:
- # others might simply have a single RNG
- gen = random
- gen.seed(1)
- data = genblock(1, 17 * 1024, generator=gen)
-
- # compress, sync-flush, and decompress
- first = co.compress(data)
- second = co.flush(zlib.Z_SYNC_FLUSH)
- expanded = dco.decompress(first + second)
-
- # if decompressed data is different from the input data, choke.
- self.assertEqual(expanded, data, "17K random source doesn't match")
+ # others might simply have a single RNG
+ gen = random
+ gen.seed(1)
+ data = genblock(1, 17 * 1024, generator=gen)
+
+ # compress, sync-flush, and decompress
+ first = co.compress(data)
+ second = co.flush(zlib.Z_SYNC_FLUSH)
+ expanded = dco.decompress(first + second)
+
+ # if decompressed data is different from the input data, choke.
+ self.assertEqual(expanded, data, "17K random source doesn't match")
def test_empty_flush(self):
# Test that calling .flush() on unused objects works.
@@ -396,65 +403,104 @@ class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
y += dco.flush()
self.assertEqual(y, 'foo')
- if hasattr(zlib.compressobj(), "copy"):
- def test_compresscopy(self):
- # Test copying a compression object
- data0 = HAMLET_SCENE
- data1 = HAMLET_SCENE.swapcase()
- c0 = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
- bufs0 = []
- bufs0.append(c0.compress(data0))
-
- c1 = c0.copy()
- bufs1 = bufs0[:]
-
- bufs0.append(c0.compress(data0))
- bufs0.append(c0.flush())
- s0 = ''.join(bufs0)
-
- bufs1.append(c1.compress(data1))
- bufs1.append(c1.flush())
- s1 = ''.join(bufs1)
-
- self.assertEqual(zlib.decompress(s0),data0+data0)
- self.assertEqual(zlib.decompress(s1),data0+data1)
-
- def test_badcompresscopy(self):
- # Test copying a compression object in an inconsistent state
- c = zlib.compressobj()
- c.compress(HAMLET_SCENE)
- c.flush()
- self.assertRaises(ValueError, c.copy)
-
- if hasattr(zlib.decompressobj(), "copy"):
- def test_decompresscopy(self):
- # Test copying a decompression object
- data = HAMLET_SCENE
- comp = zlib.compress(data)
-
- d0 = zlib.decompressobj()
- bufs0 = []
- bufs0.append(d0.decompress(comp[:32]))
-
- d1 = d0.copy()
- bufs1 = bufs0[:]
-
- bufs0.append(d0.decompress(comp[32:]))
- s0 = ''.join(bufs0)
-
- bufs1.append(d1.decompress(comp[32:]))
- s1 = ''.join(bufs1)
-
- self.assertEqual(s0,s1)
- self.assertEqual(s0,data)
-
- def test_baddecompresscopy(self):
- # Test copying a compression object in an inconsistent state
- data = zlib.compress(HAMLET_SCENE)
- d = zlib.decompressobj()
- d.decompress(data)
- d.flush()
- self.assertRaises(ValueError, d.copy)
+ def test_flush_with_freed_input(self):
+ # Issue #16411: decompressor accesses input to last decompress() call
+ # in flush(), even if this object has been freed in the meanwhile.
+ input1 = 'abcdefghijklmnopqrstuvwxyz'
+ input2 = 'QWERTYUIOPASDFGHJKLZXCVBNM'
+ data = zlib.compress(input1)
+ dco = zlib.decompressobj()
+ dco.decompress(data, 1)
+ del data
+ data = zlib.compress(input2)
+ self.assertEqual(dco.flush(), input1[1:])
+
+ @requires_Compress_copy
+ def test_compresscopy(self):
+ # Test copying a compression object
+ data0 = HAMLET_SCENE
+ data1 = HAMLET_SCENE.swapcase()
+ c0 = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
+ bufs0 = []
+ bufs0.append(c0.compress(data0))
+
+ c1 = c0.copy()
+ bufs1 = bufs0[:]
+
+ bufs0.append(c0.compress(data0))
+ bufs0.append(c0.flush())
+ s0 = ''.join(bufs0)
+
+ bufs1.append(c1.compress(data1))
+ bufs1.append(c1.flush())
+ s1 = ''.join(bufs1)
+
+ self.assertEqual(zlib.decompress(s0),data0+data0)
+ self.assertEqual(zlib.decompress(s1),data0+data1)
+
+ @requires_Compress_copy
+ def test_badcompresscopy(self):
+ # Test copying a compression object in an inconsistent state
+ c = zlib.compressobj()
+ c.compress(HAMLET_SCENE)
+ c.flush()
+ self.assertRaises(ValueError, c.copy)
+
+ def test_decompress_unused_data(self):
+ # Repeated calls to decompress() after EOF should accumulate data in
+ # dco.unused_data, instead of just storing the arg to the last call.
+ source = b'abcdefghijklmnopqrstuvwxyz'
+ remainder = b'0123456789'
+ y = zlib.compress(source)
+ x = y + remainder
+ for maxlen in 0, 1000:
+ for step in 1, 2, len(y), len(x):
+ dco = zlib.decompressobj()
+ data = b''
+ for i in range(0, len(x), step):
+ if i < len(y):
+ self.assertEqual(dco.unused_data, b'')
+ if maxlen == 0:
+ data += dco.decompress(x[i : i + step])
+ self.assertEqual(dco.unconsumed_tail, b'')
+ else:
+ data += dco.decompress(
+ dco.unconsumed_tail + x[i : i + step], maxlen)
+ data += dco.flush()
+ self.assertEqual(data, source)
+ self.assertEqual(dco.unconsumed_tail, b'')
+ self.assertEqual(dco.unused_data, remainder)
+
+ @requires_Decompress_copy
+ def test_decompresscopy(self):
+ # Test copying a decompression object
+ data = HAMLET_SCENE
+ comp = zlib.compress(data)
+
+ d0 = zlib.decompressobj()
+ bufs0 = []
+ bufs0.append(d0.decompress(comp[:32]))
+
+ d1 = d0.copy()
+ bufs1 = bufs0[:]
+
+ bufs0.append(d0.decompress(comp[32:]))
+ s0 = ''.join(bufs0)
+
+ bufs1.append(d1.decompress(comp[32:]))
+ s1 = ''.join(bufs1)
+
+ self.assertEqual(s0,s1)
+ self.assertEqual(s0,data)
+
+ @requires_Decompress_copy
+ def test_baddecompresscopy(self):
+ # Test copying a compression object in an inconsistent state
+ data = zlib.compress(HAMLET_SCENE)
+ d = zlib.decompressobj()
+ d.decompress(data)
+ d.flush()
+ self.assertRaises(ValueError, d.copy)
# Memory use of the following functions takes into account overallocation
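test_decompress_unused_data above pins down how bytes past the end of a compressed stream accumulate in unused_data rather than being overwritten by the last call. A single-shot sketch of the same attribute, separate from the test:

import zlib

payload = zlib.compress('abcdefghijklmnopqrstuvwxyz')
trailer = '0123456789'
dco = zlib.decompressobj()
out = dco.decompress(payload + trailer)
assert out == 'abcdefghijklmnopqrstuvwxyz'
# Anything following the compressed stream ends up in unused_data; the test
# above additionally verifies that it keeps accumulating across repeated
# decompress() calls after EOF.
assert dco.unused_data == trailer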
diff --git a/Lib/test/testtar.tar b/Lib/test/testtar.tar
index bac0e26..440182a 100644
--- a/Lib/test/testtar.tar
+++ b/Lib/test/testtar.tar
Binary files differ
diff --git a/Lib/textwrap.py b/Lib/textwrap.py
index 9582a1c..62ea0b4 100644
--- a/Lib/textwrap.py
+++ b/Lib/textwrap.py
@@ -9,6 +9,14 @@ __revision__ = "$Id$"
import string, re
+try:
+ _unicode = unicode
+except NameError:
+ # If Python is built without Unicode support, the unicode type
+ # will not exist. Fake one.
+ class _unicode(object):
+ pass
+
# Do the right thing with boolean values for all known Python versions
# (so this module can be copied to projects that don't depend on Python
# 2.3, e.g. Optik and Docutils) by uncommenting the block of code below.
@@ -147,7 +155,7 @@ class TextWrapper:
if self.replace_whitespace:
if isinstance(text, str):
text = text.translate(self.whitespace_trans)
- elif isinstance(text, unicode):
+ elif isinstance(text, _unicode):
text = text.translate(self.unicode_whitespace_trans)
return text
@@ -167,7 +175,7 @@ class TextWrapper:
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
otherwise.
"""
- if isinstance(text, unicode):
+ if isinstance(text, _unicode):
if self.break_on_hyphens:
pat = self.wordsep_re_uni
else:
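The try/except added above gives textwrap a private _unicode alias so its isinstance() checks keep working on interpreters built without Unicode support. A sketch of the same defensive pattern in isolation (is_text is a made-up helper for illustration, not part of the module):

try:
    _unicode = unicode
except NameError:
    # Built without Unicode support: fake a type nothing is an instance of,
    # so isinstance() checks simply never match instead of raising NameError.
    class _unicode(object):
        pass

def is_text(obj):
    return isinstance(obj, (str, _unicode))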
diff --git a/Lib/threading.py b/Lib/threading.py
index ff32dfb..e81471b 100644
--- a/Lib/threading.py
+++ b/Lib/threading.py
@@ -10,6 +10,7 @@ except ImportError:
import warnings
+from collections import deque as _deque
from time import time as _time, sleep as _sleep
from traceback import format_exc as _format_exc
@@ -86,10 +87,22 @@ _profile_hook = None
_trace_hook = None
def setprofile(func):
+ """Set a profile function for all threads started from the threading module.
+
+ The func will be passed to sys.setprofile() for each thread, before its
+ run() method is called.
+
+ """
global _profile_hook
_profile_hook = func
def settrace(func):
+ """Set a trace function for all threads started from the threading module.
+
+ The func will be passed to sys.settrace() for each thread, before its run()
+ method is called.
+
+ """
global _trace_hook
_trace_hook = func
@@ -98,9 +111,22 @@ def settrace(func):
Lock = _allocate_lock
def RLock(*args, **kwargs):
+ """Factory function that returns a new reentrant lock.
+
+ A reentrant lock must be released by the thread that acquired it. Once a
+ thread has acquired a reentrant lock, the same thread may acquire it again
+ without blocking; the thread must release it once for each time it has
+ acquired it.
+
+ """
return _RLock(*args, **kwargs)
class _RLock(_Verbose):
+ """A reentrant lock must be released by the thread that acquired it. Once a
+ thread has acquired a reentrant lock, the same thread may acquire it
+ again without blocking; the thread must release it once for each time it
+ has acquired it.
+ """
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
@@ -118,6 +144,26 @@ class _RLock(_Verbose):
self.__class__.__name__, owner, self.__count)
def acquire(self, blocking=1):
+ """Acquire a lock, blocking or non-blocking.
+
+ When invoked without arguments: if this thread already owns the lock,
+ increment the recursion level by one, and return immediately. Otherwise,
+ if another thread owns the lock, block until the lock is unlocked. Once
+ the lock is unlocked (not owned by any thread), then grab ownership, set
+ the recursion level to one, and return. If more than one thread is
+ blocked waiting until the lock is unlocked, only one at a time will be
+ able to grab ownership of the lock. There is no return value in this
+ case.
+
+ When invoked with the blocking argument set to true, do the same thing
+ as when called without arguments, and return true.
+
+ When invoked with the blocking argument set to false, do not block. If a
+ call without an argument would block, return false immediately;
+ otherwise, do the same thing as when called without arguments, and
+ return true.
+
+ """
me = _get_ident()
if self.__owner == me:
self.__count = self.__count + 1
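The acquire() docstring added above describes the recursion-level bookkeeping; a short usage sketch, separate from the patch:

import threading

lock = threading.RLock()

def update():
    with lock:           # first acquisition sets the recursion level to one
        with lock:       # the owning thread may re-acquire without blocking
            pass         # each with-block releases once; the lock is free
                         # again only after the outer release

update()
assert lock.acquire(False)   # non-blocking acquire succeeds: lock is free
lock.release()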
@@ -138,6 +184,21 @@ class _RLock(_Verbose):
__enter__ = acquire
def release(self):
+ """Release a lock, decrementing the recursion level.
+
+ If after the decrement it is zero, reset the lock to unlocked (not owned
+ by any thread), and if any other threads are blocked waiting for the
+ lock to become unlocked, allow exactly one of them to proceed. If after
+ the decrement the recursion level is still nonzero, the lock remains
+ locked and owned by the calling thread.
+
+ Only call this method when the calling thread owns the lock. A
+ RuntimeError is raised if this method is called when the lock is
+ unlocked.
+
+ There is no return value.
+
+ """
if self.__owner != _get_ident():
raise RuntimeError("cannot release un-acquired lock")
self.__count = count = self.__count - 1
@@ -178,9 +239,22 @@ class _RLock(_Verbose):
def Condition(*args, **kwargs):
+ """Factory function that returns a new condition variable object.
+
+ A condition variable allows one or more threads to wait until they are
+ notified by another thread.
+
+ If the lock argument is given and not None, it must be a Lock or RLock
+ object, and it is used as the underlying lock. Otherwise, a new RLock object
+ is created and used as the underlying lock.
+
+ """
return _Condition(*args, **kwargs)
class _Condition(_Verbose):
+ """Condition variables allow one or more threads to wait until they are
+ notified by another thread.
+ """
def __init__(self, lock=None, verbose=None):
_Verbose.__init__(self, verbose)
@@ -232,6 +306,28 @@ class _Condition(_Verbose):
return True
def wait(self, timeout=None):
+ """Wait until notified or until a timeout occurs.
+
+ If the calling thread has not acquired the lock when this method is
+ called, a RuntimeError is raised.
+
+ This method releases the underlying lock, and then blocks until it is
+ awakened by a notify() or notifyAll() call for the same condition
+ variable in another thread, or until the optional timeout occurs. Once
+ awakened or timed out, it re-acquires the lock and returns.
+
+ When the timeout argument is present and not None, it should be a
+ floating point number specifying a timeout for the operation in seconds
+ (or fractions thereof).
+
+ When the underlying lock is an RLock, it is not released using its
+ release() method, since this may not actually unlock the lock when it
+ was acquired multiple times recursively. Instead, an internal interface
+ of the RLock class is used, which really unlocks it even when it has
+ been recursively acquired several times. Another internal interface is
+ then used to restore the recursion level when the lock is reacquired.
+
+ """
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
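The wait() docstring above spells out the release-then-reacquire dance; a minimal producer/consumer sketch using the documented pattern, separate from the patch:

import collections
import threading

items = collections.deque()
cond = threading.Condition()

def consumer():
    with cond:
        while not items:        # always re-check the predicate after wait()
            cond.wait()
        return items.popleft()

def producer(value):
    with cond:
        items.append(value)
        cond.notify()           # wake at most one waiting consumer

t = threading.Thread(target=producer, args=('job',))
t.start()
print(consumer())               # prints 'job' once the producer has run
t.join()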
@@ -274,6 +370,15 @@ class _Condition(_Verbose):
self._acquire_restore(saved_state)
def notify(self, n=1):
+ """Wake up one or more threads waiting on this condition, if any.
+
+ If the calling thread has not acquired the lock when this method is
+ called, a RuntimeError is raised.
+
+ This method wakes up at most n of the threads waiting for the condition
+ variable; it is a no-op if no threads are waiting.
+
+ """
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
__waiters = self.__waiters
@@ -292,15 +397,35 @@ class _Condition(_Verbose):
pass
def notifyAll(self):
+ """Wake up all threads waiting on this condition.
+
+ If the calling thread has not acquired the lock when this method
+ is called, a RuntimeError is raised.
+
+ """
self.notify(len(self.__waiters))
notify_all = notifyAll
def Semaphore(*args, **kwargs):
+ """A factory function that returns a new semaphore.
+
+ Semaphores manage a counter representing the number of release() calls minus
+ the number of acquire() calls, plus an initial value. The acquire() method
+ blocks if necessary until it can return without making the counter
+ negative. If not given, value defaults to 1.
+
+ """
return _Semaphore(*args, **kwargs)
class _Semaphore(_Verbose):
+ """Semaphores manage a counter representing the number of release() calls
+ minus the number of acquire() calls, plus an initial value. The acquire()
+ method blocks if necessary until it can return without making the counter
+ negative. If not given, value defaults to 1.
+
+ """
# After Tim Peters' semaphore class, but not quite the same (no maximum)
@@ -312,58 +437,123 @@ class _Semaphore(_Verbose):
self.__value = value
def acquire(self, blocking=1):
+ """Acquire a semaphore, decrementing the internal counter by one.
+
+ When invoked without arguments: if the internal counter is larger than
+ zero on entry, decrement it by one and return immediately. If it is zero
+ on entry, block, waiting until some other thread has called release() to
+ make it larger than zero. This is done with proper interlocking so that
+ if multiple acquire() calls are blocked, release() will wake exactly one
+ of them up. The implementation may pick one at random, so the order in
+ which blocked threads are awakened should not be relied on. There is no
+ return value in this case.
+
+ When invoked with blocking set to true, do the same thing as when called
+ without arguments, and return true.
+
+ When invoked with blocking set to false, do not block. If a call without
+ an argument would block, return false immediately; otherwise, do the
+ same thing as when called without arguments, and return true.
+
+ """
rc = False
- self.__cond.acquire()
- while self.__value == 0:
- if not blocking:
- break
- if __debug__:
- self._note("%s.acquire(%s): blocked waiting, value=%s",
- self, blocking, self.__value)
- self.__cond.wait()
- else:
- self.__value = self.__value - 1
- if __debug__:
- self._note("%s.acquire: success, value=%s",
- self, self.__value)
- rc = True
- self.__cond.release()
+ with self.__cond:
+ while self.__value == 0:
+ if not blocking:
+ break
+ if __debug__:
+ self._note("%s.acquire(%s): blocked waiting, value=%s",
+ self, blocking, self.__value)
+ self.__cond.wait()
+ else:
+ self.__value = self.__value - 1
+ if __debug__:
+ self._note("%s.acquire: success, value=%s",
+ self, self.__value)
+ rc = True
return rc
__enter__ = acquire
def release(self):
- self.__cond.acquire()
- self.__value = self.__value + 1
- if __debug__:
- self._note("%s.release: success, value=%s",
- self, self.__value)
- self.__cond.notify()
- self.__cond.release()
+ """Release a semaphore, incrementing the internal counter by one.
+
+ When the counter is zero on entry and another thread is waiting for it
+ to become larger than zero again, wake up that thread.
+
+ """
+ with self.__cond:
+ self.__value = self.__value + 1
+ if __debug__:
+ self._note("%s.release: success, value=%s",
+ self, self.__value)
+ self.__cond.notify()
def __exit__(self, t, v, tb):
self.release()
def BoundedSemaphore(*args, **kwargs):
+ """A factory function that returns a new bounded semaphore.
+
+ A bounded semaphore checks to make sure its current value doesn't exceed its
+ initial value. If it does, ValueError is raised. In most situations
+ semaphores are used to guard resources with limited capacity.
+
+ If the semaphore is released too many times it's a sign of a bug. If not
+ given, value defaults to 1.
+
+ Like regular semaphores, bounded semaphores manage a counter representing
+ the number of release() calls minus the number of acquire() calls, plus an
+ initial value. The acquire() method blocks if necessary until it can return
+ without making the counter negative. If not given, value defaults to 1.
+
+ """
return _BoundedSemaphore(*args, **kwargs)
class _BoundedSemaphore(_Semaphore):
- """Semaphore that checks that # releases is <= # acquires"""
+ """A bounded semaphore checks to make sure its current value doesn't exceed
+ its initial value. If it does, ValueError is raised. In most situations
+ semaphores are used to guard resources with limited capacity.
+ """
+
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
def release(self):
- if self._Semaphore__value >= self._initial_value:
- raise ValueError, "Semaphore released too many times"
- return _Semaphore.release(self)
+ """Release a semaphore, incrementing the internal counter by one.
+
+ When the counter is zero on entry and another thread is waiting for it
+ to become larger than zero again, wake up that thread.
+
+ If the number of releases exceeds the number of acquires,
+ raise a ValueError.
+
+ """
+ with self._Semaphore__cond:
+ if self._Semaphore__value >= self._initial_value:
+ raise ValueError("Semaphore released too many times")
+ self._Semaphore__value += 1
+ self._Semaphore__cond.notify()
def Event(*args, **kwargs):
+ """A factory function that returns a new event.
+
+ Events manage a flag that can be set to true with the set() method and reset
+ to false with the clear() method. The wait() method blocks until the flag is
+ true.
+
+ """
return _Event(*args, **kwargs)
class _Event(_Verbose):
+ """A factory function that returns a new event object. An event manages a
+ flag that can be set to true with the set() method and reset to false
+ with the clear() method. The wait() method blocks until the flag is true.
+
+ """
# After Tim Peters' event class (without is_posted())
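The rewritten _Semaphore.acquire()/release() above and the new BoundedSemaphore.release() make over-releasing fail while the condition is held; a short sketch of the documented behaviour:

import threading

sem = threading.BoundedSemaphore(2)
sem.acquire()
sem.acquire()
assert sem.acquire(False) is False   # counter is 0 and we asked not to block
sem.release()
sem.release()
try:
    sem.release()                    # one release more than acquires
except ValueError as exc:
    print(exc)                       # "Semaphore released too many times"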
@@ -377,11 +567,18 @@ class _Event(_Verbose):
self.__cond.__init__()
def isSet(self):
+ 'Return true if and only if the internal flag is true.'
return self.__flag
is_set = isSet
def set(self):
+ """Set the internal flag to true.
+
+ All threads waiting for the flag to become true are awakened. Threads
+ that call wait() once the flag is true will not block at all.
+
+ """
self.__cond.acquire()
try:
self.__flag = True
@@ -390,6 +587,12 @@ class _Event(_Verbose):
self.__cond.release()
def clear(self):
+ """Reset the internal flag to false.
+
+ Subsequently, threads calling wait() will block until set() is called to
+ set the internal flag to true again.
+
+ """
self.__cond.acquire()
try:
self.__flag = False
@@ -397,6 +600,20 @@ class _Event(_Verbose):
self.__cond.release()
def wait(self, timeout=None):
+ """Block until the internal flag is true.
+
+ If the internal flag is true on entry, return immediately. Otherwise,
+ block until another thread calls set() to set the flag to true, or until
+ the optional timeout occurs.
+
+ When the timeout argument is present and not None, it should be a
+ floating point number specifying a timeout for the operation in seconds
+ (or fractions thereof).
+
+ This method returns the internal flag on exit, so it will always return
+ True except if a timeout is given and the operation times out.
+
+ """
self.__cond.acquire()
try:
if not self.__flag:
@@ -421,7 +638,11 @@ _limbo = {}
# Main class for threads
class Thread(_Verbose):
+ """A class that represents a thread of control.
+
+ This class can be safely subclassed in a limited fashion.
+ """
__initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
@@ -434,6 +655,27 @@ class Thread(_Verbose):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
+ """This constructor should always be called with keyword arguments. Arguments are:
+
+ *group* should be None; reserved for future extension when a ThreadGroup
+ class is implemented.
+
+ *target* is the callable object to be invoked by the run()
+ method. Defaults to None, meaning nothing is called.
+
+ *name* is the thread name. By default, a unique name is constructed of
+ the form "Thread-N" where N is a small decimal number.
+
+ *args* is the argument tuple for the target invocation. Defaults to ().
+
+ *kwargs* is a dictionary of keyword arguments for the target
+ invocation. Defaults to {}.
+
+ If a subclass overrides the constructor, it must make sure to invoke
+ the base class constructor (Thread.__init__()) before doing anything
+ else to the thread.
+
+"""
assert group is None, "group argument must be None for now"
_Verbose.__init__(self, verbose)
if kwargs is None:
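The constructor docstring above lists the keyword arguments; a small sketch, separate from the patch, showing them together with the join()/isAlive() idiom described further down (worker and its arguments are made up for illustration):

import threading

def worker(job, retries=3):
    print('processing %s (up to %d tries)' % (job, retries))

t = threading.Thread(target=worker, name='worker-1',
                     args=('job-42',), kwargs={'retries': 5})
t.start()
t.join(timeout=1.0)
if t.isAlive():                  # join() returns None, so poll the thread
    print('still running')       # to see whether the timeout expired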
@@ -482,6 +724,15 @@ class Thread(_Verbose):
return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)
def start(self):
+ """Start the thread's activity.
+
+ It must be called at most once per thread object. It arranges for the
+ object's run() method to be invoked in a separate thread of control.
+
+ This method will raise a RuntimeError if called more than once on the
+ same thread object.
+
+ """
if not self.__initialized:
raise RuntimeError("thread.__init__() not called")
if self.__started.is_set():
@@ -499,6 +750,14 @@ class Thread(_Verbose):
self.__started.wait()
def run(self):
+ """Method representing the thread's activity.
+
+ You may override this method in a subclass. The standard run() method
+ invokes the callable object passed to the object's constructor as the
+ target argument, if any, with sequential and keyword arguments taken
+ from the args and kwargs arguments, respectively.
+
+ """
try:
if self.__target:
self.__target(*self.__args, **self.__kwargs)
@@ -605,6 +864,10 @@ class Thread(_Verbose):
pass
def __stop(self):
+ # DummyThreads delete self.__block, but they have no waiters to
+ # notify anyway (join() is forbidden on them).
+ if not hasattr(self, '_Thread__block'):
+ return
self.__block.acquire()
self.__stopped = True
self.__block.notify_all()
@@ -646,6 +909,29 @@ class Thread(_Verbose):
raise
def join(self, timeout=None):
+ """Wait until the thread terminates.
+
+ This blocks the calling thread until the thread whose join() method is
+ called terminates -- either normally or through an unhandled exception
+ or until the optional timeout occurs.
+
+ When the timeout argument is present and not None, it should be a
+ floating point number specifying a timeout for the operation in seconds
+ (or fractions thereof). As join() always returns None, you must call
+ isAlive() after join() to decide whether a timeout happened -- if the
+ thread is still alive, the join() call timed out.
+
+ When the timeout argument is not present or None, the operation will
+ block until the thread terminates.
+
+ A thread can be join()ed many times.
+
+ join() raises a RuntimeError if an attempt is made to join the current
+ thread as that would cause a deadlock. It is also an error to join() a
+ thread before it has been started and attempts to do so raises the same
+ exception.
+
+ """
if not self.__initialized:
raise RuntimeError("Thread.__init__() not called")
if not self.__started.is_set():
@@ -680,6 +966,12 @@ class Thread(_Verbose):
@property
def name(self):
+ """A string used for identification purposes only.
+
+ It has no semantics. Multiple threads may be given the same name. The
+ initial name is set by the constructor.
+
+ """
assert self.__initialized, "Thread.__init__() not called"
return self.__name
@@ -690,10 +982,24 @@ class Thread(_Verbose):
@property
def ident(self):
+ """Thread identifier of this thread or None if it has not been started.
+
+ This is a nonzero integer. See the thread.get_ident() function. Thread
+ identifiers may be recycled when a thread exits and another thread is
+ created. The identifier is available even after the thread has exited.
+
+ """
assert self.__initialized, "Thread.__init__() not called"
return self.__ident
def isAlive(self):
+ """Return whether the thread is alive.
+
+ This method returns True just before the run() method starts until just
+ after the run() method terminates. The module function enumerate()
+ returns a list of all alive threads.
+
+ """
assert self.__initialized, "Thread.__init__() not called"
return self.__started.is_set() and not self.__stopped
@@ -701,6 +1007,17 @@ class Thread(_Verbose):
@property
def daemon(self):
+ """A boolean value indicating whether this thread is a daemon thread (True) or not (False).
+
+ This must be set before start() is called, otherwise RuntimeError is
+ raised. Its initial value is inherited from the creating thread; the
+ main thread is not a daemon thread and therefore all threads created in
+ the main thread default to daemon = False.
+
+ The entire Python program exits when no alive non-daemon threads are
+ left.
+
+ """
assert self.__initialized, "Thread.__init__() not called"
return self.__daemonic
@@ -727,14 +1044,24 @@ class Thread(_Verbose):
# The timer class was contributed by Itamar Shtull-Trauring
def Timer(*args, **kwargs):
+ """Factory function to create a Timer object.
+
+ Timers call a function after a specified number of seconds:
+
+ t = Timer(30.0, f, args=[], kwargs={})
+ t.start()
+ t.cancel() # stop the timer's action if it's still waiting
+
+ """
return _Timer(*args, **kwargs)
class _Timer(Thread):
"""Call a function after a specified number of seconds:
- t = Timer(30.0, f, args=[], kwargs={})
- t.start()
- t.cancel() # stop the timer's action if it's still waiting
+ t = Timer(30.0, f, args=[], kwargs={})
+ t.start()
+ t.cancel() # stop the timer's action if it's still waiting
+
"""
def __init__(self, interval, function, args=[], kwargs={}):
@@ -823,6 +1150,12 @@ class _DummyThread(Thread):
# Global API functions
def currentThread():
+ """Return the current Thread object, corresponding to the caller's thread of control.
+
+ If the caller's thread of control was not created through the threading
+ module, a dummy thread object with limited functionality is returned.
+
+ """
try:
return _active[_get_ident()]
except KeyError:
@@ -832,6 +1165,12 @@ def currentThread():
current_thread = currentThread
def activeCount():
+ """Return the number of Thread objects currently alive.
+
+ The returned count is equal to the length of the list returned by
+ enumerate().
+
+ """
with _active_limbo_lock:
return len(_active) + len(_limbo)
@@ -842,6 +1181,13 @@ def _enumerate():
return _active.values() + _limbo.values()
def enumerate():
+ """Return a list of all Thread objects currently alive.
+
+ The list includes daemonic threads, dummy thread objects created by
+ current_thread(), and the main thread. It excludes terminated threads and
+ threads that have not yet been started.
+
+ """
with _active_limbo_lock:
return _active.values() + _limbo.values()
@@ -876,7 +1222,7 @@ def _after_fork():
new_active = {}
current = current_thread()
with _active_limbo_lock:
- for thread in _active.itervalues():
+ for thread in _enumerate():
# Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them.
if hasattr(thread, '_reset_internal_locks'):
@@ -909,7 +1255,7 @@ def _test():
self.rc = Condition(self.mon)
self.wc = Condition(self.mon)
self.limit = limit
- self.queue = deque()
+ self.queue = _deque()
def put(self, item):
self.mon.acquire()
diff --git a/Lib/timeit.py b/Lib/timeit.py
index da9e819..da9e819 100644..100755
--- a/Lib/timeit.py
+++ b/Lib/timeit.py
diff --git a/Lib/token.py b/Lib/token.py
index 8d5cdaa..45825b4 100755..100644
--- a/Lib/token.py
+++ b/Lib/token.py
@@ -1,5 +1,3 @@
-#! /usr/bin/env python
-
"""Token constants (from "token.h")."""
# This file is automatically generated; please don't muck it up!
@@ -7,7 +5,7 @@
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
-# python Lib/token.py
+# ./python Lib/token.py
#--start constants--
ENDMARKER = 0
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
index ae3de54..661ddeb 100644
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -26,6 +26,7 @@ __author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
'Skip Montanaro, Raymond Hettinger')
+from itertools import chain
import string, re
from token import *
@@ -70,10 +71,10 @@ Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
-Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
+Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""')
# Single-line ' or " string.
-String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
- r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+String = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+ r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
@@ -91,11 +92,11 @@ PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
-ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
- r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+ r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
-PseudoExtras = group(r'\\\r?\n', Comment, Triple)
+PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = map(
@@ -184,17 +185,26 @@ class Untokenizer:
def add_whitespace(self, start):
row, col = start
- assert row <= self.prev_row
+ if row < self.prev_row or row == self.prev_row and col < self.prev_col:
+ raise ValueError("start ({},{}) precedes previous end ({},{})"
+ .format(row, col, self.prev_row, self.prev_col))
+ row_offset = row - self.prev_row
+ if row_offset:
+ self.tokens.append("\\\n" * row_offset)
+ self.prev_col = 0
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
- for t in iterable:
+ it = iter(iterable)
+ for t in it:
if len(t) == 2:
- self.compat(t, iterable)
+ self.compat(t, it)
break
tok_type, token, start, end, line = t
+ if tok_type == ENDMARKER:
+ break
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
@@ -204,16 +214,12 @@ class Untokenizer:
return "".join(self.tokens)
def compat(self, token, iterable):
- startline = False
indents = []
toks_append = self.tokens.append
- toknum, tokval = token
- if toknum in (NAME, NUMBER):
- tokval += ' '
- if toknum in (NEWLINE, NL):
- startline = True
+ startline = token[0] in (NEWLINE, NL)
prevstring = False
- for tok in iterable:
+
+ for tok in chain([token], iterable):
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER):
@@ -263,7 +269,7 @@ def untokenize(iterable):
def generate_tokens(readline):
"""
- The generate_tokens() generator requires one argment, readline, which
+ The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
@@ -362,6 +368,8 @@ def generate_tokens(readline):
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
+ if start == end:
+ continue
token, initial = line[start:end], line[start]
if initial in numchars or \
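The Untokenizer fixes above (skipping ENDMARKER, emitting backslash continuations for skipped rows, and validating token positions) restore the exact round-trip for full five-tuples. A minimal check, separate from the patch, using Python 2's StringIO for the readline callable:

import tokenize
from StringIO import StringIO

src = 'x = 1\nif x:\n    y = x + 1\n'
tokens = list(tokenize.generate_tokens(StringIO(src).readline))
assert tokenize.untokenize(tokens) == src   # full token stream round-trips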
diff --git a/Lib/trace.py b/Lib/trace.py
index 38a13e2..38a13e2 100644..100755
--- a/Lib/trace.py
+++ b/Lib/trace.py
diff --git a/Lib/traceback.py b/Lib/traceback.py
index 8cb9e28..0a3dd11 100644
--- a/Lib/traceback.py
+++ b/Lib/traceback.py
@@ -72,7 +72,7 @@ def print_tb(tb, limit=None, file=None):
n = n+1
def format_tb(tb, limit = None):
- """A shorthand for 'format_list(extract_stack(f, limit))."""
+ """A shorthand for 'format_list(extract_tb(tb, limit))'."""
return format_list(extract_tb(tb, limit))
def extract_tb(tb, limit = None):
@@ -166,7 +166,7 @@ def format_exception_only(etype, value):
# >>> raise string1, string2 # deprecated
#
# Clear these out first because issubtype(string1, SyntaxError)
- # would throw another exception and mask the original problem.
+ # would raise another exception and mask the original problem.
if (isinstance(etype, BaseException) or
isinstance(etype, types.InstanceType) or
etype is None or type(etype) is str):
@@ -189,11 +189,12 @@ def format_exception_only(etype, value):
if badline is not None:
lines.append(' %s\n' % badline.strip())
if offset is not None:
- caretspace = badline.rstrip('\n')[:offset].lstrip()
+ caretspace = badline.rstrip('\n')
+ offset = min(len(caretspace), offset) - 1
+ caretspace = caretspace[:offset].lstrip()
# non-space whitespace (likes tabs) must be kept for alignment
caretspace = ((c.isspace() and c or ' ') for c in caretspace)
- # only three spaces to account for offset1 == pos 0
- lines.append(' %s^\n' % ''.join(caretspace))
+ lines.append(' %s^\n' % ''.join(caretspace))
value = msg
lines.append(_format_final_exc_line(stype, value))
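The caret-placement fix above clamps the reported offset to the length of the offending line; a quick way, outside the patch, to see the formatted output it affects:

import traceback

try:
    compile('x = (1 +', '<demo>', 'exec')
except SyntaxError as exc:
    # With the clamped offset the caret lands under the last character of
    # the bad line rather than past its end.
    print(''.join(traceback.format_exception_only(SyntaxError, exc)))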
diff --git a/Lib/unittest/__init__.py b/Lib/unittest/__init__.py
index 201a3f0..a5d50af 100644
--- a/Lib/unittest/__init__.py
+++ b/Lib/unittest/__init__.py
@@ -11,7 +11,7 @@ Simple usage:
import unittest
- class IntegerArithmenticTestCase(unittest.TestCase):
+ class IntegerArithmeticTestCase(unittest.TestCase):
def testAdd(self): ## test method names begin 'test*'
self.assertEqual((1 + 2), 3)
self.assertEqual(0 + 1, 1)
diff --git a/Lib/unittest/case.py b/Lib/unittest/case.py
index 4b3839e..644fe5b 100644
--- a/Lib/unittest/case.py
+++ b/Lib/unittest/case.py
@@ -6,6 +6,7 @@ import functools
import difflib
import pprint
import re
+import types
import warnings
from . import result
@@ -25,7 +26,7 @@ class SkipTest(Exception):
"""
Raise this exception in a test to skip it.
- Usually you can use TestResult.skip() or one of the skipping decorators
+ Usually you can use TestCase.skipTest() or one of the skipping decorators
instead of raising this directly.
"""
pass
@@ -55,7 +56,7 @@ def skip(reason):
Unconditionally skip a test.
"""
def decorator(test_item):
- if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
+ if not isinstance(test_item, (type, types.ClassType)):
@functools.wraps(test_item)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
@@ -121,8 +122,6 @@ class _AssertRaisesContext(object):
return True
expected_regexp = self.expected_regexp
- if isinstance(expected_regexp, basestring):
- expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
@@ -149,24 +148,23 @@ class TestCase(object):
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
- """
- # This attribute determines which exception will be raised when
- # the instance's assertion methods fail; test methods raising this
- # exception will be deemed to have 'failed' rather than 'errored'
+ When subclassing TestCase, you can set these attributes:
+ * failureException: determines which exception will be raised when
+ the instance's assertion methods fail; test methods raising this
+ exception will be deemed to have 'failed' rather than 'errored'.
+ * longMessage: determines whether long messages (including repr of
+ objects used in assert methods) will be printed on failure in *addition*
+ to any explicit message passed.
+ * maxDiff: sets the maximum length of a diff in failure messages
+ by assert methods using difflib. It is looked up as an instance
+ attribute so can be configured by individual tests if required.
+ """
failureException = AssertionError
- # This attribute determines whether long messages (including repr of
- # objects used in assert methods) will be printed on failure in *addition*
- # to any explicit message passed.
-
longMessage = False
- # This attribute sets the maximum length of a diff in failure messages
- # by assert methods using difflib. It is looked up as an instance attribute
- # so can be configured by individual tests if required.
-
maxDiff = 80*8
# If a string is longer than _diffThreshold, use normal comparison instead
@@ -201,7 +199,11 @@ class TestCase(object):
self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
self.addTypeEqualityFunc(set, 'assertSetEqual')
self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
- self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual')
+ try:
+ self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual')
+ except NameError:
+ # No unicode support in this build
+ pass
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
@@ -442,10 +444,10 @@ class TestCase(object):
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
- """Fail unless an exception of class excClass is thrown
+ """Fail unless an exception of class excClass is raised
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
- thrown, it will not be caught, and the test case will be
+ raised, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
@@ -511,7 +513,7 @@ class TestCase(object):
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
- """Fail if the two objects are equal as determined by the '=='
+ """Fail if the two objects are equal as determined by the '!='
operator.
"""
if not first != second:
@@ -871,7 +873,7 @@ class TestCase(object):
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
- first_seq, second_seq = list(actual_seq), list(expected_seq)
+ first_seq, second_seq = list(expected_seq), list(actual_seq)
with warnings.catch_warnings():
if sys.py3kwarning:
# Silence Py3k warning raised during the sorting
@@ -982,6 +984,8 @@ class TestCase(object):
args: Extra args.
kwargs: Extra kwargs.
"""
+ if expected_regexp is not None:
+ expected_regexp = re.compile(expected_regexp)
context = _AssertRaisesContext(expected_exception, self, expected_regexp)
if callable_obj is None:
return context
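With the expected pattern now compiled once in assertRaisesRegexp() rather than inside the context manager, plain strings and pre-compiled patterns behave identically; a short sketch, separate from the patch:

import re
import unittest

class RegexpDemo(unittest.TestCase):
    def test_plain_string_pattern(self):
        with self.assertRaisesRegexp(ValueError, r'^invalid literal'):
            int('not a number')

    def test_precompiled_pattern(self):
        with self.assertRaisesRegexp(ValueError, re.compile('invalid literal')):
            int('not a number')

if __name__ == '__main__':
    unittest.main()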
diff --git a/Lib/unittest/loader.py b/Lib/unittest/loader.py
index e88f536..9163a1a 100644
--- a/Lib/unittest/loader.py
+++ b/Lib/unittest/loader.py
@@ -106,7 +106,9 @@ class TestLoader(object):
elif (isinstance(obj, types.UnboundMethodType) and
isinstance(parent, type) and
issubclass(parent, case.TestCase)):
- return self.suiteClass([parent(obj.__name__)])
+ name = parts[-1]
+ inst = parent(name)
+ return self.suiteClass([inst])
elif isinstance(obj, suite.TestSuite):
return obj
elif hasattr(obj, '__call__'):
@@ -254,8 +256,8 @@ class TestLoader(object):
yield _make_failed_import_test(name, self.suiteClass)
else:
mod_file = os.path.abspath(getattr(module, '__file__', full_path))
- realpath = os.path.splitext(mod_file)[0]
- fullpath_noext = os.path.splitext(full_path)[0]
+ realpath = os.path.splitext(os.path.realpath(mod_file))[0]
+ fullpath_noext = os.path.splitext(os.path.realpath(full_path))[0]
if realpath.lower() != fullpath_noext.lower():
module_dir = os.path.dirname(realpath)
mod_name = os.path.splitext(os.path.basename(full_path))[0]
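
Roughly, the clash check now compares symlink-resolved paths, so a test module reached through a symlink is no longer reported as a duplicate of itself. A stand-alone sketch of the comparison (the helper name is invented for illustration):

import os.path

def same_module_on_disk(mod_file, full_path):
    # Resolve symlinks before comparing, mirroring the patched loader:
    # 'bar/foo.py' reached via a symlink to 'foo/foo.py' should not be
    # reported as a conflicting module named 'foo'.
    realpath = os.path.splitext(os.path.realpath(mod_file))[0]
    fullpath_noext = os.path.splitext(os.path.realpath(full_path))[0]
    return realpath.lower() == fullpath_noext.lower()
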
diff --git a/Lib/unittest/main.py b/Lib/unittest/main.py
index 3396bb0..b253679 100644
--- a/Lib/unittest/main.py
+++ b/Lib/unittest/main.py
@@ -157,7 +157,10 @@ class TestProgram(object):
self.test = self.testLoader.loadTestsFromNames(self.testNames,
self.module)
- def _do_discovery(self, argv, Loader=loader.TestLoader):
+ def _do_discovery(self, argv, Loader=None):
+ if Loader is None:
+ Loader = lambda: self.testLoader
+
# handle command line args for test discovery
self.progName = '%s discover' % self.progName
import optparse
diff --git a/Lib/unittest/runner.py b/Lib/unittest/runner.py
index 1716891..7632fe9 100644
--- a/Lib/unittest/runner.py
+++ b/Lib/unittest/runner.py
@@ -34,7 +34,7 @@ class TextTestResult(result.TestResult):
separator2 = '-' * 70
def __init__(self, stream, descriptions, verbosity):
- super(TextTestResult, self).__init__()
+ super(TextTestResult, self).__init__(stream, descriptions, verbosity)
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
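
Forwarding the constructor arguments makes TextTestResult usable in cooperative multiple inheritance; previously the bare super().__init__() call swallowed them and sibling classes never saw the triple. A sketch under that assumption (class names are illustrative):

import unittest

class CountingResult(unittest.TestResult):
    # A cooperative sibling: accepts and forwards the same signature.
    def __init__(self, stream, descriptions, verbosity):
        super(CountingResult, self).__init__(stream, descriptions, verbosity)
        self.verbosity = verbosity

class CountingTextResult(unittest.TextTestResult, CountingResult):
    pass

# With the patched runner.py every __init__ in the MRO now receives the
# (stream, descriptions, verbosity) arguments.
result = CountingTextResult(None, None, 1)
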
diff --git a/Lib/unittest/signals.py b/Lib/unittest/signals.py
index fc31043..e6a5fc5 100644
--- a/Lib/unittest/signals.py
+++ b/Lib/unittest/signals.py
@@ -9,6 +9,20 @@ __unittest = True
class _InterruptHandler(object):
def __init__(self, default_handler):
self.called = False
+ self.original_handler = default_handler
+ if isinstance(default_handler, int):
+ if default_handler == signal.SIG_DFL:
+ # Pretend it's signal.default_int_handler instead.
+ default_handler = signal.default_int_handler
+ elif default_handler == signal.SIG_IGN:
+ # Not quite the same thing as SIG_IGN, but the closest we
+ # can make it: do nothing.
+ def default_handler(unused_signum, unused_frame):
+ pass
+ else:
+ raise TypeError("expected SIGINT signal handler to be "
+ "signal.SIG_IGN, signal.SIG_DFL, or a "
+ "callable object")
self.default_handler = default_handler
def __call__(self, signum, frame):
@@ -54,4 +68,4 @@ def removeHandler(method=None):
global _interrupt_handler
if _interrupt_handler is not None:
- signal.signal(signal.SIGINT, _interrupt_handler.default_handler)
+ signal.signal(signal.SIGINT, _interrupt_handler.original_handler)
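
The constructor now accepts the integer dispositions SIG_DFL and SIG_IGN as well as a callable, while keeping the original value so removeHandler() can restore exactly what was installed. A stand-alone sketch of the normalisation step (the function name is invented for illustration):

import signal

def normalize_sigint_handler(handler):
    original = handler
    if isinstance(handler, int):
        if handler == signal.SIG_DFL:
            # Treat the default disposition as the interpreter's own
            # KeyboardInterrupt-raising handler.
            handler = signal.default_int_handler
        elif handler == signal.SIG_IGN:
            # Closest callable equivalent of "ignore": do nothing.
            def handler(signum, frame):
                pass
        else:
            raise TypeError("expected SIG_DFL, SIG_IGN or a callable")
    return original, handler

original, callable_handler = normalize_sigint_handler(signal.getsignal(signal.SIGINT))
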
diff --git a/Lib/unittest/test/test_assertions.py b/Lib/unittest/test/test_assertions.py
index e1ba614..e8f0f64 100644
--- a/Lib/unittest/test/test_assertions.py
+++ b/Lib/unittest/test/test_assertions.py
@@ -33,6 +33,10 @@ class Test_Assertions(unittest.TestCase):
self.assertNotAlmostEqual(1.1, 1.0, delta=0.05)
self.assertNotAlmostEqual(1.0, 1.1, delta=0.05)
+ self.assertAlmostEqual(1.0, 1.0, delta=0.5)
+ self.assertRaises(self.failureException, self.assertNotAlmostEqual,
+ 1.0, 1.0, delta=0.5)
+
self.assertRaises(self.failureException, self.assertAlmostEqual,
1.1, 1.0, delta=0.05)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
diff --git a/Lib/unittest/test/test_break.py b/Lib/unittest/test/test_break.py
index 5600011..dab91c1 100644
--- a/Lib/unittest/test/test_break.py
+++ b/Lib/unittest/test/test_break.py
@@ -15,9 +15,12 @@ import unittest
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreak(unittest.TestCase):
+ int_handler = None
def setUp(self):
self._default_handler = signal.getsignal(signal.SIGINT)
+ if self.int_handler is not None:
+ signal.signal(signal.SIGINT, self.int_handler)
def tearDown(self):
signal.signal(signal.SIGINT, self._default_handler)
@@ -74,6 +77,10 @@ class TestBreak(unittest.TestCase):
def testSecondInterrupt(self):
+ # Can't use skipIf decorator because the signal handler may have
+ # been changed after defining this method.
+ if signal.getsignal(signal.SIGINT) == signal.SIG_IGN:
+ self.skipTest("test requires SIGINT to not be ignored")
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
@@ -123,6 +130,10 @@ class TestBreak(unittest.TestCase):
def testHandlerReplacedButCalled(self):
+ # Can't use skipIf decorator because the signal handler may have
+ # been changed after defining this method.
+ if signal.getsignal(signal.SIGINT) == signal.SIG_IGN:
+ self.skipTest("test requires SIGINT to not be ignored")
# If our handler has been replaced (is no longer installed) but is
# called by the *new* handler, then it isn't safe to delay the
# SIGINT and we should immediately delegate to the default handler
@@ -250,3 +261,24 @@ class TestBreak(unittest.TestCase):
test()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
+
+@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
+@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
+@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
+ "if threads have been used")
+class TestBreakDefaultIntHandler(TestBreak):
+ int_handler = signal.default_int_handler
+
+@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
+@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
+@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
+ "if threads have been used")
+class TestBreakSignalIgnored(TestBreak):
+ int_handler = signal.SIG_IGN
+
+@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
+@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
+@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
+ "if threads have been used")
+class TestBreakSignalDefault(TestBreak):
+ int_handler = signal.SIG_DFL
diff --git a/Lib/unittest/test/test_case.py b/Lib/unittest/test/test_case.py
index e92b019..b69fdb3 100644
--- a/Lib/unittest/test/test_case.py
+++ b/Lib/unittest/test/test_case.py
@@ -293,7 +293,7 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
def test(self):
pass
- self.assertTrue(Foo('test').failureException is AssertionError)
+ self.assertIs(Foo('test').failureException, AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
@@ -311,7 +311,7 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
failureException = RuntimeError
- self.assertTrue(Foo('test').failureException is RuntimeError)
+ self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
@@ -334,7 +334,7 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
failureException = RuntimeError
- self.assertTrue(Foo('test').failureException is RuntimeError)
+ self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
@@ -607,7 +607,7 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
- self.assertTrue(len(msg) < len(diff))
+ self.assertLess(len(msg), len(diff))
self.assertIn(omitted, msg)
self.maxDiff = len(diff) * 2
@@ -617,7 +617,7 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
- self.assertTrue(len(msg) > len(diff))
+ self.assertGreater(len(msg), len(diff))
self.assertNotIn(omitted, msg)
self.maxDiff = None
@@ -627,7 +627,7 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
- self.assertTrue(len(msg) > len(diff))
+ self.assertGreater(len(msg), len(diff))
self.assertNotIn(omitted, msg)
def testTruncateMessage(self):
@@ -979,6 +979,12 @@ test case
self.assertRaisesRegexp, Exception, u'x',
lambda: None)
+ def testAssertRaisesRegexpInvalidRegexp(self):
+ # Issue 20145.
+ class MyExc(Exception):
+ pass
+ self.assertRaises(TypeError, self.assertRaisesRegexp, MyExc, lambda: True)
+
def testAssertRaisesRegexpMismatch(self):
def Stub():
raise Exception('Unexpected')
diff --git a/Lib/unittest/test/test_discovery.py b/Lib/unittest/test/test_discovery.py
index 1155de1..e0277d5 100644
--- a/Lib/unittest/test/test_discovery.py
+++ b/Lib/unittest/test/test_discovery.py
@@ -220,12 +220,26 @@ class TestDiscovery(unittest.TestCase):
program = object.__new__(unittest.TestProgram)
program.usageExit = usageExit
+ program.testLoader = None
with self.assertRaises(Stop):
# too many args
program._do_discovery(['one', 'two', 'three', 'four'])
+ def test_command_line_handling_do_discovery_uses_default_loader(self):
+ program = object.__new__(unittest.TestProgram)
+
+ class Loader(object):
+ args = []
+ def discover(self, start_dir, pattern, top_level_dir):
+ self.args.append((start_dir, pattern, top_level_dir))
+ return 'tests'
+
+ program.testLoader = Loader()
+ program._do_discovery(['-v'])
+ self.assertEqual(Loader.args, [('.', 'test*.py', None)])
+
def test_command_line_handling_do_discovery_calls_loader(self):
program = object.__new__(unittest.TestProgram)
@@ -300,7 +314,7 @@ class TestDiscovery(unittest.TestCase):
self.assertTrue(program.failfast)
self.assertTrue(program.catchbreak)
- def test_detect_module_clash(self):
+ def setup_module_clash(self):
class Module(object):
__file__ = 'bar/foo.py'
sys.modules['foo'] = Module
@@ -327,7 +341,10 @@ class TestDiscovery(unittest.TestCase):
os.listdir = listdir
os.path.isfile = isfile
os.path.isdir = isdir
+ return full_path
+ def test_detect_module_clash(self):
+ full_path = self.setup_module_clash()
loader = unittest.TestLoader()
mod_dir = os.path.abspath('bar')
@@ -340,6 +357,25 @@ class TestDiscovery(unittest.TestCase):
)
self.assertEqual(sys.path[0], full_path)
+ def test_module_symlink_ok(self):
+ full_path = self.setup_module_clash()
+
+ original_realpath = os.path.realpath
+
+ mod_dir = os.path.abspath('bar')
+ expected_dir = os.path.abspath('foo')
+
+ def cleanup():
+ os.path.realpath = original_realpath
+ self.addCleanup(cleanup)
+
+ def realpath(path):
+ if path == os.path.join(mod_dir, 'foo.py'):
+ return os.path.join(expected_dir, 'foo.py')
+ return path
+ os.path.realpath = realpath
+ loader = unittest.TestLoader()
+ loader.discover(start_dir='foo', pattern='foo.py')
def test_discovery_from_dotted_path(self):
loader = unittest.TestLoader()
diff --git a/Lib/unittest/test/test_loader.py b/Lib/unittest/test/test_loader.py
index 3544a20..68e871c 100644
--- a/Lib/unittest/test/test_loader.py
+++ b/Lib/unittest/test/test_loader.py
@@ -324,7 +324,7 @@ class Test_TestLoader(unittest.TestCase):
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
- # XXX Accepts the not-a-module object, ignorning the object's type
+ # XXX Accepts the not-a-module object, ignoring the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
@@ -1279,8 +1279,23 @@ class Test_TestLoader(unittest.TestCase):
# "The default value is the TestSuite class"
def test_suiteClass__default_value(self):
loader = unittest.TestLoader()
- self.assertTrue(loader.suiteClass is unittest.TestSuite)
+ self.assertIs(loader.suiteClass, unittest.TestSuite)
+ # Make sure the dotted name resolution works even if the actual
+ # function doesn't have the same name as is used to find it.
+ def test_loadTestsFromName__function_with_different_name_than_method(self):
+ # lambdas have the name '<lambda>'.
+ m = types.ModuleType('m')
+ class MyTestCase(unittest.TestCase):
+ test = lambda: 1
+ m.testcase_1 = MyTestCase
+
+ loader = unittest.TestLoader()
+ suite = loader.loadTestsFromNames(['testcase_1.test'], m)
+ self.assertIsInstance(suite, loader.suiteClass)
+
+ ref_suite = unittest.TestSuite([MyTestCase('test')])
+ self.assertEqual(list(suite), [ref_suite])
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/unittest/test/test_result.py b/Lib/unittest/test/test_result.py
index eb68c1d..2a596cd 100644
--- a/Lib/unittest/test/test_result.py
+++ b/Lib/unittest/test/test_result.py
@@ -176,7 +176,7 @@ class Test_TestResult(unittest.TestCase):
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.failures[0]
- self.assertTrue(test_case is test)
+ self.assertIs(test_case, test)
self.assertIsInstance(formatted_exc, str)
# "addError(test, err)"
@@ -224,7 +224,7 @@ class Test_TestResult(unittest.TestCase):
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.errors[0]
- self.assertTrue(test_case is test)
+ self.assertIs(test_case, test)
self.assertIsInstance(formatted_exc, str)
def testGetDescriptionWithoutDocstring(self):
diff --git a/Lib/unittest/test/test_runner.py b/Lib/unittest/test/test_runner.py
index 6df6a9c..3b661d3 100644
--- a/Lib/unittest/test/test_runner.py
+++ b/Lib/unittest/test/test_runner.py
@@ -149,6 +149,19 @@ class Test_TextTestRunner(unittest.TestCase):
self.assertEqual(runner.resultclass, unittest.TextTestResult)
+ def test_multiple_inheritance(self):
+ class AResult(unittest.TestResult):
+ def __init__(self, stream, descriptions, verbosity):
+ super(AResult, self).__init__(stream, descriptions, verbosity)
+
+ class ATextResult(unittest.TextTestResult, AResult):
+ pass
+
+ # This used to raise an exception due to TextTestResult not passing
+ # on arguments in its __init__ super call
+ ATextResult(None, None, 1)
+
+
def testBufferAndFailfast(self):
class Test(unittest.TestCase):
def testFoo(self):
diff --git a/Lib/unittest/test/test_skipping.py b/Lib/unittest/test/test_skipping.py
index 05958d6..d6639d1 100644
--- a/Lib/unittest/test/test_skipping.py
+++ b/Lib/unittest/test/test_skipping.py
@@ -66,6 +66,36 @@ class Test_TestSkipping(unittest.TestCase):
self.assertEqual(result.skipped, [(test, "testing")])
self.assertEqual(record, [])
+ def test_skip_non_unittest_class_old_style(self):
+ @unittest.skip("testing")
+ class Mixin:
+ def test_1(self):
+ record.append(1)
+ class Foo(Mixin, unittest.TestCase):
+ pass
+ record = []
+ result = unittest.TestResult()
+ test = Foo("test_1")
+ suite = unittest.TestSuite([test])
+ suite.run(result)
+ self.assertEqual(result.skipped, [(test, "testing")])
+ self.assertEqual(record, [])
+
+ def test_skip_non_unittest_class_new_style(self):
+ @unittest.skip("testing")
+ class Mixin(object):
+ def test_1(self):
+ record.append(1)
+ class Foo(Mixin, unittest.TestCase):
+ pass
+ record = []
+ result = unittest.TestResult()
+ test = Foo("test_1")
+ suite = unittest.TestSuite([test])
+ suite.run(result)
+ self.assertEqual(result.skipped, [(test, "testing")])
+ self.assertEqual(record, [])
+
def test_expected_failure(self):
class Foo(unittest.TestCase):
@unittest.expectedFailure
diff --git a/Lib/urllib.py b/Lib/urllib.py
index a73c5d7..ac5d797 100644
--- a/Lib/urllib.py
+++ b/Lib/urllib.py
@@ -28,6 +28,7 @@ import os
import time
import sys
import base64
+import re
from urlparse import urljoin as basejoin
@@ -818,7 +819,10 @@ def thishost():
"""Return the IP address of the current host."""
global _thishost
if _thishost is None:
- _thishost = socket.gethostbyname(socket.gethostname())
+ try:
+ _thishost = socket.gethostbyname(socket.gethostname())
+ except socket.gaierror:
+ _thishost = socket.gethostbyname('localhost')
return _thishost
_ftperrors = None
@@ -869,8 +873,8 @@ class ftpwrapper:
self.ftp = ftplib.FTP()
self.ftp.connect(self.host, self.port, self.timeout)
self.ftp.login(self.user, self.passwd)
- for dir in self.dirs:
- self.ftp.cwd(dir)
+ _target = '/'.join(self.dirs)
+ self.ftp.cwd(_target)
def retrfile(self, file, type):
import ftplib
@@ -980,11 +984,11 @@ class addclosehook(addbase):
self.hookargs = hookargs
def close(self):
- addbase.close(self)
if self.closehook:
self.closehook(*self.hookargs)
self.closehook = None
self.hookargs = None
+ addbase.close(self)
class addinfo(addbase):
"""class to add an info() method to an open file."""
@@ -1121,10 +1125,13 @@ def splitport(host):
global _portprog
if _portprog is None:
import re
- _portprog = re.compile('^(.*):([0-9]+)$')
+ _portprog = re.compile('^(.*):([0-9]*)$')
match = _portprog.match(host)
- if match: return match.group(1, 2)
+ if match:
+ host, port = match.groups()
+ if port:
+ return host, port
return host, None
_nportprog = None
@@ -1141,12 +1148,12 @@ def splitnport(host, defport=-1):
match = _nportprog.match(host)
if match:
host, port = match.group(1, 2)
- try:
- if not port: raise ValueError, "no digits"
- nport = int(port)
- except ValueError:
- nport = None
- return host, nport
+ if port:
+ try:
+ nport = int(port)
+ except ValueError:
+ nport = None
+ return host, nport
return host, defport
_queryprog = None
@@ -1198,22 +1205,35 @@ def splitvalue(attr):
_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a + b, chr(int(a + b, 16)))
for a in _hexdig for b in _hexdig)
+_asciire = re.compile('([\x00-\x7f]+)')
def unquote(s):
"""unquote('abc%20def') -> 'abc def'."""
- res = s.split('%')
+ if _is_unicode(s):
+ if '%' not in s:
+ return s
+ bits = _asciire.split(s)
+ res = [bits[0]]
+ append = res.append
+ for i in range(1, len(bits), 2):
+ append(unquote(str(bits[i])).decode('latin1'))
+ append(bits[i + 1])
+ return ''.join(res)
+
+ bits = s.split('%')
# fastpath
- if len(res) == 1:
+ if len(bits) == 1:
return s
- s = res[0]
- for item in res[1:]:
+ res = [bits[0]]
+ append = res.append
+ for item in bits[1:]:
try:
- s += _hextochr[item[:2]] + item[2:]
+ append(_hextochr[item[:2]])
+ append(item[2:])
except KeyError:
- s += '%' + item
- except UnicodeDecodeError:
- s += unichr(int(item[:2], 16)) + item[2:]
- return s
+ append('%')
+ append(item)
+ return ''.join(res)
def unquote_plus(s):
"""unquote('%7e/abc+def') -> '~/abc def'"""
diff --git a/Lib/urllib2.py b/Lib/urllib2.py
index 5471acd..aadeb73 100644
--- a/Lib/urllib2.py
+++ b/Lib/urllib2.py
@@ -102,6 +102,7 @@ import sys
import time
import urlparse
import bisect
+import warnings
try:
from cStringIO import StringIO
@@ -109,7 +110,7 @@ except ImportError:
from StringIO import StringIO
from urllib import (unwrap, unquote, splittype, splithost, quote,
- addinfourl, splitport, splittag,
+ addinfourl, splitport, splittag, toBytes,
splitattr, ftpwrapper, splituser, splitpasswd, splitvalue)
# support for FileHandler, proxies via environment variables
@@ -172,6 +173,9 @@ class HTTPError(URLError, addinfourl):
def reason(self):
return self.msg
+ def info(self):
+ return self.hdrs
+
# copied from cookielib.py
_cut_port_re = re.compile(r":\d+$")
def request_host(request):
@@ -828,7 +832,7 @@ class AbstractBasicAuthHandler:
# allow for double- and single-quoted realm values
# (single quotes are a violation of the RFC, but appear in the wild)
rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
- 'realm=(["\'])(.*?)\\2', re.I)
+ 'realm=(["\']?)([^"\']*)\\2', re.I)
# XXX could pre-emptively send auth info already accepted (RFC 2617,
# end of section 2, and section 1.2 immediately after "credentials"
@@ -861,6 +865,9 @@ class AbstractBasicAuthHandler:
mo = AbstractBasicAuthHandler.rx.search(authreq)
if mo:
scheme, quote, realm = mo.groups()
+ if quote not in ['"', "'"]:
+ warnings.warn("Basic Auth Realm was unquoted",
+ UserWarning, 2)
if scheme.lower() == 'basic':
response = self.retry_http_basic_auth(host, req, realm)
if response and response.code != 401:
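
The new info() method lets error responses be inspected through the same interface as a successful addinfourl result. A contrived construction just to show the accessor (the dict stands in for the real headers object a server would return):

import urllib2
from StringIO import StringIO

hdrs = {'Content-Type': 'text/html'}   # stand-in for the real headers object
err = urllib2.HTTPError('http://example.invalid/', 404, 'Not Found',
                        hdrs, StringIO(''))
assert err.code == 404
assert err.info() is hdrs              # headers reachable via info() as well as .hdrs
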
diff --git a/Lib/urlparse.py b/Lib/urlparse.py
index b42e0f4..4cd3d67 100644
--- a/Lib/urlparse.py
+++ b/Lib/urlparse.py
@@ -28,6 +28,8 @@ test_urlparse.py provides a good indicator of parsing behavior.
"""
+import re
+
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit", "parse_qs", "parse_qsl"]
@@ -40,11 +42,14 @@ uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
'imap', 'wais', 'file', 'mms', 'https', 'shttp',
'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
'svn', 'svn+ssh', 'sftp','nfs','git', 'git+ssh']
-non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
- 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
- 'mms', '', 'sftp']
+ 'mms', '', 'sftp', 'tel']
+
+# These are not actually used anymore, but should stay for backwards
+# compatibility. (They are undocumented, but have a public-looking name.)
+non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
+ 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
@@ -104,9 +109,12 @@ class ResultMixin(object):
netloc = self.netloc.split('@')[-1].split(']')[-1]
if ':' in netloc:
port = netloc.split(':')[1]
- return int(port, 10)
- else:
- return None
+ if port:
+ port = int(port, 10)
+ # verify legal port
+ if (0 <= port <= 65535):
+ return port
+ return None
from collections import namedtuple
@@ -192,21 +200,21 @@ def urlsplit(url, scheme='', allow_fragments=True):
if c not in scheme_chars:
break
else:
- try:
- # make sure "url" is not actually a port number (in which case
- # "scheme" is really part of the path
- _testportnum = int(url[i+1:])
- except ValueError:
- scheme, url = url[:i].lower(), url[i+1:]
+ # make sure "url" is not actually a port number (in which case
+ # "scheme" is really part of the path)
+ rest = url[i+1:]
+ if not rest or any(c not in '0123456789' for c in rest):
+ # not a port number
+ scheme, url = url[:i].lower(), rest
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
- if allow_fragments and scheme in uses_fragment and '#' in url:
+ if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
- if scheme in uses_query and '?' in url:
+ if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
@@ -306,6 +314,15 @@ def urldefrag(url):
else:
return url, ''
+try:
+ unicode
+except NameError:
+ def _is_unicode(x):
+ return 0
+else:
+ def _is_unicode(x):
+ return isinstance(x, unicode)
+
# unquote method for parse_qs and parse_qsl
# Cannot use directly from urllib as it would create a circular reference
# because urllib uses urlparse methods (urljoin). If you update this function,
@@ -314,22 +331,35 @@ def urldefrag(url):
_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a+b, chr(int(a+b,16)))
for a in _hexdig for b in _hexdig)
+_asciire = re.compile('([\x00-\x7f]+)')
def unquote(s):
"""unquote('abc%20def') -> 'abc def'."""
- res = s.split('%')
+ if _is_unicode(s):
+ if '%' not in s:
+ return s
+ bits = _asciire.split(s)
+ res = [bits[0]]
+ append = res.append
+ for i in range(1, len(bits), 2):
+ append(unquote(str(bits[i])).decode('latin1'))
+ append(bits[i + 1])
+ return ''.join(res)
+
+ bits = s.split('%')
# fastpath
- if len(res) == 1:
+ if len(bits) == 1:
return s
- s = res[0]
- for item in res[1:]:
+ res = [bits[0]]
+ append = res.append
+ for item in bits[1:]:
try:
- s += _hextochr[item[:2]] + item[2:]
+ append(_hextochr[item[:2]])
+ append(item[2:])
except KeyError:
- s += '%' + item
- except UnicodeDecodeError:
- s += unichr(int(item[:2], 16)) + item[2:]
- return s
+ append('%')
+ append(item)
+ return ''.join(res)
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument.
diff --git a/Lib/uuid.py b/Lib/uuid.py
index fdd0c5c..63cb3ae 100644
--- a/Lib/uuid.py
+++ b/Lib/uuid.py
@@ -293,25 +293,38 @@ class UUID(object):
def _find_mac(command, args, hw_identifiers, get_index):
import os
- for dir in ['', '/sbin/', '/usr/sbin']:
+ path = os.environ.get("PATH", os.defpath).split(os.pathsep)
+ path.extend(('/sbin', '/usr/sbin'))
+ for dir in path:
executable = os.path.join(dir, command)
- if not os.path.exists(executable):
- continue
+ if (os.path.exists(executable) and
+ os.access(executable, os.F_OK | os.X_OK) and
+ not os.path.isdir(executable)):
+ break
+ else:
+ return None
- try:
- # LC_ALL to get English output, 2>/dev/null to
- # prevent output on stderr
- cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
- with os.popen(cmd) as pipe:
- for line in pipe:
- words = line.lower().split()
- for i in range(len(words)):
- if words[i] in hw_identifiers:
+ try:
+ # LC_ALL to ensure English output, 2>/dev/null to
+ # prevent output on stderr
+ cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
+ with os.popen(cmd) as pipe:
+ for line in pipe:
+ words = line.lower().split()
+ for i in range(len(words)):
+ if words[i] in hw_identifiers:
+ try:
return int(
words[get_index(i)].replace(':', ''), 16)
- except IOError:
- continue
- return None
+ except (ValueError, IndexError):
+ # Virtual interfaces, such as those provided by
+ # VPNs, do not have a colon-delimited MAC address
+ # as expected, but a 16-byte HWAddr separated by
+ # dashes. These should be ignored in favor of a
+ # real MAC address
+ pass
+ except IOError:
+ pass
def _ifconfig_getnode():
"""Get the hardware address on Unix by running ifconfig."""
@@ -408,6 +421,8 @@ try:
_uuid_generate_random = lib.uuid_generate_random
if hasattr(lib, 'uuid_generate_time'):
_uuid_generate_time = lib.uuid_generate_time
+ if _uuid_generate_random is not None:
+ break # found everything we were looking for
# The uuid_generate_* functions are broken on MacOS X 10.5, as noted
# in issue #8621 the function generates the same sequence of values
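
Instead of probing a fixed list of directories, the command used to read MAC addresses is now looked up on $PATH plus the sbin directories. The lookup on its own, as a hypothetical helper:

import os

def find_mac_tool(command):
    path = os.environ.get("PATH", os.defpath).split(os.pathsep)
    path.extend(('/sbin', '/usr/sbin'))
    for dir in path:
        executable = os.path.join(dir, command)
        # Require an existing, executable, non-directory entry, exactly as
        # the patched _find_mac() does before building the popen command.
        if (os.path.exists(executable) and
                os.access(executable, os.F_OK | os.X_OK) and
                not os.path.isdir(executable)):
            return executable
    return None

# e.g. find_mac_tool('ifconfig') or find_mac_tool('netstat')
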
diff --git a/Lib/wave.py b/Lib/wave.py
index 2016a2a..8ff93c3 100644
--- a/Lib/wave.py
+++ b/Lib/wave.py
@@ -80,17 +80,18 @@ class Error(Exception):
WAVE_FORMAT_PCM = 0x0001
-_array_fmts = None, 'b', 'h', None, 'l'
+_array_fmts = None, 'b', 'h', None, 'i'
-# Determine endian-ness
import struct
-if struct.pack("h", 1) == "\000\001":
- big_endian = 1
-else:
- big_endian = 0
-
+import sys
from chunk import Chunk
+def _byteswap3(data):
+ ba = bytearray(data)
+ ba[::3] = data[2::3]
+ ba[2::3] = data[::3]
+ return bytes(ba)
+
class Wave_read:
"""Variables used in this class:
@@ -231,13 +232,14 @@ class Wave_read:
self._data_seek_needed = 0
if nframes == 0:
return ''
- if self._sampwidth > 1 and big_endian:
+ if self._sampwidth in (2, 4) and sys.byteorder == 'big':
# unfortunately the fromfile() method does not take
# something that only looks like a file object, so
# we have to reach into the innards of the chunk object
import array
chunk = self._data_chunk
data = array.array(_array_fmts[self._sampwidth])
+ assert data.itemsize == self._sampwidth
nitems = nframes * self._nchannels
if nitems * self._sampwidth > chunk.chunksize - chunk.size_read:
nitems = (chunk.chunksize - chunk.size_read) / self._sampwidth
@@ -251,6 +253,8 @@ class Wave_read:
data = data.tostring()
else:
data = self._data_chunk.read(nframes * self._framesize)
+ if self._sampwidth == 3 and sys.byteorder == 'big':
+ data = _byteswap3(data)
if self._convert and data:
data = self._convert(data)
self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
@@ -261,9 +265,9 @@ class Wave_read:
#
def _read_fmt_chunk(self, chunk):
- wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack('<hhllh', chunk.read(14))
+ wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack('<HHLLH', chunk.read(14))
if wFormatTag == WAVE_FORMAT_PCM:
- sampwidth = struct.unpack('<h', chunk.read(2))[0]
+ sampwidth = struct.unpack('<H', chunk.read(2))[0]
self._sampwidth = (sampwidth + 7) // 8
else:
raise Error, 'unknown format: %r' % (wFormatTag,)
@@ -418,13 +422,18 @@ class Wave_write:
nframes = len(data) // (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
- if self._sampwidth > 1 and big_endian:
+ if self._sampwidth in (2, 4) and sys.byteorder == 'big':
import array
- data = array.array(_array_fmts[self._sampwidth], data)
+ a = array.array(_array_fmts[self._sampwidth])
+ a.fromstring(data)
+ data = a
+ assert data.itemsize == self._sampwidth
data.byteswap()
data.tofile(self._file)
self._datawritten = self._datawritten + len(data) * self._sampwidth
else:
+ if self._sampwidth == 3 and sys.byteorder == 'big':
+ data = _byteswap3(data)
self._file.write(data)
self._datawritten = self._datawritten + len(data)
self._nframeswritten = self._nframeswritten + nframes
@@ -436,11 +445,13 @@ class Wave_write:
def close(self):
if self._file:
- self._ensure_header_written(0)
- if self._datalength != self._datawritten:
- self._patchheader()
- self._file.flush()
- self._file = None
+ try:
+ self._ensure_header_written(0)
+ if self._datalength != self._datawritten:
+ self._patchheader()
+ self._file.flush()
+ finally:
+ self._file = None
if self._i_opened_the_file:
self._i_opened_the_file.close()
self._i_opened_the_file = None
@@ -466,14 +477,14 @@ class Wave_write:
self._nframes = initlength / (self._nchannels * self._sampwidth)
self._datalength = self._nframes * self._nchannels * self._sampwidth
self._form_length_pos = self._file.tell()
- self._file.write(struct.pack('<l4s4slhhllhh4s',
+ self._file.write(struct.pack('<L4s4sLHHLLHH4s',
36 + self._datalength, 'WAVE', 'fmt ', 16,
WAVE_FORMAT_PCM, self._nchannels, self._framerate,
self._nchannels * self._framerate * self._sampwidth,
self._nchannels * self._sampwidth,
self._sampwidth * 8, 'data'))
self._data_length_pos = self._file.tell()
- self._file.write(struct.pack('<l', self._datalength))
+ self._file.write(struct.pack('<L', self._datalength))
self._headerwritten = True
def _patchheader(self):
@@ -482,9 +493,9 @@ class Wave_write:
return
curpos = self._file.tell()
self._file.seek(self._form_length_pos, 0)
- self._file.write(struct.pack('<l', 36 + self._datawritten))
+ self._file.write(struct.pack('<L', 36 + self._datawritten))
self._file.seek(self._data_length_pos, 0)
- self._file.write(struct.pack('<l', self._datawritten))
+ self._file.write(struct.pack('<L', self._datawritten))
self._file.seek(curpos, 0)
self._datalength = self._datawritten
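
24-bit samples cannot be swapped with array.byteswap(), hence the dedicated helper for big-endian hosts. A quick check of what it does (the helper is restated here so the example runs on its own):

def byteswap3(data):
    # Swap the first and last byte of every 3-byte frame.
    ba = bytearray(data)
    ba[::3] = data[2::3]
    ba[2::3] = data[::3]
    return bytes(ba)

# Two little-endian 24-bit samples become big-endian, and vice versa.
assert byteswap3(b'\x01\x02\x03\x04\x05\x06') == b'\x03\x02\x01\x06\x05\x04'
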
diff --git a/Lib/weakref.py b/Lib/weakref.py
index 88c60e7..787c885 100644
--- a/Lib/weakref.py
+++ b/Lib/weakref.py
@@ -20,7 +20,7 @@ from _weakref import (
ProxyType,
ReferenceType)
-from _weakrefset import WeakSet
+from _weakrefset import WeakSet, _IterationGuard
from exceptions import ReferenceError
@@ -48,10 +48,24 @@ class WeakValueDictionary(UserDict.UserDict):
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
- del self.data[wr.key]
+ if self._iterating:
+ self._pending_removals.append(wr.key)
+ else:
+ del self.data[wr.key]
self._remove = remove
+ # A list of keys to be removed
+ self._pending_removals = []
+ self._iterating = set()
UserDict.UserDict.__init__(self, *args, **kw)
+ def _commit_removals(self):
+ l = self._pending_removals
+ d = self.data
+ # We shouldn't encounter any KeyError, because this method should
+ # always be called *before* mutating the dict.
+ while l:
+ del d[l.pop()]
+
def __getitem__(self, key):
o = self.data[key]()
if o is None:
@@ -59,6 +73,11 @@ class WeakValueDictionary(UserDict.UserDict):
else:
return o
+ def __delitem__(self, key):
+ if self._pending_removals:
+ self._commit_removals()
+ del self.data[key]
+
def __contains__(self, key):
try:
o = self.data[key]()
@@ -77,8 +96,15 @@ class WeakValueDictionary(UserDict.UserDict):
return "<WeakValueDictionary at %s>" % id(self)
def __setitem__(self, key, value):
+ if self._pending_removals:
+ self._commit_removals()
self.data[key] = KeyedRef(value, self._remove, key)
+ def clear(self):
+ if self._pending_removals:
+ self._commit_removals()
+ self.data.clear()
+
def copy(self):
new = WeakValueDictionary()
for key, wr in self.data.items():
@@ -120,16 +146,18 @@ class WeakValueDictionary(UserDict.UserDict):
return L
def iteritems(self):
- for wr in self.data.itervalues():
- value = wr()
- if value is not None:
- yield wr.key, value
+ with _IterationGuard(self):
+ for wr in self.data.itervalues():
+ value = wr()
+ if value is not None:
+ yield wr.key, value
def iterkeys(self):
- return self.data.iterkeys()
+ with _IterationGuard(self):
+ for k in self.data.iterkeys():
+ yield k
- def __iter__(self):
- return self.data.iterkeys()
+ __iter__ = iterkeys
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
@@ -141,15 +169,20 @@ class WeakValueDictionary(UserDict.UserDict):
keep the values around longer than needed.
"""
- return self.data.itervalues()
+ with _IterationGuard(self):
+ for wr in self.data.itervalues():
+ yield wr
def itervalues(self):
- for wr in self.data.itervalues():
- obj = wr()
- if obj is not None:
- yield obj
+ with _IterationGuard(self):
+ for wr in self.data.itervalues():
+ obj = wr()
+ if obj is not None:
+ yield obj
def popitem(self):
+ if self._pending_removals:
+ self._commit_removals()
while 1:
key, wr = self.data.popitem()
o = wr()
@@ -157,6 +190,8 @@ class WeakValueDictionary(UserDict.UserDict):
return key, o
def pop(self, key, *args):
+ if self._pending_removals:
+ self._commit_removals()
try:
o = self.data.pop(key)()
except KeyError:
@@ -172,12 +207,16 @@ class WeakValueDictionary(UserDict.UserDict):
try:
wr = self.data[key]
except KeyError:
+ if self._pending_removals:
+ self._commit_removals()
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return wr()
def update(self, dict=None, **kwargs):
+ if self._pending_removals:
+ self._commit_removals()
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
@@ -245,9 +284,29 @@ class WeakKeyDictionary(UserDict.UserDict):
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
- del self.data[k]
+ if self._iterating:
+ self._pending_removals.append(k)
+ else:
+ del self.data[k]
self._remove = remove
- if dict is not None: self.update(dict)
+ # A list of dead weakrefs (keys to be removed)
+ self._pending_removals = []
+ self._iterating = set()
+ if dict is not None:
+ self.update(dict)
+
+ def _commit_removals(self):
+ # NOTE: We don't need to call this method before mutating the dict,
+ # because a dead weakref never compares equal to a live weakref,
+ # even if they happened to refer to equal objects.
+ # However, it means keys may already have been removed.
+ l = self._pending_removals
+ d = self.data
+ while l:
+ try:
+ del d[l.pop()]
+ except KeyError:
+ pass
def __delitem__(self, key):
del self.data[ref(key)]
@@ -306,10 +365,11 @@ class WeakKeyDictionary(UserDict.UserDict):
return L
def iteritems(self):
- for wr, value in self.data.iteritems():
- key = wr()
- if key is not None:
- yield key, value
+ with _IterationGuard(self):
+ for wr, value in self.data.iteritems():
+ key = wr()
+ if key is not None:
+ yield key, value
def iterkeyrefs(self):
"""Return an iterator that yields the weak references to the keys.
@@ -321,19 +381,23 @@ class WeakKeyDictionary(UserDict.UserDict):
keep the keys around longer than needed.
"""
- return self.data.iterkeys()
+ with _IterationGuard(self):
+ for wr in self.data.iterkeys():
+ yield wr
def iterkeys(self):
- for wr in self.data.iterkeys():
- obj = wr()
- if obj is not None:
- yield obj
+ with _IterationGuard(self):
+ for wr in self.data.iterkeys():
+ obj = wr()
+ if obj is not None:
+ yield obj
- def __iter__(self):
- return self.iterkeys()
+ __iter__ = iterkeys
def itervalues(self):
- return self.data.itervalues()
+ with _IterationGuard(self):
+ for value in self.data.itervalues():
+ yield value
def keyrefs(self):
"""Return a list of weak references to the keys.
diff --git a/Lib/webbrowser.py b/Lib/webbrowser.py
index f3c53d4..d3272a0 100644..100755
--- a/Lib/webbrowser.py
+++ b/Lib/webbrowser.py
@@ -304,6 +304,18 @@ class Galeon(UnixBrowser):
background = True
+class Chrome(UnixBrowser):
+ "Launcher class for Google Chrome browser."
+
+ remote_args = ['%action', '%s']
+ remote_action = ""
+ remote_action_newwin = "--new-window"
+ remote_action_newtab = ""
+ background = True
+
+Chromium = Chrome
+
+
class Opera(UnixBrowser):
"Launcher class for Opera browser."
@@ -441,6 +453,14 @@ class Grail(BaseBrowser):
def register_X_browsers():
+ # use xdg-open if around
+ if _iscommand("xdg-open"):
+ register("xdg-open", None, BackgroundBrowser("xdg-open"))
+
+ # The default GNOME3 browser
+ if "GNOME_DESKTOP_SESSION_ID" in os.environ and _iscommand("gvfs-open"):
+ register("gvfs-open", None, BackgroundBrowser("gvfs-open"))
+
# The default GNOME browser
if "GNOME_DESKTOP_SESSION_ID" in os.environ and _iscommand("gnome-open"):
register("gnome-open", None, BackgroundBrowser("gnome-open"))
@@ -449,9 +469,13 @@ def register_X_browsers():
if "KDE_FULL_SESSION" in os.environ and _iscommand("kfmclient"):
register("kfmclient", Konqueror, Konqueror("kfmclient"))
+ if _iscommand("x-www-browser"):
+ register("x-www-browser", None, BackgroundBrowser("x-www-browser"))
+
# The Mozilla/Netscape browsers
for browser in ("mozilla-firefox", "firefox",
"mozilla-firebird", "firebird",
+ "iceweasel", "iceape",
"seamonkey", "mozilla", "netscape"):
if _iscommand(browser):
register(browser, None, Mozilla(browser))
@@ -471,6 +495,11 @@ def register_X_browsers():
if _iscommand("skipstone"):
register("skipstone", None, BackgroundBrowser("skipstone"))
+ # Google Chrome/Chromium browsers
+ for browser in ("google-chrome", "chrome", "chromium", "chromium-browser"):
+ if _iscommand(browser):
+ register(browser, None, Chrome(browser))
+
# Opera, quite popular
if _iscommand("opera"):
register("opera", None, Opera("opera"))
@@ -489,6 +518,8 @@ if os.environ.get("DISPLAY"):
# Also try console browsers
if os.environ.get("TERM"):
+ if _iscommand("www-browser"):
+ register("www-browser", None, GenericBrowser("www-browser"))
# The Links/elinks browsers <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
if _iscommand("links"):
register("links", None, GenericBrowser("links"))
diff --git a/Lib/wsgiref/handlers.py b/Lib/wsgiref/handlers.py
index ae1e8cc..8cb57e2 100644
--- a/Lib/wsgiref/handlers.py
+++ b/Lib/wsgiref/handlers.py
@@ -122,11 +122,13 @@ class BaseHandler:
in the event loop to iterate over the data, and to call
'self.close()' once the response is finished.
"""
- if not self.result_is_file() or not self.sendfile():
- for data in self.result:
- self.write(data)
- self.finish_content()
- self.close()
+ try:
+ if not self.result_is_file() or not self.sendfile():
+ for data in self.result:
+ self.write(data)
+ self.finish_content()
+ finally:
+ self.close()
def get_scheme(self):
diff --git a/Lib/wsgiref/simple_server.py b/Lib/wsgiref/simple_server.py
index e6a385b..12119ea 100644
--- a/Lib/wsgiref/simple_server.py
+++ b/Lib/wsgiref/simple_server.py
@@ -153,3 +153,4 @@ if __name__ == '__main__':
import webbrowser
webbrowser.open('http://localhost:8000/xyz?abc')
httpd.handle_request() # serve one request, then exit
+ httpd.server_close()
diff --git a/Lib/wsgiref/validate.py b/Lib/wsgiref/validate.py
index 43784f9..04a893d 100644
--- a/Lib/wsgiref/validate.py
+++ b/Lib/wsgiref/validate.py
@@ -134,9 +134,9 @@ def validator(application):
When applied between a WSGI server and a WSGI application, this
middleware will check for WSGI compliancy on a number of levels.
This middleware does not modify the request or response in any
- way, but will throw an AssertionError if anything seems off
+ way, but will raise an AssertionError if anything seems off
(except for a failure to close the application iterator, which
- will be printed to stderr -- there's no way to throw an exception
+ will be printed to stderr -- there's no way to raise an exception
at that point).
"""
diff --git a/Lib/xml/dom/minidom.py b/Lib/xml/dom/minidom.py
index 7e2898c..c30e246 100644
--- a/Lib/xml/dom/minidom.py
+++ b/Lib/xml/dom/minidom.py
@@ -1,5 +1,6 @@
-"""\
-minidom.py -- a lightweight DOM implementation.
+"""Simple implementation of the Level 1 DOM.
+
+Namespaces and other minor Level 2 features are also supported.
parse("foo.xml")
@@ -357,9 +358,6 @@ class Attr(Node):
def _get_localName(self):
return self.nodeName.split(":", 1)[-1]
- def _get_name(self):
- return self.name
-
def _get_specified(self):
return self.specified
diff --git a/Lib/xml/etree/ElementInclude.py b/Lib/xml/etree/ElementInclude.py
index 84fd754..7e29119 100644
--- a/Lib/xml/etree/ElementInclude.py
+++ b/Lib/xml/etree/ElementInclude.py
@@ -75,14 +75,13 @@ class FatalIncludeError(SyntaxError):
# @throws IOError If the loader fails to load the resource.
def default_loader(href, parse, encoding=None):
- file = open(href)
- if parse == "xml":
- data = ElementTree.parse(file).getroot()
- else:
- data = file.read()
- if encoding:
- data = data.decode(encoding)
- file.close()
+ with open(href) as file:
+ if parse == "xml":
+ data = ElementTree.parse(file).getroot()
+ else:
+ data = file.read()
+ if encoding:
+ data = data.decode(encoding)
return data
##
diff --git a/Lib/xml/etree/ElementTree.py b/Lib/xml/etree/ElementTree.py
index 2b0cf0c..9f3e75d 100644
--- a/Lib/xml/etree/ElementTree.py
+++ b/Lib/xml/etree/ElementTree.py
@@ -683,8 +683,8 @@ class ElementTree(object):
return list(self.iter(tag))
##
- # Finds the first toplevel element with given tag.
- # Same as getroot().find(path).
+ # Same as getroot().find(path), starting at the root of the
+ # tree.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
@@ -704,10 +704,9 @@ class ElementTree(object):
return self._root.find(path, namespaces)
##
- # Finds the element text for the first toplevel element with given
- # tag. Same as getroot().findtext(path).
+ # Same as getroot().findtext(path), starting at the root of the tree.
#
- # @param path What toplevel element to look for.
+ # @param path What element to look for.
# @param default What to return if the element was not found.
# @keyparam namespaces Optional namespace prefix map.
# @return The text content of the first matching element, or the
@@ -729,8 +728,7 @@ class ElementTree(object):
return self._root.findtext(path, default, namespaces)
##
- # Finds all toplevel elements with the given tag.
- # Same as getroot().findall(path).
+ # Same as getroot().findall(path), starting at the root of the tree.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
@@ -779,11 +777,12 @@ class ElementTree(object):
# @param file A file name, or a file object opened for writing.
# @param **options Options, given as keyword arguments.
# @keyparam encoding Optional output encoding (default is US-ASCII).
- # @keyparam method Optional output method ("xml", "html", "text" or
- # "c14n"; default is "xml").
# @keyparam xml_declaration Controls if an XML declaration should
# be added to the file. Use False for never, True for always,
# None for only if not US-ASCII or UTF-8. None is default.
+ # @keyparam default_namespace Sets the default XML namespace (for "xmlns").
+ # @keyparam method Optional output method ("xml", "html", "text" or
+ # "c14n"; default is "xml").
def write(self, file_or_filename,
# keyword arguments
@@ -945,7 +944,7 @@ def _serialize_xml(write, elem, encoding, qnames, namespaces):
write(_escape_cdata(elem.tail, encoding))
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
- "img", "input", "isindex", "link", "meta" "param")
+ "img", "input", "isindex", "link", "meta", "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
@@ -989,15 +988,15 @@ def _serialize_html(write, elem, encoding, qnames, namespaces):
# FIXME: handle boolean attributes
write(" %s=\"%s\"" % (qnames[k], v))
write(">")
- tag = tag.lower()
+ ltag = tag.lower()
if text:
- if tag == "script" or tag == "style":
+ if ltag == "script" or ltag == "style":
write(_encode(text, encoding))
else:
write(_escape_cdata(text, encoding))
for e in elem:
_serialize_html(write, e, encoding, qnames, None)
- if tag not in HTML_EMPTY:
+ if ltag not in HTML_EMPTY:
write("</" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail, encoding))
diff --git a/Lib/xml/sax/_exceptions.py b/Lib/xml/sax/_exceptions.py
index fdd614a..a9b2ba3 100644
--- a/Lib/xml/sax/_exceptions.py
+++ b/Lib/xml/sax/_exceptions.py
@@ -12,7 +12,7 @@ class SAXException(Exception):
the application: you can subclass it to provide additional
functionality, or to add localization. Note that although you will
receive a SAXException as the argument to the handlers in the
- ErrorHandler interface, you are not actually required to throw
+ ErrorHandler interface, you are not actually required to raise
the exception; instead, you can simply read the information in
it."""
@@ -50,7 +50,7 @@ class SAXParseException(SAXException):
the original XML document. Note that although the application will
receive a SAXParseException as the argument to the handlers in the
ErrorHandler interface, the application is not actually required
- to throw the exception; instead, it can simply read the
+ to raise the exception; instead, it can simply read the
information in it and take a different action.
Since this exception is a subclass of SAXException, it inherits
@@ -62,7 +62,7 @@ class SAXParseException(SAXException):
self._locator = locator
# We need to cache this stuff at construction time.
- # If this exception is thrown, the objects through which we must
+ # If this exception is raised, the objects through which we must
# traverse to get this information may be deleted by the time
# it gets caught.
self._systemId = self._locator.getSystemId()
diff --git a/Lib/xml/sax/expatreader.py b/Lib/xml/sax/expatreader.py
index 92a79c1..9de3e72 100644
--- a/Lib/xml/sax/expatreader.py
+++ b/Lib/xml/sax/expatreader.py
@@ -108,7 +108,10 @@ class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
def prepareParser(self, source):
if source.getSystemId() is not None:
- self._parser.SetBase(source.getSystemId())
+ base = source.getSystemId()
+ if isinstance(base, unicode):
+ base = base.encode('utf-8')
+ self._parser.SetBase(base)
# Redefined setContentHandler to allow changing handlers during parsing
diff --git a/Lib/xml/sax/saxutils.py b/Lib/xml/sax/saxutils.py
index 97d65d8..1abcd9a 100644
--- a/Lib/xml/sax/saxutils.py
+++ b/Lib/xml/sax/saxutils.py
@@ -4,6 +4,8 @@ convenience of application and driver writers.
"""
import os, urlparse, urllib, types
+import io
+import sys
import handler
import xmlreader
@@ -12,15 +14,6 @@ try:
except AttributeError:
_StringTypes = [types.StringType]
-# See whether the xmlcharrefreplace error handler is
-# supported
-try:
- from codecs import xmlcharrefreplace_errors
- _error_handling = "xmlcharrefreplace"
- del xmlcharrefreplace_errors
-except ImportError:
- _error_handling = "strict"
-
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
@@ -81,25 +74,50 @@ def quoteattr(data, entities={}):
return data
+def _gettextwriter(out, encoding):
+ if out is None:
+ import sys
+ out = sys.stdout
+
+ if isinstance(out, io.RawIOBase):
+ buffer = io.BufferedIOBase(out)
+ # Keep the original file open when the TextIOWrapper is
+ # destroyed
+ buffer.close = lambda: None
+ else:
+ # This is to handle passed objects that aren't in the
+ # IOBase hierarchy, but just have a write method
+ buffer = io.BufferedIOBase()
+ buffer.writable = lambda: True
+ buffer.write = out.write
+ try:
+ # TextIOWrapper uses this methods to determine
+ # if BOM (for UTF-16, etc) should be added
+ buffer.seekable = out.seekable
+ buffer.tell = out.tell
+ except AttributeError:
+ pass
+ # wrap a binary writer with TextIOWrapper
+ class UnbufferedTextIOWrapper(io.TextIOWrapper):
+ def write(self, s):
+ super(UnbufferedTextIOWrapper, self).write(s)
+ self.flush()
+ return UnbufferedTextIOWrapper(buffer, encoding=encoding,
+ errors='xmlcharrefreplace',
+ newline='\n')
+
class XMLGenerator(handler.ContentHandler):
def __init__(self, out=None, encoding="iso-8859-1"):
- if out is None:
- import sys
- out = sys.stdout
handler.ContentHandler.__init__(self)
- self._out = out
+ out = _gettextwriter(out, encoding)
+ self._write = out.write
+ self._flush = out.flush
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
- def _write(self, text):
- if isinstance(text, str):
- self._out.write(text)
- else:
- self._out.write(text.encode(self._encoding, _error_handling))
-
def _qname(self, name):
"""Builds a qualified name from a (ns_url, localname) pair"""
if name[0]:
@@ -120,9 +138,12 @@ class XMLGenerator(handler.ContentHandler):
# ContentHandler methods
def startDocument(self):
- self._write('<?xml version="1.0" encoding="%s"?>\n' %
+ self._write(u'<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
+ def endDocument(self):
+ self._flush()
+
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
@@ -133,39 +154,43 @@ class XMLGenerator(handler.ContentHandler):
del self._ns_contexts[-1]
def startElement(self, name, attrs):
- self._write('<' + name)
+ self._write(u'<' + name)
for (name, value) in attrs.items():
- self._write(' %s=%s' % (name, quoteattr(value)))
- self._write('>')
+ self._write(u' %s=%s' % (name, quoteattr(value)))
+ self._write(u'>')
def endElement(self, name):
- self._write('</%s>' % name)
+ self._write(u'</%s>' % name)
def startElementNS(self, name, qname, attrs):
- self._write('<' + self._qname(name))
+ self._write(u'<' + self._qname(name))
for prefix, uri in self._undeclared_ns_maps:
if prefix:
- self._out.write(' xmlns:%s="%s"' % (prefix, uri))
+ self._write(u' xmlns:%s="%s"' % (prefix, uri))
else:
- self._out.write(' xmlns="%s"' % uri)
+ self._write(u' xmlns="%s"' % uri)
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
- self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
- self._write('>')
+ self._write(u' %s=%s' % (self._qname(name), quoteattr(value)))
+ self._write(u'>')
def endElementNS(self, name, qname):
- self._write('</%s>' % self._qname(name))
+ self._write(u'</%s>' % self._qname(name))
def characters(self, content):
+ if not isinstance(content, unicode):
+ content = unicode(content, self._encoding)
self._write(escape(content))
def ignorableWhitespace(self, content):
+ if not isinstance(content, unicode):
+ content = unicode(content, self._encoding)
self._write(content)
def processingInstruction(self, target, data):
- self._write('<?%s %s?>' % (target, data))
+ self._write(u'<?%s %s?>' % (target, data))
class XMLFilterBase(xmlreader.XMLReader):
@@ -293,14 +318,31 @@ def prepare_input_source(source, base = ""):
source.setSystemId(f.name)
if source.getByteStream() is None:
- sysid = source.getSystemId()
- basehead = os.path.dirname(os.path.normpath(base))
- sysidfilename = os.path.join(basehead, sysid)
- if os.path.isfile(sysidfilename):
+ try:
+ sysid = source.getSystemId()
+ basehead = os.path.dirname(os.path.normpath(base))
+ encoding = sys.getfilesystemencoding()
+ if isinstance(sysid, unicode):
+ if not isinstance(basehead, unicode):
+ try:
+ basehead = basehead.decode(encoding)
+ except UnicodeDecodeError:
+ sysid = sysid.encode(encoding)
+ else:
+ if isinstance(basehead, unicode):
+ try:
+ sysid = sysid.decode(encoding)
+ except UnicodeDecodeError:
+ basehead = basehead.encode(encoding)
+ sysidfilename = os.path.join(basehead, sysid)
+ isfile = os.path.isfile(sysidfilename)
+ except UnicodeError:
+ isfile = False
+ if isfile:
source.setSystemId(sysidfilename)
f = open(sysidfilename, "rb")
else:
- source.setSystemId(urlparse.urljoin(base, sysid))
+ source.setSystemId(urlparse.urljoin(base, source.getSystemId()))
f = urllib.urlopen(source.getSystemId())
source.setByteStream(f)
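
End-to-end, the rewritten XMLGenerator funnels all output through a TextIOWrapper configured with errors='xmlcharrefreplace', so handing it a binary stream and unicode content works uniformly. A small usage sketch, assuming the patched saxutils:

import io
from xml.sax.saxutils import XMLGenerator

out = io.BytesIO()
gen = XMLGenerator(out, encoding='utf-8')
gen.startDocument()
gen.startElement(u'greeting', {u'lang': u'fr'})
gen.characters(u'caf\xe9')
gen.endElement(u'greeting')
gen.endDocument()                      # flushes the text wrapper

# The declaration and the UTF-8 encoded text both land in the byte stream;
# characters outside the target encoding would come out as &#...; references.
assert u'caf\xe9'.encode('utf-8') in out.getvalue()
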
diff --git a/Lib/xml/sax/xmlreader.py b/Lib/xml/sax/xmlreader.py
index 46ee02b..74aa39a 100644
--- a/Lib/xml/sax/xmlreader.py
+++ b/Lib/xml/sax/xmlreader.py
@@ -68,7 +68,7 @@ class XMLReader:
SAX parsers are not required to provide localization for errors
and warnings; if they cannot support the requested locale,
- however, they must throw a SAX exception. Applications may
+ however, they must raise a SAX exception. Applications may
request a locale change in the middle of a parse."""
raise SAXNotSupportedException("Locale support not implemented")
diff --git a/Lib/xmlrpclib.py b/Lib/xmlrpclib.py
index 653c424..1a8b3fb 100644
--- a/Lib/xmlrpclib.py
+++ b/Lib/xmlrpclib.py
@@ -945,7 +945,7 @@ class _MultiCallMethod:
class MultiCallIterator:
"""Iterates over the results of a multicall. Exceptions are
- thrown in response to xmlrpc faults."""
+ raised in response to xmlrpc faults."""
def __init__(self, results):
self.results = results
@@ -1617,21 +1617,14 @@ Server = ServerProxy
if __name__ == "__main__":
- # simple test program (from the XML-RPC specification)
-
- # server = ServerProxy("http://localhost:8000") # local server
- server = ServerProxy("http://time.xmlrpc.com/RPC2")
+ server = ServerProxy("http://localhost:8000")
print server
- try:
- print server.currentTime.getCurrentTime()
- except Error, v:
- print "ERROR", v
-
multi = MultiCall(server)
- multi.currentTime.getCurrentTime()
- multi.currentTime.getCurrentTime()
+ multi.pow(2, 9)
+ multi.add(5, 1)
+ multi.add(24, 11)
try:
for response in multi():
print response
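
The revised demo block expects a local XML-RPC server exposing pow and add. A throwaway server that matches it (standard-library calls only; the lambda bound to 'add' is just for the sketch):

from SimpleXMLRPCServer import SimpleXMLRPCServer

server = SimpleXMLRPCServer(('localhost', 8000), logRequests=False)
server.register_multicall_functions()        # enables system.multicall for MultiCall
server.register_function(pow)
server.register_function(lambda x, y: x + y, 'add')
server.serve_forever()
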
diff --git a/Lib/zipfile.py b/Lib/zipfile.py
index d9181f2..a0beae2 100644
--- a/Lib/zipfile.py
+++ b/Lib/zipfile.py
@@ -5,6 +5,7 @@ import struct, os, time, sys, shutil
import binascii, cStringIO, stat
import io
import re
+import string
try:
import zlib # We may need its compression method
@@ -166,6 +167,8 @@ def _EndRecData64(fpin, offset, endrec):
return endrec
data = fpin.read(sizeEndCentDir64Locator)
+ if len(data) != sizeEndCentDir64Locator:
+ return endrec
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
@@ -176,6 +179,8 @@ def _EndRecData64(fpin, offset, endrec):
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
+ if len(data) != sizeEndCentDir64:
+ return endrec
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
@@ -211,7 +216,9 @@ def _EndRecData(fpin):
except IOError:
return None
data = fpin.read()
- if data[0:4] == stringEndArchive and data[-2:] == "\000\000":
+ if (len(data) == sizeEndCentDir and
+ data[0:4] == stringEndArchive and
+ data[-2:] == b"\000\000"):
# the signature is correct and there's no comment, unpack structure
endrec = struct.unpack(structEndArchive, data)
endrec=list(endrec)
@@ -235,6 +242,9 @@ def _EndRecData(fpin):
if start >= 0:
# found the magic number; attempt to unpack and interpret
recData = data[start:start+sizeEndCentDir]
+ if len(recData) != sizeEndCentDir:
+ # Zip file is corrupted.
+ return None
endrec = list(struct.unpack(structEndArchive, recData))
commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
@@ -246,7 +256,7 @@ def _EndRecData(fpin):
endrec)
# Unable to find a valid end of central directory structure
- return
+ return None
class ZipInfo (object):
@@ -316,7 +326,7 @@ class ZipInfo (object):
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
- def FileHeader(self):
+ def FileHeader(self, zip64=None):
"""Return the per-file header as a string."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
@@ -331,12 +341,17 @@ class ZipInfo (object):
extra = self.extra
- if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
- # File is larger than what fits into a 4 byte integer,
- # fall back to the ZIP64 extension
+ if zip64 is None:
+ zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
+ if zip64:
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
+ if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
+ if not zip64:
+ raise LargeZipFile("Filesize would require ZIP64 extensions")
+ # File is larger than what fits into a 4 byte integer,
+ # fall back to the ZIP64 extension
file_size = 0xffffffff
compress_size = 0xffffffff
self.extract_version = max(45, self.extract_version)
@@ -369,7 +384,7 @@ class ZipInfo (object):
# Try to decode the extra field.
extra = self.extra
unpack = struct.unpack
- while extra:
+ while len(extra) >= 4:
tp, ln = unpack('<HH', extra[:4])
if tp == 1:
if ln >= 24:
@@ -461,6 +476,28 @@ class _ZipDecrypter:
self._UpdateKeys(c)
return c
+
+compressor_names = {
+ 0: 'store',
+ 1: 'shrink',
+ 2: 'reduce',
+ 3: 'reduce',
+ 4: 'reduce',
+ 5: 'reduce',
+ 6: 'implode',
+ 7: 'tokenize',
+ 8: 'deflate',
+ 9: 'deflate64',
+ 10: 'implode',
+ 12: 'bzip2',
+ 14: 'lzma',
+ 18: 'terse',
+ 19: 'lz77',
+ 97: 'wavpack',
+ 98: 'ppmd',
+}
+
+
class ZipExtFile(io.BufferedIOBase):
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
@@ -475,9 +512,11 @@ class ZipExtFile(io.BufferedIOBase):
# Search for universal newlines or line chunks.
PATTERN = re.compile(r'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
- def __init__(self, fileobj, mode, zipinfo, decrypter=None):
+ def __init__(self, fileobj, mode, zipinfo, decrypter=None,
+ close_fileobj=False):
self._fileobj = fileobj
self._decrypter = decrypter
+ self._close_fileobj = close_fileobj
self._compress_type = zipinfo.compress_type
self._compress_size = zipinfo.compress_size
@@ -485,6 +524,12 @@ class ZipExtFile(io.BufferedIOBase):
if self._compress_type == ZIP_DEFLATED:
self._decompressor = zlib.decompressobj(-15)
+ elif self._compress_type != ZIP_STORED:
+ descr = compressor_names.get(self._compress_type)
+ if descr:
+ raise NotImplementedError("compression type %d (%s)" % (self._compress_type, descr))
+ else:
+ raise NotImplementedError("compression type %d" % (self._compress_type,))
self._unconsumed = ''
self._readbuffer = ''
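A small sketch of the friendlier error reporting the compressor_names table makes possible (same idea as above, trimmed to a few entries)::

    compressor_names = {0: 'store', 8: 'deflate', 12: 'bzip2', 14: 'lzma'}

    def describe_unsupported(method):
        # Build the same message ZipExtFile raises above: include the
        # human-readable name when the method id is known.
        descr = compressor_names.get(method)
        if descr:
            return "compression type %d (%s)" % (method, descr)
        return "compression type %d" % (method,)

    print(describe_unsupported(14))                   # compression type 14 (lzma)
    print(describe_unsupported(99))                   # compression type 99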
@@ -561,7 +606,11 @@ class ZipExtFile(io.BufferedIOBase):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
- self._offset -= len(chunk)
+ if len(chunk) > self._offset:
+ self._readbuffer = chunk + self._readbuffer[self._offset:]
+ self._offset = 0
+ else:
+ self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
@@ -649,9 +698,15 @@ class ZipExtFile(io.BufferedIOBase):
self._offset += len(data)
return data
+ def close(self):
+        try:

+ if self._close_fileobj:
+ self._fileobj.close()
+ finally:
+ super(ZipExtFile, self).close()
-class ZipFile:
+class ZipFile(object):
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
@@ -690,7 +745,7 @@ class ZipFile:
self.compression = compression # Method of compression
self.mode = key = mode.replace('b', '')[0]
self.pwd = None
- self.comment = ''
+ self._comment = ''
# Check if we were passed a file-like object
if isinstance(file, basestring):
@@ -710,30 +765,34 @@ class ZipFile:
self.fp = file
self.filename = getattr(file, 'name', None)
- if key == 'r':
- self._GetContents()
- elif key == 'w':
- # set the modified flag so central directory gets written
- # even if no files are added to the archive
- self._didModify = True
- elif key == 'a':
- try:
- # See if file is a zip file
+ try:
+ if key == 'r':
self._RealGetContents()
- # seek to start of directory and overwrite
- self.fp.seek(self.start_dir, 0)
- except BadZipfile:
- # file is not a zip file, just append
- self.fp.seek(0, 2)
-
+ elif key == 'w':
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
- else:
+ elif key == 'a':
+ try:
+ # See if file is a zip file
+ self._RealGetContents()
+ # seek to start of directory and overwrite
+ self.fp.seek(self.start_dir, 0)
+ except BadZipfile:
+ # file is not a zip file, just append
+ self.fp.seek(0, 2)
+
+ # set the modified flag so central directory gets written
+ # even if no files are added to the archive
+ self._didModify = True
+ else:
+ raise RuntimeError('Mode must be "r", "w" or "a"')
+ except:
+ fp = self.fp
+ self.fp = None
if not self._filePassed:
- self.fp.close()
- self.fp = None
- raise RuntimeError, 'Mode must be "r", "w" or "a"'
+ fp.close()
+ raise
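The constructor rework follows a general pattern worth spelling out: if anything fails after a file has been opened, close it (only when we opened it ourselves) and re-raise, so callers never inherit a leaked handle. A generic sketch with a hypothetical helper name::

    def parse_or_close(path, parse):
        # Hypothetical helper: either return a fully parsed result or close
        # the file we opened and propagate the original exception untouched.
        fp = open(path, 'rb')
        try:
            return parse(fp)
        except:
            fp.close()
            raise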
def __enter__(self):
return self
@@ -741,17 +800,6 @@ class ZipFile:
def __exit__(self, type, value, traceback):
self.close()
- def _GetContents(self):
- """Read the directory, making sure we close the file if the format
- is bad."""
- try:
- self._RealGetContents()
- except BadZipfile:
- if not self._filePassed:
- self.fp.close()
- self.fp = None
- raise
-
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
@@ -765,7 +813,7 @@ class ZipFile:
print endrec
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
- self.comment = endrec[_ECD_COMMENT] # archive comment
+ self._comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
@@ -784,9 +832,11 @@ class ZipFile:
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
- if centdir[0:4] != stringCentralDir:
- raise BadZipfile, "Bad magic number for central directory"
+ if len(centdir) != sizeCentralDir:
+ raise BadZipfile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
+ if centdir[_CD_SIGNATURE] != stringCentralDir:
+ raise BadZipfile("Bad magic number for central directory")
if self.debug > 2:
print centdir
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
@@ -845,9 +895,9 @@ class ZipFile:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
- f = self.open(zinfo.filename, "r")
- while f.read(chunk_size): # Check CRC-32
- pass
+ with self.open(zinfo.filename, "r") as f:
+ while f.read(chunk_size): # Check CRC-32
+ pass
except BadZipfile:
return zinfo.filename
@@ -864,6 +914,22 @@ class ZipFile:
"""Set default password for encrypted files."""
self.pwd = pwd
+ @property
+ def comment(self):
+ """The comment text associated with the ZIP file."""
+ return self._comment
+
+ @comment.setter
+ def comment(self, comment):
+ # check for valid comment length
+ if len(comment) > ZIP_MAX_COMMENT:
+ import warnings
+ warnings.warn('Archive comment is too long; truncating to %d bytes'
+ % ZIP_MAX_COMMENT, stacklevel=2)
+ comment = comment[:ZIP_MAX_COMMENT]
+ self._comment = comment
+ self._didModify = True
+
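A usage sketch of the new comment property, assuming the 65535-byte ZIP comment limit (ZIP_MAX_COMMENT) and an in-memory archive::

    import io
    import warnings
    import zipfile

    zf = zipfile.ZipFile(io.BytesIO(), 'w')
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        zf.comment = b'x' * 70000        # longer than ZIP_MAX_COMMENT
    print(len(zf.comment))               # 65535: truncated, with a warning issued
    print(len(caught))                   # 1
    zf.close()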
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
return self.open(name, "r", pwd).read()
@@ -880,62 +946,72 @@ class ZipFile:
# given a file object in the constructor
if self._filePassed:
zef_file = self.fp
+ should_close = False
else:
zef_file = open(self.filename, 'rb')
+ should_close = True
- # Make sure we have an info object
- if isinstance(name, ZipInfo):
- # 'name' is already an info object
- zinfo = name
- else:
- # Get info object for name
- zinfo = self.getinfo(name)
-
- zef_file.seek(zinfo.header_offset, 0)
-
- # Skip the file header:
- fheader = zef_file.read(sizeFileHeader)
- if fheader[0:4] != stringFileHeader:
- raise BadZipfile, "Bad magic number for file header"
-
- fheader = struct.unpack(structFileHeader, fheader)
- fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
- if fheader[_FH_EXTRA_FIELD_LENGTH]:
- zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
-
- if fname != zinfo.orig_filename:
- raise BadZipfile, \
- 'File name in directory "%s" and header "%s" differ.' % (
- zinfo.orig_filename, fname)
-
- # check for encrypted flag & handle password
- is_encrypted = zinfo.flag_bits & 0x1
- zd = None
- if is_encrypted:
- if not pwd:
- pwd = self.pwd
- if not pwd:
- raise RuntimeError, "File %s is encrypted, " \
- "password required for extraction" % name
-
- zd = _ZipDecrypter(pwd)
- # The first 12 bytes in the cypher stream is an encryption header
- # used to strengthen the algorithm. The first 11 bytes are
- # completely random, while the 12th contains the MSB of the CRC,
- # or the MSB of the file time depending on the header type
- # and is used to check the correctness of the password.
- bytes = zef_file.read(12)
- h = map(zd, bytes[0:12])
- if zinfo.flag_bits & 0x8:
- # compare against the file type from extended local headers
- check_byte = (zinfo._raw_time >> 8) & 0xff
+ try:
+ # Make sure we have an info object
+ if isinstance(name, ZipInfo):
+ # 'name' is already an info object
+ zinfo = name
else:
- # compare against the CRC otherwise
- check_byte = (zinfo.CRC >> 24) & 0xff
- if ord(h[11]) != check_byte:
- raise RuntimeError("Bad password for file", name)
-
- return ZipExtFile(zef_file, mode, zinfo, zd)
+ # Get info object for name
+ zinfo = self.getinfo(name)
+
+ zef_file.seek(zinfo.header_offset, 0)
+
+ # Skip the file header:
+ fheader = zef_file.read(sizeFileHeader)
+ if len(fheader) != sizeFileHeader:
+ raise BadZipfile("Truncated file header")
+ fheader = struct.unpack(structFileHeader, fheader)
+ if fheader[_FH_SIGNATURE] != stringFileHeader:
+ raise BadZipfile("Bad magic number for file header")
+
+ fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
+ if fheader[_FH_EXTRA_FIELD_LENGTH]:
+ zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
+
+ if fname != zinfo.orig_filename:
+ raise BadZipfile, \
+ 'File name in directory "%s" and header "%s" differ.' % (
+ zinfo.orig_filename, fname)
+
+ # check for encrypted flag & handle password
+ is_encrypted = zinfo.flag_bits & 0x1
+ zd = None
+ if is_encrypted:
+ if not pwd:
+ pwd = self.pwd
+ if not pwd:
+ raise RuntimeError, "File %s is encrypted, " \
+ "password required for extraction" % name
+
+ zd = _ZipDecrypter(pwd)
+ # The first 12 bytes in the cypher stream is an encryption header
+ # used to strengthen the algorithm. The first 11 bytes are
+ # completely random, while the 12th contains the MSB of the CRC,
+ # or the MSB of the file time depending on the header type
+ # and is used to check the correctness of the password.
+ bytes = zef_file.read(12)
+ h = map(zd, bytes[0:12])
+ if zinfo.flag_bits & 0x8:
+ # compare against the file type from extended local headers
+ check_byte = (zinfo._raw_time >> 8) & 0xff
+ else:
+ # compare against the CRC otherwise
+ check_byte = (zinfo.CRC >> 24) & 0xff
+ if ord(h[11]) != check_byte:
+ raise RuntimeError("Bad password for file", name)
+
+ return ZipExtFile(zef_file, mode, zinfo, zd,
+ close_fileobj=should_close)
+ except:
+ if should_close:
+ zef_file.close()
+ raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
@@ -969,17 +1045,28 @@ class ZipFile:
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
- # Strip trailing path separator, unless it represents the root.
- if (targetpath[-1:] in (os.path.sep, os.path.altsep)
- and len(os.path.splitdrive(targetpath)[1]) > 1):
- targetpath = targetpath[:-1]
-
- # don't include leading "/" from file name if present
- if member.filename[0] == '/':
- targetpath = os.path.join(targetpath, member.filename[1:])
- else:
- targetpath = os.path.join(targetpath, member.filename)
+ arcname = member.filename.replace('/', os.path.sep)
+
+ if os.path.altsep:
+ arcname = arcname.replace(os.path.altsep, os.path.sep)
+ # interpret absolute pathname as relative, remove drive letter or
+ # UNC path, redundant separators, "." and ".." components.
+ arcname = os.path.splitdrive(arcname)[1]
+ arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
+ if x not in ('', os.path.curdir, os.path.pardir))
+ if os.path.sep == '\\':
+ # filter illegal characters on Windows
+ illegal = ':<>|"?*'
+ if isinstance(arcname, unicode):
+ table = {ord(c): ord('_') for c in illegal}
+ else:
+ table = string.maketrans(illegal, '_' * len(illegal))
+ arcname = arcname.translate(table)
+ # remove trailing dots
+ arcname = (x.rstrip('.') for x in arcname.split(os.path.sep))
+ arcname = os.path.sep.join(x for x in arcname if x)
+ targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
@@ -992,19 +1079,17 @@ class ZipFile:
os.mkdir(targetpath)
return targetpath
- source = self.open(member, pwd=pwd)
- target = file(targetpath, "wb")
- shutil.copyfileobj(source, target)
- source.close()
- target.close()
+ with self.open(member, pwd=pwd) as source, \
+ file(targetpath, "wb") as target:
+ shutil.copyfileobj(source, target)
return targetpath
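A condensed, POSIX-flavoured sketch of the member-name sanitisation performed above: empty components, '.' and '..' are dropped before the name is joined onto the extraction target, so crafted names cannot escape it (the real code also strips drive letters and, on Windows, characters illegal in file names). Names here are local to the example::

    import os

    def sanitize_member_name(arcname, targetpath):
        # Discard path components that could redirect the write outside
        # targetpath.
        parts = [p for p in arcname.split('/')
                 if p not in ('', '.', '..')]
        return os.path.join(targetpath, *parts)

    print(sanitize_member_name('../../etc/passwd', '/tmp/out'))
    # /tmp/out/etc/passwd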
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
- if self.debug: # Warning for duplicate names
- print "Duplicate name:", zinfo.filename
+ import warnings
+ warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
if self.mode not in ("w", "a"):
raise RuntimeError, 'write() requires mode "w" or "a"'
if not self.fp:
@@ -1062,20 +1147,23 @@ class ZipFile:
zinfo.CRC = 0
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
- self.fp.write(zinfo.FileHeader())
+ self.fp.write(zinfo.FileHeader(False))
return
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
- zinfo.file_size = file_size = 0
- self.fp.write(zinfo.FileHeader())
+ # Compressed size can be larger than uncompressed size
+ zip64 = self._allowZip64 and \
+ zinfo.file_size * 1.05 > ZIP64_LIMIT
+ self.fp.write(zinfo.FileHeader(zip64))
if zinfo.compress_type == ZIP_DEFLATED:
cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
else:
cmpr = None
+ file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
@@ -1095,11 +1183,16 @@ class ZipFile:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
- # Seek backwards and write CRC and file sizes
+ if not zip64 and self._allowZip64:
+ if file_size > ZIP64_LIMIT:
+ raise RuntimeError('File size has increased during compressing')
+ if compress_size > ZIP64_LIMIT:
+ raise RuntimeError('Compressed size larger than uncompressed size')
+ # Seek backwards and write file header (which will now include
+ # correct CRC and file sizes)
position = self.fp.tell() # Preserve current position in file
- self.fp.seek(zinfo.header_offset + 14, 0)
- self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
- zinfo.file_size))
+ self.fp.seek(zinfo.header_offset, 0)
+ self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(position, 0)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
@@ -1136,14 +1229,18 @@ class ZipFile:
zinfo.compress_size = len(bytes) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
- zinfo.header_offset = self.fp.tell() # Start of header bytes
- self.fp.write(zinfo.FileHeader())
+ zip64 = zinfo.file_size > ZIP64_LIMIT or \
+ zinfo.compress_size > ZIP64_LIMIT
+ if zip64 and not self._allowZip64:
+ raise LargeZipFile("Filesize would require ZIP64 extensions")
+ self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(bytes)
- self.fp.flush()
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
- self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
+ fmt = '<LQQ' if zip64 else '<LLL'
+ self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
+ self.fp.flush()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
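A sketch of the data-descriptor sizing decision used in writestr() above: once either size exceeds the 32-bit limit, the descriptor needs 8-byte size fields. ZIP64_LIMIT below matches zipfile's constant::

    import struct

    ZIP64_LIMIT = (1 << 31) - 1

    def data_descriptor(crc, compress_size, file_size):
        # Pick 4-byte or 8-byte size fields depending on whether ZIP64 applies.
        zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
        fmt = '<LQQ' if zip64 else '<LLL'
        return struct.pack(fmt, crc, compress_size, file_size)

    print(len(data_descriptor(0, 100, 100)))          # 12
    print(len(data_descriptor(0, 100, 5 * 2**32)))    # 20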
@@ -1157,109 +1254,104 @@ class ZipFile:
if self.fp is None:
return
- if self.mode in ("w", "a") and self._didModify: # write ending records
- count = 0
- pos1 = self.fp.tell()
- for zinfo in self.filelist: # write central directory
- count = count + 1
- dt = zinfo.date_time
- dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
- dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
- extra = []
- if zinfo.file_size > ZIP64_LIMIT \
- or zinfo.compress_size > ZIP64_LIMIT:
- extra.append(zinfo.file_size)
- extra.append(zinfo.compress_size)
- file_size = 0xffffffff
- compress_size = 0xffffffff
- else:
- file_size = zinfo.file_size
- compress_size = zinfo.compress_size
-
- if zinfo.header_offset > ZIP64_LIMIT:
- extra.append(zinfo.header_offset)
- header_offset = 0xffffffffL
- else:
- header_offset = zinfo.header_offset
-
- extra_data = zinfo.extra
- if extra:
- # Append a ZIP64 field to the extra's
- extra_data = struct.pack(
- '<HH' + 'Q'*len(extra),
- 1, 8*len(extra), *extra) + extra_data
-
- extract_version = max(45, zinfo.extract_version)
- create_version = max(45, zinfo.create_version)
- else:
- extract_version = zinfo.extract_version
- create_version = zinfo.create_version
-
- try:
- filename, flag_bits = zinfo._encodeFilenameFlags()
- centdir = struct.pack(structCentralDir,
- stringCentralDir, create_version,
- zinfo.create_system, extract_version, zinfo.reserved,
- flag_bits, zinfo.compress_type, dostime, dosdate,
- zinfo.CRC, compress_size, file_size,
- len(filename), len(extra_data), len(zinfo.comment),
- 0, zinfo.internal_attr, zinfo.external_attr,
- header_offset)
- except DeprecationWarning:
- print >>sys.stderr, (structCentralDir,
- stringCentralDir, create_version,
- zinfo.create_system, extract_version, zinfo.reserved,
- zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
- zinfo.CRC, compress_size, file_size,
- len(zinfo.filename), len(extra_data), len(zinfo.comment),
- 0, zinfo.internal_attr, zinfo.external_attr,
- header_offset)
- raise
- self.fp.write(centdir)
- self.fp.write(filename)
- self.fp.write(extra_data)
- self.fp.write(zinfo.comment)
-
- pos2 = self.fp.tell()
- # Write end-of-zip-archive record
- centDirCount = count
- centDirSize = pos2 - pos1
- centDirOffset = pos1
- if (centDirCount >= ZIP_FILECOUNT_LIMIT or
- centDirOffset > ZIP64_LIMIT or
- centDirSize > ZIP64_LIMIT):
- # Need to write the ZIP64 end-of-archive records
- zip64endrec = struct.pack(
- structEndArchive64, stringEndArchive64,
- 44, 45, 45, 0, 0, centDirCount, centDirCount,
- centDirSize, centDirOffset)
- self.fp.write(zip64endrec)
-
- zip64locrec = struct.pack(
- structEndArchive64Locator,
- stringEndArchive64Locator, 0, pos2, 1)
- self.fp.write(zip64locrec)
- centDirCount = min(centDirCount, 0xFFFF)
- centDirSize = min(centDirSize, 0xFFFFFFFF)
- centDirOffset = min(centDirOffset, 0xFFFFFFFF)
-
- # check for valid comment length
- if len(self.comment) >= ZIP_MAX_COMMENT:
- if self.debug > 0:
- msg = 'Archive comment is too long; truncating to %d bytes' \
- % ZIP_MAX_COMMENT
- self.comment = self.comment[:ZIP_MAX_COMMENT]
-
- endrec = struct.pack(structEndArchive, stringEndArchive,
- 0, 0, centDirCount, centDirCount,
- centDirSize, centDirOffset, len(self.comment))
- self.fp.write(endrec)
- self.fp.write(self.comment)
- self.fp.flush()
-
- if not self._filePassed:
- self.fp.close()
- self.fp = None
+ try:
+ if self.mode in ("w", "a") and self._didModify: # write ending records
+ count = 0
+ pos1 = self.fp.tell()
+ for zinfo in self.filelist: # write central directory
+ count = count + 1
+ dt = zinfo.date_time
+ dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
+ dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
+ extra = []
+ if zinfo.file_size > ZIP64_LIMIT \
+ or zinfo.compress_size > ZIP64_LIMIT:
+ extra.append(zinfo.file_size)
+ extra.append(zinfo.compress_size)
+ file_size = 0xffffffff
+ compress_size = 0xffffffff
+ else:
+ file_size = zinfo.file_size
+ compress_size = zinfo.compress_size
+
+ if zinfo.header_offset > ZIP64_LIMIT:
+ extra.append(zinfo.header_offset)
+ header_offset = 0xffffffffL
+ else:
+ header_offset = zinfo.header_offset
+
+ extra_data = zinfo.extra
+ if extra:
+ # Append a ZIP64 field to the extra's
+ extra_data = struct.pack(
+ '<HH' + 'Q'*len(extra),
+ 1, 8*len(extra), *extra) + extra_data
+
+ extract_version = max(45, zinfo.extract_version)
+ create_version = max(45, zinfo.create_version)
+ else:
+ extract_version = zinfo.extract_version
+ create_version = zinfo.create_version
+
+ try:
+ filename, flag_bits = zinfo._encodeFilenameFlags()
+ centdir = struct.pack(structCentralDir,
+ stringCentralDir, create_version,
+ zinfo.create_system, extract_version, zinfo.reserved,
+ flag_bits, zinfo.compress_type, dostime, dosdate,
+ zinfo.CRC, compress_size, file_size,
+ len(filename), len(extra_data), len(zinfo.comment),
+ 0, zinfo.internal_attr, zinfo.external_attr,
+ header_offset)
+ except DeprecationWarning:
+ print >>sys.stderr, (structCentralDir,
+ stringCentralDir, create_version,
+ zinfo.create_system, extract_version, zinfo.reserved,
+ zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
+ zinfo.CRC, compress_size, file_size,
+ len(zinfo.filename), len(extra_data), len(zinfo.comment),
+ 0, zinfo.internal_attr, zinfo.external_attr,
+ header_offset)
+ raise
+ self.fp.write(centdir)
+ self.fp.write(filename)
+ self.fp.write(extra_data)
+ self.fp.write(zinfo.comment)
+
+ pos2 = self.fp.tell()
+ # Write end-of-zip-archive record
+ centDirCount = count
+ centDirSize = pos2 - pos1
+ centDirOffset = pos1
+ if (centDirCount >= ZIP_FILECOUNT_LIMIT or
+ centDirOffset > ZIP64_LIMIT or
+ centDirSize > ZIP64_LIMIT):
+ # Need to write the ZIP64 end-of-archive records
+ zip64endrec = struct.pack(
+ structEndArchive64, stringEndArchive64,
+ 44, 45, 45, 0, 0, centDirCount, centDirCount,
+ centDirSize, centDirOffset)
+ self.fp.write(zip64endrec)
+
+ zip64locrec = struct.pack(
+ structEndArchive64Locator,
+ stringEndArchive64Locator, 0, pos2, 1)
+ self.fp.write(zip64locrec)
+ centDirCount = min(centDirCount, 0xFFFF)
+ centDirSize = min(centDirSize, 0xFFFFFFFF)
+ centDirOffset = min(centDirOffset, 0xFFFFFFFF)
+
+ endrec = struct.pack(structEndArchive, stringEndArchive,
+ 0, 0, centDirCount, centDirCount,
+ centDirSize, centDirOffset, len(self._comment))
+ self.fp.write(endrec)
+ self.fp.write(self._comment)
+ self.fp.flush()
+ finally:
+ fp = self.fp
+ self.fp = None
+ if not self._filePassed:
+ fp.close()
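A compact sketch of the decision close() makes about when the ZIP64 end-of-archive records must be written first; the limits below mirror the module's intent (a 16-bit entry count and 32-bit size/offset fields)::

    ZIP64_LIMIT = (1 << 31) - 1
    ZIP_FILECOUNT_LIMIT = 1 << 16

    def needs_zip64_endrec(count, cent_dir_size, cent_dir_offset):
        # True when any field no longer fits the classic end-of-central-dir
        # record, so the ZIP64 record and locator are emitted before it.
        return (count >= ZIP_FILECOUNT_LIMIT or
                cent_dir_offset > ZIP64_LIMIT or
                cent_dir_size > ZIP64_LIMIT)

    print(needs_zip64_endrec(10, 1000, 2000))         # False
    print(needs_zip64_endrec(70000, 1000, 2000))      # True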
class PyZipFile(ZipFile):
@@ -1381,16 +1473,15 @@ def main(args = None):
if len(args) != 2:
print USAGE
sys.exit(1)
- zf = ZipFile(args[1], 'r')
- zf.printdir()
- zf.close()
+ with ZipFile(args[1], 'r') as zf:
+ zf.printdir()
elif args[0] == '-t':
if len(args) != 2:
print USAGE
sys.exit(1)
- zf = ZipFile(args[1], 'r')
- badfile = zf.testzip()
+ with ZipFile(args[1], 'r') as zf:
+ badfile = zf.testzip()
if badfile:
print("The following enclosed file is corrupted: {!r}".format(badfile))
print "Done testing"
@@ -1400,20 +1491,19 @@ def main(args = None):
print USAGE
sys.exit(1)
- zf = ZipFile(args[1], 'r')
- out = args[2]
- for path in zf.namelist():
- if path.startswith('./'):
- tgt = os.path.join(out, path[2:])
- else:
- tgt = os.path.join(out, path)
+ with ZipFile(args[1], 'r') as zf:
+ out = args[2]
+ for path in zf.namelist():
+ if path.startswith('./'):
+ tgt = os.path.join(out, path[2:])
+ else:
+ tgt = os.path.join(out, path)
- tgtdir = os.path.dirname(tgt)
- if not os.path.exists(tgtdir):
- os.makedirs(tgtdir)
- with open(tgt, 'wb') as fp:
- fp.write(zf.read(path))
- zf.close()
+ tgtdir = os.path.dirname(tgt)
+ if not os.path.exists(tgtdir):
+ os.makedirs(tgtdir)
+ with open(tgt, 'wb') as fp:
+ fp.write(zf.read(path))
elif args[0] == '-c':
if len(args) < 3:
@@ -1429,11 +1519,9 @@ def main(args = None):
os.path.join(path, nm), os.path.join(zippath, nm))
# else: ignore
- zf = ZipFile(args[1], 'w', allowZip64=True)
- for src in args[2:]:
- addToZip(zf, src, os.path.basename(src))
-
- zf.close()
+ with ZipFile(args[1], 'w', allowZip64=True) as zf:
+ for src in args[2:]:
+ addToZip(zf, src, os.path.basename(src))
if __name__ == "__main__":
main()
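The command-line rewrite above leans entirely on ZipFile's context-manager support; a quick round-trip sketch using an in-memory archive::

    import io
    import zipfile

    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w') as zf:
        zf.writestr('hello.txt', b'hello world\n')

    with zipfile.ZipFile(buf, 'r') as zf:
        zf.printdir()
        print(zf.read('hello.txt'))                   # hello world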
diff --git a/Mac/BuildScript/README.txt b/Mac/BuildScript/README.txt
index de2f5cb..7c2a0bb 100644
--- a/Mac/BuildScript/README.txt
+++ b/Mac/BuildScript/README.txt
@@ -8,70 +8,122 @@ $DESTROOT, massages that installation to remove .pyc files and such, creates
an Installer package from the installation plus other files in ``resources``
and ``scripts`` and places that on a ``.dmg`` disk image.
-As of Python 2.7.x and 3.2, PSF practice is to build two installer variants
-for each release:
+For Python 2.7.x and 3.x, PSF practice is to build two installer variants
+for each release.
-1. 32-bit-only, i386 and PPC universal, capable on running on all machines
- supported by Mac OS X 10.3.9 through (at least) 10.6::
+Beginning with Python 2.7.8, we plan to drop binary installer support for
+Mac OS X 10.3.9 and 10.4.x systems. To ease the transition, for Python 2.7.7
+only there will be three installers provided:
- python build-installer.py \
+1. DEPRECATED - 32-bit-only, i386 and PPC universal, capable of running on all
+ machines supported by Mac OS X 10.3.9 through (at least) 10.9::
+
+ /usr/bin/python build-installer.py \
--sdk-path=/Developer/SDKs/MacOSX10.4u.sdk \
--universal-archs=32-bit \
--dep-target=10.3
- # These are the current default options
- builds the following third-party libraries
* Bzip2
- * Zlib 1.2.3
- * GNU Readline (GPL)
- * SQLite 3
* NCurses
+ * GNU Readline (GPL)
+ * SQLite 3.7.13
+ * Zlib 1.2.3
* Oracle Sleepycat DB 4.8 (Python 2.x only)
- requires ActiveState ``Tcl/Tk 8.4`` (currently 8.4.19) to be installed for building
- - current target build environment:
+ - recommended build environment:
* Mac OS X 10.5.8 PPC or Intel
- * Xcode 3.1.4 (or later)
+ * Xcode 3.1.4
* ``MacOSX10.4u`` SDK (later SDKs do not support PPC G3 processors)
* ``MACOSX_DEPLOYMENT_TARGET=10.3``
* Apple ``gcc-4.0``
- * Python 2.n (n >= 4) for documentation build with Sphinx
+ * system Python 2.5 for documentation build with Sphinx
+
+ - alternate build environments:
+
+ * Mac OS X 10.6.8 with Xcode 3.2.6
+ - need to change ``/System/Library/Frameworks/{Tcl,Tk}.framework/Version/Current`` to ``8.4``
+ * Note Xcode 4.* does not support building for PPC so cannot be used for this build
+
+2. 32-bit-only, i386 and PPC universal, capable of running on all machines
+ supported by Mac OS X 10.5 through (at least) 10.9::
+
+ /usr/bin/python build-installer.py \
+ --sdk-path=/Developer/SDKs/MacOSX10.5.sdk \
+ --universal-archs=32-bit \
+ --dep-target=10.5
+
+ - builds the following third-party libraries
+
+ * NCurses 5.9
+ * SQLite 3.7.13
+ * Oracle Sleepycat DB 4.8 (Python 2.x only)
+
+ - uses system-supplied versions of third-party libraries
+
+ * readline module links with Apple BSD editline (libedit)
+
+ - requires ActiveState ``Tcl/Tk 8.4`` (currently 8.4.20) to be installed for building
+
+ - recommended build environment:
+
+ * Mac OS X 10.5.8 Intel or PPC
+ * Xcode 3.1.4
+ * ``MacOSX10.5`` SDK
+ * ``MACOSX_DEPLOYMENT_TARGET=10.5``
+ * Apple ``gcc-4.2``
+ * system Python 2.5+ for documentation build with Sphinx
- alternate build environments:
- * Mac OS X 10.4.11 with Xcode 2.5
- * Mac OS X 10.6.6 with Xcode 3.2.5
+ * Mac OS X 10.6.8 with Xcode 3.2.6
- need to change ``/System/Library/Frameworks/{Tcl,Tk}.framework/Version/Current`` to ``8.4``
+ * Note Xcode 4.* does not support building for PPC so cannot be used for this build
-2. 64-bit / 32-bit, x86_64 and i386 universal, for OS X 10.6 (and later)::
+3. 64-bit / 32-bit, x86_64 and i386 universal, for OS X 10.6 (and later)::
- python build-installer.py \
+ /usr/bin/python build-installer.py \
--sdk-path=/Developer/SDKs/MacOSX10.6.sdk \
--universal-archs=intel \
--dep-target=10.6
+ - builds the following third-party libraries
+
+ * NCurses 5.9 (http://bugs.python.org/issue15037)
+ * SQLite 3.7.13
+ * Oracle Sleepycat DB 4.8 (Python 2.x only)
+
- uses system-supplied versions of third-party libraries
-
+
* readline module links with Apple BSD editline (libedit)
- * builds Oracle Sleepycat DB 4.8 (Python 2.x only)
- - requires ActiveState Tcl/Tk 8.5.9 (or later) to be installed for building
+ - requires ActiveState Tcl/Tk 8.5.15 (or later) to be installed for building
- - current target build environment:
-
- * Mac OS X 10.6.6 (or later)
- * Xcode 3.2.5 (or later)
+ - recommended build environment:
+
+ * Mac OS X 10.6.8 (or later)
+ * Xcode 3.2.6
* ``MacOSX10.6`` SDK
* ``MACOSX_DEPLOYMENT_TARGET=10.6``
* Apple ``gcc-4.2``
- * Python 2.n (n >= 4) for documentation build with Sphinx
+ * system Python 2.6 for documentation build with Sphinx
- alternate build environments:
- * none
+ * none. Xcode 4.x currently supplies two C compilers.
+ ``llvm-gcc-4.2.1`` has been found to miscompile Python 3.3.x and
+ produce a non-functional Python executable. As it appears to be
+ considered a migration aid by Apple and is not likely to be fixed,
+ its use should be avoided. The other compiler, ``clang``, has been
+ undergoing rapid development. While it appears to have become
+ production-ready in the most recent Xcode 5 releases, the versions
+ available on the deprecated Xcode 4.x for 10.6 were early releases
+ and did not receive the level of exposure in production environments
+ that the Xcode 3 gcc-4.2 compiler has had.
General Prerequisites
@@ -87,6 +139,11 @@ General Prerequisites
* It is safest to start each variant build with an empty source directory
populated with a fresh copy of the untarred source.
+* It is recommended that you remove any existing installed version of the
+ Python being built::
+
+ sudo rm -rf /Library/Frameworks/Python.framework/Versions/n.n
+
The Recipe
----------
@@ -107,9 +164,9 @@ Building other universal installers
...................................
It is also possible to build a 4-way universal installer that runs on
-OS X Leopard or later::
+OS X 10.5 Leopard or later::
- python 2.6 /build-installer.py \
+ /usr/bin/python /build-installer.py \
--dep-target=10.5
--universal-archs=all
--sdk-path=/Developer/SDKs/MacOSX10.5.sdk
@@ -120,7 +177,8 @@ also that you are building on at least OS X 10.5. 4-way includes
variants can only be run on G5 machines running 10.5. Note that,
while OS X 10.6 is only supported on Intel-based machines, it is possible
to run ``ppc`` (32-bit) executables unmodified thanks to the Rosetta ppc
-emulation in OS X 10.5 and 10.6.
+emulation in OS X 10.5 and 10.6. The 4-way installer variant must be
+built with Xcode 3. It is not regularly built or tested.
Other ``--universal-archs`` options are ``64-bit`` (``x86_64``, ``ppc64``),
and ``3-way`` (``ppc``, ``i386``, ``x86_64``). None of these options
@@ -133,15 +191,21 @@ Testing
Ideally, the resulting binaries should be installed and the test suite run
on all supported OS X releases and architectures. As a practical matter,
that is generally not possible. At a minimum, variant 1 should be run on
-at least one Intel, one PPC G4, and one PPC G3 system and one each of
-OS X 10.6, 10.5, 10.4, and 10.3.9. Not all tests run on 10.3.9.
-Variant 2 should be run on 10.6 in both 32-bit and 64-bit modes.::
+a PPC G4 system with OS X 10.5 and at least one Intel system running OS X
+10.8, 10.7, 10.6, or 10.5. Variant 2 should be run on 10.8, 10.7, and 10.6
+systems in both 32-bit and 64-bit modes.::
- arch -i386 /usr/local/bin/pythonn.n -m test.regrtest -w -u all
- arch -X86_64 /usr/local/bin/pythonn.n -m test.regrtest -w -u all
+ /usr/local/bin/pythonn.n -m test -w -u all,-largefile
+ /usr/local/bin/pythonn.n-32 -m test -w -u all
Certain tests will be skipped and some cause the interpreter to fail
which will likely generate ``Python quit unexpectedly`` alert messages
-to be generated at several points during a test run. These can
-be ignored.
+to be generated at several points during a test run. These are normal
+during testing and can be ignored.
+
+It is also recommended to launch IDLE and verify that it is at least
+functional. Double-click on the IDLE app icon in ``/Applications/Pythonn.n``.
+It should also be tested from the command line::
+
+ /usr/local/bin/idlen.n
diff --git a/Mac/BuildScript/build-installer.py b/Mac/BuildScript/build-installer.py
index ba92012..199b560 100755
--- a/Mac/BuildScript/build-installer.py
+++ b/Mac/BuildScript/build-installer.py
@@ -1,27 +1,52 @@
-#!/usr/bin/python
+#!/usr/bin/env python
"""
This script is used to build "official" universal installers on Mac OS X.
-It requires at least Mac OS X 10.4, Xcode 2.2 and the 10.4u SDK for
+It requires at least Mac OS X 10.5, Xcode 3, and the 10.4u SDK for
32-bit builds. 64-bit or four-way universal builds require at least
OS X 10.5 and the 10.5 SDK.
Please ensure that this script keeps working with Python 2.5, to avoid
bootstrap issues (/usr/bin/python is Python 2.5 on OSX 10.5). Sphinx,
which is used to build the documentation, currently requires at least
-Python 2.4.
+Python 2.4. However, as of Python 3.4.1, Doc builds require an external
+sphinx-build and the current versions of Sphinx now require at least
+Python 2.6.
+
+In addition to what is supplied with OS X 10.5+ and Xcode 3+, the script
+requires an installed version of hg and a third-party version of
+Tcl/Tk 8.4 (for OS X 10.4 and 10.5 deployment targets) or Tcl/Tk 8.5
+(for 10.6 or later) installed in /Library/Frameworks. When installed,
+the Python built by this script will attempt to dynamically link first to
+Tcl and Tk frameworks in /Library/Frameworks if available otherwise fall
+back to the ones in /System/Library/Framework. For the build, we recommend
+installing the most recent ActiveTcl 8.4 or 8.5 version.
+
+32-bit-only installer builds are still possible on OS X 10.4 with Xcode 2.5
+and the installation of additional components, such as a newer Python
+(2.5 is needed for Python parser updates), hg, and for the documentation
+build either svn (pre-3.4.1) or sphinx-build (3.4.1 and later).
Usage: see USAGE variable in the script.
"""
-import platform, os, sys, getopt, textwrap, shutil, urllib2, stat, time, pwd
-import grp
+import platform, os, sys, getopt, textwrap, shutil, stat, time, pwd, grp
+try:
+ import urllib2 as urllib_request
+except ImportError:
+ import urllib.request as urllib_request
+
+STAT_0o755 = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ | stat.S_IRGRP | stat.S_IXGRP
+ | stat.S_IROTH | stat.S_IXOTH )
+
+STAT_0o775 = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
+ | stat.S_IROTH | stat.S_IXOTH )
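The two STAT_* constants above just spell out the traditional octal modes without octal literals (whose spelling differs between Python 2 and 3); for instance::

    import stat

    STAT_0o755 = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                 | stat.S_IRGRP | stat.S_IXGRP
                 | stat.S_IROTH | stat.S_IXOTH )

    print(STAT_0o755 == 493)                          # True; 493 is 0o755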
INCLUDE_TIMESTAMP = 1
VERBOSE = 1
from plistlib import Plist
-import MacOS
-
try:
from plistlib import writePlist
except ImportError:
@@ -42,20 +67,35 @@ def grepValue(fn, variable):
if ln.startswith(variable):
value = ln[len(variable):].strip()
return value[1:-1]
- raise RuntimeError, "Cannot find variable %s" % variable[:-1]
+ raise RuntimeError("Cannot find variable %s" % variable[:-1])
+
+_cache_getVersion = None
def getVersion():
- return grepValue(os.path.join(SRCDIR, 'configure'), 'PACKAGE_VERSION')
+ global _cache_getVersion
+ if _cache_getVersion is None:
+ _cache_getVersion = grepValue(
+ os.path.join(SRCDIR, 'configure'), 'PACKAGE_VERSION')
+ return _cache_getVersion
def getVersionTuple():
return tuple([int(n) for n in getVersion().split('.')])
+def getVersionMajorMinor():
+ return tuple([int(n) for n in getVersion().split('.', 2)])
+
+_cache_getFullVersion = None
+
def getFullVersion():
+ global _cache_getFullVersion
+ if _cache_getFullVersion is not None:
+ return _cache_getFullVersion
fn = os.path.join(SRCDIR, 'Include', 'patchlevel.h')
for ln in open(fn):
if 'PY_VERSION' in ln:
- return ln.split()[-1][1:-1]
- raise RuntimeError, "Cannot find full version??"
+ _cache_getFullVersion = ln.split()[-1][1:-1]
+ return _cache_getFullVersion
+ raise RuntimeError("Cannot find full version??")
# The directory we'll use to create the build (will be erased and recreated)
WORKDIR = "/tmp/_py"
@@ -70,7 +110,7 @@ DEPSRC = os.path.expanduser('~/Universal/other-sources')
### There are some issues with the SDK selection below here,
### The resulting binary doesn't work on all platforms that
### it should. Always default to the 10.4u SDK until that
-### isue is resolved.
+### issue is resolved.
###
##if int(os.uname()[2].split('.')[0]) == 8:
## # Explicitly use the 10.4u (universal) SDK when
@@ -110,14 +150,19 @@ SRCDIR = os.path.dirname(
# $MACOSX_DEPLOYMENT_TARGET -> minimum OS X level
DEPTARGET = '10.3'
-target_cc_map = {
- '10.3': 'gcc-4.0',
- '10.4': 'gcc-4.0',
- '10.5': 'gcc-4.0',
- '10.6': 'gcc-4.2',
-}
+def getDeptargetTuple():
+ return tuple([int(n) for n in DEPTARGET.split('.')[0:2]])
+
+def getTargetCompilers():
+ target_cc_map = {
+ '10.3': ('gcc-4.0', 'g++-4.0'),
+ '10.4': ('gcc-4.0', 'g++-4.0'),
+ '10.5': ('gcc-4.2', 'g++-4.2'),
+ '10.6': ('gcc-4.2', 'g++-4.2'),
+ }
+ return target_cc_map.get(DEPTARGET, ('clang', 'clang++') )
-CC = target_cc_map[DEPTARGET]
+CC, CXX = getTargetCompilers()
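A tiny standalone sketch of the compiler-selection fallback that getTargetCompilers() introduces: known deployment targets map to a gcc/g++ pair, anything newer falls through to clang::

    def target_compilers(deptarget):
        # Standalone mirror of getTargetCompilers(); the script's version
        # reads the global DEPTARGET instead of taking an argument.
        target_cc_map = {
            '10.3': ('gcc-4.0', 'g++-4.0'),
            '10.4': ('gcc-4.0', 'g++-4.0'),
            '10.5': ('gcc-4.2', 'g++-4.2'),
            '10.6': ('gcc-4.2', 'g++-4.2'),
        }
        return target_cc_map.get(deptarget, ('clang', 'clang++'))

    print(target_compilers('10.5'))                   # ('gcc-4.2', 'g++-4.2')
    print(target_compilers('10.9'))                   # ('clang', 'clang++')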
PYTHON_3 = getVersionTuple() >= (3, 0)
@@ -135,6 +180,13 @@ USAGE = textwrap.dedent("""\
--universal-archs=x universal architectures (options: %(UNIVERSALOPTS)r, default: %(UNIVERSALARCHS)r)
""")% globals()
+# Dict of object file names with shared library names to check after building.
+# This is to ensure that we ended up dynamically linking with the shared
+# library paths and versions we expected. For example:
+# EXPECTED_SHARED_LIBS['_tkinter.so'] = [
+# '/Library/Frameworks/Tcl.framework/Versions/8.5/Tcl',
+# '/Library/Frameworks/Tk.framework/Versions/8.5/Tk']
+EXPECTED_SHARED_LIBS = {}
# Instructions for building libraries that are necessary for building a
# batteries included python.
@@ -143,15 +195,126 @@ USAGE = textwrap.dedent("""\
def library_recipes():
result = []
- if DEPTARGET < '10.5':
+ LT_10_5 = bool(getDeptargetTuple() < (10, 5))
+
+# Disable for now
+ if False: # if (getDeptargetTuple() > (10, 5)) and (getVersionTuple() >= (3, 5)):
+ result.extend([
+ dict(
+ name="Tcl 8.5.15",
+ url="ftp://ftp.tcl.tk/pub/tcl//tcl8_5/tcl8.5.15-src.tar.gz",
+ checksum='f3df162f92c69b254079c4d0af7a690f',
+ buildDir="unix",
+ configure_pre=[
+ '--enable-shared',
+ '--enable-threads',
+ '--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib'%(getVersion(),),
+ ],
+ useLDFlags=False,
+ install='make TCL_LIBRARY=%(TCL_LIBRARY)s && make install TCL_LIBRARY=%(TCL_LIBRARY)s DESTDIR=%(DESTDIR)s'%{
+ "DESTDIR": shellQuote(os.path.join(WORKDIR, 'libraries')),
+ "TCL_LIBRARY": shellQuote('/Library/Frameworks/Python.framework/Versions/%s/lib/tcl8.5'%(getVersion())),
+ },
+ ),
+ dict(
+ name="Tk 8.5.15",
+ url="ftp://ftp.tcl.tk/pub/tcl//tcl8_5/tk8.5.15-src.tar.gz",
+ checksum='55b8e33f903210a4e1c8bce0f820657f',
+ patches=[
+ "issue19373_tk_8_5_15_source.patch",
+ ],
+ buildDir="unix",
+ configure_pre=[
+ '--enable-aqua',
+ '--enable-shared',
+ '--enable-threads',
+ '--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib'%(getVersion(),),
+ ],
+ useLDFlags=False,
+ install='make TCL_LIBRARY=%(TCL_LIBRARY)s TK_LIBRARY=%(TK_LIBRARY)s && make install TCL_LIBRARY=%(TCL_LIBRARY)s TK_LIBRARY=%(TK_LIBRARY)s DESTDIR=%(DESTDIR)s'%{
+ "DESTDIR": shellQuote(os.path.join(WORKDIR, 'libraries')),
+ "TCL_LIBRARY": shellQuote('/Library/Frameworks/Python.framework/Versions/%s/lib/tcl8.5'%(getVersion())),
+ "TK_LIBRARY": shellQuote('/Library/Frameworks/Python.framework/Versions/%s/lib/tk8.5'%(getVersion())),
+ },
+ ),
+ ])
+
+ if getVersionTuple() >= (3, 3):
+ result.extend([
+ dict(
+ name="XZ 5.0.5",
+ url="http://tukaani.org/xz/xz-5.0.5.tar.gz",
+ checksum='19d924e066b6fff0bc9d1981b4e53196',
+ configure_pre=[
+ '--disable-dependency-tracking',
+ ]
+ ),
+ ])
+
+ result.extend([
+ dict(
+ name="NCurses 5.9",
+ url="http://ftp.gnu.org/pub/gnu/ncurses/ncurses-5.9.tar.gz",
+ checksum='8cb9c412e5f2d96bc6f459aa8c6282a1',
+ configure_pre=[
+ "--enable-widec",
+ "--without-cxx",
+ "--without-cxx-binding",
+ "--without-ada",
+ "--without-curses-h",
+ "--enable-shared",
+ "--with-shared",
+ "--without-debug",
+ "--without-normal",
+ "--without-tests",
+ "--without-manpages",
+ "--datadir=/usr/share",
+ "--sysconfdir=/etc",
+ "--sharedstatedir=/usr/com",
+ "--with-terminfo-dirs=/usr/share/terminfo",
+ "--with-default-terminfo-dir=/usr/share/terminfo",
+ "--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib"%(getVersion(),),
+ ],
+ patchscripts=[
+ ("ftp://invisible-island.net/ncurses//5.9/ncurses-5.9-20120616-patch.sh.bz2",
+ "f54bf02a349f96a7c4f0d00922f3a0d4"),
+ ],
+ useLDFlags=False,
+ install='make && make install DESTDIR=%s && cd %s/usr/local/lib && ln -fs ../../../Library/Frameworks/Python.framework/Versions/%s/lib/lib* .'%(
+ shellQuote(os.path.join(WORKDIR, 'libraries')),
+ shellQuote(os.path.join(WORKDIR, 'libraries')),
+ getVersion(),
+ ),
+ ),
+ dict(
+ name="SQLite 3.8.3.1",
+ url="http://www.sqlite.org/2014/sqlite-autoconf-3080301.tar.gz",
+ checksum='509ff98d8dc9729b618b7e96612079c6',
+ extra_cflags=('-Os '
+ '-DSQLITE_ENABLE_FTS4 '
+ '-DSQLITE_ENABLE_FTS3_PARENTHESIS '
+ '-DSQLITE_ENABLE_RTREE '
+ '-DSQLITE_TCL=0 '
+ '%s' % ('','-DSQLITE_WITHOUT_ZONEMALLOC ')[LT_10_5]),
+ configure_pre=[
+ '--enable-threadsafe',
+ '--enable-shared=no',
+ '--enable-static=yes',
+ '--disable-readline',
+ '--disable-dependency-tracking',
+ ]
+ ),
+ ])
+
+ if getDeptargetTuple() < (10, 5):
result.extend([
dict(
name="Bzip2 1.0.6",
url="http://bzip.org/1.0.6/bzip2-1.0.6.tar.gz",
checksum='00b516f4704d4a7cb50a1d97e6e8e15b',
configure=None,
- install='make install CC=%s PREFIX=%s/usr/local/ CFLAGS="-arch %s -isysroot %s"'%(
- CC,
+ install='make install CC=%s CXX=%s, PREFIX=%s/usr/local/ CFLAGS="-arch %s -isysroot %s"'%(
+ CC, CXX,
shellQuote(os.path.join(WORKDIR, 'libraries')),
' -arch '.join(ARCHLIST),
SDKPATH,
@@ -162,8 +325,8 @@ def library_recipes():
url="http://www.gzip.org/zlib/zlib-1.2.3.tar.gz",
checksum='debc62758716a169df9f62e6ab2bc634',
configure=None,
- install='make install CC=%s prefix=%s/usr/local/ CFLAGS="-arch %s -isysroot %s"'%(
- CC,
+ install='make install CC=%s CXX=%s, prefix=%s/usr/local/ CFLAGS="-arch %s -isysroot %s"'%(
+ CC, CXX,
shellQuote(os.path.join(WORKDIR, 'libraries')),
' -arch '.join(ARCHLIST),
SDKPATH,
@@ -178,58 +341,12 @@ def library_recipes():
patches=[
# The readline maintainers don't do actual micro releases, but
# just ship a set of patches.
- 'http://ftp.gnu.org/pub/gnu/readline/readline-6.1-patches/readline61-001',
- 'http://ftp.gnu.org/pub/gnu/readline/readline-6.1-patches/readline61-002',
- ]
- ),
- dict(
- name="SQLite 3.7.4",
- url="http://www.sqlite.org/sqlite-autoconf-3070400.tar.gz",
- checksum='8f0c690bfb33c3cbbc2471c3d9ba0158',
- configure_env=('CFLAGS="-Os'
- ' -DSQLITE_ENABLE_FTS3'
- ' -DSQLITE_ENABLE_FTS3_PARENTHESIS'
- ' -DSQLITE_ENABLE_RTREE'
- ' -DSQLITE_TCL=0'
- '"'),
- configure_pre=[
- '--enable-threadsafe',
- '--enable-shared=no',
- '--enable-static=yes',
- '--disable-readline',
- '--disable-dependency-tracking',
+ ('http://ftp.gnu.org/pub/gnu/readline/readline-6.1-patches/readline61-001',
+ 'c642f2e84d820884b0bf9fd176bc6c3f'),
+ ('http://ftp.gnu.org/pub/gnu/readline/readline-6.1-patches/readline61-002',
+ '1a76781a1ea734e831588285db7ec9b1'),
]
),
- dict(
- name="NCurses 5.5",
- url="http://ftp.gnu.org/pub/gnu/ncurses/ncurses-5.5.tar.gz",
- checksum='e73c1ac10b4bfc46db43b2ddfd6244ef',
- configure_pre=[
- "--enable-widec",
- "--without-cxx",
- "--without-ada",
- "--without-progs",
- "--without-curses-h",
- "--enable-shared",
- "--with-shared",
- "--datadir=/usr/share",
- "--sysconfdir=/etc",
- "--sharedstatedir=/usr/com",
- "--with-terminfo-dirs=/usr/share/terminfo",
- "--with-default-terminfo-dir=/usr/share/terminfo",
- "--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib"%(getVersion(),),
- "--enable-termcap",
- ],
- patches=[
- "ncurses-5.5.patch",
- ],
- useLDFlags=False,
- install='make && make install DESTDIR=%s && cd %s/usr/local/lib && ln -fs ../../../Library/Frameworks/Python.framework/Versions/%s/lib/lib* .'%(
- shellQuote(os.path.join(WORKDIR, 'libraries')),
- shellQuote(os.path.join(WORKDIR, 'libraries')),
- getVersion(),
- ),
- ),
])
if not PYTHON_3:
@@ -252,6 +369,8 @@ def library_recipes():
# Instructions for building packages inside the .mpkg.
def pkg_recipes():
unselected_for_python3 = ('selected', 'unselected')[PYTHON_3]
+ # unselected if 3.0 through 3.3, selected otherwise (2.x or >= 3.4)
+ unselected_for_lt_python34 = ('selected', 'unselected')[(3, 0) <= getVersionTuple() < (3, 4)]
result = [
dict(
name="PythonFramework",
@@ -298,9 +417,7 @@ def pkg_recipes():
source="/pydocs",
readme="""\
This package installs the python documentation at a location
- that is useable for pydoc and IDLE. If you have installed Xcode
- it will also install a link to the documentation in
- /Developer/Documentation/Python
+ that is useable for pydoc and IDLE.
""",
postflight="scripts/postflight.documentation",
required=False,
@@ -322,11 +439,28 @@ def pkg_recipes():
topdir="/Library/Frameworks/Python.framework",
source="/empty-dir",
required=False,
- selected=unselected_for_python3,
+ selected=unselected_for_lt_python34,
),
]
- if DEPTARGET < '10.4':
+ if getVersionTuple() >= (3, 4):
+ result.append(
+ dict(
+ name="PythonInstallPip",
+ long_name="Install or upgrade pip",
+ readme="""\
+ This package installs (or upgrades from an earlier version)
+ pip, a tool for installing and managing Python packages.
+ """,
+ postflight="scripts/postflight.ensurepip",
+ topdir="/Library/Frameworks/Python.framework",
+ source="/empty-dir",
+ required=False,
+ selected='selected',
+ )
+ )
+
+ if getDeptargetTuple() < (10, 4) and not PYTHON_3:
result.append(
dict(
name="PythonSystemFixes",
@@ -343,6 +477,7 @@ def pkg_recipes():
selected=unselected_for_python3,
)
)
+
return result
def fatal(msg):
@@ -358,7 +493,7 @@ def fileContents(fn):
"""
Return the contents of the named file
"""
- return open(fn, 'rb').read()
+ return open(fn, 'r').read()
def runCommand(commandline):
"""
@@ -370,7 +505,7 @@ def runCommand(commandline):
xit = fd.close()
if xit is not None:
sys.stdout.write(data)
- raise RuntimeError, "command failed: %s"%(commandline,)
+ raise RuntimeError("command failed: %s"%(commandline,))
if VERBOSE:
sys.stdout.write(data); sys.stdout.flush()
@@ -381,7 +516,7 @@ def captureCommand(commandline):
xit = fd.close()
if xit is not None:
sys.stdout.write(data)
- raise RuntimeError, "command failed: %s"%(commandline,)
+ raise RuntimeError("command failed: %s"%(commandline,))
return data
@@ -423,47 +558,72 @@ def checkEnvironment():
# Because we only support dynamic load of only one major/minor version of
# Tcl/Tk, ensure:
# 1. there are no user-installed frameworks of Tcl/Tk with version
- # higher than the Apple-supplied system version
- # 2. there is a user-installed framework in /Library/Frameworks with the
- # same version as the system version. This allows users to choose
- # to install a newer patch level.
+ # higher than the Apple-supplied system version in
+ # SDKROOT/System/Library/Frameworks
+ # 2. there is a user-installed framework (usually ActiveTcl) in (or linked
+ # in) SDKROOT/Library/Frameworks with the same version as the system
+ # version. This allows users to choose to install a newer patch level.
+ frameworks = {}
for framework in ['Tcl', 'Tk']:
- #fw = dict(lower=framework.lower(),
- # upper=framework.upper(),
- # cap=framework.capitalize())
- #fwpth = "Library/Frameworks/%(cap)s.framework/%(lower)sConfig.sh" % fw
- fwpth = 'Library/Frameworks/Tcl.framework/Versions/Current'
+ fwpth = 'Library/Frameworks/%s.framework/Versions/Current' % framework
sysfw = os.path.join(SDKPATH, 'System', fwpth)
- libfw = os.path.join('/', fwpth)
+ libfw = os.path.join(SDKPATH, fwpth)
usrfw = os.path.join(os.getenv('HOME'), fwpth)
- #version = "%(upper)s_VERSION" % fw
+ frameworks[framework] = os.readlink(sysfw)
+ if not os.path.exists(libfw):
+ fatal("Please install a link to a current %s %s as %s so "
+ "the user can override the system framework."
+ % (framework, frameworks[framework], libfw))
if os.readlink(libfw) != os.readlink(sysfw):
fatal("Version of %s must match %s" % (libfw, sysfw) )
if os.path.exists(usrfw):
fatal("Please rename %s to avoid possible dynamic load issues."
% usrfw)
+ if frameworks['Tcl'] != frameworks['Tk']:
+ fatal("The Tcl and Tk frameworks are not the same version.")
+
+ # add files to check after build
+ EXPECTED_SHARED_LIBS['_tkinter.so'] = [
+ "/Library/Frameworks/Tcl.framework/Versions/%s/Tcl"
+ % frameworks['Tcl'],
+ "/Library/Frameworks/Tk.framework/Versions/%s/Tk"
+ % frameworks['Tk'],
+ ]
+
# Remove inherited environment variables which might influence build
environ_var_prefixes = ['CPATH', 'C_INCLUDE_', 'DYLD_', 'LANG', 'LC_',
'LD_', 'LIBRARY_', 'PATH', 'PYTHON']
for ev in list(os.environ):
for prefix in environ_var_prefixes:
if ev.startswith(prefix) :
- print "INFO: deleting environment variable %s=%s" % (
- ev, os.environ[ev])
+ print("INFO: deleting environment variable %s=%s" % (
+ ev, os.environ[ev]))
del os.environ[ev]
- os.environ['PATH'] = '/bin:/sbin:/usr/bin:/usr/sbin'
- print "Setting default PATH: %s"%(os.environ['PATH'])
-
+ base_path = '/bin:/sbin:/usr/bin:/usr/sbin'
+ if 'SDK_TOOLS_BIN' in os.environ:
+ base_path = os.environ['SDK_TOOLS_BIN'] + ':' + base_path
+ # Xcode 2.5 on OS X 10.4 does not include SetFile in its usr/bin;
+ # add its fixed location here if it exists
+ OLD_DEVELOPER_TOOLS = '/Developer/Tools'
+ if os.path.isdir(OLD_DEVELOPER_TOOLS):
+ base_path = base_path + ':' + OLD_DEVELOPER_TOOLS
+ os.environ['PATH'] = base_path
+ print("Setting default PATH: %s"%(os.environ['PATH']))
+    # Ensure we have access to hg and to sphinx-build.
+ # You may have to create links in /usr/bin for them.
+ runCommand('hg --version')
+ if getVersionTuple() >= (3, 4):
+ runCommand('sphinx-build --version')
def parseOptions(args=None):
"""
Parse arguments and update global settings.
"""
global WORKDIR, DEPSRC, SDKPATH, SRCDIR, DEPTARGET
- global UNIVERSALOPTS, UNIVERSALARCHS, ARCHLIST, CC
+ global UNIVERSALOPTS, UNIVERSALARCHS, ARCHLIST, CC, CXX
if args is None:
args = sys.argv[1:]
@@ -472,18 +632,18 @@ def parseOptions(args=None):
options, args = getopt.getopt(args, '?hb',
[ 'build-dir=', 'third-party=', 'sdk-path=' , 'src-dir=',
'dep-target=', 'universal-archs=', 'help' ])
- except getopt.error, msg:
- print msg
+ except getopt.GetoptError:
+ print(sys.exc_info()[1])
sys.exit(1)
if args:
- print "Additional arguments"
+ print("Additional arguments")
sys.exit(1)
deptarget = None
for k, v in options:
if k in ('-h', '-?', '--help'):
- print USAGE
+ print(USAGE)
sys.exit(0)
elif k in ('-d', '--build-dir'):
@@ -511,27 +671,28 @@ def parseOptions(args=None):
# target
DEPTARGET = default_target_map.get(v, '10.3')
else:
- raise NotImplementedError, v
+ raise NotImplementedError(v)
else:
- raise NotImplementedError, k
+ raise NotImplementedError(k)
SRCDIR=os.path.abspath(SRCDIR)
WORKDIR=os.path.abspath(WORKDIR)
SDKPATH=os.path.abspath(SDKPATH)
DEPSRC=os.path.abspath(DEPSRC)
- CC=target_cc_map[DEPTARGET]
+ CC, CXX = getTargetCompilers()
- print "Settings:"
- print " * Source directory:", SRCDIR
- print " * Build directory: ", WORKDIR
- print " * SDK location: ", SDKPATH
- print " * Third-party source:", DEPSRC
- print " * Deployment target:", DEPTARGET
- print " * Universal architectures:", ARCHLIST
- print " * C compiler:", CC
- print ""
+ print("Settings:")
+ print(" * Source directory:", SRCDIR)
+ print(" * Build directory: ", WORKDIR)
+ print(" * SDK location: ", SDKPATH)
+ print(" * Third-party source:", DEPSRC)
+ print(" * Deployment target:", DEPTARGET)
+ print(" * Universal architectures:", ARCHLIST)
+ print(" * C compiler:", CC)
+ print(" * C++ compiler:", CXX)
+ print("")
@@ -543,13 +704,19 @@ def extractArchive(builddir, archiveName):
XXX: This function assumes that archives contain a toplevel directory
that is has the same name as the basename of the archive. This is
- save enough for anything we use.
+ safe enough for almost anything we use. Unfortunately, it does not
+ work for current Tcl and Tk source releases where the basename of
+ the archive ends with "-src" but the uncompressed directory does not.
+ For now, just special case Tcl and Tk tar.gz downloads.
"""
curdir = os.getcwd()
try:
os.chdir(builddir)
if archiveName.endswith('.tar.gz'):
retval = os.path.basename(archiveName[:-7])
+ if ((retval.startswith('tcl') or retval.startswith('tk'))
+ and retval.endswith('-src')):
+ retval = retval[:-4]
if os.path.exists(retval):
shutil.rmtree(retval)
fp = os.popen("tar zxf %s 2>&1"%(shellQuote(archiveName),), 'r')
@@ -576,31 +743,18 @@ def extractArchive(builddir, archiveName):
xit = fp.close()
if xit is not None:
sys.stdout.write(data)
- raise RuntimeError, "Cannot extract %s"%(archiveName,)
+ raise RuntimeError("Cannot extract %s"%(archiveName,))
return os.path.join(builddir, retval)
finally:
os.chdir(curdir)
-KNOWNSIZES = {
- "http://ftp.gnu.org/pub/gnu/readline/readline-5.1.tar.gz": 7952742,
- "http://downloads.sleepycat.com/db-4.4.20.tar.gz": 2030276,
-}
-
def downloadURL(url, fname):
"""
Download the contents of the url into the file.
"""
- try:
- size = os.path.getsize(fname)
- except OSError:
- pass
- else:
- if KNOWNSIZES.get(url) == size:
- print "Using existing file for", url
- return
- fpIn = urllib2.urlopen(url)
+ fpIn = urllib_request.urlopen(url)
fpOut = open(fname, 'wb')
block = fpIn.read(10240)
try:
@@ -615,6 +769,24 @@ def downloadURL(url, fname):
except:
pass
+def verifyThirdPartyFile(url, checksum, fname):
+ """
+ Download file from url to filename fname if it does not already exist.
+ Abort if file contents does not match supplied md5 checksum.
+ """
+ name = os.path.basename(fname)
+ if os.path.exists(fname):
+ print("Using local copy of %s"%(name,))
+ else:
+ print("Did not find local copy of %s"%(name,))
+ print("Downloading %s"%(name,))
+ downloadURL(url, fname)
+ print("Archive for %s stored as %s"%(name, fname))
+ if os.system(
+ 'MD5=$(openssl md5 %s) ; test "${MD5##*= }" = "%s"'
+ % (shellQuote(fname), checksum) ):
+ fatal('MD5 checksum mismatch for file %s' % fname)
+
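verifyThirdPartyFile() shells out to openssl for the MD5 check; the same verification could be done in-process with the standard library's hashlib, as a sketch (not what the script itself does)::

    import hashlib

    def md5_matches(fname, expected_md5, blocksize=1 << 20):
        # Stream the file in blocks so large tarballs need not fit in memory.
        digest = hashlib.md5()
        with open(fname, 'rb') as fp:
            for block in iter(lambda: fp.read(blocksize), b''):
                digest.update(block)
        return digest.hexdigest() == expected_md5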
def buildRecipe(recipe, basedir, archList):
"""
Build software using a recipe. This function does the
@@ -635,38 +807,43 @@ def buildRecipe(recipe, basedir, archList):
if not os.path.exists(DEPSRC):
os.mkdir(DEPSRC)
-
- if os.path.exists(sourceArchive):
- print "Using local copy of %s"%(name,)
-
- else:
- print "Did not find local copy of %s"%(name,)
- print "Downloading %s"%(name,)
- downloadURL(url, sourceArchive)
- print "Archive for %s stored as %s"%(name, sourceArchive)
-
- print "Extracting archive for %s"%(name,)
+ verifyThirdPartyFile(url, recipe['checksum'], sourceArchive)
+ print("Extracting archive for %s"%(name,))
buildDir=os.path.join(WORKDIR, '_bld')
if not os.path.exists(buildDir):
os.mkdir(buildDir)
workDir = extractArchive(buildDir, sourceArchive)
os.chdir(workDir)
- if 'buildDir' in recipe:
- os.chdir(recipe['buildDir'])
-
- for fn in recipe.get('patches', ()):
- if fn.startswith('http://'):
- # Download the patch before applying it.
- path = os.path.join(DEPSRC, os.path.basename(fn))
- downloadURL(fn, path)
- fn = path
-
- fn = os.path.join(curdir, fn)
+ for patch in recipe.get('patches', ()):
+ if isinstance(patch, tuple):
+ url, checksum = patch
+ fn = os.path.join(DEPSRC, os.path.basename(url))
+ verifyThirdPartyFile(url, checksum, fn)
+ else:
+ # patch is a file in the source directory
+ fn = os.path.join(curdir, patch)
runCommand('patch -p%s < %s'%(recipe.get('patchlevel', 1),
shellQuote(fn),))
+ for patchscript in recipe.get('patchscripts', ()):
+ if isinstance(patchscript, tuple):
+ url, checksum = patchscript
+ fn = os.path.join(DEPSRC, os.path.basename(url))
+ verifyThirdPartyFile(url, checksum, fn)
+ else:
+ # patch is a file in the source directory
+ fn = os.path.join(curdir, patchscript)
+ if fn.endswith('.bz2'):
+ runCommand('bunzip2 -fk %s' % shellQuote(fn))
+ fn = fn[:-4]
+ runCommand('sh %s' % shellQuote(fn))
+ os.unlink(fn)
+
+ if 'buildDir' in recipe:
+ os.chdir(recipe['buildDir'])
+
if configure is not None:
configure_args = [
"--prefix=/usr/local",
@@ -685,40 +862,44 @@ def buildRecipe(recipe, basedir, archList):
if recipe.get('useLDFlags', 1):
configure_args.extend([
- "CFLAGS=-arch %s -isysroot %s -I%s/usr/local/include"%(
+ "CFLAGS=%s-mmacosx-version-min=%s -arch %s -isysroot %s "
+ "-I%s/usr/local/include"%(
+ recipe.get('extra_cflags', ''),
+ DEPTARGET,
' -arch '.join(archList),
shellQuote(SDKPATH)[1:-1],
shellQuote(basedir)[1:-1],),
- "LDFLAGS=-syslibroot,%s -L%s/usr/local/lib -arch %s"%(
+ "LDFLAGS=-mmacosx-version-min=%s -isysroot %s -L%s/usr/local/lib -arch %s"%(
+ DEPTARGET,
shellQuote(SDKPATH)[1:-1],
shellQuote(basedir)[1:-1],
' -arch '.join(archList)),
])
else:
configure_args.extend([
- "CFLAGS=-arch %s -isysroot %s -I%s/usr/local/include"%(
+ "CFLAGS=%s-mmacosx-version-min=%s -arch %s -isysroot %s "
+ "-I%s/usr/local/include"%(
+ recipe.get('extra_cflags', ''),
+ DEPTARGET,
' -arch '.join(archList),
shellQuote(SDKPATH)[1:-1],
shellQuote(basedir)[1:-1],),
])
if 'configure_post' in recipe:
- configure_args = configure_args = list(recipe['configure_post'])
+ configure_args = configure_args + list(recipe['configure_post'])
configure_args.insert(0, configure)
configure_args = [ shellQuote(a) for a in configure_args ]
- if 'configure_env' in recipe:
- configure_args.insert(0, recipe['configure_env'])
-
- print "Running configure for %s"%(name,)
+ print("Running configure for %s"%(name,))
runCommand(' '.join(configure_args) + ' 2>&1')
- print "Running install for %s"%(name,)
+ print("Running install for %s"%(name,))
runCommand('{ ' + install + ' ;} 2>&1')
- print "Done %s"%(name,)
- print ""
+ print("Done %s"%(name,))
+ print("")
os.chdir(curdir)
@@ -726,9 +907,9 @@ def buildLibraries():
"""
Build our dependencies into $WORKDIR/libraries/usr/local
"""
- print ""
- print "Building required libraries"
- print ""
+ print("")
+ print("Building required libraries")
+ print("")
universal = os.path.join(WORKDIR, 'libraries')
os.mkdir(universal)
os.makedirs(os.path.join(universal, 'usr', 'local', 'lib'))
@@ -742,14 +923,21 @@ def buildLibraries():
def buildPythonDocs():
# This stores the documentation as Resources/English.lproj/Documentation
# inside the framwork. pydoc and IDLE will pick it up there.
- print "Install python documentation"
+ print("Install python documentation")
rootDir = os.path.join(WORKDIR, '_root')
buildDir = os.path.join('../../Doc')
docdir = os.path.join(rootDir, 'pydocs')
curDir = os.getcwd()
os.chdir(buildDir)
- runCommand('make update')
- runCommand("make html PYTHON='%s'" % os.path.abspath(sys.executable))
+ # The Doc build changed for 3.4 (technically, for 3.4.1)
+ if getVersionTuple() < (3, 4):
+ # This step does an svn checkout of sphinx and its dependencies
+ runCommand('make update')
+ runCommand("make html PYTHON='%s'" % os.path.abspath(sys.executable))
+ else:
+ runCommand('make clean')
+ # Assume sphinx-build is on our PATH, checked in checkEnvironment
+ runCommand('make html')
os.chdir(curDir)
if not os.path.exists(docdir):
os.mkdir(docdir)
@@ -757,7 +945,7 @@ def buildPythonDocs():
def buildPython():
- print "Building a universal python for %s architectures" % UNIVERSALARCHS
+ print("Building a universal python for %s architectures" % UNIVERSALARCHS)
buildDir = os.path.join(WORKDIR, '_bld', 'python')
rootDir = os.path.join(WORKDIR, '_root')
@@ -785,31 +973,36 @@ def buildPython():
# will find them during its extension import sanity checks.
os.environ['DYLD_LIBRARY_PATH'] = os.path.join(WORKDIR,
'libraries', 'usr', 'local', 'lib')
- print "Running configure..."
+ print("Running configure...")
runCommand("%s -C --enable-framework --enable-universalsdk=%s "
"--with-universal-archs=%s "
"%s "
+ "%s "
"LDFLAGS='-g -L%s/libraries/usr/local/lib' "
- "OPT='-g -O3 -I%s/libraries/usr/local/include' 2>&1"%(
+ "CFLAGS='-g -I%s/libraries/usr/local/include' 2>&1"%(
shellQuote(os.path.join(SRCDIR, 'configure')), shellQuote(SDKPATH),
UNIVERSALARCHS,
(' ', '--with-computed-gotos ')[PYTHON_3],
+ (' ', '--without-ensurepip ')[getVersionTuple() >= (3, 4)],
shellQuote(WORKDIR)[1:-1],
shellQuote(WORKDIR)[1:-1]))
- print "Running make"
+ print("Running make touch")
+ runCommand("make touch")
+
+ print("Running make")
runCommand("make")
- print "Running make install"
+ print("Running make install")
runCommand("make install DESTDIR=%s"%(
shellQuote(rootDir)))
- print "Running make frameworkinstallextras"
+ print("Running make frameworkinstallextras")
runCommand("make frameworkinstallextras DESTDIR=%s"%(
shellQuote(rootDir)))
del os.environ['DYLD_LIBRARY_PATH']
- print "Copying required shared libraries"
+ print("Copying required shared libraries")
if os.path.exists(os.path.join(WORKDIR, 'libraries', 'Library')):
runCommand("mv %s/* %s"%(
shellQuote(os.path.join(
@@ -820,16 +1013,20 @@ def buildPython():
'Python.framework', 'Versions', getVersion(),
'lib'))))
- print "Fix file modes"
+ path_to_lib = os.path.join(rootDir, 'Library', 'Frameworks',
+ 'Python.framework', 'Versions',
+ version, 'lib', 'python%s'%(version,))
+
+ print("Fix file modes")
frmDir = os.path.join(rootDir, 'Library', 'Frameworks', 'Python.framework')
gid = grp.getgrnam('admin').gr_gid
+ shared_lib_error = False
for dirpath, dirnames, filenames in os.walk(frmDir):
for dn in dirnames:
- os.chmod(os.path.join(dirpath, dn), 0775)
+ os.chmod(os.path.join(dirpath, dn), STAT_0o775)
os.chown(os.path.join(dirpath, dn), -1, gid)
-
for fn in filenames:
if os.path.islink(fn):
continue
@@ -840,6 +1037,19 @@ def buildPython():
os.chmod(p, stat.S_IMODE(st.st_mode) | stat.S_IWGRP)
os.chown(p, -1, gid)
+ if fn in EXPECTED_SHARED_LIBS:
+ # check to see that this file was linked with the
+ # expected library path and version
+ data = captureCommand("otool -L %s" % shellQuote(p))
+ for sl in EXPECTED_SHARED_LIBS[fn]:
+ if ("\t%s " % sl) not in data:
+ print("Expected shared lib %s was not linked with %s"
+ % (sl, p))
+ shared_lib_error = True
+
+ if shared_lib_error:
+ fatal("Unexpected shared library errors.")
+
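
The shared-library check added above scans ``otool -L`` output for each file listed in EXPECTED_SHARED_LIBS. That dictionary is defined earlier in build-installer.py and is not part of this hunk; a hypothetical entry, and the kind of otool line it would match, could look like::

    # Hypothetical entry; the real EXPECTED_SHARED_LIBS contents are defined
    # elsewhere in build-installer.py.
    EXPECTED_SHARED_LIBS = {
        '_tkinter.so': [
            '/Library/Frameworks/Tcl.framework/Versions/8.5/Tcl',
            '/Library/Frameworks/Tk.framework/Versions/8.5/Tk',
        ],
    }

    # otool -L prints one tab-indented line per linked library, e.g.
    #   \t/Library/Frameworks/Tcl.framework/Versions/8.5/Tcl (compatibility version ...)
    # which is why the check looks for "\t<expected path> " in the captured output.
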
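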
if PYTHON_3:
LDVERSION=None
VERSION=None
@@ -863,20 +1073,63 @@ def buildPython():
# We added some directories to the search path during the configure
# phase. Remove those because those directories won't be there on
- # the end-users system.
- path =os.path.join(rootDir, 'Library', 'Frameworks', 'Python.framework',
- 'Versions', version, 'lib', 'python%s'%(version,),
- 'config' + config_suffix, 'Makefile')
+ # the end-users system. Also remove the directories from _sysconfigdata.py
+ # (added in 3.3) if it exists.
+
+ include_path = '-I%s/libraries/usr/local/include' % (WORKDIR,)
+ lib_path = '-L%s/libraries/usr/local/lib' % (WORKDIR,)
+
+ # fix Makefile
+ path = os.path.join(path_to_lib, 'config' + config_suffix, 'Makefile')
fp = open(path, 'r')
data = fp.read()
fp.close()
- data = data.replace('-L%s/libraries/usr/local/lib'%(WORKDIR,), '')
- data = data.replace('-I%s/libraries/usr/local/include'%(WORKDIR,), '')
+ for p in (include_path, lib_path):
+ data = data.replace(" " + p, '')
+ data = data.replace(p + " ", '')
+
fp = open(path, 'w')
fp.write(data)
fp.close()
+ # fix _sysconfigdata if it exists
+ #
+ # TODO: make this more robust! test_sysconfig_module of
+ # distutils.tests.test_sysconfig.SysconfigTestCase tests that
+ # the output from get_config_var in both sysconfig and
+ # distutils.sysconfig is exactly the same for both CFLAGS and
+ # LDFLAGS. The fixing up is now complicated by the pretty
+ # printing in _sysconfigdata.py. Also, we are using the
+ # pprint from the Python running the installer build which
+ # may not cosmetically format the same as the pprint in the Python
+ # being built (and which is used to originally generate
+ # _sysconfigdata.py).
+
+ import pprint
+ path = os.path.join(path_to_lib, '_sysconfigdata.py')
+ if os.path.exists(path):
+ fp = open(path, 'r')
+ data = fp.read()
+ fp.close()
+ # create build_time_vars dict
+ exec(data)
+ vars = {}
+ for k, v in build_time_vars.items():
+ if type(v) == type(''):
+ for p in (include_path, lib_path):
+ v = v.replace(' ' + p, '')
+ v = v.replace(p + ' ', '')
+ vars[k] = v
+
+ fp = open(path, 'w')
+ # duplicated from sysconfig._generate_posix_vars()
+ fp.write('# system configuration generated and used by'
+ ' the sysconfig module\n')
+ fp.write('build_time_vars = ')
+ pprint.pprint(vars, stream=fp)
+ fp.close()
+
# Add symlinks in /usr/local/bin, using relative links
usr_local_bin = os.path.join(rootDir, 'usr', 'local', 'bin')
to_framework = os.path.join('..', '..', '..', 'Library', 'Frameworks',
@@ -892,7 +1145,7 @@ def buildPython():
os.chdir(curdir)
if PYTHON_3:
- # Remove the 'Current' link, that way we don't accidently mess
+ # Remove the 'Current' link, that way we don't accidentally mess
# with an already installed version of python 2
os.unlink(os.path.join(rootDir, 'Library', 'Frameworks',
'Python.framework', 'Versions', 'Current'))
@@ -907,17 +1160,17 @@ def patchFile(inPath, outPath):
# This one is not handy as a template variable
data = data.replace('$PYTHONFRAMEWORKINSTALLDIR', '/Library/Frameworks/Python.framework')
- fp = open(outPath, 'wb')
+ fp = open(outPath, 'w')
fp.write(data)
fp.close()
def patchScript(inPath, outPath):
data = fileContents(inPath)
data = data.replace('@PYVER@', getVersion())
- fp = open(outPath, 'wb')
+ fp = open(outPath, 'w')
fp.write(data)
fp.close()
- os.chmod(outPath, 0755)
+ os.chmod(outPath, STAT_0o755)
@@ -934,7 +1187,7 @@ def packageFromRecipe(targetDir, recipe):
readme = textwrap.dedent(recipe['readme'])
isRequired = recipe.get('required', True)
- print "- building package %s"%(pkgname,)
+ print("- building package %s"%(pkgname,))
# Substitute some variables
textvars = dict(
@@ -979,7 +1232,7 @@ def packageFromRecipe(targetDir, recipe):
patchScript(postflight, os.path.join(rsrcDir, 'postflight'))
vers = getFullVersion()
- major, minor = map(int, getVersion().split('.', 2))
+ major, minor = getVersionMajorMinor()
pl = Plist(
CFBundleGetInfoString="Python.%s %s"%(pkgname, vers,),
CFBundleIdentifier='org.python.Python.%s'%(pkgname,),
@@ -1016,7 +1269,7 @@ def packageFromRecipe(targetDir, recipe):
def makeMpkgPlist(path):
vers = getFullVersion()
- major, minor = map(int, getVersion().split('.', 2))
+ major, minor = getVersionMajorMinor()
pl = Plist(
CFBundleGetInfoString="Python %s"%(vers,),
@@ -1127,7 +1380,7 @@ def buildDMG():
# Custom icon for the DMG, shown when the DMG is mounted.
shutil.copy("../Icons/Disk Image.icns",
os.path.join(WORKDIR, "mnt", volname, ".VolumeIcon.icns"))
- runCommand("/Developer/Tools/SetFile -a C %s/"%(
+ runCommand("SetFile -a C %s/"%(
shellQuote(os.path.join(WORKDIR, "mnt", volname)),))
runCommand("hdiutil detach %s"%(shellQuote(os.path.join(WORKDIR, "mnt", volname))))
@@ -1168,6 +1421,7 @@ def main():
os.environ['MACOSX_DEPLOYMENT_TARGET'] = DEPTARGET
os.environ['CC'] = CC
+ os.environ['CXX'] = CXX
if os.path.exists(WORKDIR):
shutil.rmtree(WORKDIR)
@@ -1198,7 +1452,7 @@ def main():
folder = os.path.join(WORKDIR, "_root", "Applications", "Python %s"%(
getVersion(),))
- os.chmod(folder, 0755)
+ os.chmod(folder, STAT_0o755)
setIcon(folder, "../Icons/Python Folder.icns")
# Create the installer
@@ -1211,9 +1465,9 @@ def main():
shutil.copy('../../LICENSE', os.path.join(WORKDIR, 'installer', 'License.txt'))
fp = open(os.path.join(WORKDIR, 'installer', 'Build.txt'), 'w')
- print >> fp, "# BUILD INFO"
- print >> fp, "# Date:", time.ctime()
- print >> fp, "# By:", pwd.getpwuid(os.getuid()).pw_gecos
+ fp.write("# BUILD INFO\n")
+ fp.write("# Date: %s\n" % time.ctime())
+ fp.write("# By: %s\n" % pwd.getpwuid(os.getuid()).pw_gecos)
fp.close()
# And copy it to a DMG
diff --git a/Mac/BuildScript/issue19373_tk_8_5_15_source.patch b/Mac/BuildScript/issue19373_tk_8_5_15_source.patch
new file mode 100644
index 0000000..de5d08e
--- /dev/null
+++ b/Mac/BuildScript/issue19373_tk_8_5_15_source.patch
@@ -0,0 +1,13 @@
+Issue #19373: Patch to Tk 8.5.15 to correct a refresh problem on OS X 10.9.
+From upstream checkin https://core.tcl.tk/tk/info/5a5abf71f9
+
+--- tk8.5.15/macosx/tkMacOSXDraw.c 2013-09-16 09:41:21.000000000 -0700
++++ Tk_Source_Code-5a5abf71f9fdb0da/macosx/tkMacOSXDraw.c 2013-10-27 13:27:00.000000000 -0700
+@@ -1688,6 +1688,7 @@
+ {
+ if (dcPtr->context) {
+ CGContextSynchronize(dcPtr->context);
++ [[dcPtr->view window] setViewsNeedDisplay:YES];
+ [[dcPtr->view window] enableFlushWindow];
+ if (dcPtr->focusLocked) {
+ [dcPtr->view unlockFocus];
diff --git a/Mac/BuildScript/ncurses-5.5.patch b/Mac/BuildScript/ncurses-5.5.patch
deleted file mode 100644
index 0eab3d3..0000000
--- a/Mac/BuildScript/ncurses-5.5.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-diff -r -u ncurses-5.5-orig/test/Makefile.in ncurses-5.5/test/Makefile.in
---- ncurses-5.5-orig/test/Makefile.in 2006-03-24 12:47:40.000000000 +0100
-+++ ncurses-5.5/test/Makefile.in 2006-03-24 12:47:50.000000000 +0100
-@@ -75,7 +75,7 @@
- MATH_LIB = @MATH_LIB@
-
- LD = @LD@
--LINK = @LINK_TESTS@ $(LIBTOOL_LINK) $(CC) $(CFLAGS)
-+LINK = @LINK_TESTS@ $(LIBTOOL_LINK) $(CC)
-
- usFLAGS = @LD_MODEL@ @LOCAL_LDFLAGS@ @LDFLAGS@
-
-diff -ru ncurses-5.5-orig/ncurses/tinfo/read_entry.c ncurses-5.5/ncurses/tinfo/read_entry.c
---- ncurses-5.5-orig/ncurses/tinfo/read_entry.c 2004-01-11 02:57:05.000000000 +0100
-+++ ncurses-5.5/ncurses/tinfo/read_entry.c 2006-03-25 22:49:39.000000000 +0100
-@@ -474,7 +474,7 @@
- }
-
- /* truncate the terminal name to prevent buffer overflow */
-- (void) sprintf(ttn, "%c/%.*s", *tn, (int) sizeof(ttn) - 3, tn);
-+ (void) sprintf(ttn, "%x/%.*s", *tn, (int) sizeof(ttn) - 3, tn);
-
- /* This is System V behavior, in conjunction with our requirements for
- * writing terminfo entries.
-diff -ru ncurses-5.5-orig/configure ncurses-5.5/configure
---- ncurses-5.5-orig/configure 2005-09-24 23:50:50.000000000 +0200
-+++ ncurses-5.5/configure 2006-03-26 22:24:59.000000000 +0200
-@@ -5027,7 +5027,7 @@
- darwin*)
- EXTRA_CFLAGS="-no-cpp-precomp"
- CC_SHARED_OPTS="-dynamic"
-- MK_SHARED_LIB='$(CC) -dynamiclib -install_name $(DESTDIR)$(libdir)/`basename $@` -compatibility_version $(ABI_VERSION) -current_version $(ABI_VERSION) -o $@'
-+ MK_SHARED_LIB='$(CC) $(CFLAGS) -dynamiclib -install_name $(DESTDIR)$(libdir)/`basename $@` -compatibility_version $(ABI_VERSION) -current_version $(ABI_VERSION) -o $@'
- test "$cf_cv_shlib_version" = auto && cf_cv_shlib_version=abi
- cf_cv_shlib_version_infix=yes
- ;;
diff --git a/Mac/BuildScript/resources/ReadMe.txt b/Mac/BuildScript/resources/ReadMe.txt
index 761cffb..a9d5879 100644
--- a/Mac/BuildScript/resources/ReadMe.txt
+++ b/Mac/BuildScript/resources/ReadMe.txt
@@ -2,11 +2,61 @@ This package will install Python $FULL_VERSION for Mac OS X
$MACOSX_DEPLOYMENT_TARGET for the following architecture(s):
$ARCHITECTURES.
-Installation requires approximately $INSTALL_SIZE MB of disk space,
-ignore the message that it will take zero bytes.
+ **** IMPORTANT ****
-You must install onto your current boot disk, even though the
-installer does not enforce this, otherwise things will not work.
+Installing on OS X 10.8 (Mountain Lion) or later systems
+========================================================
+
+If you are attempting to install on an OS X 10.8+ system, you may
+see a message that Python can't be installed because it is from an
+unidentified developer. This is because this Python installer
+package is not yet compatible with the Gatekeeper security feature
+introduced in OS X 10.8. To allow Python to be installed, you
+can override the Gatekeeper policy for this install. In the Finder,
+instead of double-clicking, control-click or right click the "Python"
+installer package icon. Then select "Open using ... Installer" from
+the contextual menu that appears.
+
+ **** IMPORTANT ****
+
+Update your version of Tcl/Tk to use IDLE or other Tk applications
+==================================================================
+
+To use IDLE or other programs that use the Tkinter graphical user
+interface toolkit, you may need to install a newer third-party version
+of the Tcl/Tk frameworks. Visit http://www.python.org/download/mac/tcltk/
+for current information about supported and recommended versions of
+Tcl/Tk for this version of Python and of Mac OS X.
+
+ **** IMPORTANT ****
+
+Binary installer support for 10.4 and 10.3.9 to be discontinued
+===============================================================
+
+Python 2.7.7 is the last release for which binary installers will be
+released on python.org that support OS X 10.3.9 (Panther) and 10.4.x
+(Tiger) systems. These systems were last updated by Apple in 2005
+and 2007. As of 2.7.8, the 32-bit-only installer will support PPC
+and Intel Macs running OS X 10.5 (Leopard) and later. 10.5 was the
+last OS X release for PPC machines (G4 and G5). (The 64-/32-bit
+installer configuration will remain unchanged.) This aligns Python
+2.7.x installer configurations with those currently provided with
+Python 3.x. Some of the reasons for making this change are:
+there were significant additions and compatibility improvements to
+the OS X POSIX system APIs in OS X 10.5 that Python users can now
+take advantage of; it is increasingly difficult to build and test
+on obsolete 10.3 and 10.4 systems and with the 10.3 ABI; and it is
+assumed that most remaining legacy PPC systems have upgraded to 10.5.
+To ease the transition, for Python 2.7.7 only we are providing three
+binary installers: (1) the legacy deprecated 32-bit-only 10.3+
+PPC/Intel format, (2) the newer 32-bit-only 10.5+ PPC/Intel format,
+and (3) the current 64-bit/32-bit 10.6+ Intel-only format. While
+future releases will not provide the deprecated installer, it will
+still be possible to build Python from source on 10.3.9 and 10.4
+systems if needed.
+
+Using this version of Python on OS X
+====================================
Python consists of the Python programming language interpreter, plus
a set of programs to allow easy access to it for Mac users including
@@ -14,17 +64,8 @@ an integrated development environment, IDLE, plus a set of pre-built
extension modules that open up specific Macintosh technologies to
Python programs.
- **** IMPORTANT ****
-
-Before using IDLE or other programs using the tkinter graphical user
-interface toolkit, visit http://www.python.org/download/mac/tcltk/
-for current information about supported and recommended versions
-of Tcl/Tk for this version of Python and Mac OS X.
-
- *******************
-
The installer puts applications, an "Update Shell Profile" command,
-and an Extras folder containing demo programs and tools into the
+and a link to the optionally installed Python Documentation into the
"Python $VERSION" subfolder of the system Applications folder,
and puts the underlying machinery into the folder
$PYTHONFRAMEWORKINSTALLDIR. It can
@@ -32,5 +73,16 @@ optionally place links to the command-line tools in /usr/local/bin as
well. Double-click on the "Update Shell Profile" command to add the
"bin" directory inside the framework to your shell's search path.
+You must install onto your current boot disk, even though the
+installer may not enforce this, otherwise things will not work.
+
+You can verify the integrity of the disk image file containing the
+installer package and this ReadMe file by comparing its md5 checksum
+and size with the values published on the release page linked at
+http://www.python.org/download/
+
+Installation requires approximately $INSTALL_SIZE MB of disk space,
+ignore the message that it will take zero bytes.
+
More information on Python in general can be found at
http://www.python.org.
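
The checksum advice in the ReadMe above can be followed with the stock OS X ``md5`` command, or from Python's standard library as in this small sketch (the .dmg filename is made up for the example)::

    # Compute the MD5 of a downloaded installer image so it can be compared
    # against the value published on the python.org release page.
    import hashlib

    dmg = 'python-2.7.8-macosx10.5.dmg'   # hypothetical filename
    md5 = hashlib.md5()
    with open(dmg, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            md5.update(chunk)
    print('%s  %s' % (md5.hexdigest(), dmg))
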
diff --git a/Mac/BuildScript/resources/Welcome.rtf b/Mac/BuildScript/resources/Welcome.rtf
index 1fb7cd4..e148772 100644
--- a/Mac/BuildScript/resources/Welcome.rtf
+++ b/Mac/BuildScript/resources/Welcome.rtf
@@ -1,8 +1,8 @@
-{\rtf1\ansi\ansicpg1252\cocoartf1038\cocoasubrtf350
-{\fonttbl\f0\fswiss\fcharset0 Helvetica;}
+{\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200
+\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;}
{\colortbl;\red255\green255\blue255;}
-\paperw11904\paperh16836\margl1440\margr1440\vieww9640\viewh10620\viewkind0
-\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural
+\paperw11905\paperh16837\margl1440\margr1440\vieww9640\viewh10620\viewkind0
+\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640
\f0\fs24 \cf0 This package will install
\b Python $FULL_VERSION
@@ -16,7 +16,7 @@
\b IDLE
\b0 and a set of pre-built extension modules that open up specific Macintosh technologies to Python programs.\
\
-See the ReadMe file and the Python documentation for more information.\
+See the ReadMe file and the Python documentation for important information, including the dropping of support for OS X 10.3.9 and 10.4 in future Python 2.7.x binary installers.\
\
\b IMPORTANT:
diff --git a/Mac/BuildScript/scripts/postflight.documentation b/Mac/BuildScript/scripts/postflight.documentation
index 31fbf2f..b9f28a5 100755
--- a/Mac/BuildScript/scripts/postflight.documentation
+++ b/Mac/BuildScript/scripts/postflight.documentation
@@ -5,19 +5,10 @@ FWK="/Library/Frameworks/Python.framework/Versions/${PYVER}"
FWK_DOCDIR_SUBPATH="Resources/English.lproj/Documentation"
FWK_DOCDIR="${FWK}/${FWK_DOCDIR_SUBPATH}"
APPDIR="/Applications/Python ${PYVER}"
-DEV_DOCDIR="/Developer/Documentation"
SHARE_DIR="${FWK}/share"
SHARE_DOCDIR="${SHARE_DIR}/doc/python${PYVER}"
SHARE_DOCDIR_TO_FWK="../../.."
-# make link in /Developer/Documentation/ for Xcode users
-if [ -d "${DEV_DOCDIR}" ]; then
- if [ ! -d "${DEV_DOCDIR}/Python" ]; then
- mkdir -p "${DEV_DOCDIR}/Python"
- fi
- ln -fhs "${FWK_DOCDIR}" "${DEV_DOCDIR}/Python/Reference Documentation ${PYVER}"
-fi
-
# make link in /Applications/Python m.n/ for Finder users
if [ -d "${APPDIR}" ]; then
ln -fhs "${FWK_DOCDIR}/index.html" "${APPDIR}/Python Documentation.html"
@@ -25,7 +16,7 @@ fi
# make share/doc link in framework for command line users
if [ -d "${SHARE_DIR}" ]; then
- mkdir -p "${SHARE_DOCDIR}"
+ mkdir -m 775 -p "${SHARE_DOCDIR}"
# make relative link to html doc directory
ln -fhs "${SHARE_DOCDIR_TO_FWK}/${FWK_DOCDIR_SUBPATH}" "${SHARE_DOCDIR}/html"
fi
diff --git a/Mac/BuildScript/scripts/postflight.framework b/Mac/BuildScript/scripts/postflight.framework
index 4c8f6d3..edd5038 100755
--- a/Mac/BuildScript/scripts/postflight.framework
+++ b/Mac/BuildScript/scripts/postflight.framework
@@ -8,14 +8,24 @@ FWK="/Library/Frameworks/Python.framework/Versions/@PYVER@"
"${FWK}/bin/python@PYVER@" -Wi -tt \
"${FWK}/lib/python${PYVER}/compileall.py" \
- -x badsyntax -x site-packages \
+ -f -x 'bad_coding|badsyntax|site-packages|lib2to3/tests/data' \
"${FWK}/lib/python${PYVER}"
"${FWK}/bin/python@PYVER@" -Wi -tt -O \
"${FWK}/lib/python${PYVER}/compileall.py" \
- -x badsyntax -x site-packages \
+ -f -x 'bad_coding|badsyntax|site-packages|lib2to3/tests/data' \
"${FWK}/lib/python${PYVER}"
+"${FWK}/bin/python@PYVER@" -Wi \
+ "${FWK}/lib/python${PYVER}/compileall.py" \
+ -f -x badsyntax \
+ "${FWK}/lib/python${PYVER}/site-packages"
+
+"${FWK}/bin/python@PYVER@" -Wi -O \
+ "${FWK}/lib/python${PYVER}/compileall.py" \
+ -f -x badsyntax \
+ "${FWK}/lib/python${PYVER}/site-packages"
+
chgrp -R admin "${FWK}"
chmod -R g+w "${FWK}"
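
The postflight.framework changes above force recompilation (``-f``) and switch to a single exclusion regex (``-x``), with separate passes for site-packages. In terms of the underlying library call, one of those invocations corresponds roughly to the following sketch (the framework path assumes a 2.7 install)::

    # Rough library-level equivalent of one compileall.py invocation above.
    import compileall
    import re

    lib_dir = ('/Library/Frameworks/Python.framework/Versions/2.7'
               '/lib/python2.7')
    skip = re.compile(r'bad_coding|badsyntax|site-packages|lib2to3/tests/data')

    # force=True mirrors -f; rx mirrors -x (paths matching the regex are skipped)
    compileall.compile_dir(lib_dir, rx=skip, force=True)
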
diff --git a/Mac/IDLE/Info.plist.in b/Mac/IDLE/Info.plist.in
index b1fc459..9a18b27 100644
--- a/Mac/IDLE/Info.plist.in
+++ b/Mac/IDLE/Info.plist.in
@@ -36,7 +36,7 @@
<key>CFBundleExecutable</key>
<string>IDLE</string>
<key>CFBundleGetInfoString</key>
- <string>%VERSION%, © 2001-2012 Python Software Foundation</string>
+ <string>%VERSION%, © 2001-2014 Python Software Foundation</string>
<key>CFBundleIconFile</key>
<string>IDLE.icns</string>
<key>CFBundleIdentifier</key>
@@ -51,6 +51,8 @@
<string>%VERSION%</string>
<key>CFBundleVersion</key>
<string>%VERSION%</string>
+ <key>NSHighResolutionCapable</key>
+ <true/>
<!--
<key>LSMinimumSystemVersionByArchitecture</key>
<dict>
diff --git a/Mac/Makefile.in b/Mac/Makefile.in
index a6d78fb..a3e00b2 100644
--- a/Mac/Makefile.in
+++ b/Mac/Makefile.in
@@ -38,10 +38,10 @@ INSTALL_SCRIPT= @INSTALL_SCRIPT@
INSTALL_DATA=@INSTALL_DATA@
LN=@LN@
STRIPFLAG=-s
-CPMAC=/Developer/Tools/CpMac
+CPMAC=CpMac
APPTEMPLATE=$(srcdir)/Resources/app
-APPSUBDIRS=MacOS Resources
+APPSUBDIRS=MacOS Resources
CACHERSRC=$(srcdir)/scripts/cachersrc.py
compileall=$(srcdir)/../Lib/compileall.py
@@ -202,15 +202,22 @@ install_IDLE:
cd IDLE && make install
install_BuildApplet:
- $(RUNSHARED) @ARCH_RUN_32BIT@ $(BUILDPYTHON) $(srcdir)/scripts/BuildApplet.py \
- --destroot "$(DESTDIR)" \
- --python=$(prefix)/Resources/Python.app/Contents/MacOS/Python \
- --output "$(DESTDIR)$(PYTHONAPPSDIR)/Build Applet.app" \
- $(srcdir)/scripts/BuildApplet.py
-ifneq ($(LIPO_32BIT_FLAGS),)
- rm "$(DESTDIR)$(PYTHONAPPSDIR)/Build Applet.app/Contents/MacOS/Python"
- lipo $(LIPO_32BIT_FLAGS) -output "$(DESTDIR)$(PYTHONAPPSDIR)/Build Applet.app/Contents/MacOS/Python" $(BUILDPYTHON)
-endif
+ if ! $(RUNSHARED) @ARCH_RUN_32BIT@ $(BUILDPYTHON) \
+ -c 'import EasyDialogs' 2>/dev/null ; then \
+ echo "EasyDialogs not available in this Python - skipping Build Applet.app" ; \
+ else \
+ $(RUNSHARED) @ARCH_RUN_32BIT@ $(BUILDPYTHON) $(srcdir)/scripts/BuildApplet.py \
+ --destroot "$(DESTDIR)" \
+ --python=$(prefix)/Resources/Python.app/Contents/MacOS/Python \
+ --output "$(DESTDIR)$(PYTHONAPPSDIR)/Build Applet.app" \
+ $(srcdir)/scripts/BuildApplet.py && \
+ if [ -n "$(LIPO_32BIT_FLAGS)" ] ; then \
+ rm "$(DESTDIR)$(PYTHONAPPSDIR)/Build Applet.app/Contents/MacOS/Python" && \
+ lipo $(LIPO_32BIT_FLAGS) \
+ -output "$(DESTDIR)$(PYTHONAPPSDIR)/Build Applet.app/Contents/MacOS/Python" \
+ $(BUILDPYTHON) ; \
+ fi \
+ fi
MACLIBDEST=$(LIBDEST)/plat-mac
MACTOOLSDEST=$(prefix)/Mac/Tools
diff --git a/Mac/Modules/carbonevt/_CarbonEvtmodule.c b/Mac/Modules/carbonevt/_CarbonEvtmodule.c
index 623a3e5..30d40c9 100755..100644
--- a/Mac/Modules/carbonevt/_CarbonEvtmodule.c
+++ b/Mac/Modules/carbonevt/_CarbonEvtmodule.c
@@ -1051,8 +1051,7 @@ static PyObject *EventHandlerRef_RemoveEventHandler(EventHandlerRefObject *_self
_err = RemoveEventHandler(_self->ob_itself);
if (_err != noErr) return PyMac_Error(_err);
_self->ob_itself = NULL;
- Py_DECREF(_self->ob_callback);
- _self->ob_callback = NULL;
+ Py_CLEAR(_self->ob_callback);
Py_INCREF(Py_None);
_res = Py_None;
return _res;
diff --git a/Mac/Modules/cg/CFMLateImport.c b/Mac/Modules/cg/CFMLateImport.c
index 955acfb..955acfb 100755..100644
--- a/Mac/Modules/cg/CFMLateImport.c
+++ b/Mac/Modules/cg/CFMLateImport.c
diff --git a/Mac/Modules/cg/CFMLateImport.h b/Mac/Modules/cg/CFMLateImport.h
index 0878960..0878960 100755..100644
--- a/Mac/Modules/cg/CFMLateImport.h
+++ b/Mac/Modules/cg/CFMLateImport.h
diff --git a/Mac/Modules/cg/CGStubLib.exp b/Mac/Modules/cg/CGStubLib.exp
index b93dc7c..b93dc7c 100755..100644
--- a/Mac/Modules/cg/CGStubLib.exp
+++ b/Mac/Modules/cg/CGStubLib.exp
diff --git a/Mac/Modules/cg/CGStubLib.readme b/Mac/Modules/cg/CGStubLib.readme
index d2c5fa8..d2c5fa8 100755..100644
--- a/Mac/Modules/cg/CGStubLib.readme
+++ b/Mac/Modules/cg/CGStubLib.readme
diff --git a/Mac/Modules/cg/_CGmodule.c b/Mac/Modules/cg/_CGmodule.c
index 7169101..7169101 100755..100644
--- a/Mac/Modules/cg/_CGmodule.c
+++ b/Mac/Modules/cg/_CGmodule.c
diff --git a/Mac/Modules/fm/_Fmmodule.c b/Mac/Modules/fm/_Fmmodule.c
index adc47aa..c516f24 100644
--- a/Mac/Modules/fm/_Fmmodule.c
+++ b/Mac/Modules/fm/_Fmmodule.c
@@ -2,8 +2,9 @@
/* =========================== Module _Fm =========================== */
#include "Python.h"
+#include <Carbon/Carbon.h>
-#ifndef __LP64__
+#if !defined(__LP64__) && !defined(MAC_OS_X_VERSION_10_7)
#include "pymactoolbox.h"
@@ -16,7 +17,6 @@
}} while(0)
-#include <Carbon/Carbon.h>
/*
@@ -347,7 +347,7 @@ static PyMethodDef Fm_methods[] = {
void init_Fm(void)
{
PyObject *m;
-#ifndef __LP64__
+#if !defined(__LP64__) && !defined(MAC_OS_X_VERSION_10_7)
PyObject *d;
#endif /* __LP64__ */
@@ -355,7 +355,7 @@ void init_Fm(void)
m = Py_InitModule("_Fm", Fm_methods);
-#ifndef __LP64__
+#if !defined(__LP64__) && !defined(MAC_OS_X_VERSION_10_7)
d = PyModule_GetDict(m);
Fm_Error = PyMac_GetOSErrException();
if (Fm_Error == NULL ||
diff --git a/Mac/Modules/list/_Listmodule.c b/Mac/Modules/list/_Listmodule.c
index 3f43b06..9e63686 100644
--- a/Mac/Modules/list/_Listmodule.c
+++ b/Mac/Modules/list/_Listmodule.c
@@ -76,8 +76,7 @@ int ListObj_Convert(PyObject *v, ListHandle *p_itself)
static void ListObj_dealloc(ListObject *self)
{
- Py_XDECREF(self->ob_ldef_func);
- self->ob_ldef_func = NULL;
+ Py_CLEAR(self->ob_ldef_func);
SetListRefCon(self->ob_itself, (long)0);
if (self->ob_must_be_disposed && self->ob_itself) LDispose(self->ob_itself);
self->ob_type->tp_free((PyObject *)self);
diff --git a/Mac/Modules/qd/_Qdmodule.c b/Mac/Modules/qd/_Qdmodule.c
index c7594b9..d21f361 100644
--- a/Mac/Modules/qd/_Qdmodule.c
+++ b/Mac/Modules/qd/_Qdmodule.c
@@ -3,8 +3,9 @@
#include "Python.h"
+#include <Carbon/Carbon.h>
-#ifndef __LP64__
+#if !defined(__LP64__) && !defined(MAC_OS_X_VERSION_10_7)
#include "pymactoolbox.h"
@@ -16,7 +17,6 @@
}} while(0)
-#include <Carbon/Carbon.h>
#ifdef USE_TOOLBOX_OBJECT_GLUE
extern PyObject *_GrafObj_New(GrafPtr);
@@ -6548,7 +6548,7 @@ static PyObject *Qd_RawBitMap(PyObject *_self, PyObject *_args)
#endif /* __LP64__ */
static PyMethodDef Qd_methods[] = {
-#ifndef __LP64__
+#if !defined(__LP64__) && !defined(MAC_OS_X_VERSION_10_7)
{"GetPort", (PyCFunction)Qd_GetPort, 1,
PyDoc_STR("() -> (GrafPtr port)")},
{"GrafDevice", (PyCFunction)Qd_GrafDevice, 1,
@@ -7088,7 +7088,7 @@ static PyMethodDef Qd_methods[] = {
};
-#ifndef __LP64__
+#if !defined(__LP64__) && !defined(MAC_OS_X_VERSION_10_7)
/* Like BMObj_New, but the original bitmap data structure is copied (and
** released when the object is released)
@@ -7112,7 +7112,7 @@ PyObject *BMObj_NewCopied(BitMapPtr itself)
void init_Qd(void)
{
PyObject *m;
-#ifndef __LP64__
+#if !defined(__LP64__) && !defined(MAC_OS_X_VERSION_10_7)
PyObject *d;
@@ -7127,7 +7127,7 @@ void init_Qd(void)
#endif /* __LP64__ */
m = Py_InitModule("_Qd", Qd_methods);
-#ifndef __LP64__
+#if !defined(__LP64__) && !defined(MAC_OS_X_VERSION_10_7)
d = PyModule_GetDict(m);
Qd_Error = PyMac_GetOSErrException();
if (Qd_Error == NULL ||
diff --git a/Mac/Modules/qdoffs/_Qdoffsmodule.c b/Mac/Modules/qdoffs/_Qdoffsmodule.c
index 686eca4..e5562cb 100644
--- a/Mac/Modules/qdoffs/_Qdoffsmodule.c
+++ b/Mac/Modules/qdoffs/_Qdoffsmodule.c
@@ -4,7 +4,8 @@
#include "Python.h"
-#ifndef __LP64__
+#include <Carbon/Carbon.h>
+#if !defined(__LP64__) && !defined(MAC_OS_X_VERSION_10_7)
#include "pymactoolbox.h"
@@ -16,7 +17,6 @@
}} while(0)
-#include <Carbon/Carbon.h>
#ifdef USE_TOOLBOX_OBJECT_GLUE
extern PyObject *_GWorldObj_New(GWorldPtr);
@@ -634,7 +634,7 @@ static PyObject *Qdoffs_PutPixMapBytes(PyObject *_self, PyObject *_args)
#endif /* __LP64__ */
static PyMethodDef Qdoffs_methods[] = {
-#ifndef __LP64__
+#if !defined(__LP64__) && !defined(MAC_OS_X_VERSION_10_7)
{"NewGWorld", (PyCFunction)Qdoffs_NewGWorld, 1,
PyDoc_STR("(short PixelDepth, Rect boundsRect, CTabHandle cTable, GDHandle aGDevice, GWorldFlags flags) -> (GWorldPtr offscreenGWorld)")},
{"LockPixels", (PyCFunction)Qdoffs_LockPixels, 1,
@@ -691,7 +691,7 @@ static PyMethodDef Qdoffs_methods[] = {
void init_Qdoffs(void)
{
PyObject *m;
-#ifndef __LP64__
+#if !defined(__LP64__) && !defined(MAC_OS_X_VERSION_10_7)
PyObject *d;
@@ -702,7 +702,7 @@ void init_Qdoffs(void)
#endif /* __LP64__ */
m = Py_InitModule("_Qdoffs", Qdoffs_methods);
-#ifndef __LP64__
+#if !defined(__LP64__) && !defined(MAC_OS_X_VERSION_10_7)
d = PyModule_GetDict(m);
Qdoffs_Error = PyMac_GetOSErrException();
if (Qdoffs_Error == NULL ||
diff --git a/Mac/PythonLauncher/FileSettings.h b/Mac/PythonLauncher/FileSettings.h
index d807bae..7b74a9b 100755..100644
--- a/Mac/PythonLauncher/FileSettings.h
+++ b/Mac/PythonLauncher/FileSettings.h
@@ -45,18 +45,13 @@
+ (id)getFactorySettingsForFileType: (NSString *)filetype;
+ (id)newSettingsForFileType: (NSString *)filetype;
-//- (id)init;
- (id)initForFileType: (NSString *)filetype;
- (id)initForFSDefaultFileType: (NSString *)filetype;
- (id)initForDefaultFileType: (NSString *)filetype;
-//- (id)initWithFileSettings: (FileSettings *)source;
- (void)updateFromSource: (id <FileSettingsSource>)source;
- (NSString *)commandLineForScript: (NSString *)script;
-//- (void)applyFactorySettingsForFileType: (NSString *)filetype;
-//- (void)saveDefaults;
-//- (void)applyUserDefaults: (NSString *)filetype;
- (void)applyValuesFromDict: (NSDictionary *)dict;
- (void)reset;
- (NSArray *) interpreters;
diff --git a/Mac/PythonLauncher/FileSettings.m b/Mac/PythonLauncher/FileSettings.m
index 66b4fdc..3438870 100755..100644
--- a/Mac/PythonLauncher/FileSettings.m
+++ b/Mac/PythonLauncher/FileSettings.m
@@ -14,7 +14,7 @@
{
static FileSettings *fsdefault_py, *fsdefault_pyw, *fsdefault_pyc;
FileSettings **curdefault;
-
+
if ([filetype isEqualToString: @"Python Script"]) {
curdefault = &fsdefault_py;
} else if ([filetype isEqualToString: @"Python GUI Script"]) {
@@ -36,7 +36,7 @@
{
static FileSettings *default_py, *default_pyw, *default_pyc;
FileSettings **curdefault;
-
+
if ([filetype isEqualToString: @"Python Script"]) {
curdefault = &default_py;
} else if ([filetype isEqualToString: @"Python GUI Script"]) {
@@ -57,7 +57,7 @@
+ (id)newSettingsForFileType: (NSString *)filetype
{
FileSettings *cur;
-
+
cur = [FileSettings new];
[cur initForFileType: filetype];
return [cur retain];
@@ -67,7 +67,7 @@
{
self = [super init];
if (!self) return self;
-
+
interpreter = [source->interpreter retain];
honourhashbang = source->honourhashbang;
debug = source->debug;
@@ -81,36 +81,30 @@
with_terminal = source->with_terminal;
prefskey = source->prefskey;
if (prefskey) [prefskey retain];
-
+
return self;
}
- (id)initForFileType: (NSString *)filetype
{
FileSettings *defaults;
-
+
defaults = [FileSettings getDefaultsForFileType: filetype];
self = [self initWithFileSettings: defaults];
origsource = [defaults retain];
return self;
}
-//- (id)init
-//{
-// self = [self initForFileType: @"Python Script"];
-// return self;
-//}
-
- (id)initForFSDefaultFileType: (NSString *)filetype
{
int i;
NSString *filename;
NSDictionary *dict;
static NSDictionary *factorySettings;
-
+
self = [super init];
if (!self) return self;
-
+
if (factorySettings == NULL) {
NSBundle *bdl = [NSBundle mainBundle];
NSString *path = [ bdl pathForResource: @"factorySettings"
@@ -149,18 +143,18 @@
{
NSUserDefaults *defaults;
NSDictionary *dict;
-
+
defaults = [NSUserDefaults standardUserDefaults];
dict = [defaults dictionaryForKey: filetype];
if (!dict)
return;
[self applyValuesFromDict: dict];
}
-
+
- (id)initForDefaultFileType: (NSString *)filetype
{
FileSettings *fsdefaults;
-
+
fsdefaults = [FileSettings getFactorySettingsForFileType: filetype];
self = [self initWithFileSettings: fsdefaults];
if (!self) return self;
@@ -220,7 +214,7 @@
- (void)applyValuesFromDict: (NSDictionary *)dict
{
id value;
-
+
value = [dict objectForKey: @"interpreter"];
if (value) interpreter = [value retain];
value = [dict objectForKey: @"honourhashbang"];
@@ -247,12 +241,12 @@
- (NSString*)_replaceSingleQuotes: (NSString*)string
{
- /* Replace all single-quotes by '"'"', that way shellquoting will
- * be correct when the result value is delimited using single quotes.
- */
- NSArray* components = [string componentsSeparatedByString:@"'"];
+ /* Replace all single-quotes by '"'"', that way shellquoting will
+ * be correct when the result value is delimited using single quotes.
+ */
+ NSArray* components = [string componentsSeparatedByString:@"'"];
- return [components componentsJoinedByString:@"'\"'\"'"];
+ return [components componentsJoinedByString:@"'\"'\"'"];
}
- (NSString *)commandLineForScript: (NSString *)script
@@ -265,7 +259,7 @@
script_dir = [script substringToIndex:
[script length]-[[script lastPathComponent] length]];
-
+
if (honourhashbang &&
(fp=fopen([script fileSystemRepresentation], "r")) &&
fgets(hashbangbuf, sizeof(hashbangbuf), fp) &&
@@ -278,7 +272,7 @@
}
if (!cur_interp)
cur_interp = interpreter;
-
+
return [NSString stringWithFormat:
@"cd '%@' && '%@'%s%s%s%s%s%s %@ '%@' %@ %s",
[self _replaceSingleQuotes:script_dir],
@@ -297,7 +291,7 @@
- (NSArray *) interpreters { return interpreters;};
-// FileSettingsSource protocol
+// FileSettingsSource protocol
- (NSString *) interpreter { return interpreter;};
- (BOOL) honourhashbang { return honourhashbang; };
- (BOOL) debug { return debug;};
diff --git a/Mac/PythonLauncher/Info.plist.in b/Mac/PythonLauncher/Info.plist.in
index 913a2e0..0a5a439 100644
--- a/Mac/PythonLauncher/Info.plist.in
+++ b/Mac/PythonLauncher/Info.plist.in
@@ -40,7 +40,7 @@
<key>CFBundleExecutable</key>
<string>PythonLauncher</string>
<key>CFBundleGetInfoString</key>
- <string>%VERSION%, © 2001-2012 Python Software Foundation</string>
+ <string>%VERSION%, © 2001-2014 Python Software Foundation</string>
<key>CFBundleIconFile</key>
<string>PythonLauncher.icns</string>
<key>CFBundleIdentifier</key>
diff --git a/Mac/PythonLauncher/MyAppDelegate.m b/Mac/PythonLauncher/MyAppDelegate.m
index a5ba751..e75fb06 100644
--- a/Mac/PythonLauncher/MyAppDelegate.m
+++ b/Mac/PythonLauncher/MyAppDelegate.m
@@ -33,7 +33,7 @@
- (BOOL)shouldShowUI
{
- // if this call comes before applicationDidFinishLaunching: we
+ // if this call comes before applicationDidFinishLaunching: we
// should terminate immedeately after starting the script.
if (!initial_action_done)
should_terminate = YES;
@@ -62,7 +62,7 @@
static NSString *extensions[] = { @"py", @"pyw", @"pyc", NULL};
NSString **ext_p;
int i;
-
+
if ([[NSUserDefaults standardUserDefaults] boolForKey: @"SkipFileBindingTest"])
return;
ourUrl = [NSURL fileURLWithPath: [[NSBundle mainBundle] bundlePath]];
@@ -92,5 +92,5 @@
}
}
}
-
+
@end
diff --git a/Mac/PythonLauncher/MyDocument.h b/Mac/PythonLauncher/MyDocument.h
index 00c1bae..00c1bae 100755..100644
--- a/Mac/PythonLauncher/MyDocument.h
+++ b/Mac/PythonLauncher/MyDocument.h
diff --git a/Mac/PythonLauncher/MyDocument.m b/Mac/PythonLauncher/MyDocument.m
index 86112c4..90c5db9 100755..100644
--- a/Mac/PythonLauncher/MyDocument.m
+++ b/Mac/PythonLauncher/MyDocument.m
@@ -16,7 +16,7 @@
{
self = [super init];
if (self) {
-
+
// Add your subclass-specific initialization here.
// If an error occurs here, send a [self dealloc] message and return nil.
script = [@"<no script>.py" retain];
@@ -37,20 +37,17 @@
{
NSApplication *app = [NSApplication sharedApplication];
[super close];
- if ([[app delegate] shouldTerminate])
+ if ([(MyAppDelegate*)[app delegate] shouldTerminate])
[app terminate: self];
}
- (void)load_defaults
{
-// if (settings) [settings release];
settings = [FileSettings newSettingsForFileType: filetype];
}
- (void)update_display
{
-// [[self window] setTitle: script];
-
[interpreter setStringValue: [settings interpreter]];
[honourhashbang setState: [settings honourhashbang]];
[debug setState: [settings debug]];
@@ -62,7 +59,7 @@
[others setStringValue: [settings others]];
[scriptargs setStringValue: [settings scriptargs]];
[with_terminal setState: [settings with_terminal]];
-
+
[commandline setStringValue: [settings commandLineForScript: script]];
}
@@ -75,8 +72,8 @@
{
const char *cmdline;
int sts;
-
- cmdline = [[settings commandLineForScript: script] cString];
+
+ cmdline = [[settings commandLineForScript: script] UTF8String];
if ([settings with_terminal]) {
sts = doscript(cmdline);
} else {
@@ -107,14 +104,13 @@
{
// Insert code here to read your document from the given data. You can also choose to override -loadFileWrapperRepresentation:ofType: or -readFromFile:ofType: instead.
BOOL show_ui;
-
- // ask the app delegate whether we should show the UI or not.
- show_ui = [[[NSApplication sharedApplication] delegate] shouldShowUI];
+
+ // ask the app delegate whether we should show the UI or not.
+ show_ui = [(MyAppDelegate*)[[NSApplication sharedApplication] delegate] shouldShowUI];
[script release];
script = [fileName retain];
[filetype release];
filetype = [type retain];
-// if (settings) [settings release];
settings = [FileSettings newSettingsForFileType: filetype];
if (show_ui) {
[self update_display];
@@ -152,7 +148,7 @@
[self update_display];
}
-// FileSettingsSource protocol
+// FileSettingsSource protocol
- (NSString *) interpreter { return [interpreter stringValue];};
- (BOOL) honourhashbang { return [honourhashbang state];};
- (BOOL) debug { return [debug state];};
diff --git a/Mac/PythonLauncher/PreferencesWindowController.m b/Mac/PythonLauncher/PreferencesWindowController.m
index 311c375..ec5bbe8 100644
--- a/Mac/PythonLauncher/PreferencesWindowController.m
+++ b/Mac/PythonLauncher/PreferencesWindowController.m
@@ -5,7 +5,7 @@
+ getPreferencesWindow
{
static PreferencesWindowController *_singleton;
-
+
if (!_singleton)
_singleton = [[PreferencesWindowController alloc] init];
[_singleton showWindow: _singleton];
@@ -21,15 +21,13 @@
- (void)load_defaults
{
NSString *title = [filetype titleOfSelectedItem];
-
+
settings = [FileSettings getDefaultsForFileType: title];
}
- (void)update_display
{
-// [[self window] setTitle: script];
-
- [interpreter reloadData];
+ [interpreter reloadData];
[interpreter setStringValue: [settings interpreter]];
[honourhashbang setState: [settings honourhashbang]];
[debug setState: [settings debug]];
@@ -41,7 +39,6 @@
[others setStringValue: [settings others]];
[with_terminal setState: [settings with_terminal]];
// Not scriptargs, it isn't for preferences
-
[commandline setStringValue: [settings commandLineForScript: @"<your script here>"]];
}
@@ -75,7 +72,7 @@
[self update_display];
}
-// FileSettingsSource protocol
+// FileSettingsSource protocol
- (NSString *) interpreter { return [interpreter stringValue];};
- (BOOL) honourhashbang { return [honourhashbang state]; };
- (BOOL) debug { return [debug state];};
@@ -98,23 +95,23 @@
// NSComboBoxDataSource protocol
- (unsigned int)comboBox:(NSComboBox *)aComboBox indexOfItemWithStringValue:(NSString *)aString
{
- NSArray *interp_list = [settings interpreters];
+ NSArray *interp_list = [settings interpreters];
unsigned int rv = [interp_list indexOfObjectIdenticalTo: aString];
- return rv;
+ return rv;
}
- (id)comboBox:(NSComboBox *)aComboBox objectValueForItemAtIndex:(int)index
{
- NSArray *interp_list = [settings interpreters];
+ NSArray *interp_list = [settings interpreters];
id rv = [interp_list objectAtIndex: index];
- return rv;
+ return rv;
}
- (int)numberOfItemsInComboBox:(NSComboBox *)aComboBox
{
- NSArray *interp_list = [settings interpreters];
+ NSArray *interp_list = [settings interpreters];
int rv = [interp_list count];
- return rv;
+ return rv;
}
diff --git a/Mac/PythonLauncher/doscript.h b/Mac/PythonLauncher/doscript.h
index eef0b56..3fd3187 100644
--- a/Mac/PythonLauncher/doscript.h
+++ b/Mac/PythonLauncher/doscript.h
@@ -9,4 +9,4 @@
#include <Carbon/Carbon.h>
-extern int doscript(const char *command); \ No newline at end of file
+extern int doscript(const char *command);
diff --git a/Mac/PythonLauncher/doscript.m b/Mac/PythonLauncher/doscript.m
index 024b883..cbb783b 100644
--- a/Mac/PythonLauncher/doscript.m
+++ b/Mac/PythonLauncher/doscript.m
@@ -11,49 +11,49 @@
#import <ApplicationServices/ApplicationServices.h>
#import "doscript.h"
-extern int
+extern int
doscript(const char *command)
{
- char *bundleID = "com.apple.Terminal";
- AppleEvent evt, res;
- AEDesc desc;
- OSStatus err;
+ char *bundleID = "com.apple.Terminal";
+ AppleEvent evt, res;
+ AEDesc desc;
+ OSStatus err;
- [[NSWorkspace sharedWorkspace] launchApplication:@"/Applications/Utilities/Terminal.app/"];
+ [[NSWorkspace sharedWorkspace] launchApplication:@"/Applications/Utilities/Terminal.app/"];
- // Build event
- err = AEBuildAppleEvent(kAECoreSuite, kAEDoScript,
- typeApplicationBundleID,
- bundleID, strlen(bundleID),
- kAutoGenerateReturnID,
- kAnyTransactionID,
- &evt, NULL,
- "'----':utf8(@)", strlen(command),
- command);
- if (err) {
- NSLog(@"AEBuildAppleEvent failed: %d\n", err);
- return err;
- }
+ // Build event
+ err = AEBuildAppleEvent(kAECoreSuite, kAEDoScript,
+ typeApplicationBundleID,
+ bundleID, strlen(bundleID),
+ kAutoGenerateReturnID,
+ kAnyTransactionID,
+ &evt, NULL,
+ "'----':utf8(@)", strlen(command),
+ command);
+ if (err) {
+ NSLog(@"AEBuildAppleEvent failed: %ld\n", (long)err);
+ return err;
+ }
- // Send event and check for any Apple Event Manager errors
- err = AESendMessage(&evt, &res, kAEWaitReply, kAEDefaultTimeout);
- AEDisposeDesc(&evt);
- if (err) {
- NSLog(@"AESendMessage failed: %d\n", err);
- return err;
- }
- // Check for any application errors
- err = AEGetParamDesc(&res, keyErrorNumber, typeSInt32, &desc);
- AEDisposeDesc(&res);
- if (!err) {
- AEGetDescData(&desc, &err, sizeof(err));
- NSLog(@"Terminal returned an error: %d", err);
- AEDisposeDesc(&desc);
- } else if (err == errAEDescNotFound) {
- err = noErr;
- } else {
- NSLog(@"AEGetPArmDesc returned an error: %d", err);
- }
+ // Send event and check for any Apple Event Manager errors
+ err = AESendMessage(&evt, &res, kAEWaitReply, kAEDefaultTimeout);
+ AEDisposeDesc(&evt);
+ if (err) {
+ NSLog(@"AESendMessage failed: %ld\n", (long)err);
+ return err;
+ }
+ // Check for any application errors
+ err = AEGetParamDesc(&res, keyErrorNumber, typeSInt32, &desc);
+ AEDisposeDesc(&res);
+ if (!err) {
+ AEGetDescData(&desc, &err, sizeof(err));
+ NSLog(@"Terminal returned an error: %ld", (long)err);
+ AEDisposeDesc(&desc);
+ } else if (err == errAEDescNotFound) {
+ err = noErr;
+ } else {
+ NSLog(@"AEGetPArmDesc returned an error: %ld", (long)err);
+ }
- return err;
+ return err;
}
diff --git a/Mac/PythonLauncher/main.m b/Mac/PythonLauncher/main.m
index 6841433..04b4d73 100755..100644
--- a/Mac/PythonLauncher/main.m
+++ b/Mac/PythonLauncher/main.m
@@ -11,7 +11,7 @@
int main(int argc, const char *argv[])
{
- char *home = getenv("HOME");
- if (home) chdir(home);
+ char *home = getenv("HOME");
+ if (home) chdir(home);
return NSApplicationMain(argc, argv);
}
diff --git a/Mac/README b/Mac/README
index 522903a..04c02a3 100644
--- a/Mac/README
+++ b/Mac/README
@@ -1,12 +1,19 @@
-============
-MacOSX Notes
-============
+=========================
+Python on Mac OS X README
+=========================
+
+:Authors:
+ Jack Jansen (2004-07),
+ Ronald Oussoren (2010-04),
+ Ned Deily (2014-05)
+
+:Version: 2.7.7
This document provides a quick overview of some Mac OS X specific features in
the Python distribution.
-Mac-specific arguments to configure
-===================================
+OS X specific arguments to configure
+====================================
* ``--enable-framework[=DIR]``
@@ -15,11 +22,11 @@ Mac-specific arguments to configure
_`Building and using a framework-based Python on Mac OS X` for more
information on frameworks.
- If the optional directory argument is specified the framework it installed
+ If the optional directory argument is specified the framework is installed
into that directory. This can be used to install a python framework into
your home directory::
- $ configure --enable-framework=/Users/ronald/Library/Frameworks
+ $ ./configure --enable-framework=/Users/ronald/Library/Frameworks
$ make && make install
This will install the framework itself in ``/Users/ronald/Library/Frameworks``,
@@ -33,12 +40,13 @@ Mac-specific arguments to configure
* ``--enable-universalsdk[=PATH]``
- Create a universal binary build of of Python. This can be used with both
+ Create a universal binary build of Python. This can be used with both
regular and framework builds.
- The optional argument specifies which OSX SDK should be used to perform the
- build. This defaults to ``/Developer/SDKs/MacOSX.10.4u.sdk``, specify
- ``/`` when building on a 10.5 system, especially when building 64-bit code.
+ The optional argument specifies which OS X SDK should be used to perform the
+ build. This defaults to ``/Developer/SDKs/MacOSX.10.4u.sdk``. When building
+ on OS X 10.5 or later, you can specify ``/`` to use the installed system
+ headers rather than an SDK.
See the section _`Building and using a universal binary of Python on Mac OS X`
for more information.
@@ -56,9 +64,14 @@ Building and using a universal binary of Python on Mac OS X
1. What is a universal binary
-----------------------------
-A universal binary build of Python contains object code for both PPC and i386
-and can therefore run at native speed on both classic powerpc based macs and
-the newer intel based macs.
+A universal binary build of Python contains object code for more than one
+CPU architecture. A universal OS X executable file or library combines the
+architecture-specific code into one file and can therefore run at native
+speed on all supported architectures. Universal files were introduced in
+OS X 10.4 to add support for Intel-based Macs to the existing PowerPC (PPC)
+machines. In OS X 10.5 support was extended to 64-bit Intel and 64-bit PPC
+architectures. It is possible to build Python with various combinations
+of architectures depending on the build tools and OS X version in use.
2. How do I build a universal binary
------------------------------------
@@ -70,45 +83,69 @@ flag to configure::
$ make
$ make install
-This flag can be used a framework build of python, but also with a classic
-unix build. Either way you will have to build python on Mac OS X 10.4 (or later)
-with Xcode 2.1 (or later). You also have to install the 10.4u SDK when
-installing Xcode.
+This flag can be used with a framework build of python, but also with a classic
+unix build. Universal builds were first supported with OS X 10.4 with Xcode 2.1
+and the 10.4u SDK. Starting with Xcode 3 and OS X 10.5, more configurations are
+available.
The option ``--enable-universalsdk`` has an optional argument to specify an
-SDK, which defaults to the 10.4u SDK. When you build on OSX 10.5 or later
+SDK, which defaults to the 10.4u SDK. When you build on OS X 10.5 or later
you can use the system headers instead of an SDK::
$ ./configure --enable-universalsdk=/
-2.1 Flavours of universal binaries
-..................................
+In general, universal builds depend on specific features provided by the
+Apple-supplied compilers and other build tools included in Apple's Xcode
+development tools. You should install Xcode and the command line tools
+component appropriate for the OS X release you are running on. See the
+Python Developer's Guide (http://docs.python.org/devguide/setup.html)
+for more information.
+
+2.1 Flavors of universal binaries
+.................................
-It is possible to build a number of flavours of the universal binary build,
-the default is a 32-bit only binary (i386 and ppc). The flavour can be
+It is possible to build a number of flavors of the universal binary build,
+the default is a 32-bit only binary (i386 and ppc). Note that starting with
+Xcode 4, the build tools no longer support ppc. The flavor can be
specified using the option ``--with-universal-archs=VALUE``. The following
values are available:
+ * ``intel``: ``i386``, ``x86_64``
+
* ``32-bit``: ``ppc``, ``i386``
+ * ``3-way``: ``i386``, ``x86_64``, ``ppc``
+
* ``64-bit``: ``ppc64``, ``x86_64``
* ``all``: ``ppc``, ``ppc64``, ``i386``, ``x86_64``
- * ``3-way``: ``ppc``, ``i386`` and ``x86_64``
+To build a universal binary that includes a 64-bit architecture, you must build
+on a system running OS X 10.5 or later. The ``all`` and ``64-bit`` flavors can
+only be built with a 10.5 SDK because ``ppc64`` support was only included with
+OS X 10.5. Although legacy ``ppc`` support was included with Xcode 3 on OS X
+10.6, it was removed in Xcode 4, versions of which were released on OS X 10.6
+and which is the standard for OS X 10.7. To summarize, the
+following combinations of SDKs and universal-archs flavors are available:
- * ``intel``: ``i386``, ``x86_64``
+ * 10.4u SDK with Xcode 2 supports ``32-bit`` only
-To build a universal binary that includes a 64-bit architecture, you must build
-on a system running OSX 10.5 or later. The ``all`` flavour can only be built on
-OSX 10.5.
+ * 10.5 SDK with Xcode 3.1.x supports all flavors
+
+ * 10.6 SDK with Xcode 3.2.x supports ``intel``, ``3-way``, and ``32-bit``
+
+ * 10.6 SDK with Xcode 4 supports ``intel`` only
-The makefile for a framework build will install ``python32`` and ``pythonw32``
-binaries when the universal architecures includes at least one 32-bit architecture
-(that is, for all flavours but ``64-bit``).
+ * 10.7 and 10.8 SDKs with Xcode 4 support ``intel`` only
-Running a specific archicture
-.............................
+ * 10.8 and 10.9 SDKs with Xcode 5 support ``intel`` only
+
+The makefile for a framework build will also install ``python2.7-32``
+binaries when the universal architecture includes at least one 32-bit
+architecture (that is, for all flavors but ``64-bit``).
+
+Running a specific architecture
+...............................
You can run code using a specific architecture using the ``arch`` command::
@@ -123,6 +160,13 @@ Python 2.7 or 3.2, in earlier versions the python (and pythonw) commands are
wrapper tools that execute the real interpreter without ensuring that the
real interpreter runs with the same architecture.
+Using ``arch`` is not a perfect solution as the selected architecture will
+not automatically carry through to subprocesses launched by programs and tests
+under that Python. If you want to ensure that Python interpreters launched in
+subprocesses also run in 32-bit-mode if the main interpreter does, use
+a ``python2.7-32`` binary and use the value of ``sys.executable`` as the
+``subprocess`` ``Popen`` executable value.
+
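
A few lines of Python illustrate the advice in the paragraph above (the child script name is hypothetical)::

    # Launch a child interpreter with the same executable -- and hence the same
    # architecture selection -- as the parent.
    import subprocess
    import sys

    subprocess.check_call([sys.executable, 'child_script.py'])
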
Building and using a framework-based Python on Mac OS X.
========================================================
@@ -132,16 +176,17 @@ Building and using a framework-based Python on Mac OS X.
The main reason is because you want to create GUI programs in Python. With the
exception of X11/XDarwin-based GUI toolkits all GUI programs need to be run
-from a fullblown MacOSX application (a ".app" bundle).
+from a Mac OS X application bundle (".app").
While it is technically possible to create a .app without using frameworks you
will have to do the work yourself if you really want this.
A second reason for using frameworks is that they put Python-related items in
only two places: "/Library/Framework/Python.framework" and
-"/Applications/MacPython 2.6". This simplifies matters for users installing
+"/Applications/Python <VERSION>" where ``<VERSION>`` can be e.g. "3.4",
+"2.7", etc. This simplifies matters for users installing
Python from a binary distribution if they want to get rid of it again. Moreover,
-due to the way frameworks work a user without admin privileges can install a
+due to the way frameworks work, a user without admin privileges can install a
binary distribution in his or her home directory without recompilation.
2. How does a framework Python differ from a normal static Python?
@@ -156,43 +201,55 @@ Versions/Current and you will see the familiar bin and lib directories.
3. Do I need extra packages?
----------------------------
-Yes, probably. If you want Tkinter support you need to get the OSX AquaTk
-distribution, this is installed by default on Mac OS X 10.4 or later. If
-you want wxPython you need to get that. If you want Cocoa you need to get
-PyObjC.
+Yes, probably. If you want Tkinter support you need to get the OS X AquaTk
+distribution; this is installed by default on Mac OS X 10.4 or later. Be
+aware, though, that the Cocoa-based AquaTks supplied starting with OS X
+10.6 have proven to be unstable. If possible, you should consider
+installing a newer version before building on OS X 10.6 or later, such as
+the ActiveTcl 8.5. See http://www.python.org/download/mac/tcltk/. If you
+are building with an SDK, ensure that the newer Tcl and Tk frameworks are
+seen in the SDK's ``Library/Frameworks`` directory; you may need to
+manually create symlinks to their installed location, ``/Library/Frameworks``.
+If you want wxPython you need to get that.
+If you want Cocoa you need to get PyObjC.
4. How do I build a framework Python?
-------------------------------------
This directory contains a Makefile that will create a couple of python-related
-applications (fullblown OSX .app applications, that is) in
-"/Applications/MacPython 2.6", and a hidden helper application Python.app
-inside the Python.framework, and unix tools "python" and "pythonw" into
-/usr/local/bin. In addition it has a target "installmacsubtree" that installs
+applications (full-blown OS X .app applications, that is) in
+"/Applications/Python <VERSION>", and a hidden helper application Python.app
+inside the Python.framework, and unix tools "python" and "pythonw" into
+/usr/local/bin. In addition it has a target "installmacsubtree" that installs
the relevant portions of the Mac subtree into the Python.framework.
It is normally invoked indirectly through the main Makefile, as the last step
-in the sequence::
+in the sequence
+
+ 1. ./configure --enable-framework
+
+ 2. make
+
+ 3. make install
- $ ./configure --enable-framework
- $ make
- $ make install
+This sequence will put the framework in ``/Library/Frameworks/Python.framework``,
+the applications in ``/Applications/Python <VERSION>`` and the unix tools in
+``/usr/local/bin``.
-This sequence will put the framework in /Library/Framework/Python.framework,
-the applications in "/Applications/MacPython 2.6" and the unix tools in
-/usr/local/bin.
+Installing in another place, for instance ``$HOME/Library/Frameworks`` if you
+have no admin privileges on your machine, is possible. This can be accomplished
+by configuring with ``--enable-framework=$HOME/Library/Frameworks``.
+The other two directories will then also be installed in your home directory,
+at ``$HOME/Applications/Python-<VERSION>`` and ``$HOME/bin``.
-It is possible to select a different name for the framework using the configure
-option ``--with-framework-name=NAME``. This makes it possible to have several
-parallel installs of a Python framework.
+If you want to install some part, but not all, read the main Makefile. The
+frameworkinstall is composed of a couple of sub-targets that install the
+framework itself, the Mac subtree, the applications and the unix tools.
-Installing in another place, for instance $HOME/Library/Frameworks if you have
-no admin privileges on your machine, has only been tested very lightly. This
-can be done by configuring with --enable-framework=$HOME/Library/Frameworks.
-The other two directories, "/Applications/MacPython-2.6" and /usr/local/bin,
-will then also be deposited in $HOME. This is sub-optimal for the unix tools,
-which you would want in $HOME/bin, but there is no easy way to fix this right
-now.
+There is an extra target frameworkinstallextras that is not part of the
+normal frameworkinstall which installs the Tools directory into
+"/Applications/Python <VERSION>", this is useful for binary
+distributions.
What do all these programs do?
===============================
@@ -200,33 +257,54 @@ What do all these programs do?
"IDLE.app" is an integrated development environment for Python: editor,
debugger, etc.
-"PythonLauncher.app" is a helper application that will handle things when you
+"Python Launcher.app" is a helper application that will handle things when you
double-click a .py, .pyc or .pyw file. For the first two it creates a Terminal
window and runs the scripts with the normal command-line Python. For the
latter it runs the script in the Python.app interpreter so the script can do
-GUI-things. Keep the "alt" key depressed while dragging or double-clicking a
-script to set runtime options. These options can be set once and for all
-through PythonLauncher's preferences dialog.
-
-"BuildApplet.app" creates an applet from a Python script. Drop the script on it
-and out comes a full-featured MacOS application. There is much more to this,
-to be supplied later. Some useful (but outdated) info can be found in
-Mac/Demo.
-
-The commandline scripts /usr/local/bin/python and pythonw can be used to run
-non-GUI and GUI python scripts from the command line, respectively.
+GUI-things. Keep the ``Option`` key depressed while dragging or double-clicking
+a script to set runtime options. These options can be set persistently
+through Python Launcher's preferences dialog.
+
+"Build Applet.app" creates an applet from a Python script. Drop the script on it
+and out comes a full-featured Mac OS X application. "Build Applet.app" is now
+deprecated and has been removed in Python 3. As of OS X 10.8, Xcode 4 no
+longer supplies the headers for the deprecated QuickDraw APIs used by
+the EasyDialogs module, making BuildApplet unusable as an app. It will
+not be built by the Mac/Makefile in this case.
+
+The program ``pythonx.x`` runs python scripts from the command line. Various
+compatibility aliases are also installed, including ``pythonwx.x``, which
+in early releases of Python on OS X was required to run GUI programs. In
+current releases, the ``pythonx.x`` and ``pythonwx.x`` commands are identical
+and the use of ``pythonwx.x`` should be avoided as it has been removed in
+current versions of Python 3.
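+
+For example (``myscript.py`` is a placeholder name)::
+
+    python2.7 myscript.py     # run a command-line script
+    pythonw2.7 myscript.py    # historical alias; identical in current releases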
How do I create a binary distribution?
======================================
-Go to the directory "Mac/OSX/BuildScript". There you'll find a script
-"build-installer.py" that does all the work. This will download and build
-a number of 3th-party libaries, configures and builds a framework Python,
-installs it, creates the installer pacakge files and then packs this in a
-DMG image.
-
-The script will build a universal binary, you'll therefore have to run this
+Download and unpack the source release from http://www.python.org/download/.
+Go to the directory ``Mac/BuildScript``. There you will find a script
+``build-installer.py`` that does all the work. This will download and build
+a number of 3rd-party libraries, configures and builds a framework Python,
+installs it, creates the installer package files and then packs this in a
+DMG image. The script also builds an HTML copy of the current Python
+documentation set for this release for inclusion in the framework. The
+installer package will create links to the documentation for use by IDLE,
+pydoc, shell users, and Finder users.
+
+The script will build a universal binary, so you'll have to run this
script on Mac OS X 10.4 or later and with Xcode 2.1 or later installed.
+However, the Python build process itself has several build dependencies not
+available out of the box with OS X 10.4 so you may have to install
+additional software beyond what is provided with Xcode 2. OS X 10.5
+provides a recent enough system Python (in ``/usr/bin``) to build
+the Python documentation set. It should be possible to use SDKs and/or older
+versions of Xcode to build installers that are compatible with older systems
+on a newer system, but this may not be completely foolproof, so the resulting
+executables, shared libraries, and ``.so`` bundles should be carefully
+examined and tested on all supported systems for proper dynamic linking
+dependencies. It is safest to build the distribution on a system running the
+minimum OS X version supported.
All of this is normally done completely isolated in /tmp/_py, so it does not
use your normal build directory nor does it install into /.
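+
+A hypothetical invocation (the script is normally run with an already
+installed Python 2; check the script itself for the options it actually
+accepts)::
+
+    cd Mac/BuildScript
+    python ./build-installer.py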
@@ -251,16 +329,16 @@ The configure script sometimes emits warnings like the one below::
configure: WARNING: ## -------------------------------------- ##
This almost always means you are trying to build a universal binary for
-Python and have libaries in ``/usr/local`` that don't contain the required
+Python and have libraries in ``/usr/local`` that don't contain the required
architectures. Temporarily move ``/usr/local`` aside to finish the build.
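+
+One way to move it aside temporarily is a plain rename (a sketch; any name
+works, just remember to move it back once the build is done)::
+
+    sudo mv /usr/local /usr/local.disabled
+    # ... run the build ...
+    sudo mv /usr/local.disabled /usr/local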
Uninstalling a framework install, including the binary installer
================================================================
-Uninstalling a framework can be done by manually removing all bits that got installed,
-that's true for both installations from source and installations using the binary installer.
-Sadly enough OSX does not have a central uninstaller.
+Uninstalling a framework can be done by manually removing all bits that got installed.
+That's true for both installations from source and installations using the binary installer.
+OS X does not provide a central uninstaller.
The main bit of a framework install is the framework itself, installed in
``/Library/Frameworks/Python.framework``. This can contain multiple versions
@@ -274,14 +352,12 @@ A framework install also installs some applications in ``/Applications/Python X.
And lastly a framework installation installs files in ``/usr/local/bin``, all of
them symbolic links to files in ``/Library/Frameworks/Python.framework/Versions/X.Y/bin``.
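+
+A rough sketch of removing one version X.Y by hand, based on the locations
+listed above (double-check every path before deleting anything)::
+
+    sudo rm -rf /Library/Frameworks/Python.framework/Versions/X.Y
+    sudo rm -rf "/Applications/Python X.Y"
+    # list the /usr/local/bin symlinks that point into that version, then remove them:
+    ls -l /usr/local/bin | grep 'Python.framework/Versions/X.Y'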
-Odds and ends
-=============
-Something to take note of is that the ".rsrc" files in the distribution are
-not actually resource files, they're AppleSingle encoded resource files. The
-macresource module and the Mac/OSX/Makefile cater for this, and create
-".rsrc.df.rsrc" files on the fly that are normal datafork-based resource
-files.
+Resources
+=========
+
+ * http://www.python.org/download/mac/
+
+ * http://www.python.org/community/sigs/current/pythonmac-sig/
- Jack Jansen, Jack.Jansen@cwi.nl, 15-Jul-2004.
- Ronald Oussoren, RonaldOussoren@mac.com, 30-April-2010
+ * http://docs.python.org/devguide/
diff --git a/Mac/Resources/app/Info.plist.in b/Mac/Resources/app/Info.plist.in
index 1bbca4f..93b1c44 100644
--- a/Mac/Resources/app/Info.plist.in
+++ b/Mac/Resources/app/Info.plist.in
@@ -20,7 +20,7 @@
<key>CFBundleExecutable</key>
<string>Python</string>
<key>CFBundleGetInfoString</key>
- <string>%version%, (c) 2004-2012 Python Software Foundation.</string>
+ <string>%version%, (c) 2004-2014 Python Software Foundation.</string>
<key>CFBundleHelpBookFolder</key>
<array>
<string>Documentation</string>
@@ -37,7 +37,7 @@
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleLongVersionString</key>
- <string>%version%, (c) 2004-2012 Python Software Foundation.</string>
+ <string>%version%, (c) 2004-2014 Python Software Foundation.</string>
<key>CFBundleName</key>
<string>Python</string>
<key>CFBundlePackageType</key>
@@ -55,6 +55,8 @@
<key>NSAppleScriptEnabled</key>
<true/>
<key>NSHumanReadableCopyright</key>
- <string>(c) 2012 Python Software Foundation.</string>
+ <string>(c) 2014 Python Software Foundation.</string>
+ <key>NSHighResolutionCapable</key>
+ <true/>
</dict>
</plist>
diff --git a/Mac/Resources/framework/Info.plist.in b/Mac/Resources/framework/Info.plist.in
index bab46fa..43a5740 100644
--- a/Mac/Resources/framework/Info.plist.in
+++ b/Mac/Resources/framework/Info.plist.in
@@ -17,9 +17,9 @@
<key>CFBundlePackageType</key>
<string>FMWK</string>
<key>CFBundleShortVersionString</key>
- <string>%VERSION%, (c) 2004-2012 Python Software Foundation.</string>
+ <string>%VERSION%, (c) 2004-2014 Python Software Foundation.</string>
<key>CFBundleLongVersionString</key>
- <string>%VERSION%, (c) 2004-2012 Python Software Foundation.</string>
+ <string>%VERSION%, (c) 2004-2014 Python Software Foundation.</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
diff --git a/Mac/Tools/fixapplepython23.py b/Mac/Tools/fixapplepython23.py
index 03c799b..03c799b 100644..100755
--- a/Mac/Tools/fixapplepython23.py
+++ b/Mac/Tools/fixapplepython23.py
diff --git a/Mac/scripts/buildpkg.py b/Mac/scripts/buildpkg.py
index ed5c88f..ed5c88f 100644..100755
--- a/Mac/scripts/buildpkg.py
+++ b/Mac/scripts/buildpkg.py
diff --git a/Mac/scripts/mkestrres-macerrors.h b/Mac/scripts/mkestrres-macerrors.h
index 26c583d..26c583d 100755..100644
--- a/Mac/scripts/mkestrres-macerrors.h
+++ b/Mac/scripts/mkestrres-macerrors.h
diff --git a/Mac/scripts/zappycfiles.py b/Mac/scripts/zappycfiles.py
index 6d35748..6d35748 100644..100755
--- a/Mac/scripts/zappycfiles.py
+++ b/Mac/scripts/zappycfiles.py
diff --git a/Makefile.pre.in b/Makefile.pre.in
index e2237a9..bcd83bf 100644
--- a/Makefile.pre.in
+++ b/Makefile.pre.in
@@ -27,6 +27,10 @@ MODLIBS= _MODLIBS_
VERSION= @VERSION@
srcdir= @srcdir@
VPATH= @srcdir@
+abs_srcdir= @abs_srcdir@
+abs_builddir= @abs_builddir@
+build= @build@
+host= @host@
CC= @CC@
CXX= @CXX@
@@ -57,6 +61,8 @@ INSTALL_DATA= @INSTALL_DATA@
# Also, making them read-only seems to be a good idea...
INSTALL_SHARED= ${INSTALL} -m 555
+MKDIR_P= @MKDIR_P@
+
MAKESETUP= $(srcdir)/Modules/makesetup
# Compiler options
@@ -82,6 +88,9 @@ PY_CFLAGS= $(CFLAGS) $(CPPFLAGS) $(CFLAGSFORSHARED) -DPy_BUILD_CORE
# Machine-dependent subdirectories
MACHDEP= @MACHDEP@
+# Multiarch directory (may be empty)
+MULTIARCH= @MULTIARCH@
+
# Install prefix for architecture-independent files
prefix= @prefix@
@@ -152,7 +161,7 @@ SRCDIRS= @SRCDIRS@
SUBDIRSTOO= Include Lib Misc Demo
# Files and directories to be distributed
-CONFIGFILES= configure configure.in acconfig.h pyconfig.h.in Makefile.pre.in
+CONFIGFILES= configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in
DISTFILES= README ChangeLog $(CONFIGFILES)
DISTDIRS= $(SUBDIRS) $(SUBDIRSTOO) Ext-dummy
DIST= $(DISTFILES) $(DISTDIRS)
@@ -183,6 +192,14 @@ UNICODE_OBJS= @UNICODE_OBJS@
PYTHON= python$(EXE)
BUILDPYTHON= python$(BUILDEXE)
+PYTHON_FOR_BUILD=@PYTHON_FOR_BUILD@
+_PYTHON_HOST_PLATFORM=@_PYTHON_HOST_PLATFORM@
+HOST_GNU_TYPE= @host@
+
+# Tcl and Tk config info from --with-tcltk-includes and -libs options
+TCLTK_INCLUDES= @TCLTK_INCLUDES@
+TCLTK_LIBS= @TCLTK_LIBS@
+
# The task to run while instrumented when building the profile-opt target
PROFILE_TASK= $(srcdir)/Tools/pybench/pybench.py -n 2 --with-gc --with-syscheck
#PROFILE_TASK= $(srcdir)/Lib/test/regrtest.py
@@ -204,8 +221,8 @@ SIGNAL_OBJS= @SIGNAL_OBJS@
##########################################################################
# Grammar
-GRAMMAR_H= $(srcdir)/Include/graminit.h
-GRAMMAR_C= $(srcdir)/Python/graminit.c
+GRAMMAR_H= Include/graminit.h
+GRAMMAR_C= Python/graminit.c
GRAMMAR_INPUT= $(srcdir)/Grammar/Grammar
@@ -215,6 +232,19 @@ LIBFFI_INCLUDEDIR= @LIBFFI_INCLUDEDIR@
# Parser
PGEN= Parser/pgen$(EXE)
+PSRCS= \
+ Parser/acceler.c \
+ Parser/grammar1.c \
+ Parser/listnode.c \
+ Parser/node.c \
+ Parser/parser.c \
+ Parser/parsetok.c \
+ Parser/bitset.c \
+ Parser/metagrammar.c \
+ Parser/firstsets.c \
+ Parser/grammar.c \
+ Parser/pgen.c
+
POBJS= \
Parser/acceler.o \
Parser/grammar1.o \
@@ -230,6 +260,14 @@ POBJS= \
PARSER_OBJS= $(POBJS) Parser/myreadline.o Parser/tokenizer.o
+PGSRCS= \
+ Objects/obmalloc.c \
+ Python/mysnprintf.c \
+ Python/pyctype.c \
+ Parser/tokenizer_pgen.c \
+ Parser/printgrammar.c \
+ Parser/pgenmain.c
+
PGOBJS= \
Objects/obmalloc.o \
Python/mysnprintf.o \
@@ -242,13 +280,14 @@ PARSER_HEADERS= \
Parser/parser.h \
Parser/tokenizer.h
-PGENOBJS= $(PGENMAIN) $(POBJS) $(PGOBJS)
+PGENSRCS= $(PSRCS) $(PGSRCS)
+PGENOBJS= $(POBJS) $(PGOBJS)
##########################################################################
# AST
-AST_H_DIR= $(srcdir)/Include
+AST_H_DIR= Include
AST_H= $(AST_H_DIR)/Python-ast.h
-AST_C_DIR= $(srcdir)/Python
+AST_C_DIR= Python
AST_C= $(AST_C_DIR)/Python-ast.c
AST_ASDL= $(srcdir)/Parser/Python.asdl
@@ -384,6 +423,7 @@ build_all_generate_profile:
$(MAKE) all CFLAGS="$(CFLAGS) -fprofile-generate" LIBS="$(LIBS) -lgcov"
run_profile_task:
+ : # FIXME: can't run for a cross build
./$(BUILDPYTHON) $(PROFILE_TASK)
build_all_use_profile:
@@ -401,16 +441,27 @@ $(BUILDPYTHON): Modules/python.o $(LIBRARY) $(LDLIBRARY)
Modules/python.o \
$(BLDLIBRARY) $(LIBS) $(MODLIBS) $(SYSLIBS) $(LDLAST)
-platform: $(BUILDPYTHON)
- $(RUNSHARED) ./$(BUILDPYTHON) -E -c 'import sys ; from sysconfig import get_platform ; print get_platform()+"-"+sys.version[0:3]' >platform
+platform: $(BUILDPYTHON) pybuilddir.txt
+ $(RUNSHARED) $(PYTHON_FOR_BUILD) -c 'import sys ; from sysconfig import get_platform ; print get_platform()+"-"+sys.version[0:3]' >platform
+# Create build directory and generate the sysconfig build-time data there.
+# pybuilddir.txt contains the name of the build dir and is used for
+# sys.path fixup -- see Modules/getpath.c.
+pybuilddir.txt: $(BUILDPYTHON)
+ $(RUNSHARED) $(PYTHON_FOR_BUILD) -S -m sysconfig --generate-posix-vars
# Build the shared modules
-sharedmods: $(BUILDPYTHON)
- @case $$MAKEFLAGS in \
- *s*) $(RUNSHARED) CC='$(CC)' LDSHARED='$(BLDSHARED)' OPT='$(OPT)' ./$(BUILDPYTHON) -E $(srcdir)/setup.py -q build;; \
- *) $(RUNSHARED) CC='$(CC)' LDSHARED='$(BLDSHARED)' OPT='$(OPT)' ./$(BUILDPYTHON) -E $(srcdir)/setup.py build;; \
- esac
+# Under GNU make, MAKEFLAGS are sorted and normalized; the 's' for
+# -s, --silent or --quiet is always the first char.
+# Under BSD make, MAKEFLAGS might be " -s -v x=y".
+sharedmods: $(BUILDPYTHON) pybuilddir.txt
+ @case "$$MAKEFLAGS" in \
+ *\ -s*|s*) quiet="-q";; \
+ *) quiet="";; \
+ esac; \
+ $(RUNSHARED) CC='$(CC)' LDSHARED='$(BLDSHARED)' OPT='$(OPT)' \
+ _TCLTK_INCLUDES='$(TCLTK_INCLUDES)' _TCLTK_LIBS='$(TCLTK_LIBS)' \
+ $(PYTHON_FOR_BUILD) $(srcdir)/setup.py $$quiet build
# Build static library
# avoid long command lines, same as LIBRARY_OBJS
@@ -434,7 +485,7 @@ libpython$(VERSION).so: $(LIBRARY_OBJS)
libpython$(VERSION).dylib: $(LIBRARY_OBJS)
$(CC) -dynamiclib -Wl,-single_module $(LDFLAGS) -undefined dynamic_lookup -Wl,-install_name,$(prefix)/lib/libpython$(VERSION).dylib -Wl,-compatibility_version,$(VERSION) -Wl,-current_version,$(VERSION) -o $@ $(LIBRARY_OBJS) $(SHLIBS) $(LIBC) $(LIBM) $(LDLAST); \
-
+
libpython$(VERSION).sl: $(LIBRARY_OBJS)
$(LDSHARED) -o $@ $(LIBRARY_OBJS) $(MODLIBS) $(SHLIBS) $(LIBC) $(LIBM) $(LDLAST)
@@ -538,13 +589,19 @@ Modules/getpath.o: $(srcdir)/Modules/getpath.c Makefile
Modules/python.o: $(srcdir)/Modules/python.c
$(MAINCC) -c $(PY_CFLAGS) -o $@ $(srcdir)/Modules/python.c
+Modules/posixmodule.o: $(srcdir)/Modules/posixmodule.c $(srcdir)/Modules/posixmodule.h
-# Use a stamp file to prevent make -j invoking pgen twice
-$(GRAMMAR_H) $(GRAMMAR_C): Parser/pgen.stamp
-Parser/pgen.stamp: $(PGEN) $(GRAMMAR_INPUT)
- -@$(INSTALL) -d Include
+Modules/grpmodule.o: $(srcdir)/Modules/grpmodule.c $(srcdir)/Modules/posixmodule.h
+
+Modules/pwdmodule.o: $(srcdir)/Modules/pwdmodule.c $(srcdir)/Modules/posixmodule.h
+
+$(GRAMMAR_H): $(GRAMMAR_INPUT) $(PGENSRCS)
+ @$(MKDIR_P) Include
+ $(MAKE) $(PGEN)
$(PGEN) $(GRAMMAR_INPUT) $(GRAMMAR_H) $(GRAMMAR_C)
- -touch Parser/pgen.stamp
+$(GRAMMAR_C): $(GRAMMAR_H) $(GRAMMAR_INPUT) $(PGENSRCS)
+ $(MAKE) $(GRAMMAR_H)
+ touch $(GRAMMAR_C)
$(PGEN): $(PGENOBJS)
$(CC) $(OPT) $(LDFLAGS) $(PGENOBJS) $(LIBS) -o $(PGEN)
@@ -559,9 +616,11 @@ Parser/tokenizer_pgen.o: $(srcdir)/Parser/tokenizer.c
Parser/pgenmain.o: $(srcdir)/Include/parsetok.h
$(AST_H): $(AST_ASDL) $(ASDLGEN_FILES)
+ $(MKDIR_P) $(AST_H_DIR)
$(ASDLGEN) -h $(AST_H_DIR) $(AST_ASDL)
$(AST_C): $(AST_ASDL) $(ASDLGEN_FILES)
+ $(MKDIR_P) $(AST_C_DIR)
$(ASDLGEN) -c $(AST_C_DIR) $(AST_ASDL)
Python/compile.o Python/symtable.o Python/ast.o: $(GRAMMAR_H) $(AST_H)
@@ -691,7 +750,8 @@ PYTHON_HEADERS= \
Include/warnings.h \
Include/weakrefobject.h \
pyconfig.h \
- $(PARSER_HEADERS)
+ $(PARSER_HEADERS) \
+ $(AST_H)
$(LIBRARY_OBJS) $(MODOBJS) Modules/python.o: $(PYTHON_HEADERS)
@@ -762,7 +822,8 @@ memtest: all platform
install: @FRAMEWORKINSTALLFIRST@ altinstall bininstall maninstall @FRAMEWORKINSTALLLAST@
# Install almost everything without disturbing previous versions
-altinstall: @FRAMEWORKALTINSTALLFIRST@ altbininstall libinstall inclinstall libainstall \
+altinstall: @FRAMEWORKALTINSTALLFIRST@ altbininstall libinstall inclinstall \
+ libainstall altmaninstall \
sharedinstall oldsharedinstall @FRAMEWORKALTINSTALLLAST@
# Install shared libraries enabled by Setup
@@ -832,8 +893,8 @@ altbininstall: $(BUILDPYTHON)
else true; \
fi
-# Install the manual page
-maninstall:
+# Install the versioned manual page
+altmaninstall:
@for i in $(MANDIR) $(MANDIR)/man1; \
do \
if test ! -d $(DESTDIR)$$i; then \
@@ -845,6 +906,13 @@ maninstall:
$(INSTALL_DATA) $(srcdir)/Misc/python.man \
$(DESTDIR)$(MANDIR)/man1/python$(VERSION).1
+# Install the unversioned manual pages
+maninstall: altmaninstall
+ -rm -f $(DESTDIR)$(MANDIR)/man1/python2.1
+ (cd $(DESTDIR)$(MANDIR)/man1; $(LN) -s python$(VERSION).1 python2.1)
+ -rm -f $(DESTDIR)$(MANDIR)/man1/python.1
+ (cd $(DESTDIR)$(MANDIR)/man1; $(LN) -s python2.1 python.1)
+
# Install the library
PLATDIR= plat-$(MACHDEP)
EXTRAPLATDIR= @EXTRAPLATDIR@
@@ -859,11 +927,13 @@ PLATMACDIRS= plat-mac plat-mac/Carbon plat-mac/lib-scriptpackages \
plat-mac/lib-scriptpackages/Netscape \
plat-mac/lib-scriptpackages/StdSuites \
plat-mac/lib-scriptpackages/SystemEvents \
- plat-mac/lib-scriptpackages/Terminal
+ plat-mac/lib-scriptpackages/Terminal
PLATMACPATH=:plat-mac:plat-mac/lib-scriptpackages
LIBSUBDIRS= lib-tk lib-tk/test lib-tk/test/test_tkinter \
- lib-tk/test/test_ttk site-packages test test/data \
- test/cjkencodings test/decimaltestdata test/xmltestdata test/subprocessdata \
+ lib-tk/test/test_ttk site-packages test test/audiodata test/data \
+ test/cjkencodings test/decimaltestdata test/xmltestdata \
+ test/imghdrdata \
+ test/subprocessdata \
test/tracedmodules \
encodings compiler hotshot \
email email/mime email/test email/test/data \
@@ -872,7 +942,8 @@ LIBSUBDIRS= lib-tk lib-tk/test lib-tk/test/test_tkinter \
logging bsddb bsddb/test csv importlib wsgiref \
lib2to3 lib2to3/fixes lib2to3/pgen2 lib2to3/tests \
lib2to3/tests/data lib2to3/tests/data/fixers lib2to3/tests/data/fixers/myfixes \
- ctypes ctypes/test ctypes/macholib idlelib idlelib/Icons \
+ ctypes ctypes/test ctypes/macholib \
+ idlelib idlelib/Icons idlelib/idle_test \
distutils distutils/command distutils/tests $(XMLLIBSUBDIRS) \
multiprocessing multiprocessing/dummy \
unittest unittest/test \
@@ -898,7 +969,7 @@ libinstall: build_all $(srcdir)/Lib/$(PLATDIR) $(srcdir)/Modules/xxmodule.c
else true; \
fi; \
done
- @for i in $(srcdir)/Lib/*.py $(srcdir)/Lib/*.doc $(srcdir)/Lib/*.egg-info ; \
+ @for i in $(srcdir)/Lib/*.py `cat pybuilddir.txt`/_sysconfigdata.py $(srcdir)/Lib/*.doc $(srcdir)/Lib/*.egg-info ; \
do \
if test -x $$i; then \
$(INSTALL_SCRIPT) $$i $(DESTDIR)$(LIBDEST); \
@@ -939,34 +1010,43 @@ libinstall: build_all $(srcdir)/Lib/$(PLATDIR) $(srcdir)/Modules/xxmodule.c
$(DESTDIR)$(LIBDEST)/distutils/tests ; \
fi
PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \
- ./$(BUILDPYTHON) -Wi -tt $(DESTDIR)$(LIBDEST)/compileall.py \
+ $(PYTHON_FOR_BUILD) -Wi -tt $(DESTDIR)$(LIBDEST)/compileall.py \
-d $(LIBDEST) -f \
-x 'bad_coding|badsyntax|site-packages|lib2to3/tests/data' \
$(DESTDIR)$(LIBDEST)
PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \
- ./$(BUILDPYTHON) -Wi -tt -O $(DESTDIR)$(LIBDEST)/compileall.py \
+ $(PYTHON_FOR_BUILD) -Wi -tt -O $(DESTDIR)$(LIBDEST)/compileall.py \
-d $(LIBDEST) -f \
-x 'bad_coding|badsyntax|site-packages|lib2to3/tests/data' \
$(DESTDIR)$(LIBDEST)
-PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \
- ./$(BUILDPYTHON) -Wi -t $(DESTDIR)$(LIBDEST)/compileall.py \
+ $(PYTHON_FOR_BUILD) -Wi -t $(DESTDIR)$(LIBDEST)/compileall.py \
-d $(LIBDEST)/site-packages -f \
-x badsyntax $(DESTDIR)$(LIBDEST)/site-packages
-PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \
- ./$(BUILDPYTHON) -Wi -t -O $(DESTDIR)$(LIBDEST)/compileall.py \
+ $(PYTHON_FOR_BUILD) -Wi -t -O $(DESTDIR)$(LIBDEST)/compileall.py \
-d $(LIBDEST)/site-packages -f \
-x badsyntax $(DESTDIR)$(LIBDEST)/site-packages
-PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \
- ./$(BUILDPYTHON) -Wi -t -c "import lib2to3.pygram, lib2to3.patcomp;lib2to3.patcomp.PatternCompiler()"
+ $(PYTHON_FOR_BUILD) -m lib2to3.pgen2.driver $(DESTDIR)$(LIBDEST)/lib2to3/Grammar.txt
+ -PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \
+ $(PYTHON_FOR_BUILD) -m lib2to3.pgen2.driver $(DESTDIR)$(LIBDEST)/lib2to3/PatternGrammar.txt
# Create the PLATDIR source directory, if one wasn't distributed..
$(srcdir)/Lib/$(PLATDIR):
mkdir $(srcdir)/Lib/$(PLATDIR)
cp $(srcdir)/Lib/plat-generic/regen $(srcdir)/Lib/$(PLATDIR)/regen
export PATH; PATH="`pwd`:$$PATH"; \
- export PYTHONPATH; PYTHONPATH="`pwd`/Lib"; \
+ export PYTHONPATH; PYTHONPATH="$(srcdir)/Lib:$(abs_builddir)/`cat pybuilddir.txt`"; \
export DYLD_FRAMEWORK_PATH; DYLD_FRAMEWORK_PATH="`pwd`"; \
export EXE; EXE="$(BUILDEXE)"; \
+ if [ -n "$(MULTIARCH)" ]; then export MULTIARCH; MULTIARCH=$(MULTIARCH); fi; \
+ export PYTHON_FOR_BUILD; \
+ if [ "$(build)" = "$(host)" ]; then \
+ PYTHON_FOR_BUILD="$(BUILDPYTHON)"; \
+ else \
+ PYTHON_FOR_BUILD="$(PYTHON_FOR_BUILD)"; \
+ fi; \
cd $(srcdir)/Lib/$(PLATDIR); $(RUNSHARED) ./regen
python-config: $(srcdir)/Misc/python-config.in
@@ -1062,11 +1142,12 @@ libainstall: all python-config
# Install the dynamically loadable modules
# This goes into $(exec_prefix)
sharedinstall: sharedmods
- $(RUNSHARED) ./$(BUILDPYTHON) -E $(srcdir)/setup.py install \
+ $(RUNSHARED) $(PYTHON_FOR_BUILD) $(srcdir)/setup.py install \
--prefix=$(prefix) \
--install-scripts=$(BINDIR) \
--install-platlib=$(DESTSHARED) \
--root=$(DESTDIR)/
+ -rm $(DESTDIR)$(DESTSHARED)/_sysconfigdata.py*
# Here are a couple of targets for MacOSX again, to install a full
# framework-based Python. frameworkinstall installs everything, the
@@ -1135,7 +1216,7 @@ frameworkinstallextras:
# This installs a few of the useful scripts in Tools/scripts
scriptsinstall:
SRCDIR=$(srcdir) $(RUNSHARED) \
- ./$(BUILDPYTHON) $(srcdir)/Tools/scripts/setup.py install \
+ $(PYTHON_FOR_BUILD) $(srcdir)/Tools/scripts/setup.py install \
--prefix=$(prefix) \
--install-scripts=$(BINDIR) \
--root=$(DESTDIR)/
@@ -1165,7 +1246,7 @@ recheck:
$(SHELL) config.status --recheck
$(SHELL) config.status
-# Rebuild the configure script from configure.in; also rebuild pyconfig.h.in
+# Rebuild the configure script from configure.ac; also rebuild pyconfig.h.in
autoconf:
(cd $(srcdir); autoconf)
(cd $(srcdir); autoheader)
@@ -1184,6 +1265,11 @@ TAGS::
etags Include/*.h; \
for i in $(SRCDIRS); do etags -a $$i/*.[ch]; done
+# Touch generated files
+touch:
+ cd $(srcdir); \
+ touch Include/Python-ast.h Python/Python-ast.c
+
# Sanitation targets -- clean leaves libraries, executables and tags
# files, which clobber removes as well
pycremoval:
@@ -1202,7 +1288,7 @@ profile-removal:
clobber: clean profile-removal
-rm -f $(BUILDPYTHON) $(PGEN) $(LIBRARY) $(LDLIBRARY) $(DLLLIBRARY) \
- tags TAGS Parser/pgen.stamp \
+ tags TAGS \
config.cache config.log pyconfig.h Modules/config.c
-rm -rf build platform
-rm -rf $(PYTHONFRAMEWORKDIR)
@@ -1218,11 +1304,12 @@ distclean: clobber
Modules/Setup Modules/Setup.local Modules/Setup.config \
Modules/ld_so_aix Modules/python.exp Misc/python.pc
-rm -f python*-gdb.py
- find $(srcdir) '(' -name '*.fdc' -o -name '*~' \
- -o -name '[@,#]*' -o -name '*.old' \
- -o -name '*.orig' -o -name '*.rej' \
- -o -name '*.bak' ')' \
- -exec rm -f {} ';'
+ -rm -f pybuilddir.txt
+ find $(srcdir)/[a-zA-Z]* '(' -name '*.fdc' -o -name '*~' \
+ -o -name '[@,#]*' -o -name '*.old' \
+ -o -name '*.orig' -o -name '*.rej' \
+ -o -name '*.bak' ')' \
+ -exec rm -f {} ';'
# Check for smelly exported symbols (not starting with Py/_Py)
smelly: all
@@ -1272,8 +1359,8 @@ Python/thread.o: @THREADHEADERS@
.PHONY: maninstall libinstall inclinstall libainstall sharedinstall
.PHONY: frameworkinstall frameworkinstallframework frameworkinstallstructure
.PHONY: frameworkinstallmaclib frameworkinstallapps frameworkinstallunixtools
-.PHONY: frameworkaltinstallunixtools recheck autoconf clean clobber distclean
-.PHONY: smelly funny patchcheck
+.PHONY: frameworkaltinstallunixtools recheck autoconf clean clobber distclean
+.PHONY: smelly funny patchcheck touch altmaninstall
.PHONY: gdbhooks
# IF YOU PUT ANYTHING HERE IT WILL GO AWAY
diff --git a/Misc/ACKS b/Misc/ACKS
index 624b616..983e048 100644
--- a/Misc/ACKS
+++ b/Misc/ACKS
@@ -11,73 +11,126 @@ Without you, I would've stopped working on Python long ago!
PS: In the standard Python distribution, this file is encoded in UTF-8
and the list is in rough alphabetical order by last names.
+Aahz
+Michael Abbott
+Rajiv Abraham
David Abrahams
+Marc Abramowitz
+Ron Adam
+Anton Afanasyev
+Ali Afshar
+Nitika Agarwal
Jim Ahlstrom
Farhan Ahmad
+Matthew Ahrens
Nir Aides
Yaniv Aknin
Jyrki Alakuijala
+Steve Alexander
+Fred Allen
+Jeff Allen
+Ray Allen
Billy G. Allie
Kevin Altis
Joe Amenta
+A. Amoroso
Mark Anacker
+Shashwat Anand
Anders Andersen
John Anderson
+Pehr Anderson
Erik Andersén
Oliver Andrich
Ross Andrus
+Juancarlo Añez
+Chris Angelico
+Jérémy Anger
+Ankur Ankan
+Jon Anglin
+Heidi Annexstad
+Ramchandra Apte
Éric Araujo
+Alicia Arlen
+Jeffrey Armstrong
Jason Asbahr
David Ascher
Chris AtLee
+Aymeric Augustin
John Aycock
-Jan-Hein Bührman
Donovan Baarda
+Arne Babenhauserheide
Attila Babo
-Alfonso Baciero
+Matt Bachmann
Marcin Bachry
+Alfonso Baciero
Dwayne Bailey
Stig Bakken
Greg Ball
Luigi Ballabio
Jeff Balogh
+Manuel Balsera
Matt Bandy
+Dmi Baranov
Michael J. Barber
+Daniel Barclay
+Nicolas Bareil
Chris Barker
+Anton Barkovsky
Nick Barnes
Quentin Barnes
+David Barnett
+Matthew Barnett
Richard Barran
Cesar Eduardo Barros
Des Barry
Ulf Bartelt
+Don Bashford
+Pior Bastida
Nick Bastin
+Ned Batchelder
Jeff Bauer
-Mike Bayer
Michael R Bax
Anthony Baxter
+Mike Bayer
Samuel L. Bayer
Donald Beaudry
David Beazley
-Robin Becker
Neal Becker
+Robin Becker
Torsten Becker
Bill Bedford
+Ian Beer
+Stefan Behnel
Reimer Behrends
Ben Bell
Thomas Bellman
Alexander “Саша” Belopolsky
+Eli Bendersky
+David Benjamin
+Oscar Benjamin
Andrew Bennetts
Andy Bensky
+Bennett Benson
+Ezra Berch
Michel Van den Bergh
+Julian Berman
+Brice Berna
+Olivier Bernard
Eric Beser
Steven Bethard
Stephen Bevan
Ron Bickers
+Natalia B. Bidart
+Adrian von Bidder
David Binger
Dominic Binks
Philippe Biondi
+Michael Birtwell
Stuart Bishop
Roy Bixler
+Daniel Black
+Jonathan Black
+Renaud Blanch
Mike Bland
Martin Bless
Pablo Bleyer
@@ -86,85 +139,133 @@ Eric Blossom
Finn Bock
Paul Boddie
Matthew Boedicker
+Robin Boerdijk
David Bolen
+Wouter Bolsterlee
Gawain Bolton
+Forest Bond
Gregory Bond
+Matias Bordese
+Jonas Borgström
Jurjen Bos
Peter Bosch
+Dan Boswell
Eric Bouck
Thierry Bousch
Sebastian Boving
+Michal Bozon
Jeff Bradberry
+Aaron Brancotti
Monty Brandenberg
Georg Brandl
Christopher Brannon
Terrence Brannon
+Germán M. Bravo
+Sven Brauch
+Erik Bray
+Brian Brazil
+Demian Brecht
Dave Brennan
Tom Bridgman
+Anthony Briggs
+Keith Briggs
+Tobias Brink
Richard Brodie
Michael Broghton
+Ammar Brohi
Daniel Brotsky
Jean Brouwers
Gary S. Brown
+Titus Brown
Oleg Broytmann
Dave Brueck
Francisco Martín Brugué
+Ian Bruntlett
+Floris Bruynooghe
+Matt Bryant
Stan Bubrouski
Erik de Bueger
+Jan-Hein Bührman
+Lars Buitinck
Dick Bulterman
Bill Bumgarner
Jimmy Burgett
+Edmond Burnett
Tommy Burnette
Roger Burnham
Alastair Burt
Tarn Weisner Burton
Lee Busby
+Katherine Busch
Ralph Butler
+Nicolas Cadou
Jp Calderone
Arnaud Calmettes
Daniel Calvelo
Tony Campbell
Brett Cannon
Mike Carlton
+Pierre Carrier
Terry Carroll
+Edward Catmur
Lorenzo M. Catucci
Donn Cave
Charles Cazabon
+Jesús Cea Avión
Per Cederqvist
+Matej Cepl
+Carl Cerecke
Octavian Cerna
+Dave Chambers
Pascal Chambon
John Chandler
Hye-Shik Chang
Jeffrey Chang
-Mitch Chapman
-Greg Chapman
+Godefroid Chapelle
Brad Chapman
+Greg Chapman
+Mitch Chapman
+Yogesh Chaudhari
David Chaum
Nicolas Chauvat
+Jerry Chen
Michael Chermside
+Ingrid Cheung
Albert Chin-A-Young
Adal Chiriliuc
Matt Chisholm
Anders Chrigström
Tom Christiansen
+Renee Chu
Vadim Chugunov
+Mauro Cicognini
David Cinege
Craig Citro
+Gilles Civario
+Chris Clark
+Laurie Clark-Michalek
Mike Clarkson
Andrew Clegg
Brad Clements
+Robbie Clemons
Steve Clift
+Hervé Coatanhay
Nick Coghlan
Josh Cogliati
Dave Cole
+Terrence Cole
Benjamin Collar
Jeffery Collins
Robert Collins
Paul Colomiets
+Christophe Combelles
Geremy Condra
+Denver Coneybeare
+Phil Connell
Juan José Conti
Matt Conway
David M. Cooke
+Jason R. Coombs
Garrett Cooper
Greg Copeland
Aldo Cortesi
@@ -172,57 +273,78 @@ David Costanzo
Scott Cotton
Greg Couch
David Cournapeau
+Julien Courteau
Steve Cousins
Alex Coventry
Matthew Dixon Cowles
Ryan Coyner
Christopher A. Craig
+Jeremy Craven
Laura Creighton
Simon Cross
+Felipe Cruz
Drew Csillag
Joaquin Cuenca Abela
John Cugini
Tom Culliton
+Antonio Cuni
Brian Curtin
Lisandro Dalcin
+Darren Dale
Andrew Dalke
Lars Damerow
Evan Dandrea
Eric Daniel
Scott David Daniels
Ben Darnell
+Kushal Das
Jonathan Dasteel
+Pierre-Yves David
+A. Jesse Jiryu Davis
+Merlijn van Deen
John DeGood
Ned Deily
Vincent Delft
Arnaud Delobelle
+Konrad Delong
Erik Demaine
+Martin Dengler
John Dennis
+L. Peter Deutsch
Roger Dev
Philippe Devalkeneer
Raghuram Devarakonda
+Caleb Deveraux
Catherine Devlin
Scott Dial
Toby Dickenson
Mark Dickinson
Jack Diederich
Daniel Diniz
+Humberto Diogenes
Yves Dionne
Daniel Dittmar
+Josip Djolonga
Jaromir Dolecek
Ismail Donmez
+Robert Donohue
Marcos Donolo
Dima Dorfman
+Yves Dorfsman
Cesar Douady
Dean Draayer
Fred L. Drake, Jr.
+Derk Drukker
John DuBois
Paul Dubois
+Jacques Ducasse
+Andrei Dorian Duma
Graham Dumpleton
Quinn Dunkan
Robin Dunn
Luke Dunstan
Virgil Dupras
+Bruno Dupuis
Andy Dustman
Gary Duzan
Eugene Dvurechenski
@@ -230,54 +352,83 @@ Josip Dzolonga
Maxim Dzumanenko
Walter Dörwald
Hans Eckardt
+Rodolpho Eckhardt
+Ulrich Eckhardt
+David Edelsohn
+John Edmonds
Grant Edwards
John Ehresman
+Tal Einat
Eric Eisner
Andrew Eland
+Julien Élie
Lance Ellinghaus
+Daniel Ellis
+Phil Elson
David Ely
Jeff Epler
Tom Epperly
+Gökcen Eraslan
Stoffel Erasmus
Jürgen A. Erhard
Michael Ernst
Ben Escoto
Andy Eskilsson
+André Espaze
Stefan Esser
-Stephen D Evans
+Nicolas Estibals
Carey Evans
+Stephen D Evans
Tim Everett
Paul Everitt
David Everly
Daniel Evers
+Winston Ewert
Greg Ewing
Martijn Faassen
Clovis Fabricio
Andreas Faerber
Bill Fancher
+Michael Farrell
Troy J. Farrell
Mark Favas
+Boris Feld
+Thomas Fenzl
Niels Ferguson
Sebastian Fernandez
+Florian Festi
+John Feuerstein
+Carl Feynman
Vincent Fiack
Tomer Filiba
Jeffrey Finkelstein
Russell Finn
+Dan Finnie
Nils Fischbeck
Frederik Fix
Matt Fleming
Hernán Martínez Foffani
+Artem Fokin
+Arnaud Fontaine
Michael Foord
Amaury Forgeot d'Arc
Doug Fort
+Chris Foster
John Fouhy
+Andrew Francis
+Stefan Franke
Martin Franklin
+Kent Frazier
+Bruce Frederiksen
Robin Friedrich
+Bradley Froehle
Ivan Frohne
+Matthias Fuchs
Jim Fulton
Tadayoshi Funaba
Gyro Funch
Peter Funk
+Ethan Furman
Geoff Furnish
Ulisses Furquim
Hagen Fürstenau
@@ -287,7 +438,9 @@ Martin von Gagern
Lele Gaifax
Santiago Gala
Yitzchak Gale
+Matthew Gallagher
Quentin Gallet-Gilles
+Riccardo Attilio Galli
Raymund Galvin
Nitin Ganatra
Fred Gansevles
@@ -295,39 +448,63 @@ Lars Marius Garshol
Dan Gass
Andrew Gaul
Stephen M. Gava
+Xavier de Gaye
Harry Henry Gebel
Marius Gedminas
Thomas Gellekum
Gabriel Genellina
Christos Georgiou
+Elazar Gershuni
Ben Gertzfield
+Nadim Ghaznavi
Dinu Gherman
Jonathan Giddy
Johannes Gijsbers
Michael Gilfix
+Julian Gindi
+Yannick Gingras
+Matt Giuca
+Wim Glenn
+Michael Goderbauer
+Jeroen Van Goey
Christoph Gohlke
Tim Golden
+Guilherme Gonçalves
+Tiago Gonçalves
Chris Gonnerman
+Shelley Gooch
David Goodger
Hans de Graaff
+Nathaniel Gray
Eddy De Greef
+Grant Griffin
+Andrea Griffini
Duncan Grisby
Fabian Groffen
-John S. Gruber
+Eric Groo
Dag Gruneau
Filip Gruszczyński
+Thomas Guettler
+Anuj Gupta
Michael Guravage
Lars Gustäbel
Thomas Güttler
+Jonas H.
Barry Haddow
+Philipp Hagemeister
Paul ten Hagen
Rasmus Hahn
Peter Haight
Václav Haisman
+Zbigniew Halas
+Walker Hale IV
Bob Halley
Jesse Hallio
Jun Hamano
+Alexandre Hamelin
+Anders Hammarquist
Mark Hammond
+Harald Hanche-Olsen
Manus Hand
Milton L. Hankins
Stephen Hansen
@@ -335,26 +512,38 @@ Barry Hantman
Lynda Hardman
Derek Harland
Jason Harper
+David Harrigan
+Brian Harring
+Jonathan Hartley
+Travis B. Hartwell
Larry Hastings
+Tim Hatch
Shane Hathaway
+Janko Hauser
Rycharde Hawkes
+Ben Hayden
Jochen Hayek
+Tim Heaney
+Henrik Heimbuerger
Christian Heimes
Thomas Heller
Malte Helmert
Lance Finn Helsten
Jonathan Hendry
+Michael Henry
James Henstridge
Kasun Herath
Chris Herborth
Ivan Herman
Jürgen Hermann
Gary Herron
+Ernie Hershey
Thomas Herve
Bernhard Herzog
Magnus L. Hetland
Raymond Hettinger
Kevan Heydon
+Kelsey Hightower
Jason Hildebrand
Richie Hindle
Konrad Hinsen
@@ -363,12 +552,18 @@ Tim Hochberg
Joerg-Cyril Hoehle
Gregor Hoffleit
Chris Hoffman
+Stefan Hoffmeister
Albert Hofkamp
Tomas Hoger
Jonathan Hogg
+Kamilla Holanda
+Steve Holden
+Akintayo Holder
+Thomas Holenstein
Gerrit Holl
Shane Holloway
Rune Holm
+Thomas Holmes
Philip Homburg
Naofumi Honda
Jeffrey Honig
@@ -377,78 +572,125 @@ Michiel de Hoon
Brian Hooper
Randall Hopper
Nadav Horesh
+Alon Horev
Jan Hosang
+Alan Hourihane
Ken Howard
Brad Howes
+Mike Hoy
Chih-Hao Huang
+Christian Hudon
Lawrence Hudson
Michael Hudson
Jim Hugunin
Greg Humphreys
Eric Huss
+Nehal Hussain
+Taihyun Hwang
Jeremy Hylton
+Ludwig Hähne
Gerhard Häring
Fredrik Håård
Catalin Iacob
Mihai Ibanescu
+Ali Ikinci
+Aaron Iles
Lars Immisch
Bobby Impollonia
Meador Inge
+Peter Ingebretson
Tony Ingraldi
John Interrante
+Vladimir Iofik
Bob Ippolito
+Roger Irwin
Atsuo Ishimoto
-Paul Jackson
+Adam Jackson
Ben Jackson
+Paul Jackson
+Manuel Jacob
David Jacobs
Kevin Jacobs
Kjetil Jacobsen
+Bertrand Janin
Geert Jansen
Jack Jansen
Bill Janssen
Thomas Jarosch
+Juhana Jauhiainen
+Rajagopalasarma Jayakrishnan
+Zbigniew Jędrzejewski-Szmek
+Julien Jehannet
Drew Jenkins
Flemming Kjær Jensen
-Jiba
+Philip H. Jensen
+Philip Jenvey
+MunSic Jeong
+Chris Jerdonek
+Dmitry Jeremov
+Jim Jewett
+Pedro Diaz Jimenez
Orjan Johansen
Fredrik Johansson
Gregory K. Johnson
+Kent Johnson
+Michael Johnson
Simon Johnston
+Matt Joiner
+Thomas Jollans
Nicolas Joly
+Brian K. Jones
Evan Jones
Jeremy Jones
Richard Jones
Irmen de Jong
Lucas de Jonge
-John Jorgensen
+Kristján Valur Jónsson
Jens B. Jorgensen
+John Jorgensen
Sijin Joseph
Andreas Jung
Tattoo Mabonzo K.
+Sarah K.
+Sunny K
+Bohuslav Kabrda
+Alexey Kachayev
Bob Kahn
Kurt B. Kaiser
Tamito Kajiyama
+Jan Kaliszewski
Peter van Kampen
+Rafe Kaplan
Jacob Kaplan-Moss
-Piotr Kasprzyk
+Janne Karila
+Per Øyvind Karlsen
+Anton Kasyanov
Lou Kates
Hiroaki Kawai
+Brian Kearns
Sebastien Keim
Ryan Kelly
-Robert Kern
+Dan Kenigsberg
Randall Kern
+Robert Kern
+Jim Kerr
Magnus Kessler
Lawrence Kesteloot
-Rafe Kettler
Vivek Khera
-Akira Kitada
+Dhiru Kholia
Mads Kiilerich
+Jason Killen
+Jan Kim
Taek Joo Kim
+Sam Kimbrel
W. Trevor King
Paul Kippes
Steve Kirsch
Sebastian Kirsche
+Kamil Kisiel
+Akira Kitada
Ron Klatchko
+Reid Kleckner
Bastian Kleineidam
Bob Kline
Matthias Klose
@@ -457,156 +699,257 @@ Thomas Kluyver
Kim Knapp
Lenny Kneler
Pat Knight
+Jeff Knupp
+Kubilay Kocak
Greg Kochanski
Damon Kohler
Marko Kohtala
+Vajrasky Kok
+Guido Kollerie
+Jacek Konieczny
+Марк Коренберг
+Arkady Koplyarov
+Peter A. Koren
+Vlad Korolev
Joseph Koshy
+Daniel Kozan
+Jerzy Kozera
Maksim Kozyarchuk
Stefan Krah
Bob Kras
+Sebastian Kreft
Holger Krekel
Michael Kremer
Fabian Kreutz
Cédric Krier
+Pedro Kroger
Hannu Krosing
Andrej Krpic
Ivan Krstić
+Steven Kryskalla
Andrew Kuchling
-Ralf W. Grosse-Kunstleve
+Dave Kuhlman
+Jon Kuhn
+Toshio Kuratomi
Vladimir Kushnir
-Kirill Kuzminykh (Кирилл Кузьминых)
+Erno Kuusela
Ross Lagerwall
Cameron Laird
+David Lam
+Thomas Lamb
+Valerie Lambert
+Jean-Baptiste "Jiba" Lamy
+Ronan Lamy
Torsten Landschoff
Łukasz Langa
Tino Lange
+Glenn Langford
Andrew Langmead
Detlef Lannert
Soren Larsen
+Amos Latteier
Piers Lauder
Ben Laurie
Simon Law
+Julia Lawall
Chris Lawrence
Brian Leair
-John J. Lee
+Mathieu Leduc-Hamel
+Amandine Lee
+Christopher Lee
Inyeol Lee
+James Lee
+John J. Lee
Thomas Lee
-Christopher Lee
+Tennessee Leeuwenburg
Luc Lefebvre
+Pierre Paul Lefebvre
+Glyph Lefkowitz
Vincent Legoll
Kip Lehman
Joerg Lehmann
Robert Lehmann
Petri Lehtinen
Luke Kenneth Casson Leighton
-Marc-Andre Lemburg
+Tshepang Lekhonkhobe
+Marc-André Lemburg
+Mateusz Lenik
John Lenton
+Kostyantyn Leschenko
+Benno Leslie
Christopher Tur Lesniewski-Laas
+Alain Leufroy
Mark Levinson
+Mark Levitt
William Lewis
+Akira Li
Xuanji Li
Robert van Liere
Ross Light
Shawn Ligocki
Martin Ligr
+Gediminas Liktaras
+Grant Limberg
Christopher Lindblad
+Ulf A. Lindgren
Björn Lindqvist
Per Lindqvist
Eric Lindvall
Gregor Lingl
+Everett Lipman
+Mirko Liss
Nick Lockwood
Stephanie Lockwood
+Hugo Lopes Tavares
Anne Lord
Tom Loredo
Justin Love
+Ned Jackson Lovely
Jason Lowe
Tony Lownds
Ray Loyzaga
+Kang-Hao (Kenny) Lu
Lukas Lueg
Loren Luke
Fredrik Lundh
+Zhongyue Luo
Mark Lutz
+Taras Lyapun
Jim Lynch
Mikael Lyngvig
Martin von Löwis
+Guillermo López-Anglada
+Jeff MacDonald
+John Machin
Andrew I MacIntyre
Tim MacKenzie
Nick Maclaren
+Don MacMillen
+Tomasz Maćkowiak
+Wolfgang Maier
Steve Majewski
+Marek Majkowski
Grzegorz Makarewicz
David Malcolm
+Greg Malcolm
+William Mallard
Ken Manheimer
Vladimir Marangozov
+Colin Marc
+Vincent Marchetti
David Marek
Doug Marien
+Sven Marnach
Alex Martelli
Anthony Martin
+Owen Martin
+Westley Martínez
Sébastien Martini
+Sidney San Martín
Roger Masse
Nick Mathewson
+Simon Mathieu
+Laura Matson
Graham Matthews
+Martin Matusiak
Dieter Maurer
+Daniel May
+Madison May
+Lucas Maystre
Arnaud Mazin
+Matt McClure
+Rebecca McCreary
Kirk McDonald
Chris McDonough
Greg McFarlane
Alan McIntyre
+Jessica McKellar
Michael McLay
+Brendan McLoughlin
Mark Mc Mahon
Gordon McMillan
-Caolan McNamara
Andrew McNamara
+Caolan McNamara
+Jeff McNeil
Craig McPheeters
Lambert Meertens
Bill van Melle
Lucas Prado Melo
Ezio Melotti
+Doug Mennella
Brian Merrell
Luke Mewburn
Carl Meyer
Mike Meyer
+Piotr Meyer
+Alexis Métaireau
Steven Miale
Trent Mick
+Jason Michalski
+Franck Michea
+Tom Middleton
+Thomas Miedema
Stan Mihai
+Stefan Mihaila
Aristotelis Mikropoulos
-Damien Miller
+Paolo Milani
Chad Miller
+Damien Miller
Jason V. Miller
Jay T. Miller
+Katie Miller
Roman Milner
+Julien Miotte
Andrii V. Mishkovskyi
-Dustin J. Mitchell
Dom Mitchell
+Dustin J. Mitchell
+Zubin Mithra
+Florian Mladitsch
Doug Moen
The Dragon De Monsyne
+Bastien Montagne
Skip Montanaro
+Peter Moody
Paul Moore
+Ross Moore
+Ben Morgan
Derek Morr
James A Morrison
+Martin Morrison
+Derek McTavish Mounce
+Alessandro Moura
Pablo Mouzo
+Mher Movsisyan
Ruslan Mstoi
-Sjoerd Mullender
+Valentina Mukhamedzhanova
+Michael Mulich
Sape Mullender
+Sjoerd Mullender
Michael Muller
Neil Muller
+Louis Munro
R. David Murray
-Piotr Meyer
+Matti Mäki
+Jörg Müller
+Dale Nagata
John Nagle
Takahiro Nakayama
Travers Naran
Charles-François Natali
Vilmos Nebehaj
Fredrik Nehr
-Trent Nelson
Tony Nelson
+Trent Nelson
Chad Netzer
Max Neunhöffer
George Neville-Neil
+Hieu Nguyen
Johannes Nicolai
Samuel Nicolary
+Jonathan Niehof
Gustavo Niemeyer
Oscar Nierstrasz
-Hrvoje Niksic
+Hrvoje Nikšić
Gregory Nofi
Jesse Noller
Bill Noon
@@ -614,46 +957,76 @@ Stefan Norberg
Tim Northover
Joe Norton
Neal Norwitz
+Mikhail Novikov
Michal Nowikowski
Steffen Daode Nurpmeso
Nigel O'Brian
+John O'Connor
Kevin O'Connor
Tim O'Malley
+Zooko O'Whielacronx
+Aaron Oakley
+James Oakley
+Elena Oat
+Jon Oberheide
+Milan Oberkirch
Pascal Oberndoerfer
Jeffrey Ollie
Adam Olsen
Grant Olson
+Koray Oner
Piet van Oostrum
+Tomas Oppelstrup
Jason Orendorff
Douglas Orr
Michele Orrù
Oleg Oshmyan
Denis S. Otkidach
+Peter Otten
Michael Otteneder
R. M. Oudkerk
Russel Owen
+Joonas Paalasmaa
+Martin Packman
+Shriphani Palakodety
Ondrej Palkovsky
Mike Pall
Todd R. Palmer
Juan David Ibáñez Palomar
Jan Palus
+Mathias Panzenböck
M. Papillon
Peter Parente
Alexandre Parenteau
Dan Parisien
+William Park
+Heikki Partanen
Harri Pasanen
+Gaël Pasgrimaud
+Ashish Nitin Patil
Randy Pausch
Samuele Pedroni
+Justin Peel
Marcel van der Peijl
+Berker Peksag
+Andreas Pelme
Steven Pemberton
+Bo Peng
Santiago Peresón
+George Peristerakis
+Mathieu Perreault
Mark Perrego
Trevor Perrin
Gabriel de Perthuis
Tim Peters
Benjamin Peterson
+Joe Peterson
Chris Petrilli
+Roumen Petrov
Bjorn Pettersen
+Justin D. Pettit
+Esa Peuha
+Ronny Pfannschmidt
Geoff Philbrick
Gavrie Philipson
Adrian Phillips
@@ -662,237 +1035,383 @@ Neale Pickett
Jim St. Pierre
Dan Pierson
Martijn Pieters
+Anand B. Pillai
François Pinard
+Tom Pinckney
Zach Pincus
+Zero Piraeus
Michael Piotrowski
Antoine Pitrou
Jean-François Piéronne
Oleg Plakhotnyuk
+Remi Pointel
+Ariel Poliak
Guilherme Polo
+Illia Polosukhin
Michael Pomraning
+Martin Pool
Iustin Pop
+Claudiu Popa
John Popplewell
+Guillaume Pratte
Amrit Prem
Paul Prescod
Donovan Preston
+Paul Price
+Iuliia Proskurnia
+Jyrki Pulliainen
Steve Purcell
-Fernando Pérez
Eduardo Pérez
+Fernando Pérez
+Pierre Quentel
Brian Quinlan
+Kevin Jing Qiu
Anders Qvist
+Thomas Rachel
+Ram Rachum
+Jérôme Radix
Burton Radons
+Jeff Ramnani
Brodie Rao
+Senko Rasic
Antti Rasinen
+Nikolaus Rath
Sridhar Ratnakumar
-Eric Raymond
+Ysj Ray
+Eric S. Raymond
Edward K. Ream
Chris Rebert
Marc Recht
John Redford
-Terry Reedy
+Terry J. Reedy
Gareth Rees
Steve Reeves
Lennart Regebro
+John Regehr
+Federico Reghenzani
Ofir Reichenberg
Sean Reifschneider
Michael P. Reilly
Bernhard Reiter
Steven Reiz
Roeland Rengelink
-Tim Rice
+Antoine Reversat
+Flávio Ribeiro
Francesco Ricciardi
+Tim Rice
Jan Pieter Riegel
Armin Rigo
+Arc Riley
Nicholas Riley
Jean-Claude Rimbault
Vlad Riscutia
+Wes Rishel
+Daniel Riti
Juan M. Bello Rivas
Davide Rizzo
Anthony Roach
+Carl Robben
Mark Roberts
-Jim Robinson
Andy Robinson
+Jim Robinson
+Mark Roddy
Kevin Rodgers
+Sean Rodman
Giampaolo Rodola
+Elson Rodriguez
+Adi Roiban
+Luis Rojas
Mike Romberg
Armin Ronacher
Case Roole
Timothy Roscoe
+Erik Rose
Jim Roskind
+Brian Rosner
+Guido van Rossum
Just van Rossum
Hugo van Rossum
Saskia van Rossum
Donald Wallace Rouse II
Liam Routt
+Todd Rovito
Craig Rowland
Clinton Roy
Paul Rubin
Sam Ruby
+Demur Rumed
Audun S. Runde
+Eran Rundstein
Rauli Ruohonen
Jeff Rush
Sam Rushing
Mark Russell
+Rusty Russell
Nick Russo
+Chris Ryland
+Constantina S.
+Patrick Sabin
Sébastien Sablé
Suman Saha
Hajime Saitou
George Sakkis
Rich Salz
Kevin Samborn
+Adrian Sampson
+James Sanders
Ilya Sandler
+Rafael Santos
+Simon Sapin
Mark Sapiro
Ty Sarna
+Hugh Sasse
+Bob Savage
Ben Sayer
sbt
Marco Scataglini
+Andrew Schaaf
Michael Scharf
+Andreas Schawo
Neil Schemenauer
David Scherer
+Wolfgang Scherer
+Hynek Schlawack
+Bob Schmertz
Gregor Schmid
Ralf Schmitt
Michael Schneider
Peter Schneider-Kamp
Arvin Schnell
+Scott Schram
+Robin Schreiber
Chad J. Schroeder
+Christian Schubert
Sam Schulenburg
Stefan Schwarzer
Dietmar Schwertberger
Federico Schwindt
-Steven Scott
Barry Scott
+Steven Scott
Nick Seidenman
-Žiga Seilnach
+Žiga Seilnacht
+Yury Selivanov
Fred Sells
Jiwon Seo
-Roger Serwy
+Iñigo Serna
+Joakim Sernbrant
+Roger D. Serwy
Jerry Seutter
+Pete Sevander
Denis Severson
Ian Seyer
+Daniel Shahaf
Ha Shao
+Mark Shannon
Richard Shapiro
+Varun Sharma
+Vlad Shcherbina
+Justin Sheehy
+Charlie Shepherd
Bruce Sherwood
Alexander Shigin
Pete Shinners
Michael Shiplett
John W. Shipman
Joel Shprentz
-Itamar Shtull-Trauring
+Yue Shuaijie
+Terrel Shumway
Eric Siegerman
Paul Sijben
+SilentGhost
+Tim Silk
+Michael Simcich
+Ionel Simionescu
Kirill Simonov
Nathan Paul Simons
+Guilherme Simões
+Adam Simpkins
+Ravi Sinha
Janne Sinkkonen
+Ng Pheng Siong
George Sipe
J. Sipprell
Kragen Sitaker
-Eric V. Smith
+Michael Sloan
+Nick Sloan
+Václav Šmilauer
Christopher Smith
+Eric V. Smith
Gregory P. Smith
+Mark Smith
+Roy Smith
+Ryan Smith-Roberts
Rafal Smotrzyk
+Eric Snow
Dirk Soede
Paul Sokolovsky
+Evgeny Sologubov
Cody Somerville
+Edoardo Spadolini
Clay Spence
Stefan Sperling
+Nicholas Spies
Per Spilling
Joshua Spoerri
Noah Spurrier
Nathan Srebro
RajGopal Srinivasan
+Tage Stabell-Kulo
Quentin Stafford-Fraser
Frank Stajano
+Joel Stanley
+Anthony Starks
Oliver Steele
Greg Stein
+Baruch Sterin
Chris Stern
+Alex Stewart
Victor Stinner
Richard Stoakley
Peter Stoehr
Casper Stoel
Michael Stone
+Serhiy Storchaka
Ken Stox
-Patrick Strawderman
Dan Stromberg
+Donald Stufft
Daniel Stutzbach
Andreas Stührk
+Colin Su
+Pal Subbiah
Nathan Sullivan
Mark Summerfield
+Reuben Sumner
+Marek Šuppa
Hisao Suzuki
-Andrew Svetlov
Kalle Svensson
+Andrew Svetlov
Paul Swartz
Thenault Sylvain
Péter Szabó
+John Szakmeister
+Amir Szekely
Arfrever Frehtes Taifersar Arahesis
+Hideaki Takahashi
+Indra Talip
+Neil Tallim
Geoff Talvola
+Musashi Tamura
William Tanksley
Christian Tanzer
Steven Taschuk
-Monty Taylor
Amy Taylor
+Monty Taylor
Anatoly Techtonik
Mikhail Terekhov
+Victor Terrón
Richard M. Tew
Tobias Thelen
+Févry Thibault
+Lowe Thiderman
Nicolas M. Thiéry
James Thomas
Robin Thomas
+Brian Thorne
Stephen Thorne
+Jeremy Thurgood
Eric Tiedemann
+July Tikhonov
Tracy Tims
Oren Tirosh
Jason Tishler
Christian Tismer
+Jim Tittsler
Frank J. Tobin
-R Lindsay Todd
Bennett Todd
+R Lindsay Todd
+Eugene Toder
+Erik Tollerud
+Stephen Tonkin
Matias Torchinsky
Sandro Tosi
Richard Townsend
+David Townshend
+Nathan Trapuzzano
Laurence Tratt
+Alberto Trevino
+Matthias Troffaes
+Tom Tromey
John Tromp
+Diane Trout
Jason Trowbridge
+Brent Tubbs
Anthony Tuininga
+Erno Tukia
+David Turner
Stephen Turner
+Itamar Turner-Trauring
Theodore Turocy
Bill Tutt
+Fraser Tweedale
Doobee R. Tzeck
Eren Türkay
Lionel Ulmer
Roger Upole
+Daniel Urban
Michael Urman
Hector Urtubia
+Ville Vainio
Andi Vajda
Case Van Horsen
Kyle VanderBeek
+Andrew Vant
Atul Varma
Dmitry Vasiliev
+Sebastian Ortiz Vasquez
Alexandre Vassalotti
+Nadeem Vawda
Frank Vercruesse
Mike Verdone
Jaap Vermeulen
+Nikita Vetoshkin
Al Vezza
Jacques A. Vidrine
John Viega
+Dino Viehland
Kannan Vijayan
Kurt Vile
Norman Vine
+Pauli Virtanen
Frank Visser
+Johannes Vogel
+Alex Volkov
+Guido Vranken
+Martijn Vries
+Sjoerd de Vries
Niki W. Waibel
Wojtek Walczak
Charles Waldman
Richard Walker
Larry Wall
Kevin Walzer
+Rodrigo Steinmuller Wanderley
+Ke Wang
Greg Ward
+Tom Wardill
+Zachary Ware
+Jonas Wagner
Barry Warsaw
Steve Waterbury
Bob Watson
+David Watson
Aaron Watters
Henrik Weber
Corran Webster
+Glyn Webster
+Phil Webster
Stefan Wehr
Zack Weinberg
+Bob Weiner
Edward Welbourne
Cliff Wells
Rickard Westman
@@ -904,13 +1423,21 @@ Truida Wiedijk
Felix Wiemann
Gerry Wiener
Frank Wierzbicki
+Santoso Wijaya
Bryce "Zooko" Wilcox-O'Hearn
+Timothy Wild
+Jakub Wilk
+Gerald S. Williams
Jason Williams
John Williams
Sue Williams
-Gerald S. Williams
+Carol Willing
+Steven Willis
Frank Willison
+Geoff Wilson
Greg V. Wilson
+J Derek Wilson
+Paul Winkler
Jody Winston
Collin Winter
Dik Winter
@@ -920,21 +1447,30 @@ Lars Wirzenius
John Wiseman
Chris Withers
Stefan Witzel
+Irek Wlizlo
David Wolever
Klaus-Juergen Wolf
Dan Wolfe
Richard Wolff
-Darren Worrall
+Adam Woodbeck
+Steven Work
Gordon Worley
+Darren Worrall
Thomas Wouters
+Daniel Wozniak
Heiko Wundram
Doug Wyatt
+Robert Xiao
Florent Xicluna
Hirokazu Yamamoto
Ka-Ping Yee
+Jason Yeo
+EungJun Yi
Bob Yodlowski
Danny Yoo
+Rory Yorke
George Yoshida
+Kazuhiro Yoshida
Masazumi Yoshikawa
Arnaud Ysmal
Bernard Yue
@@ -942,7 +1478,13 @@ Moshe Zadka
Milan Zamazal
Artur Zaprzala
Mike Zarnstorff
+Yury V. Zaytsev
Siebren van der Zee
+Nickolai Zeldovich
+Yuxiao Zeng
Uwe Zessin
+Cheng Zhang
+Kai Zhu
Tarek Ziadé
+Gennadiy Zlobin
Peter Åstrand
diff --git a/Misc/NEWS b/Misc/NEWS
index e8778ad..dae435f 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -1,17 +1,2284 @@
++++++++++++
Python News
+++++++++++
-What's New in Python 2.7.3 final?
-=================================
+What's New in Python 2.7.8?
+===========================
+
+*Release date: 2014-06-29*
+
+Core and Builtins
+-----------------
+
+- Issue #4346: In PyObject_CallMethod and PyObject_CallMethodObjArgs, don't
+ overwrite the error set in PyObject_GetAttr.
+
+- Issue #21831: Avoid integer overflow when large sizes and offsets are given to
+ the buffer type.
-*Release date: 2012-04-09*
+- Issue #1856: Avoid crashes and lockups when daemon threads run while the
+ interpreter is shutting down; instead, these threads are now killed when they
+ try to take the GIL.
+
+- Issue #19656: Running Python with the -3 option now also warns about
+ non-ascii bytes literals.
+
+- Issue #21642: In the conditional if-else expression, allow an integer written
+ with no space between itself and the ``else`` keyword (e.g. ``True if 42else
+ False``) to be valid syntax.
+
+- Issue #21523: Fix over-pessimistic computation of the stack effect of
+ some opcodes in the compiler. This also fixes a quadratic compilation
+ time issue noticeable when compiling code with a large number of "and"
+ and "or" operators.
Library
-------
+- Issue #21652: Prevent mimetypes.type_map from containing unicode keys on
+ Windows.
+
+- Issue #21729: Used the "with" statement in the dbm.dumb module to ensure
+ that files are closed.
+
+- Issue #21672: Fix the behavior of ntpath.join on UNC-style paths.
+
+- Issue #19145: The times argument for itertools.repeat now handles
+ negative values the same way for keyword arguments as it does for
+ positional arguments.
+
+- Issue #21832: Require named tuple inputs to be exact strings.
+
+- Issue #8343: Named group error messages in the re module did not show
+ the name of the erroneous group.
+
+- Issue #21491: SocketServer: Fix a race condition in child processes reaping.
+
+- Issue #21635: The difflib SequenceMatcher.get_matching_blocks() method
+ cache didn't match the actual result. The former was a list of tuples
+ and the latter was a list of named tuples.
+
+- Issue #21722: The distutils "upload" command now exits with a non-zero
+ return code when uploading fails. Patch by Martin Dengler.
+
+- Issue #21766: Prevent a security hole in CGIHTTPServer by URL unquoting paths
+ before checking for a CGI script at that path.
+
+- Issue #21310: Fixed possible resource leak in failed open().
+
+- Issue #21304: Backport the key derivation function hashlib.pbkdf2_hmac from
+ Python 3 per PEP 466.
+
+- Issue #11709: Fix the pydoc.help function to not fail when sys.stdin is not a
+ valid file.
+
+- Issue #13223: Fix pydoc.writedoc so that the HTML documentation for methods
+ that use 'self' in the example code is generated correctly.
+
+- Issue #21552: Fixed possible integer overflow of too long string lengths in
+ the tkinter module on 64-bit platforms.
+
+- Issue #14315: The zipfile module now ignores extra fields in the central
+ directory that are too short to be parsed instead of letting a struct.unpack
+ error bubble up as this "bad data" appears in many real world zip files in
+ the wild and is ignored by other zip tools.
+
+- Issue #21402: Tkinter.ttk now works when default root window is not set.
+
+- Issue #10203: sqlite3.Row now truly supports the sequence protocol. In particular
+ it supports reverse() and negative indices. Original patch by Claudiu Popa.
+
+- Issue #8743: Fix interoperability between set objects and the
+ collections.Set() abstract base class.
+
+- Issue #21481: Argparse equality and inequality tests now return
+ NotImplemented when comparing to an unknown type.
+
+IDLE
+----
+
+- Issue #21686: add unittest for HyperParser. Original patch by Saimadhav
+ Heblikar.
+
+- Issue #12387: Add missing upper(lower)case versions of default Windows key
+ bindings for Idle so Caps Lock does not disable them. Patch by Roger Serwy.
+
+- Issue #21695: Closing a Find-in-files output window while the search is
+ still in progress no longer closes Idle.
+
+- Issue #18910: Add unittest for textView. Patch by Phil Webster.
+
+- Issue #18292: Add unittest for AutoExpand. Patch by Saimadhav Heblikar.
+
+- Issue #18409: Add unittest for AutoComplete. Patch by Phil Webster.
+
+Tests
+-----
+
+- Issue #20155: Changed HTTP method names in failing tests in test_httpservers
+ so that packet filtering software (specifically Windows Base Filtering Engine)
+ does not interfere with the transaction semantics expected by the tests.
+
+- Issue #19493: Refactored the ctypes test package to skip tests explicitly
+ rather than silently.
+
+- Issue #18492: All resources are now allowed when tests are not run by
+ regrtest.py.
+
+- Issue #21605: Added tests for Tkinter images.
+
+- Issue #21493: Added test for ntpath.expanduser(). Original patch by
+ Claudiu Popa.
+
+- Issue #19925: Added tests for the spwd module. Original patch by Vajrasky Kok.
+
+- Issue #13355: random.triangular() no longer fails with a ZeroDivisionError
+ when low equals high.
+
+- Issue #21522: Added Tkinter tests for Listbox.itemconfigure(),
+ PanedWindow.paneconfigure(), and Menu.entryconfigure().
+
+- Issue #20635: Added tests for Tk geometry managers.
+
+Windows
+-------
+
+- Issue #21671, CVE-2014-0224: The bundled version of OpenSSL has been
+ updated to 1.0.1h.
+
+What's New in Python 2.7.7
+==========================
+
+*Release date: 2014-05-31*
+
+Build
+-----
+
+- Issue #21462: Build the Windows installers with OpenSSL 1.0.1g.
+
+- Issue #19866: Include some test data in the Windows installers, so tests don't
+ fail.
+
+
+What's New in Python 2.7.7 release candidate 1?
+===============================================
+
+*Release date: 2014-05-17*
+
+Core and Builtins
+-----------------
+
+- Issue #21350: Fix file.writelines() to accept arbitrary buffer objects,
+ as advertised. Patch by Brian Kearns.
+
+- Issue #20437: Fixed 43 potential bugs when deleting object references.
+
+- Issue #21134: Fix segfault when str is called on an uninitialized
+ UnicodeEncodeError, UnicodeDecodeError, or UnicodeTranslateError object.
+
+- Issue #20494: Ensure that free()d memory arenas are really released on POSIX
+ systems supporting anonymous memory mappings. Patch by Charles-François
+ Natali.
+
+- Issue #17825: Cursor "^" is correctly positioned for SyntaxError and
+ IndentationError.
+
+- Raise a better error when non-unicode codecs are used for a file's coding
+ cookie.
+
+- Issue #17976: Fixed potential problem with file.write() not detecting IO error
+ by inspecting the return value of fwrite(). Based on patches by Jaakko Moisio
+ and Victor Stinner.
+
+- Issue #14432: Generator now clears the borrowed reference to the thread
+ state. Fix a crash when a generator is created in a C thread that is
+ destroyed while the generator is still used. The issue was that a generator
+ contains a frame, and the frame kept a reference to the Python state of the
+ destroyed C thread. The crash occurs when a trace function is setup.
+
+- Issue #19932: Fix typo in import.h, missing whitespaces in function prototypes.
+
+- Issue #19638: Fix possible crash / undefined behaviour from huge (more than 2
+ billion characters) input strings in _Py_dg_strtod.
+
+- Issue #12546: Allow \x00 to be used as a fill character when using str, int,
+ float, and complex __format__ methods.
+
+Library
+-------
+
+- Issue #10744: Fix PEP 3118 format strings on ctypes objects with a nontrivial
+ shape.
+
+- Issue #7776: Backport Fix ``Host:'' header and reconnection when using
+ http.client.HTTPConnection.set_tunnel() from Python 3. Patch by Nikolaus
+ Rath.
+
+- Issue #21306: Backport hmac.compare_digest from Python 3. This is part of PEP
+ 466.
+
+- Issue #21470: Do a better job seeding the random number generator by
+ using enough bytes to span the full state space of the Mersenne Twister.
+
+- Issue #21469: Reduced the risk of false positives in robotparser by
+ checking to make sure that robots.txt has been read or does not exist
+ prior to returning True in can_fetch().
+
+- Issue #21321: itertools.islice() now releases the reference to the source
+ iterator when the slice is exhausted. Patch by Anton Afanasyev.
+
+- Issue #9291: Do not attempt to re-encode mimetype data read from registry in
+ ANSI mode. Initial patches by Dmitry Jemerov & Vladimir Iofik.
+
+- Issue #21349: Passing a memoryview to _winreg.SetValueEx now correctly raises
+ a TypeError where it previously crashed the interpreter. Patch by Brian Kearns.
+
+- Fix arbitrary memory access in JSONDecoder.raw_decode with a negative second
+ parameter. Bug reported by Guido Vranken.
+
+- Issue #21172: isinstance check relaxed from dict to collections.Mapping.
+
+- Issue #21191: In os.fdopen, never close the file descriptor when an exception
+ happens.
+
+- Issue #21149: Improved thread-safety in logging cleanup during interpreter
+ shutdown. Thanks to Devin Jeanpierre for the patch.
+
+- Fix possible overflow bug in strop.expandtabs. You shouldn't be using this
+ module!
+
+- Issue #20145: `assertRaisesRegex` now raises a TypeError if the second
+ argument is not a string or compiled regex.
+
+- Issue #21058: Fix a file descriptor leak in tempfile.NamedTemporaryFile();
+ close the file descriptor if os.fdopen() fails.
+
+- Issue #20283: RE pattern methods now accept the string keyword parameters
+ as documented. The pattern and source keyword parameters are left as
+ deprecated aliases.
+
+- Issue #11599: When an external command (e.g. compiler) fails, distutils now
+ prints out the whole command line (instead of just the command name) if the
+ environment variable DISTUTILS_DEBUG is set.
+
+- Issue #4931: distutils should not produce unhelpful "error: None" messages
+ anymore. distutils.util.grok_environment_error is kept but doc-deprecated.
+
+- Improve the random module's default seeding to use 256 bits of entropy
+ from os.urandom(). This was already done for Python 3, mildly improving
+ security with a bigger seed space.
+
+- Issue #15618: Make turtle.py compatible with 'from __future__ import
+ unicode_literals'. Initial patch by Juancarlo Añez.
+
+- Issue #20501: The fileinput module no longer reads the whole file into memory
+ when using fileinput.hook_encoded.
+
+- Issue #6815: os.path.expandvars() now supports non-ASCII Unicode environment
+ variable names and values.
+
+- Issue #20635: Fixed grid_columnconfigure() and grid_rowconfigure() methods of
+ Tkinter widgets to work in wantobjects=True mode.
+
+- Issue #17671: Fixed a crash when using a non-initialized io.BufferedRWPair.
+ Based on patch by Stephen Tu.
+
+- Issue #8478: Untokenizer.compat processes first token from iterator input.
+ Patch based on lines from Georg Brandl, Eric Snow, and Gareth Rees.
+
+- Issue #20594: Avoid name clash with the libc function posix_close.
+
+- Issue #19856: shutil.move() failed to move a directory to another directory
+ on Windows if the source name ends with os.altsep.
+
+- Issue #14983: email.generator now always adds a line end after each MIME
+ boundary marker, instead of doing so only when there is an epilogue. This
+ fixes an RFC compliance bug and solves an issue with signed MIME parts.
+
+- Issue #20013: Some imap servers disconnect if the current mailbox is
+ deleted, and imaplib did not handle that case gracefully. Now it
+ handles the 'bye' correctly.
+
+- Issue #20426: When passing the re.DEBUG flag, re.compile() displays the
+ debug output every time it is called, regardless of the compilation cache.
+
+- Issue #20368: The null character is now correctly passed from Tcl to Python
+ (in unicode strings only). Improved error handling in variable-related
+ commands.
+
+- Issue #20435: Fix _pyio.StringIO.getvalue() to take into account newline
+ translation settings.
+
+- Issue #20288: fix handling of invalid numeric charrefs in HTMLParser.
+
+- Issue #19456: ntpath.join() now joins relative paths correctly when a drive
+ is present.
+
+- Issue #8260: The read(), readline() and readlines() methods of
+ codecs.StreamReader returned incomplete data when they were called after
+ readline() or read(size). Based on patch by Amaury Forgeot d'Arc.
+
+- Issue #20374: Fix build with GNU readline >= 6.3.
+
+- Issue #14548: Make multiprocessing finalizers check pid before
+ running to cope with possibility of gc running just after fork.
+ (Backport from 3.x.)
+
+- Issue #20262: Warnings are now raised when duplicate names are added to a ZIP
+ file or when a too long ZIP file comment is truncated.
+
+- Issue #20270: urllib and urlparse now support empty ports.
+
+- Issue #20243: TarFile no longer raises ReadError when opened in write mode.
+
+- Issue #20245: The open functions in the tarfile module now correctly handle
+ empty mode.
+
+- Issue #20086: Restored the use of locale-independent mapping instead of
+ locale-dependent str.lower() in locale.normalize().
+
+- Issue #20246: Fix buffer overflow in socket.recvfrom_into.
+
+- Issue #19082: Made the SimpleXMLRPCServer and xmlrpclib examples work, both
+ in the modules and in the documentation.
+
+- Issue #13107: argparse and optparse no longer raise an exception when
+ outputting help in an environment with a too small COLUMNS value. Based on
+ patch by Elazar Gershuni.
+
+- Issue #20207: Always disable SSLv2 except when PROTOCOL_SSLv2 is explicitly
+ asked for.
+
+- Issue #20072: Fixed multiple errors in tkinter when wantobjects is False.
+
+- Issue #1065986: pydoc can now handle unicode strings.
+
+- Issue #16039: CVE-2013-1752: Change use of readline in imaplib module to
+ limit line length. Patch by Emil Lind.
+
+- Issue #19422: Explicitly disallow non-SOCK_STREAM sockets in the ssl
+ module, rather than silently letting them emit clear text data.
+
+- Issue #20027: Fixed locale aliases for devanagari locales.
+
+- Issue #20067: Tkinter variables now work when wantobjects is false.
+
+- Issue #19020: Tkinter now uses splitlist() instead of split() in configure
+ methods.
+
+- Issue #12226: HTTPS is now used by default when connecting to PyPI.
+
+- Issue #20048: Fixed ZipExtFile.peek() when it is called on the boundary of
+ the uncompress buffer and read() goes through more than one read buffer.
+
+- Issue #20034: Updated alias mapping to most recent locale.alias file
+ from X.org distribution using makelocalealias.py.
+
+- Issue #5815: Fixed support for locales with modifiers. Fixed support for
+ locale encodings with hyphens.
+
+- Issue #20026: Fix the sqlite module to correctly handle an invalid isolation
+ level (wrong type).
+
+- Issue #18829: csv.Dialect() now checks type for delimiter, escapechar and
+ quotechar fields. Original patch by Vajrasky Kok.
+
+- Issue #19855: uuid.getnode() on Unix now looks on the PATH for the
+ executables used to find the mac address, with /sbin and /usr/sbin as
+ fallbacks.
+
+- Issue #20007: HTTPResponse.read(0) no longer prematurely closes the
+ connection. Original patch by Simon Sapin.
+
+- Issue #19912: Fixed numerous bugs in ntpath.splitunc().
+
+- Issue #19623: Fixed writing to unseekable files in the aifc module.
+ Fixed writing 'ulaw' (lower case) compressed AIFC files.
+
+- Issue #17919: select.poll.register() again works with poll.POLLNVAL on AIX.
+ Fixed integer overflow in the eventmask parameter.
+
+- Issue #17200: The timeout for telnetlib's read_until and expect was broken by
+ the fix for Issue #14635 in Python 2.7.4: it was interpreted as milliseconds
+ instead of seconds when the platform supports select.poll (i.e. everywhere).
+ It is now treated as seconds once again.
+
+- Issue #19099: The struct module now supports Unicode format strings.
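+
+ For example, a unicode format string is now accepted::
+
+     import struct
+     packed = struct.pack(u'>HH', 1, 2)
+     assert struct.unpack(u'>HH', packed) == (1, 2)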
+
+- Issue #19878: Fix segfault in bz2 module after calling __init__ twice with
+ non-existent filename. Initial patch by Vajrasky Kok.
+
+- Issue #16373: Prevent infinite recursion for ABC Set class comparisons.
+
+- Issue #19138: doctest's IGNORE_EXCEPTION_DETAIL now allows a match when
+ no exception detail exists (no colon following the exception's name, or
+ a colon does follow but no text follows the colon).
+
+- Issue #16231: Fixed pickle.Pickler to only fall back to its default pickling
+ behaviour when Pickler.persistent_id returns None, but not for any other
+ false values. This allows false values other than None to be used as
+ persistent IDs. This behaviour is consistent with cPickle.
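+
+ A rough sketch of what this enables (ExternalRef is a made-up stand-in for an
+ externally stored object)::
+
+     import pickle
+
+     class ExternalRef(object):
+         def __init__(self, ref_id):
+             self.ref_id = ref_id
+
+     class RefPickler(pickle.Pickler):
+         def persistent_id(self, obj):
+             if isinstance(obj, ExternalRef):
+                 return obj.ref_id        # may legitimately be 0 or '' now
+             return None                  # only None means "pickle normally"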
+
+- Issue #11508: Fixed uuid.getnode() and uuid.uuid1() in environments with a
+ virtual interface. Original patch by Kent Frazier.
+
+- Issue #11489: JSON decoder now accepts lone surrogates.
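+
+ For example::
+
+     import json
+     assert json.loads('"\\ud800"') == u'\ud800'   # lone surrogate accepted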
+
+- Fix test.test_support.bind_port() to not cause an error when Python was
+ compiled on a system with SO_REUSEPORT defined in the headers but run on
+ a system with an OS kernel that does not support that new socket option.
+
+- Issue #19633: Fixed writing uncompressed 16- and 32-bit wave files on
+ big-endian platforms.
+
+- Issue #19449: in csv's writerow, handle non-string keys when generating the
+ error message that certain keys are not in the 'fieldnames' list.
+
+- Issue #12853: Fix NameError in distutils.command.upload.
+
+- Issue #19523: Closed FileHandler leak which occurred when delay was set.
+
+- Issue #1575020: Fixed support of 24-bit wave files on big-endian platforms.
+
+- Issue #19480: HTMLParser now accepts all valid start-tag names as defined
+ by the HTML5 standard.
+
+- Issue #17827: Add the missing documentation for ``codecs.encode`` and
+ ``codecs.decode``.
+
+- Issue #6157: Fixed Tkinter.Text.debug(). Original patch by Guilherme Polo.
+
+- Issue #6160: The bbox() method of tkinter.Spinbox now returns a tuple of
+ integers instead of a string. Based on patch by Guilherme Polo.
+
+- Issue #19286: Directories in ``package_data`` are no longer added to
+ the filelist, preventing failure outlined in the ticket.
+
+- Issue #6676: Ensure a meaningful exception is raised when attempting
+ to parse more than one XML document per pyexpat xmlparser instance.
+ (Original patches by Hirokazu Yamamoto and Amaury Forgeot d'Arc, with
+ suggested wording by David Gutteridge)
+
+- Issue #21311: Avoid exception in _osx_support with non-standard compiler
+ configurations. Patch by John Szakmeister.
+
+Tools/Demos
+-----------
+
+- Issue #3561: The Windows installer now has an option, off by default, for
+ placing the Python installation into the system "Path" environment variable.
+ This was backported from Python 3.3.
+
+- Add support for ``yield from`` to 2to3.
+
+- Add support for the PEP 465 matrix multiplication operator to 2to3.
+
+- Issue #19936: Added executable bits or shebang lines to Python scripts which
+ require them. Disabled executable bits and shebang lines in test and
+ benchmark files in order to prevent using a random system Python, and in
+ source files of modules which don't provide a command line interface.
+
+IDLE
+----
+
+- Issue #18104: Add idlelib/idle_test/htest.py with a few sample tests to begin
+ consolidating and improving human-validated tests of Idle. Change other files
+ as needed to work with htest. Running the module as __main__ runs all tests.
+
+- Issue #21139: Change default paragraph width to 72, the PEP 8 recommendation.
+
+- Issue #21284: Paragraph reformat test passes after user changes reformat width.
+
+- Issue #20406: Use Python application icons for Idle window title bars.
+ Patch mostly by Serhiy Storchaka.
+
+- Issue #21029: Occurrences of "print" are now consistently colored as
+ being a keyword (the colorizer doesn't know if print functions are
+ enabled in the source).
+
+- Issue #17721: Remove the non-functional configuration dialog help button until
+ we make it actually give some help when clicked. Patch by Guilherme Simões.
+
+- Issue #17390: Add Python version to Idle editor window title bar.
+ Original patches by Edmond Burnett and Kent Johnson.
+
+- Issue #20058: sys.stdin.readline() in IDLE now always returns only one line.
+
+- Issue #19481: print() of a unicode, str or bytearray subclass instance in IDLE
+ no longer hangs.
+
+- Issue #18270: Prevent possible IDLE AttributeError on OS X when no initial
+ shell window is present.
+
+- Issue #17654: Ensure IDLE menus are customized properly on OS X for
+ non-framework builds and for all variants of Tk.
+
+Tests
+-----
+
+- Issue #17752: Fix distutils tests when run from the installed location.
+
+- Issue #18604: Consolidated checks for GUI availability. All platforms now
+ at least check whether Tk can be instantiated when the GUI resource is
+ requested.
+
+- Issue #20946: Correct alignment assumptions of some ctypes tests.
+
+- Issue #20743: Fix a reference leak in test_tcl.
+
+- Issue #20510: Rewrote test_exit in test_sys to match existing comments,
+ use modern unittest features, and use helpers from test.script_helper
+ instead of using subprocess directly. Initial patch by Gareth Rees.
+
+- Issue #20532: Tests which use _testcapi are now marked as CPython-only.
+
+- Issue #19920: Added tests for TarFile.list(). Based on patch by Vajrasky Kok.
+
+- Issue #19990: Added tests for the imghdr module. Based on patch by
+ Claudiu Popa.
+
+- Issue #19804: The test_find_mac test in test_uuid is now skipped if the
+ ifconfig executable is not available.
+
+- Issue #19886: Use better estimated memory requirements for bigmem tests.
+
+- Backported tests for Tkinter variables.
+
+- Issue #19320: test_tcl no longer fails when wantobjects is false.
+
+- Issue #19683: Removed empty tests from test_minidom. Initial patch by
+ Ajitesh Gupta.
+
+- Issue #19928: Implemented a test for repr() of cell objects.
+
+- Issue #19595, #19987: Re-enabled a long-disabled test in test_winsound.
+
+- Issue #19588: Fixed tests in test_random that were silently skipped most
+ of the time. Patch by Julian Gindi.
+
+- Issue #17883: Tweak test_tcl testLoadWithUNC to skip the test in the
+ event of a permission error on Windows and to properly report other
+ skip conditions.
+
+- Issue #17883: Backported _is_gui_available() in test.test_support to
+ avoid hanging Windows buildbots on test_ttk_guionly.
+
+- Issue #18702, #19572: All skipped tests now reported as skipped.
+
+- Issue #19085: Added basic tests for all tkinter widget options.
+
+- Issue #20605: Make test_socket getaddrinfo OS X segfault test more robust.
+
+- Issue #20939: Avoid various network test failures due to new
+ redirect of http://www.python.org/ to https://www.python.org:
+ use http://www.example.com instead.
+
+- Issue #21093: Prevent failures of ctypes test_macholib on OS X if a
+ copy of libz exists in $HOME/lib or /usr/local/lib.
+
+Build
+-----
+
+- Issue #21285: Refactor and fix curses configure check to always search
+ in a ncursesw directory.
+
+Documentation
+-------------
+
+- Issue #20255: Update the about and bugs pages.
+
+- Issue #18840: Introduce the json module in the tutorial, and de-emphasize
+ the pickle module.
+
+- Issue #19795: Improved markup of True/False constants.
+
+Windows
+-------
+
+- Issue #21303, #20565: Updated the version of Tcl/Tk included in the
+ installer from 8.5.2 to 8.5.15.
+
+Mac OS X
+--------
+
+- As of 2.7.8, the 32-bit-only installer will support OS X 10.5
+ and later systems as is currently done for Python 3.x installers.
+ For 2.7.7 only, we will provide three installers:
+ the legacy deprecated 10.3+ 32-bit-only format;
+ the newer 10.5+ 32-bit-only format;
+ and the unchanged 10.6+ 64-/32-bit format.
+ Although binary installers will no longer be available from
+ python.org as of 2.7.8, it will still be possible to build from
+ source on 10.3.9 and 10.4 systems if necessary.
+ See Mac/BuildScript/README.txt for more information.
+
+
+What's New in Python 2.7.6?
+===========================
+
+*Release date: 2013-11-10*
+
+Library
+-------
+
+- Issue #19435: Fix directory traversal attack on CGIHttpRequestHandler.
+
+IDLE
+----
+
+- Issue #19426: Fixed the opening of Python source file with specified encoding.
+
+Tests
+-----
+
+- Issue #19457: Fixed xmlcharrefreplace tests on wide build when tests are
+ loaded from .py[co] files.
+
+Build
+-----
+
+- Issue #15663: Revert OS X installer built-in Tcl/Tk support for 2.7.6.
+ Some third-party projects, such as Matplotlib and PIL/Pillow,
+ depended on being able to build with Tcl and Tk frameworks in
+ /Library/Frameworks.
+
+
+What's New in Python 2.7.6 release candidate 1?
+===============================================
+
+*Release date: 2013-10-26*
+
+Core and Builtins
+-----------------
+
+- Issue #18603: Ensure that PyOS_mystricmp and PyOS_mystrnicmp are in the
+ Python executable and not removed by the linker's optimizer.
+
+- Issue #19279: The UTF-7 decoder no longer produces illegal unicode strings.
+
+- Issue #18739: Fix an inconsistency between math.log(n) and math.log(long(n));
+ the results could be off from one another by a ulp or two.
+
+- Issue #13461: Fix a crash in the "replace" error handler on 64-bit platforms.
+ Patch by Yogesh Chaudhari.
+
+- Issue #15866: The xmlcharrefreplace error handler no longer produces two XML
+ entities for a non-BMP character on narrow builds.
+
+- Issue #18184: PyUnicode_FromFormat() and PyUnicode_FromFormatV() now raise
+ OverflowError when an argument of %c format is out of range.
+
+- Issue #18137: Detect integer overflow on precision in float.__format__()
+ and complex.__format__().
+
+- Issue #18038: SyntaxError raised during compilation of sources with an illegal
+ encoding now always contains an encoding name.
+
+- Issue #18019: Fix crash in the repr of dictionaries containing their own
+ views.
+
+- Issue #18427: str.replace could crash the interpreter with huge strings.
+
+Library
+-------
+
+- Issue #19393: Fix symtable.symtable function to not be confused when there are
+ functions or classes named "top".
+
+- Issue #19327: Fixed the handling of regular expressions with a too large
+ charset.
+
+- Issue #19350: Increased the test coverage of macurl2path. Patch by Colin
+ Williams.
+
+- Issue #19352: Fix unittest discovery when a module can be reached
+ through several paths (e.g. under Debian/Ubuntu with virtualenv).
+
+- Issue #15207: Fix mimetypes to read from the correct part of the Windows
+ registry. Original patch by Dave Chambers.
+
+- Issue #8964: fix platform._sys_version to handle IronPython 2.6+.
+ Patch by Martin Matusiak.
+
+- Issue #16038: CVE-2013-1752: ftplib: Limit amount of data read by
+ limiting the call to readline(). Original patch by Michał
+ Jastrzębski and Giampaolo Rodola.
+
+- Issue #19276: Fixed the wave module on 64-bit big-endian platforms.
+
+- Issue #18458: Prevent crashes with newer versions of libedit. Its readline
+ emulation has changed from 0-based indexing to 1-based like gnu readline.
+ Original patch by Ronald Oussoren.
+
+- Issue #18919: If the close() method of a writer in the sunau or wave module
+ failed, a second invocation of close() and the destructor no longer raise an
+ exception. A second invocation of close() on a sunau writer now has no
+ effect. The aifc module now accepts lowercase names for the 'ulaw' and
+ 'alaw' codecs.
+
+- Issue #19131: The aifc module now correctly reads and writes sampwidth of
+ compressed streams.
+
+- Issue #19158: A rare race in BoundedSemaphore could allow .release() to
+ succeed too often.
+
+- Issue #18037: 2to3 now escapes '\u' and '\U' in native strings.
+
+- Issue #19137: The pprint module now correctly formats empty set and frozenset
+ and instances of set and frozenset subclasses.
+
+- Issue #16040: CVE-2013-1752: nntplib: Limit maximum line lengths to 2048 to
+ prevent readline() calls from consuming too much memory. Patch by Jyrki
+ Pulliainen.
+
+- Issue #12641: Avoid passing "-mno-cygwin" to the mingw32 compiler, except
+ when necessary. Patch by Oscar Benjamin.
+
+- Properly initialize all fields of a SSL object after allocation.
+
+- Issue #4366: Fix building extensions on all platforms when --enable-shared
+ is used.
+
+- Issue #18950: Fix miscellaneous bugs in the sunau module.
+ Au_read.readframes() now updates current file position and reads correct
+ number of frames from multichannel stream. Au_write.writeframesraw() now
+ correctly updates current file position. Au_read and Au_write now correctly
+ work with a file object if the start file position is not zero.
+
+- Issue #18050: Fixed an incompatibility of the re module with Python 2.7.3
+ and older binaries.
+
+- Issue #19037: The mailbox module now makes all changes to maildir files
+ before moving them into place, to avoid race conditions with other programs
+ that may be accessing the maildir directory.
+
+- Issue #14984: On POSIX systems, when netrc is called without a filename
+ argument (and therefore is reading the user's $HOME/.netrc file), it now
+ enforces the same security rules as typical ftp clients: the .netrc file must
+ be owned by the user that owns the process and must not be readable by any
+ other user.
+
+- Issue #17324: Fix http.server's handling of request paths with a trailing '/'.
+ Patch contributed by Vajrasky Kok.
+
+- Issue #19018: The heapq.merge() function no longer suppresses IndexError
+ in the underlying iterables.
+
+- Issue #18784: The uuid module no longer attempts to load libc via ctypes.CDLL,
+ if all necessary functions are already found in libuuid.
+ Patch by Evgeny Sologubov.
+
+- Issue #14971: unittest test discovery no longer gets confused when a function
+ has a different __name__ than its name in the TestCase class dictionary.
+
+- Issue #18672: Fixed format specifiers for Py_ssize_t in debugging output in
+ the _sre module.
+
+- Issue #18830: inspect.getclasstree() no longer produces duplicated entries
+ even when the input list contains duplicates.
+
+- Issue #18909: Fix _tkinter.tkapp.interpaddr() on Windows 64-bit, don't cast
+ 64-bit pointer to long (32 bits).
+
+- Issue #18876: The FileIO.mode attribute now better reflects the actual mode
+ under which the file was opened. Patch by Erik Bray.
+
+- Issue #18851: Avoid a double close of subprocess pipes when the child
+ process fails to start.
+
+- Issue #18418: After fork(), reinitialize all thread states, not only active ones.
+ Patch by A. Jesse Jiryu Davis.
+
+- Issue #11973: Fix a problem in kevent. The flags and fflags fields are now
+ properly handled as unsigned.
+
+- Issue #16809: Fixed some tkinter incompatibilities with Tcl/Tk 8.6.
+
+- Issue #16809: Tkinter's splitlist() and split() methods now accept a Tcl_Obj
+ argument.
+
+- Issue #17119: Fixed integer overflows when processing large Unicode strings
+ and tuples in the tkinter module.
+
+- Issue #15233: Python now guarantees that callables registered with the atexit
+ module will be called in a deterministic order.
+
+- Issue #18747: Re-seed OpenSSL's pseudo-random number generator after fork.
+ A pthread_atfork() parent handler is used to seed the PRNG with pid, time
+ and some stack data.
+
+- Issue #8865: Concurrent invocation of select.poll.poll() now raises a
+ RuntimeError exception. Patch by Christian Schubert.
+
+- Issue #13461: Fix a crash in the TextIOWrapper.tell method on 64-bit
+ platforms. Patch by Yogesh Chaudhari.
+
+- Issue #18777: The ssl module now uses the new CRYPTO_THREADID API of
+ OpenSSL 1.0.0+ instead of the deprecated CRYPTO id callback function.
+
+- Issue #18768: Correct the doc string of RAND_egd(). Patch by Vajrasky Kok.
+
+- Issue #18178: Fix ctypes on BSD. dlmalloc.c was compiled twice which broke
+ malloc weak symbols.
+
+- Issue #18709: Fix CVE-2013-4238. The SSL module now handles NULL bytes
+ inside subjectAltName correctly. Formerly the module used OpenSSL's
+ GENERAL_NAME_print() function to get the string representation of ASN.1
+ strings for ``rfc822Name`` (email), ``dNSName`` (DNS) and
+ ``uniformResourceIdentifier`` (URI).
+
+- Issue #18756: Improve error reporting in os.urandom() when the failure
+ is due to something other than /dev/urandom not existing (for example,
+ exhausting the file descriptor limit).
+
+- Fix tkinter regression introduced by the security fix in issue #16248.
+
+- Issue #18676: Change 'positive' to 'non-negative' in queue.py put and get
+ docstrings and ValueError messages. Patch by Zhongyue Luo.
+
+- Issue #17998: Fix an internal error in regular expression engine.
+
+- Issue #17557: Fix os.getgroups() to work with the modified behavior of
+ getgroups(2) on OS X 10.8. Original patch by Mateusz Lenik.
+
+- Issue #18455: multiprocessing should not retry connect() with same socket.
+
+- Issue #18513: Fix behaviour of cmath.rect w.r.t. signed zeros on OS X 10.8 +
+ gcc.
+
+- Issue #18101: Tcl.split() now processes Unicode strings nested in a tuple as
+ it does with byte strings.
+
+- Issue #18347: ElementTree's html serializer now preserves the case of
+ closing tags.
+
+- Issue #17261: Ensure multiprocessing's proxies use proper address.
+
+- Issue #17097: Make multiprocessing ignore EINTR.
+
+- Issue #18155: The csv module now correctly handles csv files that use
+ a delimiter character that has a special meaning in regexes, instead of
+ throwing an exception.
+
+- Issue #18135: ssl.SSLSocket.write() now raises an OverflowError if the input
+ string is longer than 2 gigabytes. The ssl module does not support partial
+ writes.
+
+- Issue #18167: cgi.FieldStorage no longer fails to handle multipart/form-data
+ when \r\n appears at end of 65535 bytes without other newlines.
+
+- Issue #17403: urllib.parse.robotparser normalizes the urls before adding them
+ to the ruleline. This helps in handling certain types of invalid urls in a
+ conservative manner. Patch contributed by Mher Movsisyan.
+
+- Implement inequality on weakref.WeakSet.
+
+- Issue #17981: Closed socket on error in SysLogHandler.
+
+- Issue #18015: Fix unpickling of 2.7.3 and 2.7.4 namedtuples.
+
+- Issue #17754: Make ctypes.util.find_library() independent of the locale.
+
+- Fix typos in the multiprocessing module.
+
+- Issue #17269: Workaround for socket.getaddrinfo crash on MacOS X
+ with port None or "0" and flags AI_NUMERICSERV.
+
+- Issue #18080: When building a C extension module on OS X, if the compiler
+ is overridden with the CC environment variable, use the new compiler as
+ the default for linking if LDSHARED is not also overridden. This restores
+ Distutils behavior introduced in 2.7.3 and inadvertently dropped in 2.7.4.
+
+- Issue #18071: C extension module builds on OS X could fail with TypeError
+ if the Xcode command line tools were not installed.
+
+- Issue #18113: Fixed a refcount leak in the curses.panel module's
+ set_userptr() method. Reported by Atsuo Ishimoto.
+
+- Issue #18849: Fixed a Windows-specific tempfile bug where collision with an
+ existing directory caused mkstemp and related APIs to fail instead of
+ retrying. Report and fix by Vlad Shcherbina.
+
+- Issue #19400: Prevent extension module build failures with Xcode 5 on OS X
+ 10.8+ when using a universal Python that included a PPC architecture,
+ such as with a python.org 32-bit-only binary installer.
+
+Tools/Demos
+-----------
+
+- Issue #18873: 2to3 and the findnocoding.py script now detect Python source
+ code encoding only in comment lines.
+
+- Issue #18817: Fix a resource warning in Lib/aifc.py demo.
+
+- Issue #18439: Make patchcheck work on Windows for ACKS, NEWS.
+
+- Issue #18448: Fix a typo in Demo/newmetaclasses/Eiffel.py.
+
+- Issue #12990: The "Python Launcher" on OSX could not launch python scripts
+ that have paths that include wide characters.
+
+Build
+-----
+
+- Issue #16067: Add description into MSI file to replace installer's temporary name.
+
+- Issue #18256: Compilation fix for recent AIX releases. Patch by
+ David Edelsohn.
+
+- Issue #18098: The deprecated OS X Build Applet.app fails to build on
+ OS X 10.8 systems because the Apple-deprecated QuickDraw headers have
+ been removed from Xcode 4. Skip building it in this case.
+
+- Issue #1584: Provide options to override default search paths for
+ Tcl and Tk when building _tkinter.
+
+- Issue #15663: Tcl/Tk 8.5.15 is now included with the OS X 10.6+
+ 64-bit/32-bit installer for 10.6+. It is no longer necessary
+ to install a third-party version of Tcl/Tk 8.5 to work around the
+ problems in the Apple-supplied Tcl/Tk 8.5 shipped in OS X 10.6
+ and later releases.
+
+- Issue #19019: Change the OS X installer build script to use CFLAGS instead
+ of OPT for special build options. By setting OPT, some compiler-specific
+ options like -fwrapv were overridden and thus not used, which could result
+ in broken interpreters when building with clang.
+
+IDLE
+----
+
+- Issue #18873: IDLE now detects Python source code encoding only in comment
+ lines.
+
+- Issue #18988: The "Tab" key now works when a word is already autocompleted.
+
+- Issue #18489: Add tests for SearchEngine. Original patch by Phil Webster.
+
+- Issue #18429: Format / Format Paragraph, now works when comment blocks
+ are selected. As with text blocks, this works best when the selection
+ only includes complete lines.
+
+- Issue #18226: Add docstrings and unittests for FormatParagraph.py.
+ Original patches by Todd Rovito and Phil Webster.
+
+- Issue #18279: Format - Strip trailing whitespace no longer marks a file as
+ changed when it has not been changed. This fix followed the addition of a
+ test file originally written by Phil Webster (the issue's main goal).
+
+- Issue #18539: Calltips now work for float default arguments.
+
+- Issue #7136: In the Idle File menu, "New Window" is renamed "New File".
+ Patch by Tal Einat, Roger Serwy, and Todd Rovito.
+
+- Issue #8515: Set __file__ when running a file in IDLE.
+ Initial patch by Bruce Frederiksen.
+
+- Issue #5492: Avoid traceback when exiting IDLE caused by a race condition.
+
+- Issue #17511: Keep IDLE find dialog open after clicking "Find Next".
+ Original patch by Sarah K.
+
+- Issue #15392: Create a unittest framework for IDLE.
+ Preliminary patch by Rajagopalasarma Jayakrishnan.
+ See Lib/idlelib/idle_test/README.txt for how to run Idle tests.
+
+- Issue #14146: Highlight source line while debugging on Windows.
+
+- Issue #17532: Always include Options menu for IDLE on OS X.
+ Patch by Guilherme Simões.
+
+Tests
+-----
+
+- Issue #18919: Added tests for the sunau module. Unified and extended tests
+ for audio modules: aifc, sunau and wave.
+
+- Issue #18792: Use "127.0.0.1" or "::1" instead of "localhost" as much as
+ possible, since "localhost" goes through a DNS lookup under recent Windows
+ versions.
+
+- Issue #18357: add tests for dictview set difference.
+ Patch by Fraser Tweedale.
+
+- Issue #11185: Fix test_wait4 under AIX. Patch by Sébastien Sablé.
+
+- Issue #18094: test_uuid no longer reports skipped tests as passed.
+
+- Issue #11995: test_pydoc doesn't import all sys.path modules anymore.
+
+Documentation
+-------------
+
+- Issue #18758: Fixed and improved cross-references.
+
+- Issue #18718: Fixed datetime documentation that was contradictory on leap
+ second support.
+
+- Issue #17701: Improved the strftime documentation.
+
+- Issue #17844: Refactored the documentation of Python-specific encodings.
+ Added links to encoders and decoders for binary-to-binary codecs.
+
+
+What's New in Python 2.7.5?
+===========================
+
+*Release date: 2013-05-12*
+
+Core and Builtins
+-----------------
+
+- Issue #15535: Fixed regression in the pickling of named tuples by
+ removing the __dict__ property introduced in 2.7.4.
+
+- Issue #17857: Prevent build failures with pre-3.5.0 versions of sqlite3,
+ such as was shipped with Centos 5 and Mac OS X 10.4.
+
+- Issue #17703: Fix a regression where an illegal use of Py_DECREF() after
+ interpreter finalization can cause a crash.
+
+- Issue #16447: Fixed potential segmentation fault when setting __name__ on a
+ class.
+
+- Issue #17610: Don't rely on non-standard behavior of the C qsort() function.
+
+Library
+-------
+
+- Issue #17979: Fixed the re module in builds with --disable-unicode.
+
+- Issue #17606: Fixed support of encoded byte strings in the XMLGenerator
+ .characters() and ignorableWhitespace() methods. Original patch by Sebastian
+ Ortiz Vasquez.
+
+- Issue #16601: Restarting iteration over tarfile no longer continues from
+ where it left off. Patch by Michael Birtwell.
+
+- Issue #16584: in filecmp._cmp, catch IOError as well as os.error.
+ Patch by Till Maas.
+
+- Issue #17926: Fix dbm.__contains__ on 64-bit big-endian machines.
+
+- Issue #19267: Fix support of multibyte encoding (ex: UTF-16) in the logging
+ module.
+
+- Issue #17918: When using SSLSocket.accept(), if the SSL handshake failed
+ on the new socket, the socket would linger indefinitely. Thanks to
+ Peter Saveliev for reporting.
+
+- Issue #17289: The readline module now plays nicer with external modules
+ or applications changing the rl_completer_word_break_characters global
+ variable. Initial patch by Bradley Froehle.
+
+- Issue #12181: select module: Fix struct kevent definition on OpenBSD 64-bit
+ platforms. Patch by Federico Schwindt.
+
+- Issue #14173: Avoid crashing when reading a signal handler during
+ interpreter shutdown.
+
+- Issue #16316: mimetypes now recognizes the .xz and .txz (.tar.xz) extensions.
+
+- Issue #17192: Restore the patch for Issue #10309 which was omitted
+ in 2.7.4 when updating the bundled version of libffi used by ctypes.
+
+- Issue #17843: Removed test data file that was triggering false-positive virus
+ warnings with certain antivirus software.
+
+- Issue #17353: plistlib emitted empty data tags with deeply nested data
+ structures.
+
+- Issue #11714: Use 'with' statements to assure a Semaphore releases a
+ condition variable. Original patch by Thomas Rachel.
+
+- Issue #17795: Reverted backwards-incompatible change in SysLogHandler with
+ Unix domain sockets.
+
+- Issue #17555: Fix ForkAwareThreadLock so that the size of the after-fork
+ registry does not grow exponentially with the generation of the process.
+
+- Issue #17710: Fix cPickle raising a SystemError on bogus input.
+
+- Issue #17341: Include the invalid name in the error messages from re about
+ invalid group names.
+
+- Issue #17016: Get rid of possible pointer wraparounds and integer overflows
+ in the re module. Patch by Nickolai Zeldovich.
+
+- Issue #17536: Add to webbrowser's browser list: xdg-open, gvfs-open,
+ www-browser, x-www-browser, chromium browsers, iceweasel, iceape.
+
+- Issue #17656: Fix extraction of zip files with unicode member paths.
+
+- Issue #17666: Fix reading gzip files with an extra field.
+
+- Issue #13150, #17512: sysconfig no longer parses the Makefile and config.h
+ files when imported, instead doing it at build time. This makes importing
+ sysconfig faster and reduces Python startup time by 20%.
+
+- Issue #13163: Rename operands in smtplib.SMTP._get_socket to correct names;
+ fixes otherwise misleading output in tracebacks and when debug is on.
+
+- Issue #17526: fix an IndexError raised while passing code without filename to
+ inspect.findsource(). Initial patch by Tyler Doyle.
+
+Build
+-----
+
+- Issue #17547: In configure, explicitly pass -Wformat for the benefit of GCC
+ 4.8.
+
+- Issue #17682: Add the _io module to Modules/Setup.dist (commented out).
+
+- Issue #17086: Search the include and library directories provided by the
+ compiler.
+
+Tests
+-----
+
+- Issue #17928: Fix test_structmembers on 64-bit big-endian machines.
+
+- Issue #17883: Fix buildbot testing of Tkinter on Windows.
+ Patch by Zachary Ware.
+
+- Issue #7855: Add tests for ctypes/winreg for issues found in IronPython.
+ Initial patch by Dino Viehland.
+
+- Issue #17712: Fix test_gdb failures on Ubuntu 13.04.
+
+- Issue #17065: Use process-unique key for winreg tests to avoid failures if
+ test is run multiple times in parallel (eg: on a buildbot host).
+
+IDLE
+----
+
+- Issue #17838: Allow sys.stdin to be reassigned.
+
+- Issue #14735: Update IDLE docs to omit "Control-z on Windows".
+
+- Issue #17585: Fixed IDLE regression. Now closes when using exit() or quit().
+
+- Issue #17657: Show full Tk version in IDLE's about dialog.
+ Patch by Todd Rovito.
+
+- Issue #17613: Prevent traceback when removing syntax colorizer in IDLE.
+
+- Issue #1207589: Backwards-compatibility patch for right-click menu in IDLE.
+
+- Issue #16887: IDLE now accepts Cancel in tabify/untabify dialog box.
+
+- Issue #14254: IDLE now handles readline correctly across shell restarts.
+
+- Issue #17614: IDLE no longer raises exception when quickly closing a file.
+
+- Issue #6698: IDLE now opens just an editor window when configured to do so.
+
+- Issue #8900: Using keyboard shortcuts in IDLE to open a file no longer
+ raises an exception.
+
+- Issue #6649: Fixed missing exit status in IDLE. Patch by Guilherme Polo.
+
+Documentation
+-------------
+
+- Issue #15940: Specify effect of locale on time functions.
+
+- Issue #6696: add documentation for the Profile objects, and improve
+ profile/cProfile docs. Patch by Tom Pinckney.
+
+
+What's New in Python 2.7.4?
+===========================
+
+*Release date: 2013-04-06*
+
+Build
+-----
+
+- Issue #17550: Fix the --enable-profiling configure switch.
+
+Core and Builtins
+-----------------
+
+- Issue #15801 (again): With string % formatting, relax the type check for a
+ mapping such that any type with a __getitem__ can be used on the right hand
+ side.
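+
+ A minimal sketch of the relaxed check::
+
+     class Upper(object):
+         def __getitem__(self, key):      # not a dict, but has __getitem__
+             return key.upper()
+
+     assert '%(name)s' % Upper() == 'NAME'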
+
+IDLE
+----
+
+- Issue #17625: In IDLE, close the replace dialog after it is used.
+
+Tests
+-----
+
+- Issue #17835: Fix test_io when the default OS pipe buffer size is larger
+ than one million bytes.
+
+- Issue #17531: Fix tests that thought group and user ids were always the int
+ type. Also, always allow -1 as a valid group and user id.
+
+- Issue #17533: Fix test_xpickle with older versions of Python 2.5.
+
+Documentation
+-------------
+
+- Issue #17538: Document XML vulnerabilities.
+
+
+What's New in Python 2.7.4 release candidate 1
+==============================================
+
+*Release date: 2013-03-23*
+
+Core and Builtins
+-----------------
+
+- Issue #10211: Buffer objects expose the new buffer interface internally.
+
+- Issue #16445: Fixed potential segmentation fault when deleting an exception
+ message.
+
+- Issue #17275: Corrected class name in init error messages of the C version of
+ BufferedWriter and BufferedRandom.
+
+- Issue #7963: Fixed a misleading error message that was issued when an object
+ is called without arguments.
+
+- Issue #5308: Raise ValueError when marshalling too large object (a sequence
+ with size >= 2**31), instead of producing illegal marshal data.
+
+- Issue #17043: The unicode-internal decoder no longer reads past the end of
+ the input buffer.
+
+- Issue #16979: Fix error handling bugs in the unicode-escape-decode decoder.
+
+- Issue #10156: In the interpreter's initialization phase, unicode globals
+ are now initialized dynamically as needed.
+
+- Issue #16975: Fix error handling bug in the escape-decode decoder.
+
+- Issue #14850: Now a charmap decoder treats U+FFFE as "undefined mapping"
+ in any mapping, not only in a Unicode string.
+
+- Issue #11461: Fix the incremental UTF-16 decoder. Original patch by
+ Amaury Forgeot d'Arc.
+
+- Issue #16367: Fix FileIO.readall() on Windows for files larger than 2 GB.
+
+- Issue #15516: Fix a bug in PyString_FromFormat where it failed to properly
+ ignore errors from a __int__() method.
+
+- Issue #16839: Fix a segfault when calling unicode() on a classic class early
+ in interpreter initialization.
+
+- Issue #16761: Calling ``int()`` and ``long()`` with *base* argument only
+ now raises TypeError.
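+
+ For example::
+
+     int('ff', 16)        # unchanged: 255
+     try:
+         int(base=16)     # base given without a value to convert
+     except TypeError:
+         pass             # now raised, as described above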
+
+- Issue #16759: Support the full DWORD (unsigned long) range in Reg2Py
+ when retrieving a REG_DWORD value. This corrects functions like
+ winreg.QueryValueEx that may have been returning truncated values.
+
+- Issue #14420: Support the full DWORD (unsigned long) range in Py2Reg
+ when passed a REG_DWORD value. Fixes ValueError in winreg.SetValueEx when
+ given a long.
+
+- Issue #13863: Work around a buggy 'fstat' implementation on Windows / NTFS
+ that led to incorrect timestamps (off by one hour) being stored in .pyc files
+ on some systems.
+
+- Issue #16602: When a weakref's target was part of a long deallocation
+ chain, the object could remain reachable through its weakref even though
+ its refcount had dropped to zero.
+
+- Issue #9011: Fix hacky AST code that modified the CST when compiling
+ a negated numeric literal.
+
+- Issue #16306: Fix multiple error messages when unknown command line
+ parameters were passed to the interpreter. Patch by Hieu Nguyen.
+
+- Issue #15379: Fix passing of non-BMP characters as integers for the charmap
+ decoder (already working as unicode strings). Patch by Serhiy Storchaka.
+
+- Issue #16453: Fix equality testing of dead weakref objects.
+
+- Issue #9535: Fix pending signals that have been received but not yet
+ handled by Python to not persist after os.fork() in the child process.
+
+- Issue #15001: fix segfault on "del sys.modules['__main__']". Patch by Victor
+ Stinner.
+
+- Issue #5057: the peepholer no longer optimizes subscription on unicode
+ literals (e.g. u'foo'[0]) in order to produce compatible pyc files between
+ narrow and wide builds.
+
+- Issue #8401: assigning an int to a bytearray slice (e.g. b[3:4] = 5) now
+ raises an error.
+
+- Issue #14700: Fix buggy overflow checks for large width and precision
+ in string formatting operations.
+
+- Issue #16345: Fix an infinite loop when ``fromkeys`` on a dict subclass
+ received a nonempty dict from the constructor.
+
+- Issue #6074: Ensure cached bytecode files can always be updated by the
+ user that created them, even when the source file is read-only.
+
+- Issue #14783: Improve int() and long() docstrings and switch docstrings for
+ unicode(), slice(), range(), and xrange() to use multi-line signatures.
+
+- Issue #16030: Fix overflow bug in computing the `repr` of an xrange object
+ with large start, step or length.
+
+- Issue #16029: Fix overflow bug occurring when pickling xranges with large
+ start, step or length.
+
+- Issue #16037: Limit httplib's _read_status() function to work around broken
+ HTTP servers and reduce memory usage. It's actually a backport of a Python
+ 3.2 fix. Thanks to Adrien Kunysz.
+
+- Issue #16588: Silence unused-but-set warnings in Python/thread_pthread
+
+- Issue #13992: The trashcan mechanism is now thread-safe. This eliminates
+ sporadic crashes in multi-thread programs when several long deallocator
+ chains ran concurrently and involved subclasses of built-in container
+ types.
+
+- Issue #15801: Make sure mappings passed to '%' formatting are actually
+ subscriptable.
+
+- Issue #15604: Update uses of PyObject_IsTrue() to check for and handle
+ errors correctly. Patch by Serhiy Storchaka.
+
+- Issue #14579: Fix error handling bug in the utf-16 decoder. Patch by
+ Serhiy Storchaka.
+
+- Issue #15368: An issue that caused bytecode generation to be
+ non-deterministic when using randomized hashing (-R) has been fixed.
+
+- Issue #15897: zipimport.c doesn't check return value of fseek().
+ Patch by Felipe Cruz.
+
+- Issue #16369: Global PyTypeObjects not initialized with PyType_Ready(...).
+
+- Issue #15033: Fix the exit status bug when modules are invoked using the -m
+ switch; return the proper failure return value (1). Patch contributed by
+ Jeff Knupp.
+
+- Issue #12268: File readline, readlines and read() methods no longer lose
+ data when an underlying read system call is interrupted. IOError is no
+ longer raised due to a read system call returning EINTR from within these
+ methods.
+
+- Issue #13512: Create ~/.pypirc securely (CVE-2011-4944). Initial patch by
+ Philip Jenvey, tested by Mageia and Debian.
+
+- Issue #7719: Make distutils ignore ``.nfs*`` files instead of choking later
+ on. Initial patch by SilentGhost and Jeff Ramnani.
+
+- Issue #10053: Don't close FDs when FileIO.__init__ fails. Loosely based on
+ the work by Hirokazu Yamamoto.
+
+- Issue #14775: Fix a potential quadratic dict build-up due to the garbage
+ collector repeatedly trying to untrack dicts.
+
+- Issue #14494: Fix __future__.py and its documentation to note that
+ absolute imports are the default behavior in 3.0 instead of 2.7.
+ Patch by Sven Marnach.
+
+- Issue #14761: Fix potential leak on an error case in the import machinery.
+
+- Issue #14699: Fix calling the classmethod descriptor directly.
+
+- Issue #11603 (again): Setting __repr__ to __str__ now raises a RuntimeError
+ when repr() or str() is called on such an object.
+
+- Issue #14658: Fix binding a special method to a builtin implementation of a
+ special method with a different name.
+
+- Issue #14612: Fix jumping around with blocks by setting f_lineno.
+
+- Issue #13889: Check and (if necessary) set FPU control word before calling
+ any of the dtoa.c string <-> float conversion functions, on MSVC builds of
+ Python. This fixes issues when embedding Python in a Delphi app.
+
+- Issue #14505: Fix file descriptor leak when deallocating file objects
+ created with PyFile_FromString().
+
+- Issue #14474: Save and restore exception state in thread.start_new_thread()
+ while writing the error message if the thread exits with an unhandled
+ exception.
+
+- Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch
+ by Suman Saha.
+
+- Issue #14378: Fix compiling ast.ImportFrom nodes with a "__future__" string as
+ the module name that was not interned.
+
+- Issue #14331: Use significantly less stack space when importing modules by
+ allocating path buffers on the heap instead of the stack.
+
+- Issue #14334: Prevent a segfault in type.__getattribute__ when it is not
+ passed strings. Also fix segfaults in the __getattribute__ and __setattr__
+ methods of old-style classes.
+
+- Issue #14161: fix the __repr__ of file objects to escape the file name.
+
+- Issue #1469629: Allow cycles through an object's __dict__ slot to be
+ collected. (For example if ``x.__dict__ is x``).
+
+- Issue #13521: dict.setdefault() now does only one lookup for the given key,
+ making it "atomic" for many purposes. Patch by Filip Gruszczyński.
+
+- Issue #1602133: on Mac OS X a shared library build (``--enable-shared``)
+ now fills the ``os.environ`` variable correctly.
+
+- Issue #10538: When using the "s*" code with PyArg_ParseTuple() to fill a
+ Py_buffer structure with data from an object supporting only the old
+ PyBuffer interface, a reference to the source objects is now properly added
+ to the Py_buffer.obj member.
+
+Library
+-------
+
+- Issue #12718: Fix interaction with winpdb overriding __import__ by setting
+ importer attribute on BaseConfigurator instance.
+
+- Issue #17521: Corrected non-enabling of logger following two calls to
+ fileConfig().
+
+- Issue #17508: Corrected MemoryHandler configuration in dictConfig() where
+ the target handler wasn't configured first.
+
+- Issue #10212: cStringIO and struct.unpack support new buffer objects.
+
+- Issue #12098: multiprocessing on Windows now starts child processes
+ using the same sys.flags as the current process. Initial patch by
+ Sergey Mezentsev.
+
+- Issue #8862: Fixed curses cleanup when getkey is interrupted by a signal.
+
+- Issue #9090: When a socket with a timeout fails with EWOULDBLOCK or EAGAIN,
+ retry the select() loop instead of bailing out. This is because select()
+ can incorrectly report a socket as ready for reading (for example, if it
+ received some data with an invalid checksum).
+
+- Issue #1285086: Get rid of the refcounting hack and speed up urllib.unquote().
+
+- Issue #17368: Fix an off-by-one error in the Python JSON decoder that caused
+ a failure while decoding empty object literals when object_pairs_hook was
+ specified.
+
+- Issue #17278: Fix a crash in heapq.heappush() and heapq.heappop() when
+ the list is being resized concurrently.
+
+- Issue #17018: Make Process.join() retry if os.waitpid() fails with EINTR.
+
+- Issue #14720: sqlite3: Convert datetime microseconds correctly.
+ Patch by Lowe Thiderman.
+
+- Issue #17225: JSON decoder now counts columns in the first line starting
+ with 1, as in other lines.
+
+- Issue #7842: backported fix for py_compile.compile() syntax error handling.
+
+- Issue #13153: Tkinter functions now raise TclError instead of ValueError when
+ a unicode argument contains non-BMP character.
+
+- Issue #9669: Protect re against infinite loops on zero-width matching in
+ non-greedy repeat. Patch by Matthew Barnett.
+
+- Issue #13169: The maximal repetition number in a regular expression has been
+ increased from 65534 to 2147483647 (on 32-bit platform) or 4294967294 (on
+ 64-bit).
+
+- Issue #16743: Fix mmap overflow check on 32 bit Windows.
+
+- Issue #11311: StringIO.readline(0) now returns an empty string, as do all
+ other file-like objects.
+
+- Issue #16800: tempfile.gettempdir() no longer left temporary files when
+ the disk is full. Original patch by Amir Szekely.
+
+- Issue #13555: cPickle now supports files larger than 2 GiB.
+
+- Issue #17052: unittest discovery should use self.testLoader.
+
+- Issue #4591: Uid and gid values larger than 2**31 are supported now.
+
+- Issue #17141: random.vonmisesvariate() no longer hangs for large kappas.
+
+- Issue #17149: Fix random.vonmisesvariate to always return results in
+ the range [0, 2*math.pi].
+
+- Issue #1470548: XMLGenerator now works with UTF-16 and UTF-32 encodings.
+
+- Issue #6975: os.path.realpath() now correctly resolves multiple nested
+ symlinks on POSIX platforms.
+
+- Issue #7358: cStringIO.StringIO now supports writing to and reading from
+ a stream larger than 2 GiB on 64-bit systems.
+
+- Issue #10355: In the SpooledTemporaryFile class, the mode and name properties
+ and the xreadlines method now work for unrolled files. The encoding and
+ newlines properties were removed, as they made no sense and always produced
+ an AttributeError.
+
+- Issue #16686: Fixed a lot of bugs in the audioop module. Fixed crashes in
+ avgpp(), maxpp() and ratecv(). Fixed an integer overflow in add(), bias(),
+ and ratecv(). reverse(), lin2lin() and ratecv() no longer lose precision for
+ 32-bit samples. max() and rms() no longer return a negative result, and
+ various other functions now work correctly with the 32-bit sample -0x80000000.
+
+- Issue #17073: Fix some integer overflows in sqlite3 module.
+
+- Issue #6083: Fix multiple segmentation faults that occurred when
+ PyArg_ParseTuple parses a nested mutating sequence.
+
+- Issue #5289: Fix ctypes.util.find_library on Solaris.
+
+- Issue #17106: Fix a segmentation fault in io.TextIOWrapper when an underlying
+ stream or a decoder produces data of an unexpected type (i.e. when
+ io.TextIOWrapper is initialized with a text stream or uses a bytes-to-bytes
+ codec).
+
+- Issue #13994: Add compatibility alias in distutils.ccompiler for
+ distutils.sysconfig.customize_compiler.
+
+- Issue #15633: httplib.HTTPResponse is now marked closed when the server
+ sends less than the advertised Content-Length.
+
+- Issue #15881: Fixed atexit hook in multiprocessing.
+
+- Issue #14340: Upgrade the embedded expat library to version 2.1.0.
+
+- Issue #11159: SAX parser now supports unicode file names.
+
+- Issue #6972: The zipfile module no longer overwrites files outside of
+ its destination path when extracting malicious zip files.
+
+- Issue #17049: Localized calendar methods now return unicode if a locale
+ includes an encoding and the result string contains a month or weekday name
+ (this was a regression from Python 2.6).
+
+- Issue #4844: ZipFile now raises BadZipfile when opening a ZIP file with an
+ incomplete "End of Central Directory" record. Original patch by Guilherme
+ Polo and Alan McIntyre.
+
+- Issue #15505: `unittest.installHandler` no longer assumes SIGINT handler is
+ set to a callable object.
+
+- Issue #17051: Fix a memory leak in os.path.isdir() on Windows. Patch by
+ Robert Xiao.
+
+- Issue #13454: Fix a crash when deleting an iterator created by itertools.tee()
+ if all other iterators were very advanced before.
+
+- Issue #16992: On Windows in signal.set_wakeup_fd, validate the file
+ descriptor argument.
+
+- Issue #15861: tkinter now correctly works with lists and tuples containing
+ strings with whitespaces, backslashes or unbalanced braces.
+
+- Issue #10527: Use poll() instead of select() for multiprocessing pipes.
+
+- Issue #9720: zipfile now writes correct local headers for files larger than
+ 4 GiB.
+
+- Issue #13899: \A, \Z, and \B now correctly match the A, Z, and B literals
+ when used inside character classes (e.g. '[\A]'). Patch by Matthew Barnett.
+
+- Issue #16398: Optimize deque.rotate() so that it only moves pointers
+ and doesn't touch the underlying data with increfs and decrefs.
+
+- Issue #15109: Fix regression in sqlite3's iterdump method where it would
+ die with an encoding error if the database contained string values
+ containing non-ASCII. (Regression was introduced by fix for 9750).
+
+- Issue #15545: Fix regression in sqlite3's iterdump method where it was
+ failing if the connection used a row factory (such as sqlite3.Row) that
+ produced unsortable objects. (Regression was introduced by fix for 9750).
+
+- Issue #16828: Fix error incorrectly raised by bz2.compress(''). Patch by
+ Martin Packman.
+
+- Issue #9586: Redefine SEM_FAILED on MacOSX to keep compiler happy.
+
+- Issue #10527: make multiprocessing use poll() instead of select() if available.
+
+- Issue #16485: Now file descriptors are closed if file header patching failed
+ on closing an aifc file.
+
+- Issue #12065: connect_ex() on an SSL socket now returns the original errno
+ when the socket's timeout expires (it used to return None).
+
+- Issue #16713: Fix the parsing of tel url with params using urlparse module.
+
+- Issue #16443: Add docstrings to regular expression match objects.
+ Patch by Anton Kasyanov.
+
+- Issue #8853: Allow port to be of type long for socket.getaddrinfo().
+
+- Issue #16597: In buffered and text IO, call close() on the underlying stream
+ if invoking flush() fails.
+
+- Issue #15701: Fix HTTPError info method call to return the headers information.
+
+- Issue #16646: ftplib.FTP.makeport() might lose socket error details.
+ (patch by Serhiy Storchaka)
+
+- Issue #16626: Fix infinite recursion in glob.glob() on Windows when the
+ pattern contains a wildcard in the drive or UNC path. Patch by Serhiy
+ Storchaka.
+
+- Issue #16298: In HTTPResponse.read(), close the socket when there is no
+ Content-Length and the incoming stream is finished. Patch by Eran
+ Rundstein.
+
+- Issue #16248: Disable code execution from the user's home directory by
+ tkinter when the -E flag is passed to Python. Patch by Zachary Ware.
+
+- Issue #16628: Fix a memory leak in ctypes.resize().
+
+- Issue #13614: Fix setup.py register failure with invalid rst in description.
+ Patch by Julien Courteau and Pierre Paul Lefebvre.
+
+- Issue #10182: The re module doesn't truncate indices to 32 bits anymore.
+ Patch by Serhiy Storchaka.
+
+- Issue #16573: In 2to3, treat enumerate() like a consuming call, so superfluous
+ list() calls aren't added to filter(), map(), and zip() which are directly
+ passed enumerate().
+
+- Issue #1160: Fix compiling large regular expressions on UCS2 builds.
+ Patch by Serhiy Storchaka.
+
+- Issue #14313: zipfile now raises NotImplementedError when the compression
+ type is unknown.
+
+- Issue #16408: Fix file descriptors not being closed in error conditions
+ in the zipfile module. Patch by Serhiy Storchaka.
+
+- Issue #16327: The subprocess module no longer leaks file descriptors
+ used for stdin/stdout/stderr pipes to the child when fork() fails.
+
+- Issue #14396: Handle the odd rare case of waitpid returning 0 when not
+ expected in subprocess.Popen.wait().
+
+- Issue #16411: Fix a bug where zlib.decompressobj().flush() might try to access
+ previously-freed memory. Patch by Serhiy Storchaka.
+
+- Issue #16350: zlib.decompressobj().decompress() now accumulates data from
+ successive calls after EOF in unused_data, instead of only saving the argument
+ to the last call. decompressobj().flush() now correctly sets unused_data and
+ unconsumed_tail. A bug in the handling of MemoryError when setting the
+ unconsumed_tail attribute has also been fixed. Patch by Serhiy Storchaka.
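+
+ A short sketch of the corrected accumulation (the data is arbitrary)::
+
+     import zlib
+     d = zlib.decompressobj()
+     d.decompress(zlib.compress('payload'))   # reaches end of compressed stream
+     d.decompress('extra ')                   # data after EOF...
+     d.decompress('bytes')                    # ...from successive calls
+     assert d.unused_data == 'extra bytes'    # accumulated, not overwritten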
+
+- Issue #12759: sre_parse now raises a proper error when the name of the group
+ is missing. Initial patch by Serhiy Storchaka.
+
+- Issue #16152: fix tokenize to ignore whitespace at the end of the code when
+ no newline is found. Patch by Ned Batchelder.
+
+- Issue #16230: Fix a crash in select.select() when one of the lists changes
+ size while being iterated on. Patch by Serhiy Storchaka.
+
+- Issue #16228: Fix a crash in the json module where a list changes size
+ while it is being encoded. Patch by Serhiy Storchaka.
+
+- Issue #14897: Enhance error messages of struct.pack and
+ struct.pack_into. Patch by Matti Mäki.
+
+- Issue #12890: cgitb no longer prints spurious <p> tags in text
+ mode when the logdir option is specified.
+
+- Issue #14398: Fix size truncation and overflow bugs in the bz2 module.
+
+- Issue #5148: Ignore 'U' in mode given to gzip.open() and gzip.GzipFile().
+
+- Issue #16220: wsgiref now always calls close() on an iterable response.
+ Patch by Brent Tubbs.
+
+- Issue #16461: Wave library should be able to deal with 4GB wav files,
+ and sample rate of 44100 Hz.
+
+- Issue #16176: Properly identify Windows 8 via platform.platform()
+
+- Issue #15756: subprocess.poll() now properly handles errno.ECHILD to
+ return a returncode of 0 when the child has already exited or cannot
+ be waited on.
+
+- Issue #12376: Pass on parameters in TextTestResult.__init__ super call
+
+- Issue #15222: Insert blank line after each message in mbox mailboxes
+
+- Issue #16013: Fix CSV Reader parsing issue with ending quote characters.
+ Patch by Serhiy Storchaka.
+
+- Issue #15421: fix an OverflowError in Calendar.itermonthdates() after
+ datetime.MAXYEAR. Patch by Cédric Krier.
+
+- Issue #15970: xml.etree.ElementTree now serializes correctly the empty HTML
+ elements 'meta' and 'param'.
+
+- Issue #15676: "mmap" now checks for empty files before doing the
+ offset check. Patch by Steven Willis.
+
+- Issue #15340: Fix importing the random module when /dev/urandom cannot
+ be opened. This was a regression caused by the hash randomization patch.
+
+- Issue #15841: The readable(), writable() and seekable() methods of
+ io.BytesIO and io.StringIO objects now raise ValueError when the object has
+ been closed. Patch by Alessandro Moura.
+
+- Issue #16112: platform.architecture now correctly escapes its argument to
+ /usr/bin/file. Patch by David Benjamin.
+
+- Issue #12776, #11839: Call the argparse type function (specified by
+ add_argument) only once. Previously, the type function was called twice when
+ a default was specified and the argument was given as well. This was
+ especially problematic for the FileType type, as a default file would always
+ be opened, even if a file argument was specified on the command line.
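
An illustrative sketch of the behaviour described above (the file names are
hypothetical; the temporary file exists only so the sketch runs):

    import argparse, os, tempfile

    # hypothetical input file, created only so FileType('r') can open it
    fd, given_path = tempfile.mkstemp()
    os.close(fd)

    parser = argparse.ArgumentParser()
    parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
                        default='missing-default.txt')
    # The type function now runs once, and only on the value actually used,
    # so the nonexistent default is never opened when an argument is given.
    args = parser.parse_args([given_path])
    args.infile.close()
    os.remove(given_path)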
+
+- Issue #15906: Fix a regression in argparse caused by the preceding change,
+ when action='append', type='str' and default=[].
+
+- Issue #13370: Ensure that ctypes works on Mac OS X when Python is
+ compiled using the clang compiler
+
+- Issue #15544: Fix Decimal.__float__ to work with payload-carrying NaNs.
+
+- Issue #15199: Fix JavaScript's default MIME type to application/javascript.
+ Patch by Bohuslav Kabrda.
+
+- Issue #15477: In cmath and math modules, add workaround for platforms whose
+ system-supplied log1p function doesn't respect signs of zeros.
+
+- Issue #11062: Fix adding a message from file to Babyl mailbox.
+
+- Issue #15646: Prevent equivalent of a fork bomb when using
+ multiprocessing on Windows without the "if __name__ == '__main__'"
+ idiom.
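
A minimal sketch of the guard this entry refers to; on Windows every worker
process re-imports the main module, so process creation has to stay inside
the guard:

    import multiprocessing

    def square(x):
        return x * x

    if __name__ == '__main__':
        pool = multiprocessing.Pool(processes=2)
        print(pool.map(square, range(5)))
        pool.close()
        pool.join()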
+
+- Issue #15567: Fix NameError when running threading._test
+
+- Issue #15424: Add a __sizeof__ implementation for array objects.
+ Patch by Ludwig Hähne.
+
+- Issue #15538: Fix compilation of the getnameinfo() / getaddrinfo()
+ emulation code. Patch by Philipp Hagemeister.
+
+- Issue #12288: Consider '0' and '0.0' as valid initialvalue
+ for tkinter SimpleDialog.
+
+- Issue #15489: Add a __sizeof__ implementation for BytesIO objects.
+ Patch by Serhiy Storchaka.
+
+- Issue #15469: Add a __sizeof__ implementation for deque objects.
+ Patch by Serhiy Storchaka.
+
+- Issue #15487: Add a __sizeof__ implementation for buffered I/O objects.
+ Patch by Serhiy Storchaka.
+
+- Issue #15512: Add a __sizeof__ implementation for parser.
+ Patch by Serhiy Storchaka.
+
+- Issue #15402: An issue in the struct module that caused sys.getsizeof to
+ return incorrect results for struct.Struct instances has been fixed.
+ Initial patch by Serhiy Storchaka.
+
+- Issue #15232: when mangle_from is True, email.Generator now correctly mangles
+ lines that start with 'From ' that occur in a MIME preamble or epilog.
+
+- Issue #13922: argparse no longer incorrectly strips '--'s that appear
+ after the first one.
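
An illustrative sketch; per the entry, only the first '--' is consumed as the
positional separator:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('values', nargs='*')
    ns = parser.parse_args(['--', 'a', '--', 'b'])
    # per the entry, the second '--' is no longer stripped from the result
    print(ns.values)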
+
+- Issue #12353: argparse now correctly handles null argument values.
+
+- Issue #6493: An issue in ctypes on Windows that caused structure bitfields
+ of type ctypes.c_uint32 and width 32 to incorrectly be set has been fixed.
+
+- Issue #14635: telnetlib will use poll() rather than select() when possible
+ to avoid failing due to the select() file descriptor limit.
+
+- Issue #15247: FileIO now raises an error when given a file descriptor
+ pointing to a directory.
+
+- Issue #14591: Fix bug in Random.jumpahead that could produce an invalid
+ Mersenne Twister state on 64-bit machines.
+
+- Issue #5346: Preserve permissions of mbox, MMDF and Babyl mailbox
+ files on flush().
+
+- Issue #15219: Fix a reference leak when hashlib.new() is called with
+ invalid parameters.
+
+- Issue #9559: If messages were only added, flush() no longer creates a new
+ file and renames it over the old one for mbox, MMDF, or Babyl mailboxes.
+
+- Issue #14653: email.utils.mktime_tz() no longer relies on the system
+ mktime() when a timezone offset is supplied.
+
+- Issue #6056: Make multiprocessing use setblocking(True) on the
+ sockets it uses. Original patch by J Derek Wilson.
+
+- Issue #15101: Make pool finalizer avoid joining current thread.
+
+- Issue #15054: A bug in tokenize.tokenize that caused string literals
+ with 'b' and 'br' prefixes to be incorrectly tokenized has been fixed.
+ Patch by Serhiy Storchaka.
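
A minimal sketch showing a br'...' literal tokenized as a single STRING token:

    import tokenize
    from StringIO import StringIO

    source = "x = br'abc'\n"
    for tok_type, text, _, _, _ in tokenize.generate_tokens(StringIO(source).readline):
        if tok_type == tokenize.STRING:
            print(text)   # with the fix this prints the whole literal: br'abc'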
+
+- Issue #15036: Mailbox no longer throws an error if a flush is done
+ between operations when removing or changing multiple items in mbox,
+ MMDF, or Babyl mailboxes.
+
+- Issue #10133: Make multiprocessing deallocate buffer if socket read
+ fails. Patch by Hallvard B Furuseth.
+
+- Issue #13854: Make multiprocessing properly handle non-integer
+ non-string argument to SystemExit.
+
+- Issue #12157: Make pool.map() handle empty iterables correctly. Initial
+ patch by mouad.
+
+- Issue #14036: Add an additional check to validate that the port in urlparse
+ is not in an illegal range, returning None otherwise.
+
+- Issue #14888: Fix misbehaviour of the _md5 module when called on data
+ larger than 2**32 bytes.
+
+- Issue #15908: Fix misbehaviour of the sha1 module when called on data
+ larger than 2**32 bytes.
+
+- Issue #15910: Fix misbehaviour of _md5 and sha1 modules when "updating"
+ on data larger than 2**32 bytes.
+
+- Issue #14875: Use float('inf') instead of float('1e66666') in the json module.
+
+- Issue #14572: Prevent build failures with pre-3.5.0 versions of
+ sqlite3, such as was shipped with Centos 5 and Mac OS X 10.4.
+
+- Issue #14426: Correct the Date format in Expires attribute of Set-Cookie
+ Header in Cookie.py.
+
+- Issue #14721: Send the proper header, Content-Length: 0, when the body is an
+ empty string ''. Initial patch contributed by Arve Knudsen.
+
+- Issue #14072: Fix parsing of 'tel' URIs in urlparse by making the check for
+ ports stricter.
+
+- Issue #9374: Generic parsing of the query and fragment portions of a URL for
+ any scheme, as supported by both RFC 3986 and RFC 2396.
+
+- Issue #14798: Fix the functions in pyclbr to raise an ImportError
+ when the first part of a dotted name is not a package. Patch by
+ Xavier de Gaye.
+
+- Issue #14832: fixed the order of the argument references in the error
+ message produced by unittest's assertItemsEqual.
+
+- Issue #14829: Fix bisect issues under 64-bit Windows.
+
+- Issue #14777: tkinter may return undecoded UTF-8 bytes as a string when
+ accessing the Tk clipboard. Modify clipboard_get() to first request type
+ UTF8_STRING when no specific type is requested in an X11 windowing
+ environment, falling back to the current default type STRING if that fails.
+ Original patch by Thomas Kluyver.
+
+- Issue #12541: Be lenient with quotes around the Realm field with HTTP Basic
+ Authentication in urllib2.
+
+- Issue #14662: Prevent shutil failures on OS X when destination does not
+ support chflag operations. Patch by Hynek Schlawack.
+
+- Issue #14157: Fix time.strptime failing without a year on February 29th.
+ Patch by Hynek Schlawack.
+
+- Issue #14768: Fix os.path.expanduser('~/a') not working correctly when HOME is '/'.
+
+- Issue #13183: Fix pdb skipping frames after hitting a breakpoint and running
+ step. Patch by Xavier de Gaye.
+
+- Issue #14664: It is now possible to use @unittest.skip{If,Unless} on a
+ test class that doesn't inherit from TestCase (i.e. a mixin).
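
An illustrative sketch of applying the decorator to a plain mixin class
(the class names are made up for the example):

    import sys
    import unittest

    @unittest.skipIf(sys.platform == 'win32', 'POSIX-only checks')
    class PosixOnlyMixin(object):          # not a TestCase subclass
        def test_paths(self):
            self.assertTrue(True)

    class PosixTests(PosixOnlyMixin, unittest.TestCase):
        pass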
+
+- Issue #14160: TarFile.extractfile() failed to resolve symbolic links when
+ the links were not located in an archive subdirectory.
+
+- Issue #14638: pydoc now treats non-string __name__ values as if they
+ were missing, instead of raising an error.
+
+- Issue #13684: Fix an httplib tunnel issue causing infinite loops for certain
+ sites which send EOF without a trailing \r\n.
+
+- Issue #14308: Fix an exception when a "dummy" thread is in the threading
+ module's active list after a fork().
+
+- Issue #14538: HTMLParser can now correctly parse start tags that contain
+ a bare '/'.
+
+- Issue #14452: SysLogHandler no longer inserts a UTF-8 BOM into the message.
+
+- Issue #13496: Fix potential overflow in bisect.bisect algorithm when applied
+ to a collection of size > sys.maxsize / 2.
+
+- Issue #14399: zipfile now recognizes that the archive has been modified even
+ if only the comment is changed. As a consequence of this fix, ZipFile is now
+ a new style class.
+
+- Issue #7978: SocketServer now restarts the select() call when EINTR is
+ returned. This avoids crashing the server loop when a signal is received.
+ Patch by Jerzy Kozera.
+
+- Issue #10340: asyncore: properly handle EINVAL in the dispatcher constructor
+ on OS X; avoid calling handle_connect for a disconnected socket that
+ was not meant to connect.
+
+- Issue #12757: Fix the skipping of doctests when python is run with -OO so
+ that it works in unittest's verbose mode as well as non-verbose mode.
+
+- Issue #13694: Fix asyncore.dispatcher so that an asynchronous connect sets
+ the addr attribute.
+
+- Issue #10484: Fix the CGIHTTPServer's PATH_INFO handling problem.
+
+- Issue #11199: Fix urllib hanging on particular FTP URLs.
+
+- Issue #14252: Fix subprocess.Popen.terminate() to not raise an error under
+ Windows when the child process has already exited.
+
+- Issue #14195: An issue that caused weakref.WeakSet instances to incorrectly
+ return True for a WeakSet instance 'a' in both 'a < a' and 'a > a' has been
+ fixed.
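
A short illustration of the corrected comparison behaviour:

    import weakref

    class Node(object):
        pass

    items = [Node() for _ in range(3)]
    s = weakref.WeakSet(items)
    # a set is never a proper subset or superset of itself
    assert not (s < s) and not (s > s)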
+
+- Issue #14159: Fix the len() of weak sets to return a better approximation
+ when some objects are dead or dying. Moreover, the implementation is now
+ O(1) rather than O(n).
+
+- Issue #2945: Make the distutils upload command aware of bdist_rpm products.
+
- Issue #6884: Fix long-standing bugs with MANIFEST.in parsing in distutils
on Windows.
+- Issue #16441: Avoid excessive memory usage working with large gzip
+ files using the gzip module.
+
+- Issue #15782: Prevent compile errors of OS X Carbon modules _Fm, _Qd, and
+ _Qdoffs when compiling with an SDK of 10.7 or later. The OS X APIs they
+ wrap have long been deprecated and have now been removed with 10.7.
+ These modules were already empty for 64-bit builds and have been removed
+ in Python 3.
+
+Extension Modules
+-----------------
+
+- Issue #17477: Update the bsddb module to pybsddb 5.3.0, supporting
+ db-5.x, and dropping support for db-4.1 and db-4.2.
+
+- Issue #17192: Update the ctypes module's libffi to v3.0.13. This
+ specifically addresses a stack misalignment issue on x86 and issues on
+ some more recent platforms.
+
+- Issue #12268: The io module file object write methods no longer abort early
+ when a write system call is interrupted (EINTR).
+
+- Fix the leak of a dict in the time module when used in an embedded
+ interpreter that is repeatedly initialized and shut down.
+
+- Issue #12268: File readline, readlines and read or readall methods
+ no longer lose data when an underlying read system call is interrupted
+ within an io module object. IOError is no longer raised due to a read
+ system call returning EINTR from within these methods.
+
+- Issue #16012: Fix a regression in pyexpat. The parser's UseForeignDTD()
+ method doesn't require an argument again.
+
+- Issue #13590: OS X Xcode 4 - improve support for universal extension modules.
+ In particular, fix extension module build failures when trying to use
+ 32-bit-only installer Pythons on systems with Xcode 4 (currently
+ OS X 10.8, 10.7, and optionally 10.6).
+ * Backport 3.3.0 fixes to 2.7 branch (for release in 2.7.4)
+ * Since Xcode 4 removes ppc support, extension module builds now
+ check for ppc compiler support and by default remove ppc and
+ ppc64 archs when they are not available.
+ * Extension module builds now revert to using system installed
+ headers and libs (/usr and /System/Library) if the SDK used
+ to build the interpreter is not installed or has moved.
+ * Try to avoid building extension modules with deprecated
+ and problematic Apple llvm-gcc compiler. If original compiler
+ is not available, use clang instead by default.
+
+IDLE
+----
+
+- IDLE was displaying spurious SystemExit tracebacks when running scripts
+ that terminated by raising SystemExit (e.g. unittest and turtledemo).
+
+- Issue #9290: In IDLE the sys.std* streams now implement the io.TextIOBase
+ interface and support all mandatory methods and properties.
+
+- Issue #16829: IDLE printing no longer fails if there are spaces or other
+ special characters in the file path.
+
+- Issue #16819: IDLE method completion now correctly works for unicode literals.
+
+- Issue #16504: IDLE now catches SyntaxErrors raised by tokenizer. Patch by
+ Roger Serwy.
+
+- Issue #1207589: Add Cut/Copy/Paste items to IDLE's right-click context menu.
+ Patch by Todd Rovito.
+
+- Issue #13052: Fix IDLE crashing when the replace string in the Search/Replace
+ dialog ended with '\'. Patch by Roger Serwy.
+
+- Issue #9803: Don't close IDLE on saving if breakpoint is open.
+ Patch by Roger Serwy.
+
+- Issue #14958: Change IDLE syntax highlighting to recognize all string and byte
+ literals currently supported in Python 2.7.
+
+- Issue #14962: Update text coloring in IDLE shell window after changing
+ options. Patch by Roger Serwy.
+
+- Issue #10997: Prevent a duplicate entry in IDLE's "Recent Files" menu.
+
+- Issue #12510: Attempting to get invalid tooltip no longer closes IDLE.
+ Original patch by Roger Serwy.
+
+- Issue #10365: File open dialog now works instead of crashing
+ even when parent window is closed. Patch by Roger Serwy.
+
+- Issue #14876: Use user-selected font for highlight configuration.
+ Patch by Roger Serwy.
+
+- Issue #14409: IDLE now properly executes commands in the Shell window
+ when it cannot read the normal config files on startup and
+ has to use the built-in default key bindings.
+ There was previously a bug in one of the defaults.
+
+- Issue #3573: Fix IDLE hanging when passed invalid command line args
+ (directories instead of files). (Patch by Guilherme Polo)
+
+- Issue #5219: Prevent event handler cascade in IDLE.
+
+Tests
+-----
+
+- Issue #16702: test_urllib2_localnet tests now correctly ignore proxies for
+ localhost tests.
+
+- Issue #13447: Add a test file to host regression tests for bugs in the
+ scripts found in the Tools directory.
+
+- Issue #11420: Make the test suite pass with -B/PYTHONDONTWRITEBYTECODE set.
+ Initial patch by Thomas Wouters.
+
+- Issue #17299: Add test coverage for cPickle with file objects and general IO
+ objects. Original patch by Aman Shah.
+
+- Issue #11963: remove human verification from test_parser and test_subprocess.
+
+- Issue #17249: convert a test in test_capi to use unittest and reap threads.
+
+- We now run both test_email.py and test_email_renamed.py when running the
+ test_email regression test. test_email_renamed contains some tests that
+ test_email does not.
+
+- Issue #17041: Fix testing when Python is configured with the
+ --without-doc-strings option.
+
+- Issue #15539: Added regression tests for Tools/scripts/pindent.py.
+
+- Issue #15324: Fix regrtest parsing of --fromfile and --randomize options.
+
+- Issue #16618: Add more regression tests for glob.
+ Patch by Serhiy Storchaka.
+
+- Issue #16664: Add regression tests for glob's behaviour concerning entries
+ starting with a ".". Patch by Sebastian Kreft.
+
+- Issue #15747: ZFS always returns EOPNOTSUPP when attempting to set the
+ UF_IMMUTABLE flag (via either chflags or lchflags); refactor affected
+ tests in test_posix.py to account for this.
+
+- Issue #16549: Add tests for json.tool. Initial patch by Berker Peksag
+ and Serhiy Storchaka.
+
+- Issue #16559: Add more tests for the json module, including some from the
+ official test suite at json.org. Patch by Serhiy Storchaka.
+
+- Issue #16274: Fix test_asyncore on Solaris. Patch by Giampaolo Rodola'.
+
+- Issue #15040: Close files in mailbox tests for PyPy compatibility.
+ Original patch by Matti Picus.
+
+- Issue #15802: Fix test logic in TestMaildir.test_create_tmp. Patch
+ by Serhiy Storchaka.
+
+- Issue #15765: Extend a previous fix to Solaris and OpenBSD for quirky
+ getcwd() behaviour (issue #9185) to NetBSD as well.
+
+- Issue #15615: Add some tests for the json module's handling of invalid
+ input data. Patch by Kushal Das.
+
+- Issue #15496: Add directory removal helpers for tests on Windows.
+ Patch by Jeremy Kloth.
+
+- Issue #15043: test_gdb is now skipped entirely if gdb security settings
+ block loading of the gdb hooks.
+
+- Issue #14589: Update certificate chain for sha256.tbs-internet.com, fixing
+ a test failure in test_ssl.
+
+- Issue #16698: Skip posix test_getgroups when built with OS X
+ deployment target prior to 10.6.
+
+- Issue #17111: Prevent test_surrogates (test_fileio) failure on OS X 10.4.
+
+Build
+-----
+
+- Issue #17425: Build against openssl 0.9.8y on Windows.
+
+- Issue #16004: Add `make touch`.
+
+- Issue #5033: Fix building of the sqlite3 extension module when the
+ SQLite library version has "beta" in it. Patch by Andreas Pelme.
+
+- Issue #17228: Fix building without pymalloc.
+
+- Issue #17086: Backport the patches from the 3.3 branch to cross-build
+ the package.
+
+- Issue #3754: fix typo in pthread AC_CACHE_VAL.
+
+- Issue #17029: Let h2py search the multiarch system include directory.
+
+- Issue #16953: Fix socket module compilation on platforms with
+ HAVE_BROKEN_POLL. Patch by Jeffrey Armstrong.
+
+- Issue #16836: Enable IPv6 support even if IPv6 is disabled on the build host.
+
+- Issue #15923: fix a mistake in asdl_c.py that resulted in a TypeError after
+ 2801bf875a24 (see #15801).
+
+- Issue #11715: Fix multiarch detection without having Debian development
+ tools (dpkg-dev) installed.
+
+- Issue #15819: Make sure we can build Python out-of-tree from a readonly
+ source directory. (Somewhat related to Issue #9860.)
+
+- Issue #15822: Ensure 2to3 grammar pickles are properly installed.
+
+- Issue #15560: Fix building _sqlite3 extension on OS X with an SDK.
+
+- Issue #8847: Disable COMDAT folding in Windows PGO builds.
+
+- Issue #14018: Fix OS X Tcl/Tk framework checking when using OS X SDKs.
+
+- Issue #16256: OS X installer now sets correct permissions for doc directory.
+
+- Issue #8767: Restore building with --disable-unicode.
+ Patch by Stefano Taschini.
+
+- Build against bzip2 1.0.6 and openssl 0.9.8x on Windows.
+
+- Issue #14557: Fix extensions build on HP-UX. Patch by Adi Roiban.
+
+- Issue #14437: Fix building the _io module under Cygwin.
+
+- Issue #15587: Enable Tk high-resolution text rendering on Macs with
+ Retina displays. Applies to Tkinter apps, such as IDLE, on OS X
+ framework builds linked with Cocoa Tk 8.5.
+
+- Issue #17161: make install now also installs a python2 and python man page.
+
+- Issue #16848: python-config now returns proper --ldflags values for OS X
+ framework builds.
+
+Tools/Demos
+-----------
+
+- Issue #17156: pygettext.py now correctly escapes non-ascii characters.
+
+- Issue #15539: Fix a number of bugs in Tools/scripts/pindent.py. Now
+ pindent.py works with a "with" statement. pindent.py no longer produces
+ improper indentation. pindent.py now works with continued lines broken after
+ "class" or "def" keywords and with continuations at the start of line.
+
+- Issue #16476: Fix json.tool to avoid including trailing whitespace.
+
+- Issue #13301: Use ast.literal_eval() instead of eval() in Tools/i18n/msgfmt.py.
+ Patch by Serhiy Storchaka.
+
+Documentation
+-------------
+
+- Issue #15041: Update "see also" list in tkinter documentation.
+
+- Issue #17412: update 2.7 Doc/make.bat to also use sphinx-1.0.7.
+
+- Issue #17047: remove doubled words in docs and docstrings
+ reported by Serhiy Storchaka and Matthew Barnett.
+
+- Issue #16406: combine the pages for uploading and registering to PyPI.
+
+- Issue #16403: Document how distutils uses the maintainer field in
+ PKG-INFO. Patch by Jyrki Pulliainen.
+
+- Issue #16695: Document how glob handles filenames starting with a
+ dot. Initial patch by Jyrki Pulliainen.
+
+- Issue #8890: Stop advertising an insecure practice by replacing uses
+ of the /tmp directory with better alternatives in the documentation.
+ Patch by Geoff Wilson.
+
+- Issue #17203: add long option names to unittest discovery docs.
+
+- Issue #13094: add "Why do lambdas defined in a loop with different values
+ all return the same result?" programming FAQ.
+
+- Issue #14901: Update portions of the Windows FAQ.
+ Patch by Ashish Nitin Patil.
+
+- Issue #15990: Improve argument/parameter documentation.
+
+- Issue #16400: Update the description of which versions of a given package
+ PyPI displays.
+
+- Issue #15677: Document that zlib and gzip accept a compression level of 0 to
+ mean 'no compression'. Patch by Brian Brazil.
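
A short illustration of the level-0 behaviour being documented:

    import zlib

    raw = b'x' * 1000
    stored = zlib.compress(raw, 0)      # level 0: data is stored, not compressed
    assert zlib.decompress(stored) == raw
    assert len(stored) > len(raw)       # only the zlib framing is added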
+
+- Issue #8040: added a version switcher to the documentation. Patch by
+ Yury Selivanov.
+
+- Issue #16115: Improve subprocess.Popen() documentation around args, shell,
+ and executable arguments.
+
+- Issue #15979: Improve timeit documentation.
+
+- Issue #16036: Improve documentation of built-in int()'s signature and
+ arguments.
+
+- Issue #15935: Clarification of argparse docs, re: add_argument() type and
+ default arguments. Patch contributed by Chris Jerdonek.
+
+- Issue #13769: Document the effect of ensure_ascii on the return type
+ of JSON decoding functions.
+
+- Issue #14880: Fix kwargs notation in csv.reader, .writer & .register_dialect.
+ Patch by Chris Rebert.
+
+- Issue #14674: Add a discussion of the json module's standard compliance.
+ Patch by Chris Rebert.
+
+- Issue #15630: Add an example for "continue" stmt in the tutorial. Patch by
+ Daniel Ellis.
+
+- Issue #13557: Clarify effect of giving two different namespaces to exec or
+ execfile().
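
A small illustration of the distinction being clarified, using the Python 2
statement form of exec:

    g, l = {}, {}
    exec "x = 1" in g, l
    # the assignment lands in the locals dict, not the globals dict
    assert 'x' in l and 'x' not in g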
+
+- Issue #14034: added the argparse tutorial.
+
+- Issue #15250: Document that filecmp.dircmp compares files shallowly. Patch
+ contributed by Chris Jerdonek.
+
+- Issue #15116: Remove references to appscript as it is no longer being
+ supported.
+
What's New in Python 2.7.3 release candidate 2?
===============================================
@@ -184,21 +2451,8 @@ Library
- Issue #10811: Fix recursive usage of cursors. Instead of crashing,
raise a ProgrammingError now.
-- Issue #10881: Fix test_site failures with OS X framework builds.
-
-- Issue #964437 Make IDLE help window non-modal.
- Patch by Guilherme Polo and Roger Serwy.
-
-- Issue #13933: IDLE auto-complete did not work with some imported
- module, like hashlib. (Patch by Roger Serwy)
-
-- Issue #13901: Prevent test_distutils failures on OS X with --enable-shared.
-
- Issue #13676: Handle strings with embedded zeros correctly in sqlite3.
-- Issue #13506: Add '' to path for IDLE Shell when started and restarted with Restart Shell.
- Original patches by Marco Scataglini and Roger Serwy.
-
- Issue #13806: The size check in audioop decompression functions was too
strict and could reject valid compressed data. Patch by Oleg Plakhotnyuk.
@@ -237,10 +2491,6 @@ Library
- Issue #8035: urllib: Fix a bug where the client could remain stuck after a
redirection or an error.
-- Issue #4625: If IDLE cannot write to its recent file or breakpoint
- files, display a message popup and continue rather than crash.
- (original patch by Roger Serwy)
-
- tarfile.py: Correctly detect bzip2 compressed streams with blocksizes
other than 900k.
@@ -270,9 +2520,6 @@ Library
node when it is the only child of an element. Initial patch by Dan
Kenigsberg.
-- Issue #8793: Prevent IDLE crash when given strings with invalid hex escape
- sequences.
-
- Issues #1745761, #755670, #13357, #12629, #1200313: HTMLParser now correctly
handles non-valid attributes, including adjacent and unquoted attributes.
@@ -295,9 +2542,6 @@ Library
- Issue #10817: Fix urlretrieve function to raise ContentTooShortError even
when reporthook is None. Patch by Jyrki Pulliainen.
-- Issue #13296: Fix IDLE to clear compile __future__ flags on shell restart.
- (Patch by Roger Serwy)
-
- Issue #7334: close source files on ElementTree.parse and iterparse.
- Issue #13232: logging: Improved logging of exceptions in the presence of
@@ -451,7 +2695,7 @@ Library
check or set the MACOSX_DEPLOYMENT_TARGET environment variable for the
interpreter process. This could cause failures in non-Distutils subprocesses
and was unreliable since tests or user programs could modify the interpreter
- environment after Distutils set it. Instead, have Distutils set the the
+ environment after Distutils set it. Instead, have Distutils set the
deployment target only in the environment of each build subprocess. It is
still possible to globally override the default by setting
MACOSX_DEPLOYMENT_TARGET before launching the interpreter; its value must be
@@ -498,6 +2742,10 @@ Library
Extension Modules
-----------------
+- Issue #9041: An issue in ctypes.c_longdouble, ctypes.c_double, and
+ ctypes.c_float that caused an incorrect exception to be returned in the
+ case of overflow has been fixed.
+
- bsddb module: Erratic behaviour of "DBEnv->rep_elect()" because a typo.
Possible crash.
@@ -538,6 +2786,28 @@ Extension Modules
signature. Without this, architectures where sizeof void* != sizeof int are
broken. Patch given by Hallvard B Furuseth.
+IDLE
+----
+
+- Issue #964437 Make IDLE help window non-modal.
+ Patch by Guilherme Polo and Roger Serwy.
+
+- Issue #13933: IDLE auto-complete did not work with some imported
+ module, like hashlib. (Patch by Roger Serwy)
+
+- Issue #13506: Add '' to path for IDLE Shell when started and restarted with Restart Shell.
+ Original patches by Marco Scataglini and Roger Serwy.
+
+- Issue #4625: If IDLE cannot write to its recent file or breakpoint
+ files, display a message popup and continue rather than crash.
+ (original patch by Roger Serwy)
+
+- Issue #8793: Prevent IDLE crash when given strings with invalid hex escape
+ sequences.
+
+- Issue #13296: Fix IDLE to clear compile __future__ flags on shell restart.
+ (Patch by Roger Serwy)
+
Build
-----
@@ -572,9 +2842,16 @@ Tools/Demos
Tests
-----
+- Issue #15467: Move helpers for __sizeof__ tests into test_support.
+ Patch by Serhiy Storchaka.
+
- Issue #11689: Fix a variable scoping error in an sqlite3 test.
Initial patch by Torsten Landschoff.
+- Issue #10881: Fix test_site failures with OS X framework builds.
+
+- Issue #13901: Prevent test_distutils failures on OS X with --enable-shared.
+
- Issue #13304: Skip test case if user site-packages disabled (-s or
PYTHONNOUSERSITE). (Patch by Carl Meyer)
@@ -624,7 +2901,8 @@ Tests
Documentation
-------------
-- Issue #13995: Fix errors in sqlite3's Cursor.rowcount documentation
+- Issues #13491 and #13995: Fix many errors in sqlite3 documentation.
+ Initial patch for #13491 by Johannes Vogel.
- Issue #13402: Document absoluteness of sys.executable.
@@ -731,7 +3009,7 @@ Core and Builtins
rather than the Py_IsInitialized flag, avoiding a Fatal Python
error in certain circumstances when an import is done in __del__.
-- issue #11828: startswith and endswith don't accept None as slice index.
+- Issue #11828: startswith and endswith don't accept None as slice index.
Patch by Torsten Becker.
- Issue #10674: Remove unused 'dictmaker' rule from grammar.
@@ -746,9 +3024,6 @@ Core and Builtins
Library
-------
-- Issue #12590: IDLE editor window now always displays the first line
- when opening a long file. With Tk 8.5, the first line was hidden.
-
- Issue #12161: Cause StringIO.getvalue() to raise a ValueError when used on a
closed StringIO instance.
@@ -770,9 +3045,6 @@ Library
- Issue #12124: zipimport doesn't keep a reference to zlib.decompress() anymore
to be able to unload the module.
-- Issue #11088: don't crash when using F5 to run a script in IDLE on MacOSX
- with Tk 8.5.
-
- Issue #10154, #10090: change the normalization of UTF-8 to "UTF-8" instead
of "UTF8" in the locale module as the latter is not supported MacOSX and OpenBSD.
@@ -792,12 +3064,10 @@ Library
- Issue #12012: ssl.PROTOCOL_SSLv2 becomes optional.
-- Issue #11164: Remove obsolete allnodes test from minidom test.
-
- Issue #11927: SMTP_SSL now uses port 465 by default as documented. Patch
by Kasun Herath.
-- Issue 11999: fixed sporadic sync failure mailbox.Maildir due to its trying to
+- Issue #11999: fixed sporadic sync failure mailbox.Maildir due to its trying to
detect mtime changes by comparing to the system clock instead of to the
previous value of the mtime.
@@ -945,17 +3215,6 @@ Library
- Issue #8275: Fix passing of callback arguments with ctypes under Win64.
Patch by Stan Mihai.
-- Issue #10940: Workaround an IDLE hang on Mac OS X 10.6 when using the
- menu accelerators for Open Module, Go to Line, and New Indent Width.
- The accelerators still work but no longer appear in the menu items.
-
-- Issue #10907: Warn OS X 10.6 IDLE users to use ActiveState Tcl/Tk 8.5, rather
- than the currently problematic Apple-supplied one, when running with the
- 64-/32-bit installer variant.
-
-- Issue #11052: Correct IDLE menu accelerators on Mac OS X for Save
- commands.
-
- Issue #10949: Improved robustness of rotating file handlers.
- Issue #10955: Fix a potential crash when trying to mmap() a file past its
@@ -964,9 +3223,6 @@ Library
- Issue #10898: Allow compiling the posix module when the C library defines
a symbol named FSTAT.
-- Issue #6075: IDLE on Mac OS X now works with both Carbon AquaTk and
- Cocoa AquaTk.
-
- Issue #10916: mmap should not segfault when a file is mapped using 0 as
length and a non-zero offset, and an attempt to read past the end of file
is made (IndexError is raised instead). Patch by Ross Lagerwall.
@@ -1025,8 +3281,6 @@ Library
- Issue #6791: Limit header line length (to 65535 bytes) in http.client,
to avoid denial of services from the other party.
-- Issue #10404: Use ctl-button-1 on OSX for the context menu in Idle.
-
- Issue #9907: Fix tab handling on OSX when using editline by calling
rl_initialize first, then setting our custom defaults, then reading .editrc.
@@ -1044,11 +3298,6 @@ Library
- Issue #10695: passing the port as a string value to telnetlib no longer
causes debug mode to fail.
-- Issue #10107: Warn about unsaved files in IDLE on OSX.
-
-- Issue #10406: Enable Rstrip IDLE extension on OSX (just like on other
- platforms).
-
- Issue #10478: Reentrant calls inside buffered IO objects (for example by
way of a signal handler) now raise a RuntimeError instead of freezing the
current process.
@@ -1095,6 +3344,39 @@ Extension Modules
- Issue #678250: Make mmap flush a noop on ACCESS_READ and ACCESS_COPY.
+IDLE
+----
+
+- Issue #11718: IDLE's open module dialog couldn't find the __init__.py
+ file in a package.
+
+- Issue #12590: IDLE editor window now always displays the first line
+ when opening a long file. With Tk 8.5, the first line was hidden.
+
+- Issue #11088: don't crash when using F5 to run a script in IDLE on MacOSX
+ with Tk 8.5.
+
+- Issue #10940: Workaround an IDLE hang on Mac OS X 10.6 when using the
+ menu accelerators for Open Module, Go to Line, and New Indent Width.
+ The accelerators still work but no longer appear in the menu items.
+
+- Issue #10907: Warn OS X 10.6 IDLE users to use ActiveState Tcl/Tk 8.5, rather
+ than the currently problematic Apple-supplied one, when running with the
+ 64-/32-bit installer variant.
+
+- Issue #11052: Correct IDLE menu accelerators on Mac OS X for Save
+ commands.
+
+- Issue #6075: IDLE on Mac OS X now works with both Carbon AquaTk and
+ Cocoa AquaTk.
+
+- Issue #10404: Use ctl-button-1 on OSX for the context menu in Idle.
+
+- Issue #10107: Warn about unsaved files in IDLE on OSX.
+
+- Issue #10406: Enable Rstrip IDLE extension on OSX (just like on other
+ platforms).
+
Build
-----
@@ -1140,15 +3422,11 @@ Build
- Issue #1099: Fix the build on MacOSX when building a framework with pydebug
using GCC 4.0.
-IDLE
-----
-
-- Issue #11718: IDLE's open module dialog couldn't find the __init__.py
- file in a package.
-
Tests
-----
+- Issue #11164: Remove obsolete allnodes test from minidom test.
+
- Issue #12205: Fix test_subprocess failure due to uninstalled test data.
- Issue #5723: Improve json tests to be executed with and without accelerations.
@@ -1217,9 +3495,6 @@ Library
- Issue #4493: urllib2 adds '/' in front of path components which does not
start with '/. Common behavior exhibited by browsers and other clients.
-- Issue #6378: idle.bat now runs with the appropriate Python version rather than
- the system default. Patch by Sridhar Ratnakumar.
-
- Issue #10407: Fix one NameError in distutils.
- Issue #10198: fix duplicate header written to wave files when writeframes()
@@ -1230,6 +3505,12 @@ Library
- Issue #5111: IPv6 Host in the Header is wrapped inside [ ]. Patch by Chandru.
+IDLE
+----
+
+- Issue #6378: idle.bat now runs with the appropriate Python version rather than
+ the system default. Patch by Sridhar Ratnakumar.
+
Build
-----
@@ -1704,7 +3985,6 @@ Extension Modules
- Issue #7567: Don't call `setupterm' twice.
-
Tools/Demos
-----------
@@ -1995,7 +4275,7 @@ Library
- Issue #1285086: Speed up ``urllib.quote()`` and urllib.unquote for simple
cases.
-- Issue #8688: Distutils now recalculates MANIFEST everytime.
+- Issue #8688: Distutils now recalculates MANIFEST every time.
- Issue #5099: The ``__del__()`` method of ``subprocess.Popen`` (and the methods
it calls) referenced global objects, causing errors to pop up during
@@ -2873,7 +5153,7 @@ Tools/Demos
Tests
-----
-- issue #7728: test_timeout was changed to use ``test_support.bind_port()``
+- Issue #7728: test_timeout was changed to use ``test_support.bind_port()``
instead of a hard coded port.
Documentation
@@ -2942,7 +5222,6 @@ Core and Builtins
- Issue #7466: Segmentation fault when the garbage collector is called in the
middle of populating a tuple. Patch by Florent Xicluna.
-
Library
-------
@@ -3879,9 +6158,6 @@ Library
- Issue #6048: Now Distutils uses the tarfile module in archive_util.
-- Issue #5150: IDLE's format menu now has an option to strip trailing
- whitespace.
-
- Issue #6121: pydoc now ignores leading and trailing spaces in the argument to
the 'help' function.
@@ -4371,7 +6647,7 @@ Library
- Restore Python 2.3 compatibility for decimal.py.
- Issue #1702551: distutils sdist was not excluding VCS directories under
- Windows. Inital solution by Guy Dalberto.
+ Windows. Initial solution by Guy Dalberto.
- The _tkinter module functions "createfilehandler", "deletefilehandler",
"createtimerhandler", "mainloop", "dooneevent" and "quit" have been deprecated
@@ -4439,7 +6715,7 @@ Library
backporting to maintenance branches. Original patch by Alexander Belopolsky.
- Issue #4163: Use unicode-friendly word splitting in the textwrap functions
- when given an unicode string.
+ when given a Unicode string.
- Issue #4616: TarFile.utime(): Restore directory times on Windows.
@@ -4540,6 +6816,14 @@ Library
- Windows locale mapping updated to Vista.
+IDLE
+----
+
+- Issue #5150: IDLE's format menu now has an option to strip trailing
+ whitespace.
+
+- Issue #5847: Remove -n switch on "Edit with IDLE" menu item.
+
Tools/Demos
-----------
@@ -4573,8 +6857,6 @@ Build
- Issue #6094: Build correctly with Subversion 1.7.
-- Issue #5847: Remove -n switch on "Edit with IDLE" menu item.
-
- Issue #5726: Make Modules/ld_so_aix return the actual exit code of the linker,
rather than always exit successfully. Patch by Floris Bruynooghe.
@@ -4848,7 +7130,7 @@ Library
- Issue #3547: Fixed ctypes structures bitfields of varying integer
sizes.
-- Issue #3879: A regression in urllib.getproxies_enviroment was fixed.
+- Issue #3879: A regression in urllib.getproxies_environment was fixed.
- Issue #3863: Disabled a unit test of fork being called from a thread
when running on platforms known to exhibit OS bugs when attempting that.
@@ -5413,7 +7695,6 @@ Build
- Issue #3215: Build sqlite3 as sqlite3.dll, not sqlite3.pyd.
-
Documentation
-------------
@@ -5479,7 +7760,6 @@ Core and Builtins
only available if asserts are left in the code, in cases where they
can't be triggered from Python code.
-
Extension Modules
-----------------
- Issue #1179: [CVE-2007-4965] Integer overflow in imageop module.
@@ -5787,7 +8067,6 @@ Build
NOTE: 64-bit and 4-way builds are only suppported on Mac OS X 10.5 (or later).
-
C API
-----
@@ -6663,7 +8942,6 @@ Core and builtins
threading.enumerate() list after the join() for a brief period until
it actually exited.
-
Library
-------
@@ -7394,9 +9672,6 @@ Library
Allows the demo2 function to be executed on its own instead of only
when the module is run as a script.
-- Bug #813342: Start the IDLE subprocess with -Qnew if the parent is
- started with that option.
-
- Bug #1565150: Fix subsecond processing for os.utime on Windows.
- Support for MSVC 8 was added to bdist_wininst.
@@ -7445,9 +9720,6 @@ Library
- Bug #1531862: Do not close standard file descriptors in subprocess.
-- idle: Honor the "Cancel" action in the save dialog (Debian bug
- #299092).
-
- Fix utf-8-sig incremental decoder, which didn't recognise a BOM when
the first chunk fed to the decoder started with a BOM, but was
longer than 3 bytes.
@@ -7690,6 +9962,15 @@ Extension Modules
- The sqlite3 module was updated to pysqlite 2.4.1.
+IDLE
+----
+
+- Bug #813342: Start the IDLE subprocess with -Qnew if the parent is
+ started with that option.
+
+- IDLE: Honor the "Cancel" action in the save dialog (Debian bug
+ #299092).
+
Tests
-----
@@ -7753,7 +10034,6 @@ Tools
platform.python_implementation(); this will now be saved in the
benchmark pickle.
-
Documentation
-------------
@@ -7804,7 +10084,6 @@ Documentation
applied to a newly created list object and add notes that this isn't
a good idea.
-
Tools/Demos
-----------
@@ -7817,7 +10096,6 @@ Tools/Demos
- Bug #1546372: Fixed small bugglet in pybench that caused a missing
file not to get reported properly.
-
Build
-----
@@ -7900,7 +10178,6 @@ Build
pybuildbot.identify to include some information about the build
environment.
-
C API
-----
@@ -7963,7 +10240,6 @@ C API
- Bug #1542693: remove semi-colon at end of PyImport_ImportModuleEx
macro so it can be used as an expression.
-
Windows
-------
@@ -7977,7 +10253,6 @@ Windows
- Bug #1216: Restore support for Visual Studio 2002.
-
Mac
---
diff --git a/Misc/README.OpenBSD b/Misc/README.OpenBSD
index b417ecc..b9e5976 100644
--- a/Misc/README.OpenBSD
+++ b/Misc/README.OpenBSD
@@ -29,7 +29,7 @@ script to disable certain options. Search for a line that looks like:
If your version is not in that list, e.g., 3.9, add the version
number. In this case, you would just need to add a 9 after the 8.
-If you modify configure.in, you will need to regenerate configure
+If you modify configure.ac, you will need to regenerate configure
with autoconf.
If your version is already in the list, this is not a known problem.
diff --git a/Misc/RPM/python-2.7.spec b/Misc/RPM/python-2.7.spec
index aad3529..988c3cc 100644
--- a/Misc/RPM/python-2.7.spec
+++ b/Misc/RPM/python-2.7.spec
@@ -39,7 +39,7 @@
%define name python
#--start constants--
-%define version 2.7.3
+%define version 2.7.8
%define libvers 2.7
#--end constants--
%define release 1pydotorg
diff --git a/Misc/python-config.in b/Misc/python-config.in
index 552bbd5..ca9857a 100644
--- a/Misc/python-config.in
+++ b/Misc/python-config.in
@@ -51,6 +51,7 @@ for opt in opt_flags:
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
- libs.extend(getvar('LINKFORSHARED').split())
+ if not getvar('PYTHONFRAMEWORK'):
+ libs.extend(getvar('LINKFORSHARED').split())
print ' '.join(libs)
diff --git a/Misc/python.man b/Misc/python.man
index 96c9964..39edbca 100644
--- a/Misc/python.man
+++ b/Misc/python.man
@@ -459,7 +459,7 @@ Main website: http://www.python.org/
.br
Documentation: http://docs.python.org/
.br
-Developer resources: http://www.python.org/dev/
+Developer resources: http://docs.python.org/devguide/
.br
Downloads: http://python.org/download/
.br
diff --git a/Modules/Setup.dist b/Modules/Setup.dist
index e02fd77..2ad1aa3 100644
--- a/Modules/Setup.dist
+++ b/Modules/Setup.dist
@@ -177,6 +177,7 @@ GLHACK=-Dclear=__GLclear
#_testcapi _testcapimodule.c # Python C API test module
#_random _randommodule.c # Random number generator
#_collections _collectionsmodule.c # Container types
+#_heapq _heapqmodule.c # Heapq type
#itertools itertoolsmodule.c # Functions creating iterators for efficient looping
#strop stropmodule.c # String manipulations
#_functools _functoolsmodule.c # Tools for working with functions and callable objects
@@ -190,6 +191,9 @@ GLHACK=-Dclear=__GLclear
# access to ISO C locale support
#_locale _localemodule.c # -lintl
+# Standard I/O baseline
+#_io -I$(srcdir)/Modules/_io _io/bufferedio.c _io/bytesio.c _io/fileio.c _io/iobase.c _io/_iomodule.c _io/stringio.c _io/textio.c
+
# Modules with some UNIX dependencies -- on by default:
# (If you have a really backward UNIX, select and socket may not be
@@ -207,7 +211,7 @@ GLHACK=-Dclear=__GLclear
#_csv _csv.c
# Socket module helper for socket(2)
-#_socket socketmodule.c
+#_socket socketmodule.c timemodule.c
# Socket module helper for SSL support; you must comment out the other
# socket line above, and possibly edit the SSL variable:
diff --git a/Modules/_bisectmodule.c b/Modules/_bisectmodule.c
index e8976b2..b6652c0 100644
--- a/Modules/_bisectmodule.c
+++ b/Modules/_bisectmodule.c
@@ -21,7 +21,10 @@ internal_bisect_right(PyObject *list, PyObject *item, Py_ssize_t lo, Py_ssize_t
return -1;
}
while (lo < hi) {
- mid = (lo + hi) / 2;
+ /* The (size_t)cast ensures that the addition and subsequent division
+ are performed as unsigned operations, avoiding difficulties from
+ signed overflow. (See issue 13496.) */
+ mid = ((size_t)lo + hi) / 2;
litem = PySequence_GetItem(list, mid);
if (litem == NULL)
return -1;
@@ -56,7 +59,8 @@ bisect_right(PyObject *self, PyObject *args, PyObject *kw)
}
PyDoc_STRVAR(bisect_right_doc,
-"bisect_right(a, x[, lo[, hi]]) -> index\n\
+"bisect(a, x[, lo[, hi]]) -> index\n\
+bisect_right(a, x[, lo[, hi]]) -> index\n\
\n\
Return the index where to insert item x in list a, assuming a is sorted.\n\
\n\
@@ -97,7 +101,8 @@ insort_right(PyObject *self, PyObject *args, PyObject *kw)
}
PyDoc_STRVAR(insort_right_doc,
-"insort_right(a, x[, lo[, hi]])\n\
+"insort(a, x[, lo[, hi]])\n\
+insort_right(a, x[, lo[, hi]])\n\
\n\
Insert item x in list a, and keep it sorted assuming a is sorted.\n\
\n\
@@ -122,7 +127,10 @@ internal_bisect_left(PyObject *list, PyObject *item, Py_ssize_t lo, Py_ssize_t h
return -1;
}
while (lo < hi) {
- mid = (lo + hi) / 2;
+ /* The (size_t)cast ensures that the addition and subsequent division
+ are performed as unsigned operations, avoiding difficulties from
+ signed overflow. (See issue 13496.) */
+ mid = ((size_t)lo + hi) / 2;
litem = PySequence_GetItem(list, mid);
if (litem == NULL)
return -1;
@@ -187,7 +195,7 @@ insort_left(PyObject *self, PyObject *args, PyObject *kw)
if (PyList_Insert(list, index, item) < 0)
return NULL;
} else {
- result = PyObject_CallMethod(list, "insert", "iO",
+ result = PyObject_CallMethod(list, "insert", "nO",
index, item);
if (result == NULL)
return NULL;
@@ -207,18 +215,15 @@ If x is already in a, insert it to the left of the leftmost x.\n\
Optional args lo (default 0) and hi (default len(a)) bound the\n\
slice of a to be searched.\n");
-PyDoc_STRVAR(bisect_doc, "Alias for bisect_right().\n");
-PyDoc_STRVAR(insort_doc, "Alias for insort_right().\n");
-
static PyMethodDef bisect_methods[] = {
{"bisect_right", (PyCFunction)bisect_right,
METH_VARARGS|METH_KEYWORDS, bisect_right_doc},
{"bisect", (PyCFunction)bisect_right,
- METH_VARARGS|METH_KEYWORDS, bisect_doc},
+ METH_VARARGS|METH_KEYWORDS, bisect_right_doc},
{"insort_right", (PyCFunction)insort_right,
METH_VARARGS|METH_KEYWORDS, insort_right_doc},
{"insort", (PyCFunction)insort_right,
- METH_VARARGS|METH_KEYWORDS, insort_doc},
+ METH_VARARGS|METH_KEYWORDS, insort_right_doc},
{"bisect_left", (PyCFunction)bisect_left,
METH_VARARGS|METH_KEYWORDS, bisect_left_doc},
{"insort_left", (PyCFunction)insort_left,
diff --git a/Modules/_bsddb.c b/Modules/_bsddb.c
index 83f298f..203bba2 100644
--- a/Modules/_bsddb.c
+++ b/Modules/_bsddb.c
@@ -64,7 +64,7 @@
*
* http://www.python.org/peps/pep-0291.html
*
- * This module contains 6 types:
+ * This module contains 7 types:
*
* DB (Database)
* DBCursor (Database Cursor)
@@ -72,6 +72,7 @@
* DBTxn (An explicit database transaction)
* DBLock (A lock handle)
* DBSequence (Sequence)
+ * DBSite (Site)
*
* More datatypes added:
*
@@ -135,34 +136,11 @@ typedef int Py_ssize_t;
#define MYDB_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS;
#define MYDB_END_ALLOW_THREADS Py_END_ALLOW_THREADS;
-/* For 2.3, use the PyGILState_ calls */
-#if (PY_VERSION_HEX >= 0x02030000)
-#define MYDB_USE_GILSTATE
-#endif
-
/* and these are for calling C --> Python */
-#if defined(MYDB_USE_GILSTATE)
#define MYDB_BEGIN_BLOCK_THREADS \
PyGILState_STATE __savestate = PyGILState_Ensure();
#define MYDB_END_BLOCK_THREADS \
PyGILState_Release(__savestate);
-#else /* MYDB_USE_GILSTATE */
-/* Pre GILState API - do it the long old way */
-static PyInterpreterState* _db_interpreterState = NULL;
-#define MYDB_BEGIN_BLOCK_THREADS { \
- PyThreadState* prevState; \
- PyThreadState* newState; \
- PyEval_AcquireLock(); \
- newState = PyThreadState_New(_db_interpreterState); \
- prevState = PyThreadState_Swap(newState);
-
-#define MYDB_END_BLOCK_THREADS \
- newState = PyThreadState_Swap(prevState); \
- PyThreadState_Clear(newState); \
- PyEval_ReleaseLock(); \
- PyThreadState_Delete(newState); \
- }
-#endif /* MYDB_USE_GILSTATE */
#else
/* Compiled without threads - avoid all this cruft */
@@ -187,24 +165,24 @@ static PyObject* DBOldVersionError; /* DB_OLD_VERSION */
static PyObject* DBRunRecoveryError; /* DB_RUNRECOVERY */
static PyObject* DBVerifyBadError; /* DB_VERIFY_BAD */
static PyObject* DBNoServerError; /* DB_NOSERVER */
+#if (DBVER < 52)
static PyObject* DBNoServerHomeError; /* DB_NOSERVER_HOME */
static PyObject* DBNoServerIDError; /* DB_NOSERVER_ID */
+#endif
static PyObject* DBPageNotFoundError; /* DB_PAGE_NOTFOUND */
static PyObject* DBSecondaryBadError; /* DB_SECONDARY_BAD */
static PyObject* DBInvalidArgError; /* EINVAL */
static PyObject* DBAccessError; /* EACCES */
static PyObject* DBNoSpaceError; /* ENOSPC */
-static PyObject* DBNoMemoryError; /* DB_BUFFER_SMALL (ENOMEM when < 4.3) */
+static PyObject* DBNoMemoryError; /* DB_BUFFER_SMALL */
static PyObject* DBAgainError; /* EAGAIN */
static PyObject* DBBusyError; /* EBUSY */
static PyObject* DBFileExistsError; /* EEXIST */
static PyObject* DBNoSuchFileError; /* ENOENT */
static PyObject* DBPermissionsError; /* EPERM */
-#if (DBVER >= 42)
static PyObject* DBRepHandleDeadError; /* DB_REP_HANDLE_DEAD */
-#endif
#if (DBVER >= 44)
static PyObject* DBRepLockoutError; /* DB_REP_LOCKOUT */
#endif
@@ -220,10 +198,6 @@ static PyObject* DBForeignConflictError; /* DB_FOREIGN_CONFLICT */
static PyObject* DBRepUnavailError; /* DB_REP_UNAVAIL */
-#if (DBVER < 43)
-#define DB_BUFFER_SMALL ENOMEM
-#endif
-
#if (DBVER < 48)
#define DB_GID_SIZE DB_XIDDATASIZE
#endif
@@ -252,8 +226,9 @@ static PyObject* DBRepUnavailError; /* DB_REP_UNAVAIL */
staticforward PyTypeObject DB_Type, DBCursor_Type, DBEnv_Type, DBTxn_Type,
DBLock_Type, DBLogCursor_Type;
-#if (DBVER >= 43)
staticforward PyTypeObject DBSequence_Type;
+#if (DBVER >= 52)
+staticforward PyTypeObject DBSite_Type;
#endif
#ifndef Py_TYPE
@@ -267,8 +242,9 @@ staticforward PyTypeObject DBSequence_Type;
#define DBEnvObject_Check(v) (Py_TYPE(v) == &DBEnv_Type)
#define DBTxnObject_Check(v) (Py_TYPE(v) == &DBTxn_Type)
#define DBLockObject_Check(v) (Py_TYPE(v) == &DBLock_Type)
-#if (DBVER >= 43)
#define DBSequenceObject_Check(v) (Py_TYPE(v) == &DBSequence_Type)
+#if (DBVER >= 52)
+#define DBSiteObject_Check(v) (Py_TYPE(v) == &DBSite_Type)
#endif
#if (DBVER < 46)
@@ -372,9 +348,12 @@ staticforward PyTypeObject DBSequence_Type;
#define CHECK_LOGCURSOR_NOT_CLOSED(logcurs) \
_CHECK_OBJECT_NOT_CLOSED(logcurs->logc, DBCursorClosedError, DBLogCursor)
-#if (DBVER >= 43)
#define CHECK_SEQUENCE_NOT_CLOSED(curs) \
_CHECK_OBJECT_NOT_CLOSED(curs->sequence, DBError, DBSequence)
+
+#if (DBVER >= 52)
+#define CHECK_SITE_NOT_CLOSED(db_site) \
+ _CHECK_OBJECT_NOT_CLOSED(db_site->site, DBError, DBSite)
#endif
#define CHECK_DBFLAG(mydb, flag) (((mydb)->flags & (flag)) || \
@@ -567,12 +546,8 @@ unsigned int our_strlcpy(char* dest, const char* src, unsigned int n)
/* Callback used to save away more information about errors from the DB
* library. */
static char _db_errmsg[1024];
-#if (DBVER <= 42)
-static void _db_errorCallback(const char* prefix, char* msg)
-#else
static void _db_errorCallback(const DB_ENV *db_env,
const char* prefix, const char* msg)
-#endif
{
our_strlcpy(_db_errmsg, msg, sizeof(_db_errmsg));
}
@@ -626,11 +601,7 @@ PyObject *a, *b, *r;
return NULL;
}
-#if (PY_VERSION_HEX >= 0x02040000)
r = PyTuple_Pack(2, a, b) ;
-#else
- r = Py_BuildValue("OO", a, b);
-#endif
Py_DECREF(a);
Py_DECREF(b);
return r;
@@ -696,16 +667,15 @@ static int makeDBError(int err)
case DB_RUNRECOVERY: errObj = DBRunRecoveryError; break;
case DB_VERIFY_BAD: errObj = DBVerifyBadError; break;
case DB_NOSERVER: errObj = DBNoServerError; break;
+#if (DBVER < 52)
case DB_NOSERVER_HOME: errObj = DBNoServerHomeError; break;
case DB_NOSERVER_ID: errObj = DBNoServerIDError; break;
+#endif
case DB_PAGE_NOTFOUND: errObj = DBPageNotFoundError; break;
case DB_SECONDARY_BAD: errObj = DBSecondaryBadError; break;
case DB_BUFFER_SMALL: errObj = DBNoMemoryError; break;
-#if (DBVER >= 43)
- /* ENOMEM and DB_BUFFER_SMALL were one and the same until 4.3 */
case ENOMEM: errObj = PyExc_MemoryError; break;
-#endif
case EINVAL: errObj = DBInvalidArgError; break;
case EACCES: errObj = DBAccessError; break;
case ENOSPC: errObj = DBNoSpaceError; break;
@@ -715,9 +685,7 @@ static int makeDBError(int err)
case ENOENT: errObj = DBNoSuchFileError; break;
case EPERM : errObj = DBPermissionsError; break;
-#if (DBVER >= 42)
case DB_REP_HANDLE_DEAD : errObj = DBRepHandleDeadError; break;
-#endif
#if (DBVER >= 44)
case DB_REP_LOCKOUT : errObj = DBRepLockoutError; break;
#endif
@@ -902,7 +870,6 @@ static void _addTimeTToDict(PyObject* dict, char *name, time_t value)
Py_XDECREF(v);
}
-#if (DBVER >= 43)
/* add an db_seq_t to a dictionary using the given name as a key */
static void _addDb_seq_tToDict(PyObject* dict, char *name, db_seq_t value)
{
@@ -912,7 +879,6 @@ static void _addDb_seq_tToDict(PyObject* dict, char *name, db_seq_t value)
Py_XDECREF(v);
}
-#endif
static void _addDB_lsnToDict(PyObject* dict, char *name, DB_LSN value)
{
@@ -942,11 +908,10 @@ newDBObject(DBEnvObject* arg, int flags)
self->myenvobj = NULL;
self->db = NULL;
self->children_cursors = NULL;
-#if (DBVER >=43)
self->children_sequences = NULL;
-#endif
self->associateCallback = NULL;
self->btCompareCallback = NULL;
+ self->dupCompareCallback = NULL;
self->primaryDBType = 0;
Py_INCREF(Py_None);
self->private_obj = Py_None;
@@ -984,8 +949,7 @@ newDBObject(DBEnvObject* arg, int flags)
* DBTxns and closing any open DBs first. */
if (makeDBError(err)) {
if (self->myenvobj) {
- Py_DECREF(self->myenvobj);
- self->myenvobj = NULL;
+ Py_CLEAR(self->myenvobj);
}
Py_DECREF(self);
self = NULL;
@@ -1017,16 +981,16 @@ DB_dealloc(DBObject* self)
PyObject_ClearWeakRefs((PyObject *) self);
}
if (self->myenvobj) {
- Py_DECREF(self->myenvobj);
- self->myenvobj = NULL;
+ Py_CLEAR(self->myenvobj);
}
if (self->associateCallback != NULL) {
- Py_DECREF(self->associateCallback);
- self->associateCallback = NULL;
+ Py_CLEAR(self->associateCallback);
}
if (self->btCompareCallback != NULL) {
- Py_DECREF(self->btCompareCallback);
- self->btCompareCallback = NULL;
+ Py_CLEAR(self->btCompareCallback);
+ }
+ if (self->dupCompareCallback != NULL) {
+ Py_CLEAR(self->dupCompareCallback);
}
Py_DECREF(self->private_obj);
PyObject_Del(self);
@@ -1147,6 +1111,9 @@ newDBEnvObject(int flags)
self->children_dbs = NULL;
self->children_txns = NULL;
self->children_logcursors = NULL ;
+#if (DBVER >= 52)
+ self->children_sites = NULL;
+#endif
Py_INCREF(Py_None);
self->private_obj = Py_None;
Py_INCREF(Py_None);
@@ -1188,8 +1155,7 @@ DBEnv_dealloc(DBEnvObject* self)
PyErr_Clear();
}
- Py_XDECREF(self->event_notifyCallback);
- self->event_notifyCallback = NULL;
+ Py_CLEAR(self->event_notifyCallback);
if (self->in_weakreflist != NULL) {
PyObject_ClearWeakRefs((PyObject *) self);
@@ -1340,7 +1306,6 @@ DBLock_dealloc(DBLockObject* self)
}
-#if (DBVER >= 43)
static DBSequenceObject*
newDBSequenceObject(DBObject* mydb, int flags)
{
@@ -1396,6 +1361,53 @@ DBSequence_dealloc(DBSequenceObject* self)
Py_DECREF(self->mydb);
PyObject_Del(self);
}
+
+#if (DBVER >= 52)
+static DBSiteObject*
+newDBSiteObject(DB_SITE* sitep, DBEnvObject* env)
+{
+ DBSiteObject* self;
+
+ self = PyObject_New(DBSiteObject, &DBSite_Type);
+
+ if (self == NULL)
+ return NULL;
+
+ self->site = sitep;
+ self->env = env;
+
+ INSERT_IN_DOUBLE_LINKED_LIST(self->env->children_sites, self);
+
+ self->in_weakreflist = NULL;
+ Py_INCREF(self->env);
+ return self;
+}
+
+/* Forward declaration */
+static PyObject *DBSite_close_internal(DBSiteObject* self);
+
+static void
+DBSite_dealloc(DBSiteObject* self)
+{
+ PyObject *dummy;
+
+ if (self->site != NULL) {
+ dummy = DBSite_close_internal(self);
+ /*
+ ** Raising exceptions while doing
+ ** garbage collection is a fatal error.
+ */
+ if (dummy)
+ Py_DECREF(dummy);
+ else
+ PyErr_Clear();
+ }
+ if (self->in_weakreflist != NULL) {
+ PyObject_ClearWeakRefs((PyObject *) self);
+ }
+ Py_DECREF(self->env);
+ PyObject_Del(self);
+}
#endif
/* --------------------------------------------------------------------- */
@@ -1622,8 +1634,7 @@ DB_associate(DBObject* self, PyObject* args, PyObject* kwargs)
MYDB_END_ALLOW_THREADS;
if (err) {
- Py_XDECREF(secondaryDB->associateCallback);
- secondaryDB->associateCallback = NULL;
+ Py_CLEAR(secondaryDB->associateCallback);
secondaryDB->primaryDBType = 0;
}
@@ -1652,12 +1663,10 @@ DB_close_internal(DBObject* self, int flags, int do_not_close)
Py_XDECREF(dummy);
}
-#if (DBVER >= 43)
while(self->children_sequences) {
dummy=DBSequence_close_internal(self->children_sequences,0,0);
Py_XDECREF(dummy);
}
-#endif
/*
** "do_not_close" is used to dispose all related objects in the
@@ -2074,20 +2083,12 @@ DB_pget(DBObject* self, PyObject* args, PyObject* kwargs)
keyObj = NUMBER_FromLong(*(int *)key.data);
else
keyObj = Build_PyString(key.data, key.size);
-#if (PY_VERSION_HEX >= 0x02040000)
retval = PyTuple_Pack(3, keyObj, pkeyObj, dataObj);
-#else
- retval = Py_BuildValue("OOO", keyObj, pkeyObj, dataObj);
-#endif
Py_DECREF(keyObj);
}
else /* return just the pkey and data */
{
-#if (PY_VERSION_HEX >= 0x02040000)
retval = PyTuple_Pack(2, pkeyObj, dataObj);
-#else
- retval = Py_BuildValue("OO", pkeyObj, dataObj);
-#endif
}
Py_DECREF(dataObj);
Py_DECREF(pkeyObj);
@@ -2132,7 +2133,7 @@ DB_get_size(DBObject* self, PyObject* args, PyObject* kwargs)
MYDB_BEGIN_ALLOW_THREADS;
err = self->db->get(self->db, txn, &key, &data, flags);
MYDB_END_ALLOW_THREADS;
- if (err == DB_BUFFER_SMALL) {
+ if ((err == DB_BUFFER_SMALL) || (err == 0)) {
retval = NUMBER_FromLong((long)data.size);
err = 0;
}
@@ -2385,9 +2386,7 @@ DB_open(DBObject* self, PyObject* args, PyObject* kwargs)
return NULL;
}
-#if (DBVER >= 42)
self->db->get_flags(self->db, &self->setflags);
-#endif
self->flags = flags;
@@ -2539,6 +2538,37 @@ DB_get_priority(DBObject* self)
#endif
static PyObject*
+DB_get_dbname(DBObject* self)
+{
+ int err;
+ const char *filename, *dbname;
+
+ CHECK_DB_NOT_CLOSED(self);
+
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->db->get_dbname(self->db, &filename, &dbname);
+ MYDB_END_ALLOW_THREADS;
+ RETURN_IF_ERR();
+ /* If "dbname==NULL", it is correctly converted to "None" */
+ return Py_BuildValue("(ss)", filename, dbname);
+}
+
+static PyObject*
+DB_get_open_flags(DBObject* self)
+{
+ int err;
+ unsigned int flags;
+
+ CHECK_DB_NOT_CLOSED(self);
+
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->db->get_open_flags(self->db, &flags);
+ MYDB_END_ALLOW_THREADS;
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(flags);
+}
+
+static PyObject*
DB_set_q_extentsize(DBObject* self, PyObject* args)
{
int err;
@@ -2555,7 +2585,6 @@ DB_set_q_extentsize(DBObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DB_get_q_extentsize(DBObject* self)
{
@@ -2570,7 +2599,6 @@ DB_get_q_extentsize(DBObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(extentsize);
}
-#endif
static PyObject*
DB_set_bt_minkey(DBObject* self, PyObject* args)
@@ -2588,7 +2616,6 @@ DB_set_bt_minkey(DBObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DB_get_bt_minkey(DBObject* self)
{
@@ -2603,7 +2630,6 @@ DB_get_bt_minkey(DBObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(bt_minkey);
}
-#endif
static int
_default_cmp(const DBT *leftKey,
@@ -2740,6 +2766,120 @@ DB_set_bt_compare(DBObject* self, PyObject* comparator)
RETURN_NONE();
}
+static int
+_db_dupCompareCallback(DB* db,
+ const DBT *leftKey,
+ const DBT *rightKey)
+{
+ int res = 0;
+ PyObject *args;
+ PyObject *result = NULL;
+ DBObject *self = (DBObject *)db->app_private;
+
+ if (self == NULL || self->dupCompareCallback == NULL) {
+ MYDB_BEGIN_BLOCK_THREADS;
+ PyErr_SetString(PyExc_TypeError,
+ (self == 0
+ ? "DB_dup_compare db is NULL."
+ : "DB_dup_compare callback is NULL."));
+ /* we're in a callback within the DB code, we can't raise */
+ PyErr_Print();
+ res = _default_cmp(leftKey, rightKey);
+ MYDB_END_BLOCK_THREADS;
+ } else {
+ MYDB_BEGIN_BLOCK_THREADS;
+
+ args = BuildValue_SS(leftKey->data, leftKey->size, rightKey->data, rightKey->size);
+ if (args != NULL) {
+ result = PyEval_CallObject(self->dupCompareCallback, args);
+ }
+ if (args == NULL || result == NULL) {
+ /* we're in a callback within the DB code, we can't raise */
+ PyErr_Print();
+ res = _default_cmp(leftKey, rightKey);
+ } else if (NUMBER_Check(result)) {
+ res = NUMBER_AsLong(result);
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "DB_dup_compare callback MUST return an int.");
+ /* we're in a callback within the DB code, we can't raise */
+ PyErr_Print();
+ res = _default_cmp(leftKey, rightKey);
+ }
+
+ Py_XDECREF(args);
+ Py_XDECREF(result);
+
+ MYDB_END_BLOCK_THREADS;
+ }
+ return res;
+}
+
+static PyObject*
+DB_set_dup_compare(DBObject* self, PyObject* comparator)
+{
+ int err;
+ PyObject *tuple, *result;
+
+ CHECK_DB_NOT_CLOSED(self);
+
+ if (!PyCallable_Check(comparator)) {
+ makeTypeError("Callable", comparator);
+ return NULL;
+ }
+
+    /*
+     * Perform a test call of the comparator function with two empty
+     * string objects. Verify that it returns an int and that the value
+     * is 0; raise an error if not.
+     */
+ tuple = Py_BuildValue("(ss)", "", "");
+ result = PyEval_CallObject(comparator, tuple);
+ Py_DECREF(tuple);
+ if (result == NULL)
+ return NULL;
+ if (!NUMBER_Check(result)) {
+ Py_DECREF(result);
+ PyErr_SetString(PyExc_TypeError,
+ "callback MUST return an int");
+ return NULL;
+ } else if (NUMBER_AsLong(result) != 0) {
+ Py_DECREF(result);
+ PyErr_SetString(PyExc_TypeError,
+ "callback failed to return 0 on two empty strings");
+ return NULL;
+ }
+ Py_DECREF(result);
+
+ /* We don't accept multiple set_dup_compare operations, in order to
+ * simplify the code. This would have no real use, as one cannot
+ * change the function once the db is opened anyway */
+ if (self->dupCompareCallback != NULL) {
+ PyErr_SetString(PyExc_RuntimeError, "set_dup_compare() cannot be called more than once");
+ return NULL;
+ }
+
+ Py_INCREF(comparator);
+ self->dupCompareCallback = comparator;
+
+    /* This is to work around a problem with uninitialized threads (see
+       the comment in DB_associate) */
+#ifdef WITH_THREAD
+ PyEval_InitThreads();
+#endif
+
+ err = self->db->set_dup_compare(self->db, _db_dupCompareCallback);
+
+ if (err) {
+ /* restore the old state in case of error */
+ Py_DECREF(comparator);
+ self->dupCompareCallback = NULL;
+ }
+
+ RETURN_IF_ERR();
+ RETURN_NONE();
+}
+
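A hedged Python sketch of the comparator contract that set_dup_compare()
enforces above: the callable is probed once with two empty strings and must
return the integer 0 for that probe; at runtime it receives the two duplicate
data items and returns a negative, zero or positive int (the DB_DUPSORT setup
is illustrative):

    from bsddb import db

    def compare_dups(left, right):
        # Must return an int; 0 means the two duplicate data items are equal.
        return cmp(left, right)

    d = db.DB()
    d.set_flags(db.DB_DUPSORT)        # sorted duplicates, so the comparator is used
    d.set_dup_compare(compare_dups)   # must be configured before DB.open()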
static PyObject*
DB_set_cachesize(DBObject* self, PyObject* args)
@@ -2759,7 +2899,6 @@ DB_set_cachesize(DBObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DB_get_cachesize(DBObject* self)
{
@@ -2777,7 +2916,6 @@ DB_get_cachesize(DBObject* self)
return Py_BuildValue("(iii)", gbytes, bytes, ncache);
}
-#endif
static PyObject*
DB_set_flags(DBObject* self, PyObject* args)
@@ -2797,7 +2935,6 @@ DB_set_flags(DBObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DB_get_flags(DBObject* self)
{
@@ -2812,7 +2949,34 @@ DB_get_flags(DBObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(flags);
}
-#endif
+
+static PyObject*
+DB_get_transactional(DBObject* self)
+{
+ int err;
+
+ CHECK_DB_NOT_CLOSED(self);
+
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->db->get_transactional(self->db);
+ MYDB_END_ALLOW_THREADS;
+
+ if(err == 0) {
+ Py_INCREF(Py_False);
+ return Py_False;
+ } else if(err == 1) {
+ Py_INCREF(Py_True);
+ return Py_True;
+ }
+
+    /*
+    ** If we reach here, there was an error, and the
+    ** "return" below should be unreachable.
+    */
+ RETURN_IF_ERR();
+ assert(0); /* This code SHOULD be unreachable */
+ return NULL;
+}
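get_transactional() maps the underlying DB->get_transactional() result onto a
Python bool, so at the Python level the new method is expected to behave like
this sketch (environment path and flags are illustrative):

    from bsddb import db
    env = db.DBEnv()
    env.open("/tmp/dbenv", db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
                           db.DB_INIT_LOG | db.DB_INIT_TXN)
    d = db.DB(env)
    d.open("txn.db", dbtype=db.DB_HASH, flags=db.DB_CREATE | db.DB_AUTO_COMMIT)
    print d.get_transactional()       # True: the handle was opened transactionally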
static PyObject*
DB_set_h_ffactor(DBObject* self, PyObject* args)
@@ -2830,7 +2994,6 @@ DB_set_h_ffactor(DBObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DB_get_h_ffactor(DBObject* self)
{
@@ -2845,7 +3008,6 @@ DB_get_h_ffactor(DBObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(ffactor);
}
-#endif
static PyObject*
DB_set_h_nelem(DBObject* self, PyObject* args)
@@ -2863,7 +3025,6 @@ DB_set_h_nelem(DBObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DB_get_h_nelem(DBObject* self)
{
@@ -2878,7 +3039,6 @@ DB_get_h_nelem(DBObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(nelem);
}
-#endif
static PyObject*
DB_set_lorder(DBObject* self, PyObject* args)
@@ -2896,7 +3056,6 @@ DB_set_lorder(DBObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DB_get_lorder(DBObject* self)
{
@@ -2911,7 +3070,6 @@ DB_get_lorder(DBObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(lorder);
}
-#endif
static PyObject*
DB_set_pagesize(DBObject* self, PyObject* args)
@@ -2929,7 +3087,6 @@ DB_set_pagesize(DBObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DB_get_pagesize(DBObject* self)
{
@@ -2944,7 +3101,6 @@ DB_get_pagesize(DBObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(pagesize);
}
-#endif
static PyObject*
DB_set_re_delim(DBObject* self, PyObject* args)
@@ -2967,7 +3123,6 @@ DB_set_re_delim(DBObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DB_get_re_delim(DBObject* self)
{
@@ -2981,7 +3136,6 @@ DB_get_re_delim(DBObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(re_delim);
}
-#endif
static PyObject*
DB_set_re_len(DBObject* self, PyObject* args)
@@ -2999,7 +3153,6 @@ DB_set_re_len(DBObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DB_get_re_len(DBObject* self)
{
@@ -3014,7 +3167,6 @@ DB_get_re_len(DBObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(re_len);
}
-#endif
static PyObject*
DB_set_re_pad(DBObject* self, PyObject* args)
@@ -3036,7 +3188,6 @@ DB_set_re_pad(DBObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DB_get_re_pad(DBObject* self)
{
@@ -3050,7 +3201,6 @@ DB_get_re_pad(DBObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(re_pad);
}
-#endif
static PyObject*
DB_set_re_source(DBObject* self, PyObject* args)
@@ -3069,7 +3219,6 @@ DB_set_re_source(DBObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DB_get_re_source(DBObject* self)
{
@@ -3084,7 +3233,6 @@ DB_get_re_source(DBObject* self)
RETURN_IF_ERR();
return PyBytes_FromString(source);
}
-#endif
static PyObject*
DB_stat(DBObject* self, PyObject* args, PyObject* kwargs)
@@ -3092,32 +3240,19 @@ DB_stat(DBObject* self, PyObject* args, PyObject* kwargs)
int err, flags = 0, type;
void* sp;
PyObject* d;
-#if (DBVER >= 43)
PyObject* txnobj = NULL;
DB_TXN *txn = NULL;
static char* kwnames[] = { "flags", "txn", NULL };
-#else
- static char* kwnames[] = { "flags", NULL };
-#endif
-#if (DBVER >= 43)
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iO:stat", kwnames,
&flags, &txnobj))
return NULL;
if (!checkTxnObj(txnobj, &txn))
return NULL;
-#else
- if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i:stat", kwnames, &flags))
- return NULL;
-#endif
CHECK_DB_NOT_CLOSED(self);
MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 43)
err = self->db->stat(self->db, txn, &sp, flags);
-#else
- err = self->db->stat(self->db, &sp, flags);
-#endif
MYDB_END_ALLOW_THREADS;
RETURN_IF_ERR();
@@ -3172,9 +3307,7 @@ DB_stat(DBObject* self, PyObject* args, PyObject* kwargs)
MAKE_BT_ENTRY(leaf_pg);
MAKE_BT_ENTRY(dup_pg);
MAKE_BT_ENTRY(over_pg);
-#if (DBVER >= 43)
MAKE_BT_ENTRY(empty_pg);
-#endif
MAKE_BT_ENTRY(free);
MAKE_BT_ENTRY(int_pgfree);
MAKE_BT_ENTRY(leaf_pgfree);
@@ -3214,7 +3347,6 @@ DB_stat(DBObject* self, PyObject* args, PyObject* kwargs)
return d;
}
-#if (DBVER >= 43)
static PyObject*
DB_stat_print(DBObject* self, PyObject* args, PyObject *kwargs)
{
@@ -3234,7 +3366,6 @@ DB_stat_print(DBObject* self, PyObject* args, PyObject *kwargs)
RETURN_IF_ERR();
RETURN_NONE();
}
-#endif
static PyObject*
@@ -3381,7 +3512,6 @@ DB_set_encrypt(DBObject* self, PyObject* args, PyObject* kwargs)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DB_get_encrypt_flags(DBObject* self)
{
@@ -3396,7 +3526,6 @@ DB_get_encrypt_flags(DBObject* self)
return NUMBER_FromLong(flags);
}
-#endif
@@ -3420,11 +3549,7 @@ Py_ssize_t DB_length(PyObject* _self)
}
MYDB_BEGIN_ALLOW_THREADS;
-#if (DBVER >= 43)
err = self->db->stat(self->db, /*txnid*/ NULL, &sp, 0);
-#else
- err = self->db->stat(self->db, &sp, 0);
-#endif
MYDB_END_ALLOW_THREADS;
/* All the stat structures have matching fields upto the ndata field,
@@ -3868,6 +3993,136 @@ DBLogCursor_set(DBLogCursorObject* self, PyObject* args)
}
+/* --------------------------------------------------------------------- */
+/* DBSite methods */
+
+
+#if (DBVER >= 52)
+static PyObject*
+DBSite_close_internal(DBSiteObject* self)
+{
+ int err = 0;
+
+ if (self->site != NULL) {
+ EXTRACT_FROM_DOUBLE_LINKED_LIST(self);
+
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->site->close(self->site);
+ MYDB_END_ALLOW_THREADS;
+ self->site = NULL;
+ }
+ RETURN_IF_ERR();
+ RETURN_NONE();
+}
+
+static PyObject*
+DBSite_close(DBSiteObject* self)
+{
+ return DBSite_close_internal(self);
+}
+
+static PyObject*
+DBSite_remove(DBSiteObject* self)
+{
+ int err = 0;
+
+ CHECK_SITE_NOT_CLOSED(self);
+
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->site->remove(self->site);
+ MYDB_END_ALLOW_THREADS;
+
+ RETURN_IF_ERR();
+ RETURN_NONE();
+}
+
+static PyObject*
+DBSite_get_eid(DBSiteObject* self)
+{
+ int err = 0;
+ int eid;
+
+ CHECK_SITE_NOT_CLOSED(self);
+
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->site->get_eid(self->site, &eid);
+ MYDB_END_ALLOW_THREADS;
+
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(eid);
+}
+
+static PyObject*
+DBSite_get_address(DBSiteObject* self)
+{
+ int err = 0;
+ const char *host;
+ u_int port;
+
+ CHECK_SITE_NOT_CLOSED(self);
+
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->site->get_address(self->site, &host, &port);
+ MYDB_END_ALLOW_THREADS;
+
+ RETURN_IF_ERR();
+
+ return Py_BuildValue("(sI)", host, port);
+}
+
+static PyObject*
+DBSite_get_config(DBSiteObject* self, PyObject* args, PyObject* kwargs)
+{
+ int err = 0;
+ u_int32_t which, value;
+ static char* kwnames[] = { "which", NULL };
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i:get_config", kwnames,
+ &which))
+ return NULL;
+
+ CHECK_SITE_NOT_CLOSED(self);
+
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->site->get_config(self->site, which, &value);
+ MYDB_END_ALLOW_THREADS;
+
+ RETURN_IF_ERR();
+
+ if (value) {
+ Py_INCREF(Py_True);
+ return Py_True;
+ } else {
+ Py_INCREF(Py_False);
+ return Py_False;
+ }
+}
+
+static PyObject*
+DBSite_set_config(DBSiteObject* self, PyObject* args, PyObject* kwargs)
+{
+ int err = 0;
+ u_int32_t which, value;
+ PyObject *valueO;
+ static char* kwnames[] = { "which", "value", NULL };
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iO:set_config", kwnames,
+ &which, &valueO))
+ return NULL;
+
+ CHECK_SITE_NOT_CLOSED(self);
+
+ value = PyObject_IsTrue(valueO);
+
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->site->set_config(self->site, which, value);
+ MYDB_END_ALLOW_THREADS;
+
+ RETURN_IF_ERR();
+ RETURN_NONE();
+}
+#endif
+
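The DBSite object above wraps the DB_SITE handle introduced in Berkeley DB 5.2.
A hedged Python sketch of the intended replication-manager usage, with a
hypothetical host and port (DBEnv.repmgr_site() is the binding added further
down in this patch):

    from bsddb import db
    env = db.DBEnv()
    env.open("/tmp/repenv", db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_TXN |
                            db.DB_INIT_LOG | db.DB_INIT_LOCK | db.DB_INIT_REP |
                            db.DB_THREAD)
    site = env.repmgr_site("localhost", 5000)   # returns a DBSite object
    site.set_config(db.DB_LOCAL_SITE, True)     # mark this site as the local one
    print site.get_address()                    # ('localhost', 5000)
    print site.get_eid()                        # environment id assigned to the site
    site.close()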
/* --------------------------------------------------------------------- */
/* DBCursor methods */
@@ -4127,21 +4382,13 @@ DBC_pget(DBCursorObject* self, PyObject* args, PyObject *kwargs)
keyObj = NUMBER_FromLong(*(int *)key.data);
else
keyObj = Build_PyString(key.data, key.size);
-#if (PY_VERSION_HEX >= 0x02040000)
retval = PyTuple_Pack(3, keyObj, pkeyObj, dataObj);
-#else
- retval = Py_BuildValue("OOO", keyObj, pkeyObj, dataObj);
-#endif
Py_DECREF(keyObj);
FREE_DBT(key); /* 'make_key_dbt' could do a 'malloc' */
}
else /* return just the pkey and data */
{
-#if (PY_VERSION_HEX >= 0x02040000)
retval = PyTuple_Pack(2, pkeyObj, dataObj);
-#else
- retval = Py_BuildValue("OO", pkeyObj, dataObj);
-#endif
}
Py_DECREF(dataObj);
Py_DECREF(pkeyObj);
@@ -4656,6 +4903,12 @@ DBEnv_close_internal(DBEnvObject* self, int flags)
dummy = DBLogCursor_close_internal(self->children_logcursors);
Py_XDECREF(dummy);
}
+#if (DBVER >= 52)
+ while(self->children_sites) {
+ dummy = DBSite_close_internal(self->children_sites);
+ Py_XDECREF(dummy);
+ }
+#endif
}
self->closed = 1;
@@ -4735,17 +4988,16 @@ DBEnv_memp_stat(DBEnvObject* self, PyObject* args, PyObject *kwargs)
#define MAKE_ENTRY(name) _addIntToDict(d, #name, gsp->st_##name)
MAKE_ENTRY(gbytes);
+ MAKE_ENTRY(bytes);
MAKE_ENTRY(ncache);
#if (DBVER >= 46)
MAKE_ENTRY(max_ncache);
#endif
MAKE_ENTRY(regsize);
-#if (DBVER >= 43)
MAKE_ENTRY(mmapsize);
MAKE_ENTRY(maxopenfd);
MAKE_ENTRY(maxwrite);
MAKE_ENTRY(maxwrite_sleep);
-#endif
MAKE_ENTRY(map);
MAKE_ENTRY(cache_hit);
MAKE_ENTRY(cache_miss);
@@ -4828,13 +5080,12 @@ DBEnv_memp_stat(DBEnvObject* self, PyObject* args, PyObject *kwargs)
#undef MAKE_ENTRY
free(fsp);
- r = Py_BuildValue("(OO)", d, d2);
+ r = PyTuple_Pack(2, d, d2);
Py_DECREF(d);
Py_DECREF(d2);
return r;
}
-#if (DBVER >= 43)
static PyObject*
DBEnv_memp_stat_print(DBEnvObject* self, PyObject* args, PyObject *kwargs)
{
@@ -4854,7 +5105,6 @@ DBEnv_memp_stat_print(DBEnvObject* self, PyObject* args, PyObject *kwargs)
RETURN_IF_ERR();
RETURN_NONE();
}
-#endif
static PyObject*
@@ -4987,7 +5237,6 @@ DBEnv_set_encrypt(DBEnvObject* self, PyObject* args, PyObject* kwargs)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_encrypt_flags(DBEnvObject* self)
{
@@ -5025,7 +5274,6 @@ DBEnv_get_timeout(DBEnvObject* self, PyObject* args, PyObject* kwargs)
RETURN_IF_ERR();
return NUMBER_FromLong(timeout);
}
-#endif
static PyObject*
@@ -5064,7 +5312,6 @@ DBEnv_set_shm_key(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_shm_key(DBEnvObject* self)
{
@@ -5081,7 +5328,6 @@ DBEnv_get_shm_key(DBEnvObject* self)
return NUMBER_FromLong(shm_key);
}
-#endif
#if (DBVER >= 46)
static PyObject*
@@ -5170,7 +5416,6 @@ DBEnv_set_cachesize(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_cachesize(DBEnvObject* self)
{
@@ -5188,7 +5433,6 @@ DBEnv_get_cachesize(DBEnvObject* self)
return Py_BuildValue("(iii)", gbytes, bytes, ncache);
}
-#endif
static PyObject*
@@ -5208,7 +5452,6 @@ DBEnv_set_flags(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_flags(DBEnvObject* self)
{
@@ -5223,7 +5466,6 @@ DBEnv_get_flags(DBEnvObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(flags);
}
-#endif
#if (DBVER >= 47)
static PyObject*
@@ -5423,7 +5665,6 @@ DBEnv_set_data_dir(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_data_dirs(DBEnvObject* self)
{
@@ -5463,7 +5704,6 @@ DBEnv_get_data_dirs(DBEnvObject* self)
}
return tuple;
}
-#endif
#if (DBVER >= 44)
static PyObject*
@@ -5513,7 +5753,6 @@ DBEnv_set_lg_bsize(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_lg_bsize(DBEnvObject* self)
{
@@ -5528,7 +5767,6 @@ DBEnv_get_lg_bsize(DBEnvObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(lg_bsize);
}
-#endif
static PyObject*
DBEnv_set_lg_dir(DBEnvObject* self, PyObject* args)
@@ -5547,7 +5785,6 @@ DBEnv_set_lg_dir(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_lg_dir(DBEnvObject* self)
{
@@ -5562,7 +5799,6 @@ DBEnv_get_lg_dir(DBEnvObject* self)
RETURN_IF_ERR();
return PyBytes_FromString(dirp);
}
-#endif
static PyObject*
DBEnv_set_lg_max(DBEnvObject* self, PyObject* args)
@@ -5580,7 +5816,6 @@ DBEnv_set_lg_max(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_lg_max(DBEnvObject* self)
{
@@ -5595,8 +5830,6 @@ DBEnv_get_lg_max(DBEnvObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(lg_max);
}
-#endif
-
static PyObject*
DBEnv_set_lg_regionmax(DBEnvObject* self, PyObject* args)
@@ -5614,7 +5847,6 @@ DBEnv_set_lg_regionmax(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_lg_regionmax(DBEnvObject* self)
{
@@ -5629,7 +5861,6 @@ DBEnv_get_lg_regionmax(DBEnvObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(lg_regionmax);
}
-#endif
#if (DBVER >= 47)
static PyObject*
@@ -5680,7 +5911,6 @@ DBEnv_set_lk_detect(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_lk_detect(DBEnvObject* self)
{
@@ -5695,8 +5925,6 @@ DBEnv_get_lk_detect(DBEnvObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(lk_detect);
}
-#endif
-
#if (DBVER < 45)
static PyObject*
@@ -5734,7 +5962,6 @@ DBEnv_set_lk_max_locks(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_lk_max_locks(DBEnvObject* self)
{
@@ -5749,7 +5976,6 @@ DBEnv_get_lk_max_locks(DBEnvObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(lk_max);
}
-#endif
static PyObject*
DBEnv_set_lk_max_lockers(DBEnvObject* self, PyObject* args)
@@ -5767,7 +5993,6 @@ DBEnv_set_lk_max_lockers(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_lk_max_lockers(DBEnvObject* self)
{
@@ -5782,7 +6007,6 @@ DBEnv_get_lk_max_lockers(DBEnvObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(lk_max);
}
-#endif
static PyObject*
DBEnv_set_lk_max_objects(DBEnvObject* self, PyObject* args)
@@ -5800,7 +6024,6 @@ DBEnv_set_lk_max_objects(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_lk_max_objects(DBEnvObject* self)
{
@@ -5815,9 +6038,7 @@ DBEnv_get_lk_max_objects(DBEnvObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(lk_max);
}
-#endif
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_mp_mmapsize(DBEnvObject* self)
{
@@ -5832,8 +6053,6 @@ DBEnv_get_mp_mmapsize(DBEnvObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(mmapsize);
}
-#endif
-
static PyObject*
DBEnv_set_mp_mmapsize(DBEnvObject* self, PyObject* args)
@@ -5869,8 +6088,6 @@ DBEnv_set_tmp_dir(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_tmp_dir(DBEnvObject* self)
{
@@ -5887,8 +6104,6 @@ DBEnv_get_tmp_dir(DBEnvObject* self)
return PyBytes_FromString(dirpp);
}
-#endif
-
static PyObject*
DBEnv_txn_recover(DBEnvObject* self)
@@ -5899,7 +6114,7 @@ DBEnv_txn_recover(DBEnvObject* self)
DBTxnObject *txn;
#define PREPLIST_LEN 16
DB_PREPLIST preplist[PREPLIST_LEN];
-#if (DBVER < 48)
+#if (DBVER < 48) || (DBVER >= 52)
long retp;
#else
u_int32_t retp;
@@ -6003,8 +6218,6 @@ DBEnv_txn_checkpoint(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_tx_max(DBEnvObject* self)
{
@@ -6019,8 +6232,6 @@ DBEnv_get_tx_max(DBEnvObject* self)
RETURN_IF_ERR();
return PyLong_FromUnsignedLong(max);
}
-#endif
-
static PyObject*
DBEnv_set_tx_max(DBEnvObject* self, PyObject* args)
@@ -6038,8 +6249,6 @@ DBEnv_set_tx_max(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_tx_timestamp(DBEnvObject* self)
{
@@ -6054,7 +6263,6 @@ DBEnv_get_tx_timestamp(DBEnvObject* self)
RETURN_IF_ERR();
return NUMBER_FromLong(timestamp);
}
-#endif
static PyObject*
DBEnv_set_tx_timestamp(DBEnvObject* self, PyObject* args)
@@ -6204,7 +6412,6 @@ DBEnv_lsn_reset(DBEnvObject* self, PyObject* args, PyObject* kwargs)
#endif /* DBVER >= 4.4 */
-#if (DBVER >= 43)
static PyObject*
DBEnv_stat_print(DBEnvObject* self, PyObject* args, PyObject *kwargs)
{
@@ -6224,7 +6431,6 @@ DBEnv_stat_print(DBEnvObject* self, PyObject* args, PyObject *kwargs)
RETURN_IF_ERR();
RETURN_NONE();
}
-#endif
static PyObject*
@@ -6288,7 +6494,6 @@ DBEnv_log_stat(DBEnvObject* self, PyObject* args)
} /* DBEnv_log_stat */
-#if (DBVER >= 43)
static PyObject*
DBEnv_log_stat_print(DBEnvObject* self, PyObject* args, PyObject *kwargs)
{
@@ -6308,7 +6513,6 @@ DBEnv_log_stat_print(DBEnvObject* self, PyObject* args, PyObject *kwargs)
RETURN_IF_ERR();
RETURN_NONE();
}
-#endif
static PyObject*
@@ -6390,7 +6594,6 @@ DBEnv_lock_stat(DBEnvObject* self, PyObject* args)
return d;
}
-#if (DBVER >= 43)
static PyObject*
DBEnv_lock_stat_print(DBEnvObject* self, PyObject* args, PyObject *kwargs)
{
@@ -6410,7 +6613,6 @@ DBEnv_lock_stat_print(DBEnvObject* self, PyObject* args, PyObject *kwargs)
RETURN_IF_ERR();
RETURN_NONE();
}
-#endif
static PyObject*
@@ -6572,6 +6774,52 @@ DBEnv_log_archive(DBEnvObject* self, PyObject* args)
}
+#if (DBVER >= 52)
+static PyObject*
+DBEnv_repmgr_site(DBEnvObject* self, PyObject* args, PyObject *kwargs)
+{
+ int err;
+ DB_SITE* site;
+ char *host;
+ u_int port;
+ static char* kwnames[] = {"host", "port", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "si:repmgr_site", kwnames,
+ &host, &port))
+ return NULL;
+
+ CHECK_ENV_NOT_CLOSED(self);
+
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->db_env->repmgr_site(self->db_env, host, port, &site, 0);
+ MYDB_END_ALLOW_THREADS;
+ RETURN_IF_ERR();
+ return (PyObject*) newDBSiteObject(site, self);
+}
+
+static PyObject*
+DBEnv_repmgr_site_by_eid(DBEnvObject* self, PyObject* args, PyObject *kwargs)
+{
+ int err;
+ DB_SITE* site;
+ int eid;
+ static char* kwnames[] = {"eid", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i:repmgr_site_by_eid",
+ kwnames, &eid))
+ return NULL;
+
+ CHECK_ENV_NOT_CLOSED(self);
+
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->db_env->repmgr_site_by_eid(self->db_env, eid, &site);
+ MYDB_END_ALLOW_THREADS;
+ RETURN_IF_ERR();
+ return (PyObject*) newDBSiteObject(site, self);
+}
+#endif
+
+
#if (DBVER >= 44)
static PyObject*
DBEnv_mutex_stat(DBEnvObject* self, PyObject* args)
@@ -6640,7 +6888,6 @@ DBEnv_mutex_stat_print(DBEnvObject* self, PyObject* args, PyObject *kwargs)
#endif
-#if (DBVER >= 43)
static PyObject*
DBEnv_txn_stat_print(DBEnvObject* self, PyObject* args, PyObject *kwargs)
{
@@ -6662,7 +6909,6 @@ DBEnv_txn_stat_print(DBEnvObject* self, PyObject* args, PyObject *kwargs)
RETURN_IF_ERR();
RETURN_NONE();
}
-#endif
static PyObject*
@@ -6756,6 +7002,76 @@ DBEnv_set_private(DBEnvObject* self, PyObject* private_obj)
RETURN_NONE();
}
+#if (DBVER >= 47)
+static PyObject*
+DBEnv_set_intermediate_dir_mode(DBEnvObject* self, PyObject* args)
+{
+ int err;
+ const char *mode;
+
+ if (!PyArg_ParseTuple(args,"s:set_intermediate_dir_mode", &mode))
+ return NULL;
+
+ CHECK_ENV_NOT_CLOSED(self);
+
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->db_env->set_intermediate_dir_mode(self->db_env, mode);
+ MYDB_END_ALLOW_THREADS;
+ RETURN_IF_ERR();
+ RETURN_NONE();
+}
+
+static PyObject*
+DBEnv_get_intermediate_dir_mode(DBEnvObject* self)
+{
+ int err;
+ const char *mode;
+
+ CHECK_ENV_NOT_CLOSED(self);
+
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->db_env->get_intermediate_dir_mode(self->db_env, &mode);
+ MYDB_END_ALLOW_THREADS;
+ RETURN_IF_ERR();
+ return Py_BuildValue("s", mode);
+}
+#endif
+
+#if (DBVER < 47)
+static PyObject*
+DBEnv_set_intermediate_dir(DBEnvObject* self, PyObject* args)
+{
+ int err;
+ int mode;
+ u_int32_t flags;
+
+ if (!PyArg_ParseTuple(args, "iI:set_intermediate_dir", &mode, &flags))
+ return NULL;
+
+ CHECK_ENV_NOT_CLOSED(self);
+
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->db_env->set_intermediate_dir(self->db_env, mode, flags);
+ MYDB_END_ALLOW_THREADS;
+ RETURN_IF_ERR();
+ RETURN_NONE();
+}
+#endif
+
+static PyObject*
+DBEnv_get_open_flags(DBEnvObject* self)
+{
+ int err;
+ unsigned int flags;
+
+ CHECK_ENV_NOT_CLOSED(self);
+
+ MYDB_BEGIN_ALLOW_THREADS;
+ err = self->db_env->get_open_flags(self->db_env, &flags);
+ MYDB_END_ALLOW_THREADS;
+ RETURN_IF_ERR();
+ return NUMBER_FromLong(flags);
+}
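A short Python sketch of the DBEnv accessors added above; the permission string
follows the usual Berkeley DB "rwx" syntax and the directory is hypothetical
(the set/get_intermediate_dir_mode pair is only compiled in for DBVER >= 47):

    from bsddb import db
    env = db.DBEnv()
    env.set_intermediate_dir_mode("rwxrwx---")   # mode for auto-created directories
    env.open("/tmp/envhome", db.DB_CREATE | db.DB_INIT_MPOOL)
    print env.get_intermediate_dir_mode()        # 'rwxrwx---'
    print env.get_open_flags() & db.DB_CREATE    # non-zero: DB_CREATE was used at open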
#if (DBVER < 48)
static PyObject*
@@ -6781,7 +7097,6 @@ DBEnv_set_rpc_server(DBEnvObject* self, PyObject* args, PyObject* kwargs)
}
#endif
-#if (DBVER >= 43)
static PyObject*
DBEnv_set_mp_max_openfd(DBEnvObject* self, PyObject* args)
{
@@ -6855,7 +7170,6 @@ DBEnv_get_mp_max_write(DBEnvObject* self)
return Py_BuildValue("(ii)", maxwrite, (int)maxwrite_sleep);
}
-#endif
static PyObject*
@@ -6875,7 +7189,6 @@ DBEnv_set_verbose(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
-#if (DBVER >= 42)
static PyObject*
DBEnv_get_verbose(DBEnvObject* self, PyObject* args)
{
@@ -6893,7 +7206,6 @@ DBEnv_get_verbose(DBEnvObject* self, PyObject* args)
RETURN_IF_ERR();
return PyBool_FromLong(verbose);
}
-#endif
#if (DBVER >= 45)
static void
@@ -6975,9 +7287,7 @@ DBEnv_rep_process_message(DBEnvObject* self, PyObject* args)
PyObject *control_py, *rec_py;
DBT control, rec;
int envid;
-#if (DBVER >= 42)
DB_LSN lsn;
-#endif
if (!PyArg_ParseTuple(args, "OOi:rep_process_message", &control_py,
&rec_py, &envid))
@@ -6994,13 +7304,8 @@ DBEnv_rep_process_message(DBEnvObject* self, PyObject* args)
err = self->db_env->rep_process_message(self->db_env, &control, &rec,
envid, &lsn);
#else
-#if (DBVER >= 42)
err = self->db_env->rep_process_message(self->db_env, &control, &rec,
&envid, &lsn);
-#else
- err = self->db_env->rep_process_message(self->db_env, &control, &rec,
- &envid);
-#endif
#endif
MYDB_END_ALLOW_THREADS;
switch (err) {
@@ -7029,15 +7334,13 @@ DBEnv_rep_process_message(DBEnvObject* self, PyObject* args)
return r;
break;
}
-#if (DBVER >= 42)
case DB_REP_NOTPERM :
case DB_REP_ISPERM :
return Py_BuildValue("(i(ll))", err, lsn.file, lsn.offset);
break;
-#endif
}
RETURN_IF_ERR();
- return Py_BuildValue("(OO)", Py_None, Py_None);
+ return PyTuple_Pack(2, Py_None, Py_None);
}
static int
@@ -7062,11 +7365,7 @@ _DBEnv_rep_transportCallback(DB_ENV* db_env, const DBT* control, const DBT* rec,
b = PyBytes_FromStringAndSize(rec->data, rec->size);
args = Py_BuildValue(
-#if (PY_VERSION_HEX >= 0x02040000)
"(OOO(ll)iI)",
-#else
- "(OOO(ll)ii)",
-#endif
dbenv,
a, b,
lsn->file, lsn->offset, envid, flags);
@@ -7086,20 +7385,6 @@ _DBEnv_rep_transportCallback(DB_ENV* db_env, const DBT* control, const DBT* rec,
return ret;
}
-#if (DBVER <= 41)
-static int
-_DBEnv_rep_transportCallbackOLD(DB_ENV* db_env, const DBT* control, const DBT* rec,
- int envid, u_int32_t flags)
-{
- DB_LSN lsn;
-
- lsn.file = -1; /* Dummy values */
- lsn.offset = -1;
- return _DBEnv_rep_transportCallback(db_env, control, rec, &lsn, envid,
- flags);
-}
-#endif
-
static PyObject*
DBEnv_rep_set_transport(DBEnvObject* self, PyObject* args)
{
@@ -7120,13 +7405,8 @@ DBEnv_rep_set_transport(DBEnvObject* self, PyObject* args)
err = self->db_env->rep_set_transport(self->db_env, envid,
&_DBEnv_rep_transportCallback);
#else
-#if (DBVER >= 42)
err = self->db_env->set_rep_transport(self->db_env, envid,
&_DBEnv_rep_transportCallback);
-#else
- err = self->db_env->set_rep_transport(self->db_env, envid,
- &_DBEnv_rep_transportCallbackOLD);
-#endif
#endif
MYDB_END_ALLOW_THREADS;
RETURN_IF_ERR();
@@ -7166,11 +7446,7 @@ DBEnv_rep_get_request(DBEnvObject* self)
err = self->db_env->rep_get_request(self->db_env, &minimum, &maximum);
MYDB_END_ALLOW_THREADS;
RETURN_IF_ERR();
-#if (PY_VERSION_HEX >= 0x02040000)
return Py_BuildValue("II", minimum, maximum);
-#else
- return Py_BuildValue("ii", minimum, maximum);
-#endif
}
#endif
@@ -7422,13 +7698,8 @@ DBEnv_rep_set_clockskew(DBEnvObject* self, PyObject* args)
int err;
unsigned int fast, slow;
-#if (PY_VERSION_HEX >= 0x02040000)
if (!PyArg_ParseTuple(args,"II:rep_set_clockskew", &fast, &slow))
return NULL;
-#else
- if (!PyArg_ParseTuple(args,"ii:rep_set_clockskew", &fast, &slow))
- return NULL;
-#endif
CHECK_ENV_NOT_CLOSED(self);
@@ -7450,15 +7721,10 @@ DBEnv_rep_get_clockskew(DBEnvObject* self)
err = self->db_env->rep_get_clockskew(self->db_env, &fast, &slow);
MYDB_END_ALLOW_THREADS;
RETURN_IF_ERR();
-#if (PY_VERSION_HEX >= 0x02040000)
return Py_BuildValue("(II)", fast, slow);
-#else
- return Py_BuildValue("(ii)", fast, slow);
-#endif
}
#endif
-#if (DBVER >= 43)
static PyObject*
DBEnv_rep_stat_print(DBEnvObject* self, PyObject* args, PyObject *kwargs)
{
@@ -7478,7 +7744,6 @@ DBEnv_rep_stat_print(DBEnvObject* self, PyObject* args, PyObject *kwargs)
RETURN_IF_ERR();
RETURN_NONE();
}
-#endif
static PyObject*
DBEnv_rep_stat(DBEnvObject* self, PyObject* args, PyObject *kwargs)
@@ -7519,7 +7784,6 @@ DBEnv_rep_stat(DBEnvObject* self, PyObject* args, PyObject *kwargs)
MAKE_ENTRY(client_svc_req);
#endif
MAKE_ENTRY(dupmasters);
-#if (DBVER >= 43)
MAKE_ENTRY(egen);
MAKE_ENTRY(election_nvotes);
MAKE_ENTRY(startup_complete);
@@ -7528,7 +7792,6 @@ DBEnv_rep_stat(DBEnvObject* self, PyObject* args, PyObject *kwargs)
MAKE_ENTRY(pg_requested);
MAKE_ENTRY(next_pg);
MAKE_ENTRY(waiting_pg);
-#endif
MAKE_ENTRY(election_cur_winner);
MAKE_ENTRY(election_gen);
MAKE_DB_LSN_ENTRY(election_lsn);
@@ -7608,6 +7871,7 @@ DBEnv_repmgr_start(DBEnvObject* self, PyObject* args, PyObject*
RETURN_NONE();
}
+#if (DBVER < 52)
static PyObject*
DBEnv_repmgr_set_local_site(DBEnvObject* self, PyObject* args, PyObject*
kwargs)
@@ -7654,6 +7918,7 @@ DBEnv_repmgr_add_remote_site(DBEnvObject* self, PyObject* args, PyObject*
RETURN_IF_ERR();
return NUMBER_FromLong(eidp);
}
+#endif
static PyObject*
DBEnv_repmgr_set_ack_policy(DBEnvObject* self, PyObject* args)
@@ -7714,13 +7979,8 @@ DBEnv_repmgr_site_list(DBEnvObject* self)
free(listp);
return NULL;
}
-#if (PY_VERSION_HEX >= 0x02040000)
tuple=Py_BuildValue("(sII)", listp[countp].host,
listp[countp].port, listp[countp].status);
-#else
- tuple=Py_BuildValue("(sii)", listp[countp].host,
- listp[countp].port, listp[countp].status);
-#endif
if(!tuple) {
Py_DECREF(key);
Py_DECREF(stats);
@@ -7824,9 +8084,7 @@ static void _close_transaction_cursors(DBTxnObject* txn)
static void _promote_transaction_dbs_and_sequences(DBTxnObject *txn)
{
DBObject *db;
-#if (DBVER >= 43)
DBSequenceObject *dbs;
-#endif
while (txn->children_dbs) {
db=txn->children_dbs;
@@ -7842,7 +8100,6 @@ static void _promote_transaction_dbs_and_sequences(DBTxnObject *txn)
}
}
-#if (DBVER >= 43)
while (txn->children_sequences) {
dbs=txn->children_sequences;
EXTRACT_FROM_DOUBLE_LINKED_LIST_TXN(dbs);
@@ -7856,7 +8113,6 @@ static void _promote_transaction_dbs_and_sequences(DBTxnObject *txn)
dbs->txn=NULL;
}
}
-#endif
}
@@ -7953,12 +8209,10 @@ DBTxn_abort_discard_internal(DBTxnObject* self, int discard)
self->txn = NULL; /* this DB_TXN is no longer valid after this call */
_close_transaction_cursors(self);
-#if (DBVER >= 43)
while (self->children_sequences) {
dummy=DBSequence_close_internal(self->children_sequences,0,0);
Py_XDECREF(dummy);
}
-#endif
while (self->children_dbs) {
dummy=DB_close_internal(self->children_dbs, 0, 0);
Py_XDECREF(dummy);
@@ -8094,7 +8348,6 @@ DBTxn_get_name(DBTxnObject* self)
#endif
-#if (DBVER >= 43)
/* --------------------------------------------------------------------- */
/* DBSequence methods */
@@ -8444,7 +8697,6 @@ DBSequence_stat(DBSequenceObject* self, PyObject* args, PyObject* kwargs)
free(sp);
return dict_stat;
}
-#endif
/* --------------------------------------------------------------------- */
@@ -8482,70 +8734,45 @@ static PyMethodDef DB_methods[] = {
{"remove", (PyCFunction)DB_remove, METH_VARARGS|METH_KEYWORDS},
{"rename", (PyCFunction)DB_rename, METH_VARARGS},
{"set_bt_minkey", (PyCFunction)DB_set_bt_minkey, METH_VARARGS},
-#if (DBVER >= 42)
{"get_bt_minkey", (PyCFunction)DB_get_bt_minkey, METH_NOARGS},
-#endif
{"set_bt_compare", (PyCFunction)DB_set_bt_compare, METH_O},
{"set_cachesize", (PyCFunction)DB_set_cachesize, METH_VARARGS},
-#if (DBVER >= 42)
{"get_cachesize", (PyCFunction)DB_get_cachesize, METH_NOARGS},
-#endif
+ {"set_dup_compare", (PyCFunction)DB_set_dup_compare, METH_O},
{"set_encrypt", (PyCFunction)DB_set_encrypt, METH_VARARGS|METH_KEYWORDS},
-#if (DBVER >= 42)
{"get_encrypt_flags", (PyCFunction)DB_get_encrypt_flags, METH_NOARGS},
-#endif
-
{"set_flags", (PyCFunction)DB_set_flags, METH_VARARGS},
-#if (DBVER >= 42)
{"get_flags", (PyCFunction)DB_get_flags, METH_NOARGS},
-#endif
+ {"get_transactional", (PyCFunction)DB_get_transactional, METH_NOARGS},
{"set_h_ffactor", (PyCFunction)DB_set_h_ffactor, METH_VARARGS},
-#if (DBVER >= 42)
{"get_h_ffactor", (PyCFunction)DB_get_h_ffactor, METH_NOARGS},
-#endif
{"set_h_nelem", (PyCFunction)DB_set_h_nelem, METH_VARARGS},
-#if (DBVER >= 42)
{"get_h_nelem", (PyCFunction)DB_get_h_nelem, METH_NOARGS},
-#endif
{"set_lorder", (PyCFunction)DB_set_lorder, METH_VARARGS},
-#if (DBVER >= 42)
{"get_lorder", (PyCFunction)DB_get_lorder, METH_NOARGS},
-#endif
{"set_pagesize", (PyCFunction)DB_set_pagesize, METH_VARARGS},
-#if (DBVER >= 42)
{"get_pagesize", (PyCFunction)DB_get_pagesize, METH_NOARGS},
-#endif
{"set_re_delim", (PyCFunction)DB_set_re_delim, METH_VARARGS},
-#if (DBVER >= 42)
{"get_re_delim", (PyCFunction)DB_get_re_delim, METH_NOARGS},
-#endif
{"set_re_len", (PyCFunction)DB_set_re_len, METH_VARARGS},
-#if (DBVER >= 42)
{"get_re_len", (PyCFunction)DB_get_re_len, METH_NOARGS},
-#endif
{"set_re_pad", (PyCFunction)DB_set_re_pad, METH_VARARGS},
-#if (DBVER >= 42)
{"get_re_pad", (PyCFunction)DB_get_re_pad, METH_NOARGS},
-#endif
{"set_re_source", (PyCFunction)DB_set_re_source, METH_VARARGS},
-#if (DBVER >= 42)
{"get_re_source", (PyCFunction)DB_get_re_source, METH_NOARGS},
-#endif
{"set_q_extentsize",(PyCFunction)DB_set_q_extentsize, METH_VARARGS},
-#if (DBVER >= 42)
{"get_q_extentsize",(PyCFunction)DB_get_q_extentsize, METH_NOARGS},
-#endif
{"set_private", (PyCFunction)DB_set_private, METH_O},
{"get_private", (PyCFunction)DB_get_private, METH_NOARGS},
#if (DBVER >= 46)
{"set_priority", (PyCFunction)DB_set_priority, METH_VARARGS},
{"get_priority", (PyCFunction)DB_get_priority, METH_NOARGS},
#endif
+ {"get_dbname", (PyCFunction)DB_get_dbname, METH_NOARGS},
+ {"get_open_flags", (PyCFunction)DB_get_open_flags, METH_NOARGS},
{"stat", (PyCFunction)DB_stat, METH_VARARGS|METH_KEYWORDS},
-#if (DBVER >= 43)
{"stat_print", (PyCFunction)DB_stat_print,
METH_VARARGS|METH_KEYWORDS},
-#endif
{"sync", (PyCFunction)DB_sync, METH_VARARGS},
{"truncate", (PyCFunction)DB_truncate, METH_VARARGS|METH_KEYWORDS},
{"type", (PyCFunction)DB_get_type, METH_NOARGS},
@@ -8627,6 +8854,19 @@ static PyMethodDef DBLogCursor_methods[] = {
{NULL, NULL} /* sentinel */
};
+#if (DBVER >= 52)
+static PyMethodDef DBSite_methods[] = {
+ {"get_config", (PyCFunction)DBSite_get_config,
+ METH_VARARGS | METH_KEYWORDS},
+ {"set_config", (PyCFunction)DBSite_set_config,
+ METH_VARARGS | METH_KEYWORDS},
+ {"remove", (PyCFunction)DBSite_remove, METH_NOARGS},
+ {"get_eid", (PyCFunction)DBSite_get_eid, METH_NOARGS},
+ {"get_address", (PyCFunction)DBSite_get_address, METH_NOARGS},
+ {"close", (PyCFunction)DBSite_close, METH_NOARGS},
+ {NULL, NULL} /* sentinel */
+};
+#endif
static PyMethodDef DBEnv_methods[] = {
{"close", (PyCFunction)DBEnv_close, METH_VARARGS},
@@ -8639,32 +8879,24 @@ static PyMethodDef DBEnv_methods[] = {
{"get_thread_count", (PyCFunction)DBEnv_get_thread_count, METH_NOARGS},
#endif
{"set_encrypt", (PyCFunction)DBEnv_set_encrypt, METH_VARARGS|METH_KEYWORDS},
-#if (DBVER >= 42)
{"get_encrypt_flags", (PyCFunction)DBEnv_get_encrypt_flags, METH_NOARGS},
{"get_timeout", (PyCFunction)DBEnv_get_timeout,
METH_VARARGS|METH_KEYWORDS},
-#endif
{"set_timeout", (PyCFunction)DBEnv_set_timeout, METH_VARARGS|METH_KEYWORDS},
{"set_shm_key", (PyCFunction)DBEnv_set_shm_key, METH_VARARGS},
-#if (DBVER >= 42)
{"get_shm_key", (PyCFunction)DBEnv_get_shm_key, METH_NOARGS},
-#endif
#if (DBVER >= 46)
{"set_cache_max", (PyCFunction)DBEnv_set_cache_max, METH_VARARGS},
{"get_cache_max", (PyCFunction)DBEnv_get_cache_max, METH_NOARGS},
#endif
{"set_cachesize", (PyCFunction)DBEnv_set_cachesize, METH_VARARGS},
-#if (DBVER >= 42)
{"get_cachesize", (PyCFunction)DBEnv_get_cachesize, METH_NOARGS},
-#endif
{"memp_trickle", (PyCFunction)DBEnv_memp_trickle, METH_VARARGS},
{"memp_sync", (PyCFunction)DBEnv_memp_sync, METH_VARARGS},
{"memp_stat", (PyCFunction)DBEnv_memp_stat,
METH_VARARGS|METH_KEYWORDS},
-#if (DBVER >= 43)
{"memp_stat_print", (PyCFunction)DBEnv_memp_stat_print,
METH_VARARGS|METH_KEYWORDS},
-#endif
#if (DBVER >= 44)
{"mutex_set_max", (PyCFunction)DBEnv_mutex_set_max, METH_VARARGS},
{"mutex_get_max", (PyCFunction)DBEnv_mutex_get_max, METH_NOARGS},
@@ -8685,33 +8917,21 @@ static PyMethodDef DBEnv_methods[] = {
#endif
#endif
{"set_data_dir", (PyCFunction)DBEnv_set_data_dir, METH_VARARGS},
-#if (DBVER >= 42)
{"get_data_dirs", (PyCFunction)DBEnv_get_data_dirs, METH_NOARGS},
-#endif
-#if (DBVER >= 42)
{"get_flags", (PyCFunction)DBEnv_get_flags, METH_NOARGS},
-#endif
{"set_flags", (PyCFunction)DBEnv_set_flags, METH_VARARGS},
#if (DBVER >= 47)
{"log_set_config", (PyCFunction)DBEnv_log_set_config, METH_VARARGS},
{"log_get_config", (PyCFunction)DBEnv_log_get_config, METH_VARARGS},
#endif
{"set_lg_bsize", (PyCFunction)DBEnv_set_lg_bsize, METH_VARARGS},
-#if (DBVER >= 42)
{"get_lg_bsize", (PyCFunction)DBEnv_get_lg_bsize, METH_NOARGS},
-#endif
{"set_lg_dir", (PyCFunction)DBEnv_set_lg_dir, METH_VARARGS},
-#if (DBVER >= 42)
{"get_lg_dir", (PyCFunction)DBEnv_get_lg_dir, METH_NOARGS},
-#endif
{"set_lg_max", (PyCFunction)DBEnv_set_lg_max, METH_VARARGS},
-#if (DBVER >= 42)
{"get_lg_max", (PyCFunction)DBEnv_get_lg_max, METH_NOARGS},
-#endif
{"set_lg_regionmax",(PyCFunction)DBEnv_set_lg_regionmax, METH_VARARGS},
-#if (DBVER >= 42)
{"get_lg_regionmax",(PyCFunction)DBEnv_get_lg_regionmax, METH_NOARGS},
-#endif
#if (DBVER >= 44)
{"set_lg_filemode", (PyCFunction)DBEnv_set_lg_filemode, METH_VARARGS},
{"get_lg_filemode", (PyCFunction)DBEnv_get_lg_filemode, METH_NOARGS},
@@ -8721,47 +8941,29 @@ static PyMethodDef DBEnv_methods[] = {
{"get_lk_partitions", (PyCFunction)DBEnv_get_lk_partitions, METH_NOARGS},
#endif
{"set_lk_detect", (PyCFunction)DBEnv_set_lk_detect, METH_VARARGS},
-#if (DBVER >= 42)
{"get_lk_detect", (PyCFunction)DBEnv_get_lk_detect, METH_NOARGS},
-#endif
#if (DBVER < 45)
{"set_lk_max", (PyCFunction)DBEnv_set_lk_max, METH_VARARGS},
#endif
{"set_lk_max_locks", (PyCFunction)DBEnv_set_lk_max_locks, METH_VARARGS},
-#if (DBVER >= 42)
{"get_lk_max_locks", (PyCFunction)DBEnv_get_lk_max_locks, METH_NOARGS},
-#endif
{"set_lk_max_lockers", (PyCFunction)DBEnv_set_lk_max_lockers, METH_VARARGS},
-#if (DBVER >= 42)
{"get_lk_max_lockers", (PyCFunction)DBEnv_get_lk_max_lockers, METH_NOARGS},
-#endif
{"set_lk_max_objects", (PyCFunction)DBEnv_set_lk_max_objects, METH_VARARGS},
-#if (DBVER >= 42)
{"get_lk_max_objects", (PyCFunction)DBEnv_get_lk_max_objects, METH_NOARGS},
-#endif
-#if (DBVER >= 43)
{"stat_print", (PyCFunction)DBEnv_stat_print,
METH_VARARGS|METH_KEYWORDS},
-#endif
{"set_mp_mmapsize", (PyCFunction)DBEnv_set_mp_mmapsize, METH_VARARGS},
-#if (DBVER >= 42)
{"get_mp_mmapsize", (PyCFunction)DBEnv_get_mp_mmapsize, METH_NOARGS},
-#endif
{"set_tmp_dir", (PyCFunction)DBEnv_set_tmp_dir, METH_VARARGS},
-#if (DBVER >= 42)
{"get_tmp_dir", (PyCFunction)DBEnv_get_tmp_dir, METH_NOARGS},
-#endif
{"txn_begin", (PyCFunction)DBEnv_txn_begin, METH_VARARGS|METH_KEYWORDS},
{"txn_checkpoint", (PyCFunction)DBEnv_txn_checkpoint, METH_VARARGS},
{"txn_stat", (PyCFunction)DBEnv_txn_stat, METH_VARARGS},
-#if (DBVER >= 43)
{"txn_stat_print", (PyCFunction)DBEnv_txn_stat_print,
METH_VARARGS|METH_KEYWORDS},
-#endif
-#if (DBVER >= 42)
{"get_tx_max", (PyCFunction)DBEnv_get_tx_max, METH_NOARGS},
{"get_tx_timestamp", (PyCFunction)DBEnv_get_tx_timestamp, METH_NOARGS},
-#endif
{"set_tx_max", (PyCFunction)DBEnv_set_tx_max, METH_VARARGS},
{"set_tx_timestamp", (PyCFunction)DBEnv_set_tx_timestamp, METH_VARARGS},
{"lock_detect", (PyCFunction)DBEnv_lock_detect, METH_VARARGS},
@@ -8770,10 +8972,8 @@ static PyMethodDef DBEnv_methods[] = {
{"lock_id_free", (PyCFunction)DBEnv_lock_id_free, METH_VARARGS},
{"lock_put", (PyCFunction)DBEnv_lock_put, METH_VARARGS},
{"lock_stat", (PyCFunction)DBEnv_lock_stat, METH_VARARGS},
-#if (DBVER >= 43)
{"lock_stat_print", (PyCFunction)DBEnv_lock_stat_print,
METH_VARARGS|METH_KEYWORDS},
-#endif
{"log_cursor", (PyCFunction)DBEnv_log_cursor, METH_NOARGS},
{"log_file", (PyCFunction)DBEnv_log_file, METH_VARARGS},
#if (DBVER >= 44)
@@ -8783,10 +8983,8 @@ static PyMethodDef DBEnv_methods[] = {
{"log_archive", (PyCFunction)DBEnv_log_archive, METH_VARARGS},
{"log_flush", (PyCFunction)DBEnv_log_flush, METH_NOARGS},
{"log_stat", (PyCFunction)DBEnv_log_stat, METH_VARARGS},
-#if (DBVER >= 43)
{"log_stat_print", (PyCFunction)DBEnv_log_stat_print,
METH_VARARGS|METH_KEYWORDS},
-#endif
#if (DBVER >= 44)
{"fileid_reset", (PyCFunction)DBEnv_fileid_reset, METH_VARARGS|METH_KEYWORDS},
{"lsn_reset", (PyCFunction)DBEnv_lsn_reset, METH_VARARGS|METH_KEYWORDS},
@@ -8797,18 +8995,25 @@ static PyMethodDef DBEnv_methods[] = {
{"set_rpc_server", (PyCFunction)DBEnv_set_rpc_server,
METH_VARARGS|METH_KEYWORDS},
#endif
-#if (DBVER >= 43)
{"set_mp_max_openfd", (PyCFunction)DBEnv_set_mp_max_openfd, METH_VARARGS},
{"get_mp_max_openfd", (PyCFunction)DBEnv_get_mp_max_openfd, METH_NOARGS},
{"set_mp_max_write", (PyCFunction)DBEnv_set_mp_max_write, METH_VARARGS},
{"get_mp_max_write", (PyCFunction)DBEnv_get_mp_max_write, METH_NOARGS},
-#endif
{"set_verbose", (PyCFunction)DBEnv_set_verbose, METH_VARARGS},
-#if (DBVER >= 42)
- {"get_verbose", (PyCFunction)DBEnv_get_verbose, METH_VARARGS},
+ {"get_verbose", (PyCFunction)DBEnv_get_verbose, METH_VARARGS},
+ {"set_private", (PyCFunction)DBEnv_set_private, METH_O},
+ {"get_private", (PyCFunction)DBEnv_get_private, METH_NOARGS},
+ {"get_open_flags", (PyCFunction)DBEnv_get_open_flags, METH_NOARGS},
+#if (DBVER >= 47)
+ {"set_intermediate_dir_mode", (PyCFunction)DBEnv_set_intermediate_dir_mode,
+ METH_VARARGS},
+ {"get_intermediate_dir_mode", (PyCFunction)DBEnv_get_intermediate_dir_mode,
+ METH_NOARGS},
+#endif
+#if (DBVER < 47)
+ {"set_intermediate_dir", (PyCFunction)DBEnv_set_intermediate_dir,
+ METH_VARARGS},
#endif
- {"set_private", (PyCFunction)DBEnv_set_private, METH_O},
- {"get_private", (PyCFunction)DBEnv_get_private, METH_NOARGS},
{"rep_start", (PyCFunction)DBEnv_rep_start,
METH_VARARGS|METH_KEYWORDS},
{"rep_set_transport", (PyCFunction)DBEnv_rep_set_transport, METH_VARARGS},
@@ -8847,18 +9052,18 @@ static PyMethodDef DBEnv_methods[] = {
#endif
{"rep_stat", (PyCFunction)DBEnv_rep_stat,
METH_VARARGS|METH_KEYWORDS},
-#if (DBVER >= 43)
{"rep_stat_print", (PyCFunction)DBEnv_rep_stat_print,
METH_VARARGS|METH_KEYWORDS},
-#endif
#if (DBVER >= 45)
{"repmgr_start", (PyCFunction)DBEnv_repmgr_start,
METH_VARARGS|METH_KEYWORDS},
+#if (DBVER < 52)
{"repmgr_set_local_site", (PyCFunction)DBEnv_repmgr_set_local_site,
METH_VARARGS|METH_KEYWORDS},
{"repmgr_add_remote_site", (PyCFunction)DBEnv_repmgr_add_remote_site,
METH_VARARGS|METH_KEYWORDS},
+#endif
{"repmgr_set_ack_policy", (PyCFunction)DBEnv_repmgr_set_ack_policy,
METH_VARARGS},
{"repmgr_get_ack_policy", (PyCFunction)DBEnv_repmgr_get_ack_policy,
@@ -8872,6 +9077,12 @@ static PyMethodDef DBEnv_methods[] = {
{"repmgr_stat_print", (PyCFunction)DBEnv_repmgr_stat_print,
METH_VARARGS|METH_KEYWORDS},
#endif
+#if (DBVER >= 52)
+ {"repmgr_site", (PyCFunction)DBEnv_repmgr_site,
+ METH_VARARGS | METH_KEYWORDS},
+ {"repmgr_site_by_eid", (PyCFunction)DBEnv_repmgr_site_by_eid,
+ METH_VARARGS | METH_KEYWORDS},
+#endif
{NULL, NULL} /* sentinel */
};
@@ -8892,7 +9103,6 @@ static PyMethodDef DBTxn_methods[] = {
};
-#if (DBVER >= 43)
static PyMethodDef DBSequence_methods[] = {
{"close", (PyCFunction)DBSequence_close, METH_VARARGS},
{"get", (PyCFunction)DBSequence_get, METH_VARARGS|METH_KEYWORDS},
@@ -8912,7 +9122,6 @@ static PyMethodDef DBSequence_methods[] = {
METH_VARARGS|METH_KEYWORDS},
{NULL, NULL} /* sentinel */
};
-#endif
static PyObject*
@@ -8922,13 +9131,9 @@ DBEnv_db_home_get(DBEnvObject* self)
CHECK_ENV_NOT_CLOSED(self);
-#if (DBVER >= 42)
MYDB_BEGIN_ALLOW_THREADS;
self->db_env->get_home(self->db_env, &home);
MYDB_END_ALLOW_THREADS;
-#else
- home=self->db_env->db_home;
-#endif
if (home == NULL) {
RETURN_NONE();
@@ -9070,6 +9275,49 @@ statichere PyTypeObject DBLogCursor_Type = {
0, /*tp_members*/
};
+#if (DBVER >= 52)
+statichere PyTypeObject DBSite_Type = {
+#if (PY_VERSION_HEX < 0x03000000)
+ PyObject_HEAD_INIT(NULL)
+ 0, /*ob_size*/
+#else
+ PyVarObject_HEAD_INIT(NULL, 0)
+#endif
+ "DBSite", /*tp_name*/
+ sizeof(DBSiteObject), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ /* methods */
+ (destructor)DBSite_dealloc,/*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+#if (PY_VERSION_HEX < 0x03000000)
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_WEAKREFS, /* tp_flags */
+#else
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+#endif
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ offsetof(DBSiteObject, in_weakreflist), /* tp_weaklistoffset */
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ DBSite_methods, /*tp_methods*/
+ 0, /*tp_members*/
+};
+#endif
statichere PyTypeObject DBEnv_Type = {
#if (PY_VERSION_HEX < 0x03000000)
@@ -9195,7 +9443,6 @@ statichere PyTypeObject DBLock_Type = {
offsetof(DBLockObject, in_weakreflist), /* tp_weaklistoffset */
};
-#if (DBVER >= 43)
statichere PyTypeObject DBSequence_Type = {
#if (PY_VERSION_HEX < 0x03000000)
PyObject_HEAD_INIT(NULL)
@@ -9237,7 +9484,6 @@ statichere PyTypeObject DBSequence_Type = {
DBSequence_methods, /*tp_methods*/
0, /*tp_members*/
};
-#endif
/* --------------------------------------------------------------------- */
/* Module-level functions */
@@ -9271,7 +9517,6 @@ DBEnv_construct(PyObject* self, PyObject* args)
return (PyObject* )newDBEnvObject(flags);
}
-#if (DBVER >= 43)
static PyObject*
DBSequence_construct(PyObject* self, PyObject* args, PyObject* kwargs)
{
@@ -9287,7 +9532,6 @@ DBSequence_construct(PyObject* self, PyObject* args, PyObject* kwargs)
}
return (PyObject* )newDBSequenceObject((DBObject*)dbobj, flags);
}
-#endif
static char bsddb_version_doc[] =
"Returns a tuple of major, minor, and patch release numbers of the\n\
@@ -9298,19 +9542,35 @@ bsddb_version(PyObject* self)
{
int major, minor, patch;
+ /* This should be instantaneous, no need to release the GIL */
db_version(&major, &minor, &patch);
return Py_BuildValue("(iii)", major, minor, patch);
}
+#if (DBVER >= 50)
+static PyObject*
+bsddb_version_full(PyObject* self)
+{
+ char *version_string;
+ int family, release, major, minor, patch;
+
+ /* This should be instantaneous, no need to release the GIL */
+ version_string = db_full_version(&family, &release, &major, &minor, &patch);
+ return Py_BuildValue("(siiiii)",
+ version_string, family, release, major, minor, patch);
+}
+#endif
+
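A small Python sketch of the difference between the existing version() call and
the full_version() call added above (full_version() is only available when built
against Berkeley DB >= 5.0; the exact values depend on the library in use):

    from bsddb import db
    print db.version()        # (major, minor, patch), e.g. (5, 3, 21)
    print db.full_version()   # (version_string, family, release, major, minor, patch)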
/* List of functions defined in the module */
static PyMethodDef bsddb_methods[] = {
{"DB", (PyCFunction)DB_construct, METH_VARARGS | METH_KEYWORDS },
{"DBEnv", (PyCFunction)DBEnv_construct, METH_VARARGS},
-#if (DBVER >= 43)
{"DBSequence", (PyCFunction)DBSequence_construct, METH_VARARGS | METH_KEYWORDS },
-#endif
{"version", (PyCFunction)bsddb_version, METH_NOARGS, bsddb_version_doc},
+#if (DBVER >= 50)
+ {"full_version", (PyCFunction)bsddb_version_full, METH_NOARGS},
+#endif
{NULL, NULL} /* sentinel */
};
@@ -9328,6 +9588,11 @@ static BSDDB_api bsddb_api;
*/
#define ADD_INT(dict, NAME) _addIntToDict(dict, #NAME, NAME)
+/*
+** The module can be renamed at import time, so the buffer allocated for
+** the name must be big enough, and every use of the name must go through
+** this particular string.
+*/
#define MODULE_NAME_MAX_LEN 11
static char _bsddbModuleName[MODULE_NAME_MAX_LEN+1] = "_bsddb";
@@ -9378,8 +9643,9 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
|| (PyType_Ready(&DBEnv_Type) < 0)
|| (PyType_Ready(&DBTxn_Type) < 0)
|| (PyType_Ready(&DBLock_Type) < 0)
-#if (DBVER >= 43)
|| (PyType_Ready(&DBSequence_Type) < 0)
+#if (DBVER >= 52)
+ || (PyType_Ready(&DBSite_Type) < 0)
#endif
) {
#if (PY_VERSION_HEX < 0x03000000)
@@ -9389,11 +9655,6 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
#endif
}
-#if defined(WITH_THREAD) && !defined(MYDB_USE_GILSTATE)
- /* Save the current interpreter, so callbacks can do the right thing. */
- _db_interpreterState = PyThreadState_GET()->interp;
-#endif
-
/* Create the module and add the functions */
#if (PY_VERSION_HEX < 0x03000000)
m = Py_InitModule(_bsddbModuleName, bsddb_methods);
@@ -9428,13 +9689,7 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_MAX_RECORDS);
#if (DBVER < 48)
-#if (DBVER >= 42)
ADD_INT(d, DB_RPCCLIENT);
-#else
- ADD_INT(d, DB_CLIENT);
- /* allow apps to be written using DB_RPCCLIENT on older Berkeley DB */
- _addIntToDict(d, "DB_RPCCLIENT", DB_CLIENT);
-#endif
#endif
#if (DBVER < 48)
@@ -9477,6 +9732,14 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_TXN_SYNC);
ADD_INT(d, DB_TXN_NOWAIT);
+#if (DBVER >= 51)
+ ADD_INT(d, DB_TXN_BULK);
+#endif
+
+#if (DBVER >= 48)
+ ADD_INT(d, DB_CURSOR_BULK);
+#endif
+
#if (DBVER >= 46)
ADD_INT(d, DB_TXN_WAIT);
#endif
@@ -9511,9 +9774,7 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_LOCK_MINWRITE);
ADD_INT(d, DB_LOCK_EXPIRE);
-#if (DBVER >= 43)
ADD_INT(d, DB_LOCK_MAXWRITE);
-#endif
_addIntToDict(d, "DB_LOCK_CONFLICT", 0);
@@ -9549,9 +9810,6 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_LOCK_UPGRADE);
ADD_INT(d, DB_LSTAT_ABORTED);
-#if (DBVER < 43)
- ADD_INT(d, DB_LSTAT_ERR);
-#endif
ADD_INT(d, DB_LSTAT_FREE);
ADD_INT(d, DB_LSTAT_HELD);
@@ -9561,9 +9819,7 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_ARCH_ABS);
ADD_INT(d, DB_ARCH_DATA);
ADD_INT(d, DB_ARCH_LOG);
-#if (DBVER >= 42)
ADD_INT(d, DB_ARCH_REMOVE);
-#endif
ADD_INT(d, DB_BTREE);
ADD_INT(d, DB_HASH);
@@ -9578,9 +9834,7 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_REVSPLITOFF);
ADD_INT(d, DB_SNAPSHOT);
-#if (DBVER >= 43)
ADD_INT(d, DB_INORDER);
-#endif
ADD_INT(d, DB_JOIN_NOSORT);
@@ -9591,9 +9845,6 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_CACHED_COUNTS);
#endif
-#if (DBVER <= 41)
- ADD_INT(d, DB_COMMIT);
-#endif
ADD_INT(d, DB_CONSUME);
ADD_INT(d, DB_CONSUME_WAIT);
ADD_INT(d, DB_CURRENT);
@@ -9651,8 +9902,10 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_LOCK_DEADLOCK);
ADD_INT(d, DB_LOCK_NOTGRANTED);
ADD_INT(d, DB_NOSERVER);
+#if (DBVER < 52)
ADD_INT(d, DB_NOSERVER_HOME);
ADD_INT(d, DB_NOSERVER_ID);
+#endif
ADD_INT(d, DB_NOTFOUND);
ADD_INT(d, DB_OLD_VERSION);
ADD_INT(d, DB_RUNRECOVERY);
@@ -9665,13 +9918,14 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_YIELDCPU);
ADD_INT(d, DB_PANIC_ENVIRONMENT);
ADD_INT(d, DB_NOPANIC);
-
ADD_INT(d, DB_OVERWRITE);
-#if (DBVER >= 43)
ADD_INT(d, DB_STAT_SUBSYSTEM);
ADD_INT(d, DB_STAT_MEMP_HASH);
-#endif
+ ADD_INT(d, DB_STAT_LOCK_CONF);
+ ADD_INT(d, DB_STAT_LOCK_LOCKERS);
+ ADD_INT(d, DB_STAT_LOCK_OBJECTS);
+ ADD_INT(d, DB_STAT_LOCK_PARAMS);
#if (DBVER >= 48)
ADD_INT(d, DB_OVERWRITE_DUP);
@@ -9690,7 +9944,6 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_EID_INVALID);
ADD_INT(d, DB_EID_BROADCAST);
-#if (DBVER >= 42)
ADD_INT(d, DB_TIME_NOTGRANTED);
ADD_INT(d, DB_TXN_NOT_DURABLE);
ADD_INT(d, DB_TXN_WRITE_NOSYNC);
@@ -9698,9 +9951,8 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_INIT_REP);
ADD_INT(d, DB_ENCRYPT);
ADD_INT(d, DB_CHKSUM);
-#endif
-#if (DBVER >= 42) && (DBVER < 47)
+#if (DBVER < 47)
ADD_INT(d, DB_LOG_AUTOREMOVE);
ADD_INT(d, DB_DIRECT_LOG);
#endif
@@ -9733,6 +9985,20 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_VERB_REPLICATION);
ADD_INT(d, DB_VERB_WAITSFOR);
+#if (DBVER >= 50)
+ ADD_INT(d, DB_VERB_REP_SYSTEM);
+#endif
+
+#if (DBVER >= 47)
+ ADD_INT(d, DB_VERB_REP_ELECT);
+ ADD_INT(d, DB_VERB_REP_LEASE);
+ ADD_INT(d, DB_VERB_REP_MISC);
+ ADD_INT(d, DB_VERB_REP_MSGS);
+ ADD_INT(d, DB_VERB_REP_SYNC);
+ ADD_INT(d, DB_VERB_REPMGR_CONNFAIL);
+ ADD_INT(d, DB_VERB_REPMGR_MISC);
+#endif
+
#if (DBVER >= 45)
ADD_INT(d, DB_EVENT_PANIC);
ADD_INT(d, DB_EVENT_REP_CLIENT);
@@ -9748,16 +10014,48 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_EVENT_WRITE_FAILED);
#endif
+#if (DBVER >= 50)
+ ADD_INT(d, DB_REPMGR_CONF_ELECTIONS);
+ ADD_INT(d, DB_EVENT_REP_MASTER_FAILURE);
+ ADD_INT(d, DB_EVENT_REP_DUPMASTER);
+ ADD_INT(d, DB_EVENT_REP_ELECTION_FAILED);
+#endif
+#if (DBVER >= 48)
+ ADD_INT(d, DB_EVENT_REG_ALIVE);
+ ADD_INT(d, DB_EVENT_REG_PANIC);
+#endif
+
+#if (DBVER >=52)
+ ADD_INT(d, DB_EVENT_REP_SITE_ADDED);
+ ADD_INT(d, DB_EVENT_REP_SITE_REMOVED);
+ ADD_INT(d, DB_EVENT_REP_LOCAL_SITE_REMOVED);
+ ADD_INT(d, DB_EVENT_REP_CONNECT_BROKEN);
+ ADD_INT(d, DB_EVENT_REP_CONNECT_ESTD);
+ ADD_INT(d, DB_EVENT_REP_CONNECT_TRY_FAILED);
+ ADD_INT(d, DB_EVENT_REP_INIT_DONE);
+
+ ADD_INT(d, DB_MEM_LOCK);
+ ADD_INT(d, DB_MEM_LOCKOBJECT);
+ ADD_INT(d, DB_MEM_LOCKER);
+ ADD_INT(d, DB_MEM_LOGID);
+ ADD_INT(d, DB_MEM_TRANSACTION);
+ ADD_INT(d, DB_MEM_THREAD);
+
+ ADD_INT(d, DB_BOOTSTRAP_HELPER);
+ ADD_INT(d, DB_GROUP_CREATOR);
+ ADD_INT(d, DB_LEGACY);
+ ADD_INT(d, DB_LOCAL_SITE);
+ ADD_INT(d, DB_REPMGR_PEER);
+#endif
+
ADD_INT(d, DB_REP_DUPMASTER);
ADD_INT(d, DB_REP_HOLDELECTION);
#if (DBVER >= 44)
ADD_INT(d, DB_REP_IGNORE);
ADD_INT(d, DB_REP_JOIN_FAILURE);
#endif
-#if (DBVER >= 42)
ADD_INT(d, DB_REP_ISPERM);
ADD_INT(d, DB_REP_NOTPERM);
-#endif
ADD_INT(d, DB_REP_NEWSITE);
ADD_INT(d, DB_REP_MASTER);
@@ -9766,7 +10064,13 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_REP_PERMANENT);
#if (DBVER >= 44)
+#if (DBVER >= 50)
+ ADD_INT(d, DB_REP_CONF_AUTOINIT);
+#else
ADD_INT(d, DB_REP_CONF_NOAUTOINIT);
+#endif /* 5.0 */
+#endif /* 4.4 */
+#if (DBVER >= 44)
ADD_INT(d, DB_REP_CONF_DELAYCLIENT);
ADD_INT(d, DB_REP_CONF_BULK);
ADD_INT(d, DB_REP_CONF_NOWAIT);
@@ -9774,9 +10078,7 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_REP_REREQUEST);
#endif
-#if (DBVER >= 42)
ADD_INT(d, DB_REP_NOBUFFER);
-#endif
#if (DBVER >= 46)
ADD_INT(d, DB_REP_LEASE_EXPIRED);
@@ -9819,14 +10121,34 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_STAT_ALL);
#endif
-#if (DBVER >= 43)
+#if (DBVER >= 51)
+ ADD_INT(d, DB_REPMGR_ACKS_ALL_AVAILABLE);
+#endif
+
+#if (DBVER >= 48)
+ ADD_INT(d, DB_REP_CONF_INMEM);
+#endif
+
+ ADD_INT(d, DB_TIMEOUT);
+
+#if (DBVER >= 50)
+ ADD_INT(d, DB_FORCESYNC);
+#endif
+
+#if (DBVER >= 48)
+ ADD_INT(d, DB_FAILCHK);
+#endif
+
+#if (DBVER >= 51)
+ ADD_INT(d, DB_HOTBACKUP_IN_PROGRESS);
+#endif
+
ADD_INT(d, DB_BUFFER_SMALL);
ADD_INT(d, DB_SEQ_DEC);
ADD_INT(d, DB_SEQ_INC);
ADD_INT(d, DB_SEQ_WRAP);
-#endif
-#if (DBVER >= 43) && (DBVER < 47)
+#if (DBVER < 47)
ADD_INT(d, DB_LOG_INMEMORY);
ADD_INT(d, DB_DSYNC_LOG);
#endif
@@ -9856,6 +10178,10 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
ADD_INT(d, DB_SET_LOCK_TIMEOUT);
ADD_INT(d, DB_SET_TXN_TIMEOUT);
+#if (DBVER >= 48)
+ ADD_INT(d, DB_SET_REG_TIMEOUT);
+#endif
+
/* The exception name must be correct for pickled exception *
* objects to unpickle properly. */
#ifdef PYBSDDB_STANDALONE /* different value needed for standalone pybsddb */
@@ -9912,8 +10238,10 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
MAKE_EX(DBRunRecoveryError);
MAKE_EX(DBVerifyBadError);
MAKE_EX(DBNoServerError);
+#if (DBVER < 52)
MAKE_EX(DBNoServerHomeError);
MAKE_EX(DBNoServerIDError);
+#endif
MAKE_EX(DBPageNotFoundError);
MAKE_EX(DBSecondaryBadError);
@@ -9927,9 +10255,7 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
MAKE_EX(DBNoSuchFileError);
MAKE_EX(DBPermissionsError);
-#if (DBVER >= 42)
MAKE_EX(DBRepHandleDeadError);
-#endif
#if (DBVER >= 44)
MAKE_EX(DBRepLockoutError);
#endif
@@ -9947,27 +10273,30 @@ PyMODINIT_FUNC PyInit__bsddb(void) /* Note the two underscores */
#undef MAKE_EX
/* Initialise the C API structure and add it to the module */
+ bsddb_api.api_version = PYBSDDB_API_VERSION;
bsddb_api.db_type = &DB_Type;
bsddb_api.dbcursor_type = &DBCursor_Type;
bsddb_api.dblogcursor_type = &DBLogCursor_Type;
bsddb_api.dbenv_type = &DBEnv_Type;
bsddb_api.dbtxn_type = &DBTxn_Type;
bsddb_api.dblock_type = &DBLock_Type;
-#if (DBVER >= 43)
bsddb_api.dbsequence_type = &DBSequence_Type;
-#endif
bsddb_api.makeDBError = makeDBError;
/*
- ** Capsules exist from Python 3.1, but I
- ** don't want to break the API compatibility
- ** for already published Python versions.
+    ** Capsules exist from Python 2.7 and 3.1 onwards. Python 3.0 is no
+    ** longer supported, so the version check below reduces to:
+ ** #if (PY_VERSION_HEX < ((PY_MAJOR_VERSION < 3) ? 0x02070000 : 0x03020000))
*/
-#if (PY_VERSION_HEX < 0x03020000)
+#if (PY_VERSION_HEX < 0x02070000)
py_api = PyCObject_FromVoidPtr((void*)&bsddb_api, NULL);
#else
{
- char py_api_name[250];
+ /*
+        ** The data must outlive this call, hence the static definition.
+        ** The buffer must be big enough for the module name plus the ".api" suffix.
+ */
+ static char py_api_name[MODULE_NAME_MAX_LEN+10];
strcpy(py_api_name, _bsddbModuleName);
strcat(py_api_name, ".api");
diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c
index ccc3043..bcdffcb 100644
--- a/Modules/_collectionsmodule.c
+++ b/Modules/_collectionsmodule.c
@@ -8,9 +8,13 @@
*/
/* The block length may be set to any number over 1. Larger numbers
- * reduce the number of calls to the memory allocator but take more
- * memory. Ideally, BLOCKLEN should be set with an eye to the
- * length of a cache line.
+ * reduce the number of calls to the memory allocator, give faster
+ * indexing and rotation, and reduce the link::data overhead ratio.
+ *
+ * Ideally, the block length will be set to two less than some
+ * multiple of the cache-line length (so that the full block
+ * including the leftlink and rightlink will fit neatly into
+ * cache lines).
*/
#define BLOCKLEN 62
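A quick arithmetic check of the cache-line reasoning in the comment above,
assuming 8-byte pointers and 64-byte cache lines (both are assumptions, not
stated by the patch):

    # 62 data slots plus the leftlink and rightlink pointers give 64 pointers,
    # i.e. 512 bytes per block on a 64-bit build: exactly 8 full cache lines.
    assert (62 + 2) * 8 == 512
    assert 512 % 64 == 0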
@@ -46,9 +50,9 @@
*/
typedef struct BLOCK {
- struct BLOCK *leftlink;
- struct BLOCK *rightlink;
PyObject *data[BLOCKLEN];
+ struct BLOCK *rightlink;
+ struct BLOCK *leftlink;
} block;
#define MAXFREEBLOCKS 10
@@ -58,13 +62,8 @@ static block *freeblocks[MAXFREEBLOCKS];
static block *
newblock(block *leftlink, block *rightlink, Py_ssize_t len) {
block *b;
- /* To prevent len from overflowing PY_SSIZE_T_MAX on 64-bit machines, we
- * refuse to allocate new blocks if the current len is dangerously
- * close. There is some extra margin to prevent spurious arithmetic
- * overflows at various places. The following check ensures that
- * the blocks allocated to the deque, in the worst case, can only
- * have PY_SSIZE_T_MAX-2 entries in total.
- */
+ /* To prevent len from overflowing PY_SSIZE_T_MAX on 32-bit machines, we
+ * refuse to allocate new blocks if the current len is nearing overflow. */
if (len >= PY_SSIZE_T_MAX - 2*BLOCKLEN) {
PyErr_SetString(PyExc_OverflowError,
"cannot add more blocks to the deque");
@@ -103,8 +102,8 @@ typedef struct {
Py_ssize_t leftindex; /* in range(BLOCKLEN) */
Py_ssize_t rightindex; /* in range(BLOCKLEN) */
Py_ssize_t len;
- Py_ssize_t maxlen;
long state; /* incremented whenever the indices move */
+ Py_ssize_t maxlen;
PyObject *weakreflist; /* List of weak references */
} dequeobject;
@@ -413,10 +412,9 @@ deque_inplace_concat(dequeobject *deque, PyObject *other)
static int
_deque_rotate(dequeobject *deque, Py_ssize_t n)
{
- Py_ssize_t i, len=deque->len, halflen=(len+1)>>1;
- PyObject *item, *rv;
+ Py_ssize_t m, len=deque->len, halflen=len>>1;
- if (len == 0)
+ if (len <= 1)
return 0;
if (n > halflen || n < -halflen) {
n %= len;
@@ -425,24 +423,79 @@ _deque_rotate(dequeobject *deque, Py_ssize_t n)
else if (n < -halflen)
n += len;
}
+ assert(len > 1);
+ assert(-halflen <= n && n <= halflen);
- for (i=0 ; i<n ; i++) {
- item = deque_pop(deque, NULL);
- assert (item != NULL);
- rv = deque_appendleft(deque, item);
- Py_DECREF(item);
- if (rv == NULL)
- return -1;
- Py_DECREF(rv);
+ deque->state++;
+ while (n > 0) {
+ if (deque->leftindex == 0) {
+ block *b = newblock(NULL, deque->leftblock, len);
+ if (b == NULL)
+ return -1;
+ assert(deque->leftblock->leftlink == NULL);
+ deque->leftblock->leftlink = b;
+ deque->leftblock = b;
+ deque->leftindex = BLOCKLEN;
+ }
+ assert(deque->leftindex > 0);
+
+ m = n;
+ if (m > deque->rightindex + 1)
+ m = deque->rightindex + 1;
+ if (m > deque->leftindex)
+ m = deque->leftindex;
+ assert (m > 0 && m <= len);
+ memcpy(&deque->leftblock->data[deque->leftindex - m],
+ &deque->rightblock->data[deque->rightindex + 1 - m],
+ m * sizeof(PyObject *));
+ deque->rightindex -= m;
+ deque->leftindex -= m;
+ n -= m;
+
+ if (deque->rightindex == -1) {
+ block *prevblock = deque->rightblock->leftlink;
+ assert(deque->rightblock != NULL);
+ assert(deque->leftblock != deque->rightblock);
+ freeblock(deque->rightblock);
+ prevblock->rightlink = NULL;
+ deque->rightblock = prevblock;
+ deque->rightindex = BLOCKLEN - 1;
+ }
}
- for (i=0 ; i>n ; i--) {
- item = deque_popleft(deque, NULL);
- assert (item != NULL);
- rv = deque_append(deque, item);
- Py_DECREF(item);
- if (rv == NULL)
- return -1;
- Py_DECREF(rv);
+ while (n < 0) {
+ if (deque->rightindex == BLOCKLEN - 1) {
+ block *b = newblock(deque->rightblock, NULL, len);
+ if (b == NULL)
+ return -1;
+ assert(deque->rightblock->rightlink == NULL);
+ deque->rightblock->rightlink = b;
+ deque->rightblock = b;
+ deque->rightindex = -1;
+ }
+ assert (deque->rightindex < BLOCKLEN - 1);
+
+ m = -n;
+ if (m > BLOCKLEN - deque->leftindex)
+ m = BLOCKLEN - deque->leftindex;
+ if (m > BLOCKLEN - 1 - deque->rightindex)
+ m = BLOCKLEN - 1 - deque->rightindex;
+ assert (m > 0 && m <= len);
+ memcpy(&deque->rightblock->data[deque->rightindex + 1],
+ &deque->leftblock->data[deque->leftindex],
+ m * sizeof(PyObject *));
+ deque->leftindex += m;
+ deque->rightindex += m;
+ n += m;
+
+ if (deque->leftindex == BLOCKLEN) {
+ block *nextblock = deque->leftblock->rightlink;
+ assert(deque->leftblock != deque->rightblock);
+ freeblock(deque->leftblock);
+ assert(nextblock != NULL);
+ nextblock->leftlink = NULL;
+ deque->leftblock = nextblock;
+ deque->leftindex = 0;
+ }
}
return 0;
}
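The rewritten rotation above moves pointers in bulk with memcpy() instead of
popping and re-appending one item at a time; each pass moves m items, where m
is clamped by the remaining rotation count, the items available at the source
end, and the free room at the destination end.  A standalone sketch of that
clamp, not the deque code itself:

    #include <stdio.h>

    static long
    rotate_chunk(long n, long src_avail, long dst_room)
    {
        long m = n;            /* at most the remaining rotation count */
        if (m > src_avail)
            m = src_avail;     /* no more than the source block still holds */
        if (m > dst_room)
            m = dst_room;      /* no more than the destination block can take */
        return m;
    }

    int main(void)
    {
        /* e.g. 10 rotations left, 4 items at the source end, 7 free slots */
        printf("chunk = %ld\n", rotate_chunk(10, 4, 7));   /* prints 4 */
        return 0;
    }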
@@ -588,7 +641,7 @@ deque_remove(dequeobject *deque, PyObject *value)
PyDoc_STRVAR(remove_doc,
"D.remove(value) -- remove first occurrence of value.");
-static int
+static void
deque_clear(dequeobject *deque)
{
PyObject *item;
@@ -601,7 +654,6 @@ deque_clear(dequeobject *deque)
assert(deque->leftblock == deque->rightblock &&
deque->leftindex - 1 == deque->rightindex &&
deque->len == 0);
- return 0;
}
static PyObject *
@@ -704,10 +756,7 @@ deque_ass_item(dequeobject *deque, Py_ssize_t i, PyObject *v)
static PyObject *
deque_clearmethod(dequeobject *deque)
{
- int rv;
-
- rv = deque_clear(deque);
- assert (rv != -1);
+ deque_clear(deque);
Py_RETURN_NONE;
}
@@ -991,6 +1040,23 @@ deque_init(dequeobject *deque, PyObject *args, PyObject *kwdargs)
}
static PyObject *
+deque_sizeof(dequeobject *deque, void *unused)
+{
+ Py_ssize_t res;
+ Py_ssize_t blocks;
+
+ res = sizeof(dequeobject);
+ blocks = (deque->leftindex + deque->len + BLOCKLEN - 1) / BLOCKLEN;
+ assert(deque->leftindex + deque->len - 1 ==
+ (blocks - 1) * BLOCKLEN + deque->rightindex);
+ res += blocks * sizeof(block);
+ return PyLong_FromSsize_t(res);
+}
+
+PyDoc_STRVAR(sizeof_doc,
+"D.__sizeof__() -- size of D in memory, in bytes");
+
+static PyObject *
deque_get_maxlen(dequeobject *deque)
{
if (deque->maxlen == -1)
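The __sizeof__ method added above counts whole blocks with a ceiling division
over the occupied span (leftindex + len).  A small worked example with
hypothetical index values:

    #include <stdio.h>

    #define BLOCKLEN 62

    int main(void)
    {
        long leftindex = 60;   /* hypothetical: first item near the end of a block */
        long len = 10;         /* hypothetical: ten items in the deque */
        /* ceil((leftindex + len) / BLOCKLEN): the items spill into a second block */
        long blocks = (leftindex + len + BLOCKLEN - 1) / BLOCKLEN;
        printf("blocks = %ld\n", blocks);   /* prints 2 */
        return 0;
    }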
@@ -1053,12 +1119,14 @@ static PyMethodDef deque_methods[] = {
{"reverse", (PyCFunction)deque_reverse,
METH_NOARGS, reverse_doc},
{"rotate", (PyCFunction)deque_rotate,
- METH_VARARGS, rotate_doc},
+ METH_VARARGS, rotate_doc},
+ {"__sizeof__", (PyCFunction)deque_sizeof,
+ METH_NOARGS, sizeof_doc},
{NULL, NULL} /* sentinel */
};
PyDoc_STRVAR(deque_doc,
-"deque(iterable[, maxlen]) --> deque object\n\
+"deque([iterable[, maxlen]]) --> deque object\n\
\n\
Build an ordered collection with optimized access from its endpoints.");
@@ -1544,11 +1612,13 @@ defdict_init(PyObject *self, PyObject *args, PyObject *kwds)
}
PyDoc_STRVAR(defdict_doc,
-"defaultdict(default_factory) --> dict with default factory\n\
+"defaultdict(default_factory[, ...]) --> dict with default factory\n\
\n\
The default factory is called without arguments to produce\n\
a new value when a key is not present, in __getitem__ only.\n\
A defaultdict compares equal to a dict with the same items.\n\
+All remaining arguments are treated the same as if they were\n\
+passed to the dict constructor, including keyword arguments.\n\
");
/* See comment in xxsubtype.c */
diff --git a/Modules/_csv.c b/Modules/_csv.c
index ea3add2..00f5d00 100644
--- a/Modules/_csv.c
+++ b/Modules/_csv.c
@@ -208,8 +208,12 @@ _set_bool(const char *name, int *target, PyObject *src, int dflt)
{
if (src == NULL)
*target = dflt;
- else
- *target = PyObject_IsTrue(src);
+ else {
+ int b = PyObject_IsTrue(src);
+ if (b < 0)
+ return -1;
+ *target = b;
+ }
return 0;
}
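The change above guards against PyObject_IsTrue() failing: it returns -1 with
an exception set when evaluating truth raises (for example from a __nonzero__
that throws).  The same defensive pattern in isolation, as a sketch:

    #include <Python.h>

    static int
    set_flag_checked(int *target, PyObject *src)
    {
        int b = PyObject_IsTrue(src);   /* 1, 0, or -1 with an exception set */
        if (b < 0)
            return -1;                  /* propagate the error to the caller */
        *target = b;
        return 0;
    }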
@@ -235,19 +239,24 @@ _set_char(const char *name, char *target, PyObject *src, char dflt)
if (src == NULL)
*target = dflt;
else {
- if (src == Py_None || PyString_Size(src) == 0)
- *target = '\0';
- else if (!PyString_Check(src) || PyString_Size(src) != 1) {
- PyErr_Format(PyExc_TypeError,
- "\"%s\" must be an 1-character string",
- name);
- return -1;
- }
- else {
- char *s = PyString_AsString(src);
- if (s == NULL)
+ *target = '\0';
+ if (src != Py_None) {
+ Py_ssize_t len;
+ if (!PyString_Check(src)) {
+ PyErr_Format(PyExc_TypeError,
+ "\"%s\" must be string, not %.200s", name,
+ src->ob_type->tp_name);
+ return -1;
+ }
+ len = PyString_GET_SIZE(src);
+ if (len > 1) {
+ PyErr_Format(PyExc_TypeError,
+ "\"%s\" must be an 1-character string",
+ name);
return -1;
- *target = s[0];
+ }
+ if (len > 0)
+ *target = *PyString_AS_STRING(src);
}
}
return 0;
@@ -263,7 +272,7 @@ _set_str(const char *name, PyObject **target, PyObject *src, const char *dflt)
*target = NULL;
else if (!IS_BASESTRING(src)) {
PyErr_Format(PyExc_TypeError,
- "\"%s\" must be an string", name);
+ "\"%s\" must be a string", name);
return -1;
}
else {
@@ -422,7 +431,8 @@ dialect_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
if (dialect_check_quoting(self->quoting))
goto err;
if (self->delimiter == 0) {
- PyErr_SetString(PyExc_TypeError, "delimiter must be set");
+ PyErr_SetString(PyExc_TypeError,
+ "\"delimiter\" must be an 1-character string");
goto err;
}
if (quotechar == Py_None && quoting == NULL)
@@ -784,9 +794,13 @@ Reader_iternext(ReaderObj *self)
lineobj = PyIter_Next(self->input_iter);
if (lineobj == NULL) {
/* End of input OR exception */
- if (!PyErr_Occurred() && self->field_len != 0)
- PyErr_Format(error_obj,
- "newline inside string");
+ if (!PyErr_Occurred() && (self->field_len != 0 ||
+ self->state == IN_QUOTED_FIELD)) {
+ if (self->dialect->strict)
+ PyErr_SetString(error_obj, "unexpected end of data");
+ else if (parse_save_field(self) >= 0 )
+ break;
+ }
return NULL;
}
++self->line_num;
diff --git a/Modules/_ctypes/_ctypes.c b/Modules/_ctypes/_ctypes.c
index 4ae2c41..1700afd 100644
--- a/Modules/_ctypes/_ctypes.c
+++ b/Modules/_ctypes/_ctypes.c
@@ -194,10 +194,8 @@ _DictRemover_call(PyObject *_self, PyObject *args, PyObject *kw)
if (-1 == PyDict_DelItem(self->dict, self->key))
/* XXX Error context */
PyErr_WriteUnraisable(Py_None);
- Py_DECREF(self->key);
- self->key = NULL;
- Py_DECREF(self->dict);
- self->dict = NULL;
+ Py_CLEAR(self->key);
+ Py_CLEAR(self->dict);
}
Py_INCREF(Py_None);
return Py_None;
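The Py_CLEAR() substitution above is more than shorthand: it empties the slot
before dropping the reference, so anything that runs during deallocation (a
__del__, a weakref callback, the GC) never sees a stale pointer.  A
hand-written equivalent of what the macro does, for illustration:

    #include <Python.h>

    static void
    clear_slot(PyObject **slot)
    {
        PyObject *tmp = *slot;   /* stash the old reference */
        *slot = NULL;            /* the slot is already safe to revisit */
        Py_XDECREF(tmp);         /* only now drop the reference */
    }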
@@ -324,6 +322,48 @@ _ctypes_alloc_format_string(const char *prefix, const char *suffix)
}
/*
+ Allocate a memory block for a pep3118 format string, adding
+ the given prefix (if non-null), an additional shape prefix, and a suffix.
+ Returns NULL on failure, with the error indicator set. If called with
+ a suffix of NULL the error indicator must already be set.
+ */
+char *
+_ctypes_alloc_format_string_with_shape(int ndim, const Py_ssize_t *shape,
+ const char *prefix, const char *suffix)
+{
+ char *new_prefix;
+ char *result;
+ char buf[32];
+ int prefix_len;
+ int k;
+
+ prefix_len = 32 * ndim + 3;
+ if (prefix)
+ prefix_len += strlen(prefix);
+ new_prefix = PyMem_Malloc(prefix_len);
+ if (new_prefix == NULL)
+ return NULL;
+ new_prefix[0] = '\0';
+ if (prefix)
+ strcpy(new_prefix, prefix);
+ if (ndim > 0) {
+ /* Add the prefix "(shape[0],shape[1],...,shape[ndim-1])" */
+ strcat(new_prefix, "(");
+ for (k = 0; k < ndim; ++k) {
+ if (k < ndim-1) {
+ sprintf(buf, "%"PY_FORMAT_SIZE_T"d,", shape[k]);
+ } else {
+ sprintf(buf, "%"PY_FORMAT_SIZE_T"d)", shape[k]);
+ }
+ strcat(new_prefix, buf);
+ }
+ }
+ result = _ctypes_alloc_format_string(new_prefix, suffix);
+ PyMem_Free(new_prefix);
+ return result;
+}
+
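    /* Sketch of what the helper above produces (illustrative values, not a
       claim about any particular ctypes type): for ndim = 2, shape = {4, 3},
       prefix = "&" and suffix = "<i", the resulting PEP 3118 format string is
       "&(4,3)<i" -- a pointer to a 4x3 array of little-endian ints. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const long shape[2] = {4, 3};
        char buf[64] = "&(";                      /* assumed prefix "&" */
        char item[16];
        int k;
        for (k = 0; k < 2; k++) {
            sprintf(item, k < 1 ? "%ld," : "%ld)", shape[k]);
            strcat(buf, item);
        }
        strcat(buf, "<i");                        /* assumed element format */
        printf("%s\n", buf);                      /* prints "&(4,3)<i" */
        return 0;
    }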
+/*
PyCStructType_Type - a meta type/class. Creating a new class using this one as
__metaclass__ will call the constructor StructUnionType_new. It replaces the
tp_dict member with a new instance of StgDict, and initializes the C
@@ -919,14 +959,21 @@ PyCPointerType_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
if (proto) {
StgDictObject *itemdict = PyType_stgdict(proto);
+ const char *current_format;
assert(itemdict);
/* If itemdict->format is NULL, then this is a pointer to an
incomplete type. We create a generic format string
'pointer to bytes' in this case. XXX Better would be to
fix the format string later...
*/
- stgdict->format = _ctypes_alloc_format_string("&",
- itemdict->format ? itemdict->format : "B");
+ current_format = itemdict->format ? itemdict->format : "B";
+ if (itemdict->shape != NULL) {
+ /* pointer to an array: the shape needs to be prefixed */
+ stgdict->format = _ctypes_alloc_format_string_with_shape(
+ itemdict->ndim, itemdict->shape, "&", current_format);
+ } else {
+ stgdict->format = _ctypes_alloc_format_string("&", current_format);
+ }
if (stgdict->format == NULL) {
Py_DECREF((PyObject *)stgdict);
return NULL;
@@ -1328,7 +1375,6 @@ PyCArrayType_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
long length;
Py_ssize_t itemsize, itemalign;
- char buf[32];
typedict = PyTuple_GetItem(args, 2);
if (!typedict)
@@ -1364,13 +1410,7 @@ PyCArrayType_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
}
assert(itemdict->format);
- if (itemdict->format[0] == '(') {
- sprintf(buf, "(%ld,", length);
- stgdict->format = _ctypes_alloc_format_string(buf, itemdict->format+1);
- } else {
- sprintf(buf, "(%ld)", length);
- stgdict->format = _ctypes_alloc_format_string(buf, itemdict->format);
- }
+ stgdict->format = _ctypes_alloc_format_string(NULL, itemdict->format);
if (stgdict->format == NULL) {
Py_DECREF((PyObject *)stgdict);
return NULL;
@@ -2526,11 +2566,9 @@ PyCData_traverse(CDataObject *self, visitproc visit, void *arg)
static int
PyCData_clear(CDataObject *self)
{
- StgDictObject *dict = PyObject_stgdict((PyObject *)self);
- assert(dict); /* Cannot be NULL for CDataObject instances */
Py_CLEAR(self->b_objects);
if ((self->b_needsfree)
- && ((size_t)dict->size > sizeof(self->b_value)))
+ && _CDataObject_HasExternalBuffer(self))
PyMem_Free(self->b_ptr);
self->b_ptr = NULL;
Py_CLEAR(self->b_base);
@@ -3042,10 +3080,8 @@ static int
PyCFuncPtr_set_restype(PyCFuncPtrObject *self, PyObject *ob)
{
if (ob == NULL) {
- Py_XDECREF(self->restype);
- self->restype = NULL;
- Py_XDECREF(self->checker);
- self->checker = NULL;
+ Py_CLEAR(self->restype);
+ Py_CLEAR(self->checker);
return 0;
}
if (ob != Py_None && !PyType_stgdict(ob) && !PyCallable_Check(ob)) {
@@ -3088,10 +3124,8 @@ PyCFuncPtr_set_argtypes(PyCFuncPtrObject *self, PyObject *ob)
PyObject *converters;
if (ob == NULL || ob == Py_None) {
- Py_XDECREF(self->converters);
- self->converters = NULL;
- Py_XDECREF(self->argtypes);
- self->argtypes = NULL;
+ Py_CLEAR(self->converters);
+ Py_CLEAR(self->argtypes);
} else {
converters = converters_from_argtypes(ob);
if (!converters)
@@ -3296,23 +3330,37 @@ PyCFuncPtr_FromDll(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
char *name;
int (* address)(void);
+ PyObject *ftuple;
PyObject *dll;
PyObject *obj;
PyCFuncPtrObject *self;
void *handle;
PyObject *paramflags = NULL;
- if (!PyArg_ParseTuple(args, "(O&O)|O", _get_name, &name, &dll, &paramflags))
+ if (!PyArg_ParseTuple(args, "O|O", &ftuple, &paramflags))
return NULL;
if (paramflags == Py_None)
paramflags = NULL;
+ ftuple = PySequence_Tuple(ftuple);
+ if (!ftuple)
+ /* Here ftuple is a borrowed reference */
+ return NULL;
+
+ if (!PyArg_ParseTuple(ftuple, "O&O", _get_name, &name, &dll)) {
+ Py_DECREF(ftuple);
+ return NULL;
+ }
+
obj = PyObject_GetAttrString(dll, "_handle");
- if (!obj)
+ if (!obj) {
+ Py_DECREF(ftuple);
return NULL;
+ }
if (!PyInt_Check(obj) && !PyLong_Check(obj)) {
PyErr_SetString(PyExc_TypeError,
"the _handle attribute of the second argument must be an integer");
+ Py_DECREF(ftuple);
Py_DECREF(obj);
return NULL;
}
@@ -3321,6 +3369,7 @@ PyCFuncPtr_FromDll(PyTypeObject *type, PyObject *args, PyObject *kwds)
if (PyErr_Occurred()) {
PyErr_SetString(PyExc_ValueError,
"could not convert the _handle attribute to a pointer");
+ Py_DECREF(ftuple);
return NULL;
}
@@ -3335,6 +3384,7 @@ PyCFuncPtr_FromDll(PyTypeObject *type, PyObject *args, PyObject *kwds)
PyErr_Format(PyExc_AttributeError,
"function ordinal %d not found",
(WORD)(size_t)name);
+ Py_DECREF(ftuple);
return NULL;
}
#else
@@ -3348,9 +3398,12 @@ PyCFuncPtr_FromDll(PyTypeObject *type, PyObject *args, PyObject *kwds)
#else
PyErr_SetString(PyExc_AttributeError, ctypes_dlerror());
#endif
+ Py_DECREF(ftuple);
return NULL;
}
#endif
+ Py_INCREF(dll); /* for KeepRef */
+ Py_DECREF(ftuple);
if (!_validate_paramflags(type, paramflags))
return NULL;
@@ -3363,7 +3416,6 @@ PyCFuncPtr_FromDll(PyTypeObject *type, PyObject *args, PyObject *kwds)
*(void **)self->b_ptr = address;
- Py_INCREF((PyObject *)dll); /* for KeepRef */
if (-1 == KeepRef((CDataObject *)self, 0, dll)) {
Py_DECREF((PyObject *)self);
return NULL;
diff --git a/Modules/_ctypes/callproc.c b/Modules/_ctypes/callproc.c
index 78c6dc0..6642dc3 100644
--- a/Modules/_ctypes/callproc.c
+++ b/Modules/_ctypes/callproc.c
@@ -25,8 +25,8 @@
2. After several checks, _build_callargs() is called which returns another
tuple 'callargs'. This may be the same tuple as 'inargs', a slice of
- 'inargs', or a completely fresh tuple, depending on several things (is is a
- COM method, are 'paramflags' available).
+ 'inargs', or a completely fresh tuple, depending on several things (is it a
+ COM method?, are 'paramflags' available?).
3. _build_callargs also calculates bitarrays containing indexes into
the callargs tuple, specifying how to build the return value(s) of
@@ -75,6 +75,10 @@
#include <ffi.h>
#include "ctypes.h"
+#ifdef HAVE_ALLOCA_H
+/* AIX needs alloca.h for alloca() */
+#include <alloca.h>
+#endif
#if defined(_DEBUG) || defined(__MINGW32__)
/* Don't use structured exception handling on Windows if this is defined.
@@ -401,6 +405,11 @@ static DWORD HandleException(EXCEPTION_POINTERS *ptrs,
{
*pdw = ptrs->ExceptionRecord->ExceptionCode;
*record = *ptrs->ExceptionRecord;
+ /* We don't want to catch breakpoint exceptions, they are used to attach
+ * a debugger to the process.
+ */
+ if (*pdw == EXCEPTION_BREAKPOINT)
+ return EXCEPTION_CONTINUE_SEARCH;
return EXCEPTION_EXECUTE_HANDLER;
}
#endif
@@ -844,11 +853,11 @@ static int _call_function_pointer(int flags,
space[0] = errno;
errno = temp;
}
- Py_XDECREF(error_object);
#ifdef WITH_THREAD
if ((flags & FUNCFLAG_PYTHONAPI) == 0)
Py_BLOCK_THREADS
#endif
+ Py_XDECREF(error_object);
#ifdef MS_WIN32
#ifndef DONT_USE_SEH
if (dwExceptionCode) {
@@ -1740,7 +1749,7 @@ resize(PyObject *self, PyObject *args)
obj->b_size = size;
goto done;
}
- if (obj->b_size <= sizeof(obj->b_value)) {
+ if (!_CDataObject_HasExternalBuffer(obj)) {
/* We are currently using the object's default buffer, but it
isn't large enough any more. */
void *ptr = PyMem_Malloc(size);
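The reordering above in _call_function_pointer() matters because Py_XDECREF()
can trigger arbitrary deallocation code, which requires the GIL; the thread
state therefore has to be restored first.  A standalone sketch of the
ordering, using the explicit PyEval_SaveThread()/PyEval_RestoreThread() calls
rather than the Py_UNBLOCK_THREADS/Py_BLOCK_THREADS macros:

    #include <Python.h>

    static void
    call_without_gil_then_cleanup(void (*fn)(void), PyObject *scratch)
    {
        PyThreadState *save;
        save = PyEval_SaveThread();    /* release the GIL around the raw call */
        fn();                          /* foreign code; must not touch Python */
        PyEval_RestoreThread(save);    /* re-acquire the GIL first...          */
        Py_XDECREF(scratch);           /* ...and only then drop the reference  */
    }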
diff --git a/Modules/_ctypes/cfield.c b/Modules/_ctypes/cfield.c
index c7053ef..76c72f8 100644
--- a/Modules/_ctypes/cfield.c
+++ b/Modules/_ctypes/cfield.c
@@ -431,12 +431,8 @@ get_ulonglong(PyObject *v, unsigned PY_LONG_LONG *p)
#define LOW_BIT(x) ((x) & 0xFFFF)
#define NUM_BITS(x) ((x) >> 16)
-/* This seems nore a compiler issue than a Windows/non-Windows one */
-#ifdef MS_WIN32
-# define BIT_MASK(size) ((1 << NUM_BITS(size))-1)
-#else
-# define BIT_MASK(size) ((1LL << NUM_BITS(size))-1)
-#endif
+/* Doesn't work if NUM_BITS(size) == 0, but that never happens in a SET() call. */
+#define BIT_MASK(type, size) (((((type)1 << (NUM_BITS(size) - 1)) - 1) << 1) + 1)
/* This macro CHANGES the first parameter IN PLACE. For proper sign handling,
we must first shift left, then right.
@@ -448,10 +444,10 @@ get_ulonglong(PyObject *v, unsigned PY_LONG_LONG *p)
}
/* This macro RETURNS the first parameter with the bit field CHANGED. */
-#define SET(x, v, size) \
+#define SET(type, x, v, size) \
(NUM_BITS(size) ? \
- ( ( x & ~(BIT_MASK(size) << LOW_BIT(size)) ) | ( (v & BIT_MASK(size)) << LOW_BIT(size) ) ) \
- : v)
+ ( ( (type)x & ~(BIT_MASK(type, size) << LOW_BIT(size)) ) | ( ((type)v & BIT_MASK(type, size)) << LOW_BIT(size) ) ) \
+ : (type)v)
/* byte swapping macros */
#define SWAP_2(v) \
@@ -523,7 +519,7 @@ b_set(void *ptr, PyObject *value, Py_ssize_t size)
long val;
if (get_long(value, &val) < 0)
return NULL;
- *(signed char *)ptr = (signed char)SET(*(signed char *)ptr, (signed char)val, size);
+ *(signed char *)ptr = SET(signed char, *(signed char *)ptr, val, size);
_RET(value);
}
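The BIT_MASK()/SET() rework above parameterizes both macros by the field's C
type, so the mask is computed in that type's width instead of a fixed int or
long long.  A worked example with illustrative values (a 6-bit field starting
at bit 4 of an unsigned short), reusing the same size encoding as this file:

    #include <stdio.h>

    #define LOW_BIT(x)  ((x) & 0xFFFF)
    #define NUM_BITS(x) ((x) >> 16)
    #define BIT_MASK(type, size) (((((type)1 << (NUM_BITS(size) - 1)) - 1) << 1) + 1)
    #define SET(type, x, v, size)                                          \
        (NUM_BITS(size)                                                    \
         ? (((type)(x) & ~(BIT_MASK(type, size) << LOW_BIT(size)))         \
            | (((type)(v) & BIT_MASK(type, size)) << LOW_BIT(size)))       \
         : (type)(v))

    int main(void)
    {
        long size = (6L << 16) | 4;     /* NUM_BITS = 6, LOW_BIT = 4 */
        unsigned short x = 0xFFFF;      /* start with every bit set */
        x = SET(unsigned short, x, 0x2A, size);
        /* mask = 0x3F; bits 4..9 are replaced with 0b101010 */
        printf("0x%04X\n", x);          /* prints 0xFEAF */
        return 0;
    }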
@@ -542,8 +538,7 @@ B_set(void *ptr, PyObject *value, Py_ssize_t size)
unsigned long val;
if (get_ulong(value, &val) < 0)
return NULL;
- *(unsigned char *)ptr = (unsigned char)SET(*(unsigned char*)ptr,
- (unsigned short)val, size);
+ *(unsigned char *)ptr = SET(unsigned char, *(unsigned char*)ptr, val, size);
_RET(value);
}
@@ -564,7 +559,7 @@ h_set(void *ptr, PyObject *value, Py_ssize_t size)
if (get_long(value, &val) < 0)
return NULL;
memcpy(&x, ptr, sizeof(x));
- x = SET(x, (short)val, size);
+ x = SET(short, x, val, size);
memcpy(ptr, &x, sizeof(x));
_RET(value);
}
@@ -579,7 +574,7 @@ h_set_sw(void *ptr, PyObject *value, Py_ssize_t size)
return NULL;
memcpy(&field, ptr, sizeof(field));
field = SWAP_2(field);
- field = SET(field, (short)val, size);
+ field = SET(short, field, val, size);
field = SWAP_2(field);
memcpy(ptr, &field, sizeof(field));
_RET(value);
@@ -612,7 +607,7 @@ H_set(void *ptr, PyObject *value, Py_ssize_t size)
if (get_ulong(value, &val) < 0)
return NULL;
memcpy(&x, ptr, sizeof(x));
- x = SET(x, (unsigned short)val, size);
+ x = SET(unsigned short, x, val, size);
memcpy(ptr, &x, sizeof(x));
_RET(value);
}
@@ -626,7 +621,7 @@ H_set_sw(void *ptr, PyObject *value, Py_ssize_t size)
return NULL;
memcpy(&field, ptr, sizeof(field));
field = SWAP_2(field);
- field = SET(field, (unsigned short)val, size);
+ field = SET(unsigned short, field, val, size);
field = SWAP_2(field);
memcpy(ptr, &field, sizeof(field));
_RET(value);
@@ -660,7 +655,7 @@ i_set(void *ptr, PyObject *value, Py_ssize_t size)
if (get_long(value, &val) < 0)
return NULL;
memcpy(&x, ptr, sizeof(x));
- x = SET(x, (int)val, size);
+ x = SET(int, x, val, size);
memcpy(ptr, &x, sizeof(x));
_RET(value);
}
@@ -674,7 +669,7 @@ i_set_sw(void *ptr, PyObject *value, Py_ssize_t size)
return NULL;
memcpy(&field, ptr, sizeof(field));
field = SWAP_INT(field);
- field = SET(field, (int)val, size);
+ field = SET(int, field, val, size);
field = SWAP_INT(field);
memcpy(ptr, &field, sizeof(field));
_RET(value);
@@ -761,7 +756,7 @@ I_set(void *ptr, PyObject *value, Py_ssize_t size)
if (get_ulong(value, &val) < 0)
return NULL;
memcpy(&x, ptr, sizeof(x));
- x = SET(x, (unsigned int)val, size);
+ x = SET(unsigned int, x, val, size);
memcpy(ptr, &x, sizeof(x));
_RET(value);
}
@@ -774,7 +769,7 @@ I_set_sw(void *ptr, PyObject *value, Py_ssize_t size)
if (get_ulong(value, &val) < 0)
return NULL;
memcpy(&field, ptr, sizeof(field));
- field = (unsigned int)SET(field, (unsigned int)val, size);
+ field = SET(unsigned int, field, (unsigned int)val, size);
field = SWAP_INT(field);
memcpy(ptr, &field, sizeof(field));
_RET(value);
@@ -808,7 +803,7 @@ l_set(void *ptr, PyObject *value, Py_ssize_t size)
if (get_long(value, &val) < 0)
return NULL;
memcpy(&x, ptr, sizeof(x));
- x = SET(x, val, size);
+ x = SET(long, x, val, size);
memcpy(ptr, &x, sizeof(x));
_RET(value);
}
@@ -822,7 +817,7 @@ l_set_sw(void *ptr, PyObject *value, Py_ssize_t size)
return NULL;
memcpy(&field, ptr, sizeof(field));
field = SWAP_LONG(field);
- field = (long)SET(field, val, size);
+ field = SET(long, field, val, size);
field = SWAP_LONG(field);
memcpy(ptr, &field, sizeof(field));
_RET(value);
@@ -856,7 +851,7 @@ L_set(void *ptr, PyObject *value, Py_ssize_t size)
if (get_ulong(value, &val) < 0)
return NULL;
memcpy(&x, ptr, sizeof(x));
- x = SET(x, val, size);
+ x = SET(unsigned long, x, val, size);
memcpy(ptr, &x, sizeof(x));
_RET(value);
}
@@ -870,7 +865,7 @@ L_set_sw(void *ptr, PyObject *value, Py_ssize_t size)
return NULL;
memcpy(&field, ptr, sizeof(field));
field = SWAP_LONG(field);
- field = (unsigned long)SET(field, val, size);
+ field = SET(unsigned long, field, val, size);
field = SWAP_LONG(field);
memcpy(ptr, &field, sizeof(field));
_RET(value);
@@ -905,7 +900,7 @@ q_set(void *ptr, PyObject *value, Py_ssize_t size)
if (get_longlong(value, &val) < 0)
return NULL;
memcpy(&x, ptr, sizeof(x));
- x = SET(x, val, size);
+ x = SET(PY_LONG_LONG, x, val, size);
memcpy(ptr, &x, sizeof(x));
_RET(value);
}
@@ -919,7 +914,7 @@ q_set_sw(void *ptr, PyObject *value, Py_ssize_t size)
return NULL;
memcpy(&field, ptr, sizeof(field));
field = SWAP_8(field);
- field = (PY_LONG_LONG)SET(field, val, size);
+ field = SET(PY_LONG_LONG, field, val, size);
field = SWAP_8(field);
memcpy(ptr, &field, sizeof(field));
_RET(value);
@@ -952,7 +947,7 @@ Q_set(void *ptr, PyObject *value, Py_ssize_t size)
if (get_ulonglong(value, &val) < 0)
return NULL;
memcpy(&x, ptr, sizeof(x));
- x = SET(x, val, size);
+ x = SET(PY_LONG_LONG, x, val, size);
memcpy(ptr, &x, sizeof(x));
_RET(value);
}
@@ -966,7 +961,7 @@ Q_set_sw(void *ptr, PyObject *value, Py_ssize_t size)
return NULL;
memcpy(&field, ptr, sizeof(field));
field = SWAP_8(field);
- field = (unsigned PY_LONG_LONG)SET(field, val, size);
+ field = SET(unsigned PY_LONG_LONG, field, val, size);
field = SWAP_8(field);
memcpy(ptr, &field, sizeof(field));
_RET(value);
@@ -1003,12 +998,8 @@ g_set(void *ptr, PyObject *value, Py_ssize_t size)
long double x;
x = PyFloat_AsDouble(value);
- if (x == -1 && PyErr_Occurred()) {
- PyErr_Format(PyExc_TypeError,
- " float expected instead of %s instance",
- value->ob_type->tp_name);
+ if (x == -1 && PyErr_Occurred())
return NULL;
- }
memcpy(ptr, &x, sizeof(long double));
_RET(value);
}
@@ -1027,12 +1018,8 @@ d_set(void *ptr, PyObject *value, Py_ssize_t size)
double x;
x = PyFloat_AsDouble(value);
- if (x == -1 && PyErr_Occurred()) {
- PyErr_Format(PyExc_TypeError,
- " float expected instead of %s instance",
- value->ob_type->tp_name);
+ if (x == -1 && PyErr_Occurred())
return NULL;
- }
memcpy(ptr, &x, sizeof(double));
_RET(value);
}
@@ -1051,12 +1038,8 @@ d_set_sw(void *ptr, PyObject *value, Py_ssize_t size)
double x;
x = PyFloat_AsDouble(value);
- if (x == -1 && PyErr_Occurred()) {
- PyErr_Format(PyExc_TypeError,
- " float expected instead of %s instance",
- value->ob_type->tp_name);
+ if (x == -1 && PyErr_Occurred())
return NULL;
- }
#ifdef WORDS_BIGENDIAN
if (_PyFloat_Pack8(x, (unsigned char *)ptr, 1))
return NULL;
@@ -1083,12 +1066,8 @@ f_set(void *ptr, PyObject *value, Py_ssize_t size)
float x;
x = (float)PyFloat_AsDouble(value);
- if (x == -1 && PyErr_Occurred()) {
- PyErr_Format(PyExc_TypeError,
- " float expected instead of %s instance",
- value->ob_type->tp_name);
+ if (x == -1 && PyErr_Occurred())
return NULL;
- }
memcpy(ptr, &x, sizeof(x));
_RET(value);
}
@@ -1107,12 +1086,8 @@ f_set_sw(void *ptr, PyObject *value, Py_ssize_t size)
float x;
x = (float)PyFloat_AsDouble(value);
- if (x == -1 && PyErr_Occurred()) {
- PyErr_Format(PyExc_TypeError,
- " float expected instead of %s instance",
- value->ob_type->tp_name);
+ if (x == -1 && PyErr_Occurred())
return NULL;
- }
#ifdef WORDS_BIGENDIAN
if (_PyFloat_Pack4(x, (unsigned char *)ptr, 1))
return NULL;
@@ -1712,9 +1687,9 @@ typedef struct { char c; void *x; } s_void_p;
/*
#define CHAR_ALIGN (sizeof(s_char) - sizeof(char))
#define SHORT_ALIGN (sizeof(s_short) - sizeof(short))
-#define INT_ALIGN (sizeof(s_int) - sizeof(int))
#define LONG_ALIGN (sizeof(s_long) - sizeof(long))
*/
+#define INT_ALIGN (sizeof(s_int) - sizeof(int))
#define FLOAT_ALIGN (sizeof(s_float) - sizeof(float))
#define DOUBLE_ALIGN (sizeof(s_double) - sizeof(double))
#define LONGDOUBLE_ALIGN (sizeof(s_long_double) - sizeof(long double))
@@ -1756,8 +1731,8 @@ ffi_type ffi_type_sint8 = { 1, 1, FFI_TYPE_SINT8 };
ffi_type ffi_type_uint16 = { 2, 2, FFI_TYPE_UINT16 };
ffi_type ffi_type_sint16 = { 2, 2, FFI_TYPE_SINT16 };
-ffi_type ffi_type_uint32 = { 4, 4, FFI_TYPE_UINT32 };
-ffi_type ffi_type_sint32 = { 4, 4, FFI_TYPE_SINT32 };
+ffi_type ffi_type_uint32 = { 4, INT_ALIGN, FFI_TYPE_UINT32 };
+ffi_type ffi_type_sint32 = { 4, INT_ALIGN, FFI_TYPE_SINT32 };
ffi_type ffi_type_uint64 = { 8, LONG_LONG_ALIGN, FFI_TYPE_UINT64 };
ffi_type ffi_type_sint64 = { 8, LONG_LONG_ALIGN, FFI_TYPE_SINT64 };
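The INT_ALIGN change above relies on the usual struct-padding trick for
deriving a type's alignment: sizeof(struct { char c; T x; }) minus sizeof(T).
Shown in isolation, with the caveat that this equals the alignment requirement
on typical ABIs rather than by guarantee of the C standard:

    #include <stdio.h>

    typedef struct { char c; int x; } s_int;
    typedef struct { char c; double x; } s_double;

    int main(void)
    {
        printf("int alignment:    %u\n", (unsigned)(sizeof(s_int) - sizeof(int)));
        printf("double alignment: %u\n", (unsigned)(sizeof(s_double) - sizeof(double)));
        return 0;
    }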
diff --git a/Modules/_ctypes/ctypes.h b/Modules/_ctypes/ctypes.h
index 59640cc..b88cf4f 100644
--- a/Modules/_ctypes/ctypes.h
+++ b/Modules/_ctypes/ctypes.h
@@ -153,6 +153,7 @@ extern int PyObject_stginfo(PyObject *self, Py_ssize_t *psize, Py_ssize_t *palig
extern PyTypeObject PyCData_Type;
#define CDataObject_CheckExact(v) ((v)->ob_type == &PyCData_Type)
#define CDataObject_Check(v) PyObject_TypeCheck(v, &PyCData_Type)
+#define _CDataObject_HasExternalBuffer(v) ((v)->b_ptr != (char *)&(v)->b_value)
extern PyTypeObject PyCSimpleType_Type;
#define PyCSimpleTypeObject_CheckExact(v) ((v)->ob_type == &PyCSimpleType_Type)
@@ -433,6 +434,9 @@ extern void _ctypes_add_traceback(char *, char *, int);
extern PyObject *PyCData_FromBaseObj(PyObject *type, PyObject *base, Py_ssize_t index, char *adr);
extern char *_ctypes_alloc_format_string(const char *prefix, const char *suffix);
+extern char *_ctypes_alloc_format_string_with_shape(int ndim,
+ const Py_ssize_t *shape,
+ const char *prefix, const char *suffix);
extern int _ctypes_simple_instance(PyObject *obj);
diff --git a/Modules/_ctypes/libffi.diff b/Modules/_ctypes/libffi.diff
index 7afa0ed..828a277 100644
--- a/Modules/_ctypes/libffi.diff
+++ b/Modules/_ctypes/libffi.diff
@@ -1,24 +1,26 @@
-diff -urN libffi.orig/configure libffi/configure
---- libffi.orig/configure 2010-03-19 18:29:54.588499862 +0100
-+++ libffi/configure 2010-03-19 18:32:09.113499479 +0100
-@@ -11228,6 +11228,9 @@
- i?86-*-solaris2.1[0-9]*)
- TARGET=X86_64; TARGETDIR=x86
+diff -r -N -u libffi.orig/autom4te.cache/output.0 libffi/autom4te.cache/output.0
+diff -r -N -u libffi.orig/configure libffi/configure
+--- libffi.orig/configure 2013-03-17 15:37:50.000000000 -0700
++++ libffi/configure 2013-03-18 15:11:39.611575163 -0700
+@@ -13368,6 +13368,10 @@
+ fi
;;
+
+ i*86-*-nto-qnx*)
+ TARGET=X86; TARGETDIR=x86
+ ;;
- i?86-*-*)
- TARGET=X86; TARGETDIR=x86
++
+ x86_64-*-darwin*)
+ TARGET=X86_DARWIN; TARGETDIR=x86
;;
-@@ -11245,12 +11248,12 @@
+@@ -13426,12 +13430,12 @@
;;
- mips-sgi-irix5.* | mips-sgi-irix6.*)
+ mips-sgi-irix5.* | mips-sgi-irix6.* | mips*-*-rtems*)
- TARGET=MIPS; TARGETDIR=mips
+ TARGET=MIPS_IRIX; TARGETDIR=mips
;;
- mips*-*-linux*)
+ mips*-*-linux* | mips*-*-openbsd*)
# Support 128-bit long double for NewABI.
HAVE_LONG_DOUBLE='defined(__mips64)'
- TARGET=MIPS; TARGETDIR=mips
@@ -26,8 +28,8 @@ diff -urN libffi.orig/configure libffi/configure
;;
powerpc*-*-linux* | powerpc-*-sysv*)
-@@ -11307,7 +11310,7 @@
- as_fn_error "\"libffi has not been ported to $host.\"" "$LINENO" 5
+@@ -13491,7 +13495,7 @@
+ as_fn_error $? "\"libffi has not been ported to $host.\"" "$LINENO" 5
fi
- if test x$TARGET = xMIPS; then
@@ -35,7 +37,7 @@ diff -urN libffi.orig/configure libffi/configure
MIPS_TRUE=
MIPS_FALSE='#'
else
-@@ -12422,6 +12425,12 @@
+@@ -14862,6 +14866,12 @@
ac_config_files="$ac_config_files include/Makefile include/ffi.h Makefile testsuite/Makefile man/Makefile libffi.pc"
@@ -48,44 +50,45 @@ diff -urN libffi.orig/configure libffi/configure
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
# tests run on this system so they can be shared between configure
-@@ -13521,6 +13530,8 @@
+@@ -16047,6 +16057,8 @@
"testsuite/Makefile") CONFIG_FILES="$CONFIG_FILES testsuite/Makefile" ;;
"man/Makefile") CONFIG_FILES="$CONFIG_FILES man/Makefile" ;;
"libffi.pc") CONFIG_FILES="$CONFIG_FILES libffi.pc" ;;
+ "include/ffi_common.h") CONFIG_LINKS="$CONFIG_LINKS include/ffi_common.h:include/ffi_common.h" ;;
+ "fficonfig.py") CONFIG_FILES="$CONFIG_FILES fficonfig.py" ;;
- *) as_fn_error "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
+ *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
esac
-diff -urN libffi.orig/configure.ac libffi/configure.ac
---- libffi.orig/configure.ac 2010-03-19 18:27:44.988498585 +0100
-+++ libffi/configure.ac 2010-03-19 18:31:29.252505178 +0100
+diff -r -N -u libffi.orig/configure.ac libffi/configure.ac
+--- libffi.orig/configure.ac 2013-03-17 15:37:50.000000000 -0700
++++ libffi/configure.ac 2013-03-18 15:11:11.392989136 -0700
@@ -1,4 +1,7 @@
dnl Process this with autoconf to create configure
+#
-+# file from libffi - slightly patched for ctypes
++# file from libffi - slightly patched for Python's ctypes
+#
- AC_PREREQ(2.63)
+ AC_PREREQ(2.68)
-@@ -91,6 +94,9 @@
- i?86-*-solaris2.1[[0-9]]*)
- TARGET=X86_64; TARGETDIR=x86
+@@ -146,6 +149,10 @@
+ fi
;;
+
+ i*86-*-nto-qnx*)
+ TARGET=X86; TARGETDIR=x86
+ ;;
- i?86-*-*)
- TARGET=X86; TARGETDIR=x86
++
+ x86_64-*-darwin*)
+ TARGET=X86_DARWIN; TARGETDIR=x86
;;
-@@ -108,12 +114,12 @@
+@@ -204,12 +211,12 @@
;;
- mips-sgi-irix5.* | mips-sgi-irix6.*)
+ mips-sgi-irix5.* | mips-sgi-irix6.* | mips*-*-rtems*)
- TARGET=MIPS; TARGETDIR=mips
+ TARGET=MIPS_IRIX; TARGETDIR=mips
;;
- mips*-*-linux*)
+ mips*-*-linux* | mips*-*-openbsd*)
# Support 128-bit long double for NewABI.
HAVE_LONG_DOUBLE='defined(__mips64)'
- TARGET=MIPS; TARGETDIR=mips
@@ -93,16 +96,16 @@ diff -urN libffi.orig/configure.ac libffi/configure.ac
;;
powerpc*-*-linux* | powerpc-*-sysv*)
-@@ -170,7 +176,7 @@
+@@ -269,7 +276,7 @@
AC_MSG_ERROR(["libffi has not been ported to $host."])
fi
-AM_CONDITIONAL(MIPS, test x$TARGET = xMIPS)
+AM_CONDITIONAL(MIPS,[expr x$TARGET : 'xMIPS' > /dev/null])
+ AM_CONDITIONAL(BFIN, test x$TARGET = xBFIN)
AM_CONDITIONAL(SPARC, test x$TARGET = xSPARC)
AM_CONDITIONAL(X86, test x$TARGET = xX86)
- AM_CONDITIONAL(X86_FREEBSD, test x$TARGET = xX86_FREEBSD)
-@@ -401,4 +407,8 @@
+@@ -567,4 +574,8 @@
AC_CONFIG_FILES(include/Makefile include/ffi.h Makefile testsuite/Makefile man/Makefile libffi.pc)
diff --git a/Modules/_ctypes/libffi/.gitignore b/Modules/_ctypes/libffi/.gitignore
new file mode 100644
index 0000000..6af76ac
--- /dev/null
+++ b/Modules/_ctypes/libffi/.gitignore
@@ -0,0 +1,21 @@
+.libs
+.deps
+*.o
+*.lo
+.dirstamp
+*.la
+Makefile
+config.log
+config.status
+*~
+fficonfig.h
+include/ffi.h
+include/ffitarget.h
+libffi.pc
+libtool
+stamp-h1
+libffi*gz
+autom4te.cache
+libffi.xcodeproj/xcuserdata
+libffi.xcodeproj/project.xcworkspace
+ios/
diff --git a/Modules/_ctypes/libffi/.travis.yml b/Modules/_ctypes/libffi/.travis.yml
new file mode 100644
index 0000000..1a6a425
--- /dev/null
+++ b/Modules/_ctypes/libffi/.travis.yml
@@ -0,0 +1,8 @@
+language: c
+compiler:
+ - gcc
+ - clang
+
+before_script: sudo apt-get install dejagnu
+
+script: ./configure && make && make check
diff --git a/Modules/_ctypes/libffi/ChangeLog b/Modules/_ctypes/libffi/ChangeLog
index 00ba7ef..e0b057c 100644
--- a/Modules/_ctypes/libffi/ChangeLog
+++ b/Modules/_ctypes/libffi/ChangeLog
@@ -1,8 +1,1360 @@
+2013-03-17 Anthony Green <green@moxielogic.com>
+
+ * README: Update for 3.0.13.
+ * configure.ac: Ditto.
+ * configure: Rebuilt.
+ * doc/*: Update version.
+
+2013-03-17 Dave Korn <dave.korn.cygwin@gmail.com>
+
+ * src/closures.c (is_emutramp_enabled
+ [!FFI_MMAP_EXEC_EMUTRAMP_PAX]): Move default definition outside
+ enclosing #if scope.
+
+2013-03-17 Anthony Green <green@moxielogic.com>
+
+ * configure.ac: Only modify toolexecdir in certain cases.
+ * configure: Rebuilt.
+
+2013-03-16 Gilles Talis <gilles.talis@gmail.com>
+
+ * src/powerpc/ffi.c (ffi_prep_args_SYSV): Don't use
+ fparg_count,etc on __NO_FPRS__ targets.
+
+2013-03-16 Alan Hourihane <alanh@fairlite.co.uk>
+
+ * src/m68k/sysv.S (epilogue): Don't use extb instruction on
+ m680000 machines.
+
+2013-03-16 Alex Gaynor <alex.gaynor@gmail.com>
+
+ * src/x86/ffi.c (ffi_prep_cif_machdep): Always align stack.
+
+2013-03-13 Markos Chandras <markos.chandras@imgtec.com>
+
+ * configure.ac: Add support for Imagination Technologies Meta.
+ * Makefile.am: Likewise.
+ * README: Add Imagination Technologies Meta details.
+ * src/metag/ffi.c: New.
+ * src/metag/ffitarget.h: Likewise.
+ * src/metag/sysv.S: Likewise.
+
+2013-02-24 Andreas Schwab <schwab@linux-m68k.org>
+
+ * doc/libffi.texi (Structures): Fix missing category argument of
+ @deftp.
+
+2013-02-11 Anthony Green <green@moxielogic.com>
+
+ * configure.ac: Update release number to 3.0.12.
+ * configure: Rebuilt.
+ * README: Update release info.
+
+2013-02-10 Anthony Green <green@moxielogic.com>
+
+ * README: Add Moxie.
+ * src/moxie/ffi.c: Created.
+ * src/moxie/eabi.S: Created.
+ * src/moxie/ffitarget.h: Created.
+ * Makefile.am (nodist_libffi_la_SOURCES): Add Moxie.
+ * Makefile.in: Rebuilt.
+ * configure.ac: Add Moxie.
+ * configure: Rebuilt.
+ * testsuite/libffi.call/huge_struct.c: Disable format string
+ warnings for moxie*-*-elf tests.
+
+2013-02-10 Anthony Green <green@moxielogic.com>
+
+ * Makefile.am (LTLDFLAGS): Fix reference.
+ * Makefile.in: Rebuilt.
+
+2013-02-10 Anthony Green <green@moxielogic.com>
+
+ * README: Update supported platforms. Update test results link.
+
+2013-02-09 Anthony Green <green@moxielogic.com>
+
+ * testsuite/libffi.call/negint.c: Remove forced -O2.
+ * testsuite/libffi.call/many2.c (foo): Remove GCCism.
+ * testsuite/libffi.call/ffitest.h: Add default PRIuPTR definition.
+
+ * src/sparc/v8.S (ffi_closure_v8): Import ancient ulonglong
+ closure return type fix developed by Martin v. Löwis for cpython
+ fork.
+
+2013-02-08 Andreas Tobler <andreast@fgznet.ch>
+
+ * src/powerpc/ffi.c (ffi_prep_cif_machdep): Fix small struct
+ support.
+ * src/powerpc/sysv.S: Ditto.
+
+2013-02-08 Anthony Green <green@moxielogic.com>
+
+ * testsuite/libffi.call/cls_longdouble.c: Remove xfail for
+ arm*-*-*.
+
+2013-02-08 Anthony Green <green@moxielogic.com>
+
+ * src/sparc/ffi.c (ffi_prep_closure_loc): Fix cache flushing for GCC.
+
+2013-02-08 Matthias Klose <doko@ubuntu.com>
+
+ * man/ffi_prep_cif.3: Clean up for debian linter.
+
+2013-02-08 Peter Bergner <bergner@vnet.ibm.com>
+
+ * src/powerpc/ffi.c (ffi_prep_args_SYSV): Account for FP args pushed
+ on the stack.
+
+2013-02-08 Anthony Green <green@moxielogic.com>
+
+ * Makefile.am (EXTRA_DIST): Add missing files.
+ * testsuite/Makefile.am (EXTRA_DIST): Ditto.
+ * Makefile.in: Rebuilt.
+
+2013-02-08 Anthony Green <green@moxielogic.com>
+
+ * configure.ac: Move sparc asm config checks to within functions
+ for compatibility with sun tools.
+ * configure: Rebuilt.
+ * src/sparc/ffi.c (ffi_prep_closure_loc): Flush cache on v9
+ systems.
+ * src/sparc/v8.S (ffi_flush_icache): Implement a sparc v9 cache
+ flusher.
+
+2013-02-08 Nathan Rossi <nathan.rossi@xilinx.com>
+
+ * src/microblaze/ffi.c (ffi_closure_call_SYSV): Fix handling of
+ small big-endian structures.
+ (ffi_prep_args): Ditto.
+
+2013-02-07 Anthony Green <green@moxielogic.com>
+
+ * src/sparc/v8.S (ffi_call_v8): Fix typo from last patch
+ (effectively hiding ffi_call_v8).
+
+2013-02-07 Anthony Green <green@moxielogic.com>
+
+ * configure.ac: Update bug reporting address.
+ * configure.in: Rebuild.
+
+ * src/sparc/v8.S (ffi_flush_icache): Out-of-line cache flusher for
+ Sun compiler.
+ * src/sparc/ffi.c (ffi_call): Remove warning.
+ Call ffi_flush_icache for non-GCC builds.
+ (ffi_prep_closure_loc): Use ffi_flush_icache.
+
+ * Makefile.am (EXTRA_DIST): Add libtool-ldflags.
+ * Makefile.in: Rebuilt.
+ * libtool-ldflags: New file.
+
+2013-02-07 Daniel Schepler <dschepler@gmail.com>
+
+ * configure.ac: Correctly identify x32 systems as 64-bit.
+ * m4/libtool.m4: Remove libtool expr error.
+ * aclocal.m4, configure: Rebuilt.
+
+2013-02-07 Anthony Green <green@moxielogic.com>
+
+ * configure.ac: Fix GCC usage test.
+ * configure: Rebuilt.
+ * README: Mention LLVM/GCC x86_64 issue.
+ * testsuite/Makefile.in: Rebuilt.
+
+2013-02-07 Anthony Green <green@moxielogic.com>
+
+ * testsuite/libffi.call/cls_double_va.c (main): Replace // style
+ comments with /* */ for xlc compiler.
+ * testsuite/libffi.call/stret_large.c (main): Ditto.
+ * testsuite/libffi.call/stret_large2.c (main): Ditto.
+ * testsuite/libffi.call/nested_struct1.c (main): Ditto.
+ * testsuite/libffi.call/huge_struct.c (main): Ditto.
+ * testsuite/libffi.call/float_va.c (main): Ditto.
+ * testsuite/libffi.call/cls_struct_va1.c (main): Ditto.
+ * testsuite/libffi.call/cls_pointer_stack.c (main): Ditto.
+ * testsuite/libffi.call/cls_pointer.c (main): Ditto.
+ * testsuite/libffi.call/cls_longdouble_va.c (main): Ditto.
+
+2013-02-06 Anthony Green <green@moxielogic.com>
+
+ * man/ffi_prep_cif.3: Clean up for debian lintian checker.
+
+2013-02-06 Anthony Green <green@moxielogic.com>
+
+ * Makefile.am (pkgconfigdir): Add missing pkgconfig install bits.
+ * Makefile.in: Rebuild.
+
+2013-02-02 Mark H Weaver <mhw@netris.org>
+
+ * src/x86/ffi64.c (ffi_call): Sign-extend integer arguments passed
+ via general purpose registers.
+
+2013-01-21 Nathan Rossi <nathan.rossi@xilinx.com>
+
+ * README: Add MicroBlaze details.
+ * Makefile.am: Add MicroBlaze support.
+ * configure.ac: Likewise.
+ * src/microblaze/ffi.c: New.
+ * src/microblaze/ffitarget.h: Likewise.
+ * src/microblaze/sysv.S: Likewise.
+
+2013-01-21 Nathan Rossi <nathan.rossi@xilinx.com>
+ * testsuite/libffi.call/return_uc.c: Fixed issue.
+
+2013-01-21 Chris Zankel <chris@zankel.net>
+
+ * README: Add Xtensa support.
+ * Makefile.am: Likewise.
+ * configure.ac: Likewise.
+ * Makefile.in Regenerate.
+ * configure: Likewise.
+ * src/prep_cif.c: Handle Xtensa.
+ * src/xtensa: New directory.
+ * src/xtensa/ffi.c: New file.
+ * src/xtensa/ffitarget.h: Ditto.
+ * src/xtensa/sysv.S: Ditto.
+
+2013-01-11 Anthony Green <green@moxielogic.com>
+
+ * src/powerpc/ffi_darwin.c (ffi_prep_args): Replace // style
+ comments with /* */ for xlc compiler.
+ * src/powerpc/aix.S (ffi_call_AIX): Ditto.
+ * testsuite/libffi.call/ffitest.h (allocate_mmap): Delete
+ deprecated inline function.
+ * testsuite/libffi.special/ffitestcxx.h: Ditto.
+ * README: Add update for AIX support.
+
+2013-01-11 Anthony Green <green@moxielogic.com>
+
+ * configure.ac: Robustify pc relative reloc check.
+ * m4/ax_cc_maxopt.m4: Don't -malign-double. This is an ABI
+ changing option for 32-bit x86.
+ * aclocal.m4, configure: Rebuilt.
+ * README: Update supported target list.
+
+2013-01-10 Anthony Green <green@moxielogic.com>
+
+ * README (tested): Add Compiler column to table.
+
+2013-01-10 Anthony Green <green@moxielogic.com>
+
+ * src/x86/ffi64.c (struct register_args): Make sse array and array
+ of unions for sunpro compiler compatibility.
+
+2013-01-10 Anthony Green <green@moxielogic.com>
+
+ * configure.ac: Test target platform size_t size. Handle both 32
+ and 64-bit builds for x86_64-* and i?86-* targets (allowing for
+ CFLAG option to change default settings).
+ * configure, aclocal.m4: Rebuilt.
+
+2013-01-10 Anthony Green <green@moxielogic.com>
+
+ * testsuite/libffi.special/special.exp: Only run exception
+ handling tests when using GNU compiler.
+
+ * m4/ax_compiler_vendor.m4: New file.
+ * configure.ac: Test for compiler vendor and don't use
+ AX_CFLAGS_WARN_ALL with the sun compiler.
+ * aclocal.m4, configure: Rebuilt.
+
+2013-01-10 Anthony Green <green@moxielogic.com>
+
+ * include/ffi_common.h: Don't use GCCisms to define types when
+ building with the SUNPRO compiler.
+
+2013-01-10 Anthony Green <green@moxielogic.com>
+
+ * configure.ac: Put local.exp in the right place.
+ * configure: Rebuilt.
+
+ * src/x86/ffi.c: Update comment about regparm function attributes.
+ * src/x86/sysv.S (ffi_closure_SYSV): The SUNPRO compiler requires
+ that all function arguments be passed on the stack (no regparm
+ support).
+
+2013-01-08 Anthony Green <green@moxielogic.com>
+
+ * configure.ac: Generate local.exp. This sets CC_FOR_TARGET
+ when we are using the vendor compiler.
+ * testsuite/Makefile.am (EXTRA_DEJAGNU_SITE_CONFIG): Point to
+ ../local.exp.
+ * configure, testsuite/Makefile.in: Rebuilt.
+
+ * testsuite/libffi.call/call.exp: Run tests with different
+ options, depending on whether or not we are using gcc or the
+ vendor compiler.
+ * testsuite/lib/libffi.exp (libffi-init): Set using_gcc based on
+ whether or not we are building/testing with gcc.
+
+2013-01-08 Anthony Green <green@moxielogic.com>
+
+ * configure.ac: Switch x86 solaris target to X86 by default.
+ * configure: Rebuilt.
+
+2013-01-08 Anthony Green <green@moxielogic.com>
+
+ * configure.ac: Fix test for read-only eh_frame.
+ * configure: Rebuilt.
+
+2013-01-08 Anthony Green <green@moxielogic.com>
+
+ * src/x86/sysv.S, src/x86/unix64.S: Only emit DWARF unwind info
+ when building with the GNU toolchain.
+ * testsuite/libffi.call/ffitest.h (CHECK): Fix for Solaris vendor
+ compiler.
+
+2013-01-07 Thorsten Glaser <tg@mirbsd.org>
+
+ * testsuite/libffi.call/cls_uchar_va.c,
+ testsuite/libffi.call/cls_ushort_va.c,
+ testsuite/libffi.call/va_1.c: Testsuite fixes.
+
+2013-01-07 Thorsten Glaser <tg@mirbsd.org>
+
+ * src/m68k/ffi.c (CIF_FLAGS_SINT8, CIF_FLAGS_SINT16): Define.
+ (ffi_prep_cif_machdep): Fix 8-bit and 16-bit signed calls.
+ * src/m68k/sysv.S (ffi_call_SYSV, ffi_closure_SYSV): Ditto.
+
+2013-01-04 Anthony Green <green@moxielogic.com>
+
+ * Makefile.am (AM_CFLAGS): Don't automatically add -fexceptions
+ and -Wall. This is set in the configure script after testing for
+ GCC.
+ * Makefile.in: Rebuilt.
+
+2013-01-02 rofl0r <https://github.com/rofl0r>
+
+ * src/powerpc/ffi.c (ffi_prep_cif_machdep): Fix build error on ppc
+ when long double == double.
+
+2013-01-02 Reini Urban <rurban@x-ray.at>
+
+ * Makefile.am (libffi_la_LDFLAGS): Add -no-undefined to LDFLAGS
+ (required for shared libs on cygwin/mingw).
+ * Makefile.in: Rebuilt.
+
+2012-10-31 Alan Modra <amodra@gmail.com>
+
+ * src/powerpc/linux64_closure.S: Add new ABI support.
+ * src/powerpc/linux64.S: Likewise.
+
+2012-10-30 Magnus Granberg <zorry@gentoo.org>
+ Pavel Labushev <pavel.labushev@runbox.ru>
+
+ * configure.ac: New options pax_emutramp
+ * configure, fficonfig.h.in: Regenerated
+ * src/closures.c: New function emutramp_enabled_check() and
+ checks.
+
+2012-10-30 Frederick Cheung <frederick.cheung@gmail.com>
+
+ * configure.ac: Enable FFI_MAP_EXEC_WRIT for Darwin 12 (mountain
+ lion) and future version.
+ * configure: Rebuild.
+
+2012-10-30 James Greenhalgh <james.greenhalgh at arm.com>
+ Marcus Shawcroft <marcus.shawcroft at arm.com>
+
+ * README: Add details of aarch64 port.
+ * src/aarch64/ffi.c: New.
+ * src/aarch64/ffitarget.h: Likewise.
+ * src/aarch64/sysv.S: Likewise.
+ * Makefile.am: Support aarch64.
+ * configure.ac: Support aarch64.
+ * Makefile.in, configure: Rebuilt.
+
+2012-10-30 James Greenhalgh <james.greenhalgh at arm.com>
+ Marcus Shawcroft <marcus.shawcroft at arm.com>
+
+ * testsuite/lib/libffi.exp: Add support for aarch64.
+ * testsuite/libffi.call/cls_struct_va1.c: New.
+ * testsuite/libffi.call/cls_uchar_va.c: Likewise.
+ * testsuite/libffi.call/cls_uint_va.c: Likewise.
+ * testsuite/libffi.call/cls_ulong_va.c: Likewise.
+ * testsuite/libffi.call/cls_ushort_va.c: Likewise.
+ * testsuite/libffi.call/nested_struct11.c: Likewise.
+ * testsuite/libffi.call/uninitialized.c: Likewise.
+ * testsuite/libffi.call/va_1.c: Likewise.
+ * testsuite/libffi.call/va_struct1.c: Likewise.
+ * testsuite/libffi.call/va_struct2.c: Likewise.
+ * testsuite/libffi.call/va_struct3.c: Likewise.
+
+2012-10-12 Walter Lee <walt@tilera.com>
+
+ * Makefile.am: Add TILE-Gx/TILEPro support.
+ * configure.ac: Likewise.
+ * Makefile.in: Regenerate.
+ * configure: Likewise.
+ * src/prep_cif.c (ffi_prep_cif_core): Handle TILE-Gx/TILEPro.
+ * src/tile: New directory.
+ * src/tile/ffi.c: New file.
+ * src/tile/ffitarget.h: Ditto.
+ * src/tile/tile.S: Ditto.
+
+2012-10-12 Matthias Klose <doko@ubuntu.com>
+
+ * generate-osx-source-and-headers.py: Normalize whitespace.
+
+2012-09-14 David Edelsohn <dje.gcc@gmail.com>
+
+ * configure: Regenerated.
+
+2012-08-26 Andrew Pinski <apinski@cavium.com>
+
+ PR libffi/53014
+ * src/mips/ffi.c (ffi_prep_closure_loc): Allow n32 with soft-float and n64 with
+ soft-float.
+
+2012-08-08 Uros Bizjak <ubizjak@gmail.com>
+
+ * src/s390/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test,
+ just return FFI_BAD_ABI when things are wrong.
+
+2012-07-18 H.J. Lu <hongjiu.lu@intel.com>
+
+ PR libffi/53982
+ PR libffi/53973
+ * src/x86/ffitarget.h: Check __ILP32__ instead of __LP64__ for x32.
+ (FFI_SIZEOF_JAVA_RAW): Defined to 4 for x32.
+
+2012-05-16 H.J. Lu <hongjiu.lu@intel.com>
+
+ * configure: Regenerated.
+
+2012-05-05 Nicolas Lelong
+
+ * libffi.xcodeproj/project.pbxproj: Fixes.
+ * README: Update for iOS builds.
+
+2012-04-23 Alexandre Keunecke I. de Mendonca <alexandre.keunecke@gmail.com>
+
+ * configure.ac: Add Blackfin/sysv support
+ * Makefile.am: Add Blackfin/sysv support
+ * src/bfin/ffi.c: Add Blackfin/sysv support
+ * src/bfin/ffitarget.h: Add Blackfin/sysv support
+
+2012-04-11 Anthony Green <green@moxielogic.com>
+
+ * Makefile.am (EXTRA_DIST): Add new script.
+ * Makefile.in: Rebuilt.
+
+2012-04-11 Zachary Waldowski <zwaldowski@gmail.com>
+
+ * generate-ios-source-and-headers.py,
+ libffi.xcodeproj/project.pbxproj: Support a Mac static library via
+ Xcode. Set iOS compatibility to 4.0. Move iOS trampoline
+ generation into an Xcode "run script" phase. Include both as
+ Xcode build scripts. Don't always regenerate config files.
+
+2012-04-10 Anthony Green <green@moxielogic.com>
+
+ * src/powerpc/ffi_darwin.c (ffi_prep_args): Add missing semicolon.
+
+2012-04-06 Anthony Green <green@moxielogic.com>
+
+ * Makefile.am (EXTRA_DIST): Add new iOS/xcode files.
+ * Makefile.in: Rebuilt.
+
+2012-04-06 Mike Lewis <mikelikespie@gmail.com>
+
+ * generate-ios-source-and-headers.py: New file.
+ * libffi.xcodeproj/project.pbxproj: New file.
+ * README: Update instructions on building iOS binary.
+ * build-ios.sh: Delete.
+
+2012-04-06 Anthony Green <green@moxielogic.com>
+
+ * src/x86/ffi64.c (UINT128): Define differently for Intel and GNU
+ compilers, then use it.
+
+2012-04-06 H.J. Lu <hongjiu.lu@intel.com>
+
+ * m4/libtool.m4 (_LT_ENABLE_LOCK): Support x32.
+
+2012-04-06 Anthony Green <green@moxielogic.com>
+
+ * testsuite/Makefile.am (EXTRA_DIST): Add missing test cases.
+ * testsuite/Makefile.in: Rebuilt.
+
+2012-04-05 Zachary Waldowski <zwaldowski@gmail.com>
+
+ * include/ffi.h.in: Add missing trampoline table fields.
+ * src/arm/sysv.S: Fix ENTRY definition, and wrap symbol references
+ in CNAME.
+ * src/x86/ffi.c: Wrap Windows specific code in ifdefs.
+
+2012-04-02 Peter Bergner <bergner@vnet.ibm.com>
+
+ * src/powerpc/ffi.c (ffi_prep_args_SYSV): Declare double_tmp.
+ Silence casting pointer to integer of different size warning.
+ Delete goto to previously deleted label.
+ (ffi_call): Silence possibly undefined warning.
+ (ffi_closure_helper_SYSV): Declare variable type.
+
+2012-04-02 Peter Rosin <peda@lysator.liu.se>
+
+ * src/x86/win32.S (ffi_call_win32): Sign/zero extend the return
+ value in the Intel version as is already done for the AT&T version.
+ (ffi_closure_SYSV): Likewise.
+ (ffi_closure_raw_SYSV): Likewise.
+ (ffi_closure_STDCALL): Likewise.
+
+2012-03-29 Peter Rosin <peda@lysator.liu.se>
+
+ * src/x86/win32.S (ffi_closure_raw_THISCALL): Unify the frame
+ generation, fix the ENDP label and remove the surplus third arg
+ from the 'lea' insn.
+
+2012-03-29 Peter Rosin <peda@lysator.liu.se>
+
+ * src/x86/win32.S (ffi_closure_raw_SYSV): Make the 'stubraw' label
+ visible outside the PROC, so that ffi_closure_raw_THISCALL can see
+ it. Also instruct the assembler to add a frame to the function.
+
+2012-03-23 Peter Rosin <peda@lysator.liu.se>
+
+ * Makefile.am (AM_CPPFLAGS): Add -DFFI_BUILDING.
+ * Makefile.in: Rebuilt.
+ * include/ffi.h.in [MSVC]: Add __declspec(dllimport) decorations
+ to all data exports, when compiling libffi clients using MSVC.
+
+2012-03-29 Peter Rosin <peda@lysator.liu.se>
+
+ * src/x86/ffitarget.h (ffi_abi): Add new ABI FFI_MS_CDECL and
+ make it the default for MSVC.
+ (FFI_TYPE_MS_STRUCT): New structure return convention.
+ * src/x86/ffi.c (ffi_prep_cif_machdep): Tweak the structure
+ return convention for FFI_MS_CDECL to be FFI_TYPE_MS_STRUCT
+ instead of an ordinary FFI_TYPE_STRUCT.
+ (ffi_prep_args): Treat FFI_TYPE_MS_STRUCT as FFI_TYPE_STRUCT.
+ (ffi_call): Likewise.
+ (ffi_prep_incoming_args_SYSV): Likewise.
+ (ffi_raw_call): Likewise.
+ (ffi_prep_closure_loc): Treat FFI_MS_CDECL as FFI_SYSV.
+ * src/x86/win32.S (ffi_closure_SYSV): For FFI_TYPE_MS_STRUCT,
+ return a pointer to the result structure in eax and don't pop
+ that pointer from the stack, the caller takes care of it.
+ (ffi_call_win32): Treat FFI_TYPE_MS_STRUCT as FFI_TYPE_STRUCT.
+ (ffi_closure_raw_SYSV): Likewise.
+
+2012-03-22 Peter Rosin <peda@lysator.liu.se>
+
+ * testsuite/libffi.call/closure_stdcall.c [MSVC]: Add inline
+ assembly version with Intel syntax.
+ * testsuite/libffi.call/closure_thiscall.c [MSVC]: Likewise.
+
+2012-03-23 Peter Rosin <peda@lysator.liu.se>
+
+ * testsuite/libffi.call/ffitest.h: Provide abstraction of
+ __attribute__((fastcall)) in the form of a __FASTCALL__
+ define. Define it to __fastcall for MSVC.
+ * testsuite/libffi.call/fastthis1_win32.c: Use the above.
+ * testsuite/libffi.call/fastthis2_win32.c: Likewise.
+ * testsuite/libffi.call/fastthis3_win32.c: Likewise.
+ * testsuite/libffi.call/strlen2_win32.c: Likewise.
+ * testsuite/libffi.call/struct1_win32.c: Likewise.
+ * testsuite/libffi.call/struct2_win32.c: Likewise.
+
+2012-03-22 Peter Rosin <peda@lysator.liu.se>
+
+ * src/x86/win32.S [MSVC] (ffi_closure_THISCALL): Remove the manual
+ frame on function entry, MASM adds one automatically.
+
+2012-03-22 Peter Rosin <peda@lysator.liu.se>
+
+ * testsuite/libffi.call/ffitest.h [MSVC]: Add kludge for missing
+ bits in the MSVC headers.
+
+2012-03-22 Peter Rosin <peda@lysator.liu.se>
+
+ * testsuite/libffi.call/cls_12byte.c: Adjust to the C89 style
+ with no declarations after statements.
+ * testsuite/libffi.call/cls_16byte.c: Likewise.
+ * testsuite/libffi.call/cls_18byte.c: Likewise.
+ * testsuite/libffi.call/cls_19byte.c: Likewise.
+ * testsuite/libffi.call/cls_1_1byte.c: Likewise.
+ * testsuite/libffi.call/cls_20byte.c: Likewise.
+ * testsuite/libffi.call/cls_20byte1.c: Likewise.
+ * testsuite/libffi.call/cls_24byte.c: Likewise.
+ * testsuite/libffi.call/cls_2byte.c: Likewise.
+ * testsuite/libffi.call/cls_3_1byte.c: Likewise.
+ * testsuite/libffi.call/cls_3byte1.c: Likewise.
+ * testsuite/libffi.call/cls_3byte2.c: Likewise.
+ * testsuite/libffi.call/cls_4_1byte.c: Likewise.
+ * testsuite/libffi.call/cls_4byte.c: Likewise.
+ * testsuite/libffi.call/cls_5_1_byte.c: Likewise.
+ * testsuite/libffi.call/cls_5byte.c: Likewise.
+ * testsuite/libffi.call/cls_64byte.c: Likewise.
+ * testsuite/libffi.call/cls_6_1_byte.c: Likewise.
+ * testsuite/libffi.call/cls_6byte.c: Likewise.
+ * testsuite/libffi.call/cls_7_1_byte.c: Likewise.
+ * testsuite/libffi.call/cls_7byte.c: Likewise.
+ * testsuite/libffi.call/cls_8byte.c: Likewise.
+ * testsuite/libffi.call/cls_9byte1.c: Likewise.
+ * testsuite/libffi.call/cls_9byte2.c: Likewise.
+ * testsuite/libffi.call/cls_align_double.c: Likewise.
+ * testsuite/libffi.call/cls_align_float.c: Likewise.
+ * testsuite/libffi.call/cls_align_longdouble.c: Likewise.
+ * testsuite/libffi.call/cls_align_longdouble_split.c: Likewise.
+ * testsuite/libffi.call/cls_align_longdouble_split2.c: Likewise.
+ * testsuite/libffi.call/cls_align_pointer.c: Likewise.
+ * testsuite/libffi.call/cls_align_sint16.c: Likewise.
+ * testsuite/libffi.call/cls_align_sint32.c: Likewise.
+ * testsuite/libffi.call/cls_align_sint64.c: Likewise.
+ * testsuite/libffi.call/cls_align_uint16.c: Likewise.
+ * testsuite/libffi.call/cls_align_uint32.c: Likewise.
+ * testsuite/libffi.call/cls_align_uint64.c: Likewise.
+ * testsuite/libffi.call/cls_dbls_struct.c: Likewise.
+ * testsuite/libffi.call/cls_pointer_stack.c: Likewise.
+ * testsuite/libffi.call/err_bad_typedef.c: Likewise.
+ * testsuite/libffi.call/huge_struct.c: Likewise.
+ * testsuite/libffi.call/nested_struct.c: Likewise.
+ * testsuite/libffi.call/nested_struct1.c: Likewise.
+ * testsuite/libffi.call/nested_struct10.c: Likewise.
+ * testsuite/libffi.call/nested_struct2.c: Likewise.
+ * testsuite/libffi.call/nested_struct3.c: Likewise.
+ * testsuite/libffi.call/nested_struct4.c: Likewise.
+ * testsuite/libffi.call/nested_struct5.c: Likewise.
+ * testsuite/libffi.call/nested_struct6.c: Likewise.
+ * testsuite/libffi.call/nested_struct7.c: Likewise.
+ * testsuite/libffi.call/nested_struct8.c: Likewise.
+ * testsuite/libffi.call/nested_struct9.c: Likewise.
+ * testsuite/libffi.call/stret_large.c: Likewise.
+ * testsuite/libffi.call/stret_large2.c: Likewise.
+ * testsuite/libffi.call/stret_medium.c: Likewise.
+ * testsuite/libffi.call/stret_medium2.c: Likewise.
+ * testsuite/libffi.call/struct1.c: Likewise.
+ * testsuite/libffi.call/struct1_win32.c: Likewise.
+ * testsuite/libffi.call/struct2.c: Likewise.
+ * testsuite/libffi.call/struct2_win32.c: Likewise.
+ * testsuite/libffi.call/struct3.c: Likewise.
+ * testsuite/libffi.call/struct4.c: Likewise.
+ * testsuite/libffi.call/struct5.c: Likewise.
+ * testsuite/libffi.call/struct6.c: Likewise.
+ * testsuite/libffi.call/struct7.c: Likewise.
+ * testsuite/libffi.call/struct8.c: Likewise.
+ * testsuite/libffi.call/struct9.c: Likewise.
+ * testsuite/libffi.call/testclosure.c: Likewise.
+
+2012-03-21 Peter Rosin <peda@lysator.liu.se>
+
+ * testsuite/libffi.call/float_va.c (float_va_fn): Use %f when
+ printing doubles (%lf is for long doubles).
+ (main): Likewise.
+
+2012-03-21 Peter Rosin <peda@lysator.liu.se>
+
+ * testsuite/lib/target-libpath.exp [*-*-cygwin*, *-*-mingw*]
+ (set_ld_library_path_env_vars): Add the library search dir to PATH
+ (and save PATH for later).
+ (restore_ld_library_path_env_vars): Restore PATH.
+
+2012-03-20 Peter Rosin <peda@lysator.liu.se>
+
+ * testsuite/libffi.call/strlen2_win32.c (main): Remove bug.
+ * src/x86/win32.S [MSVC] (ffi_closure_SYSV): Make the 'stub' label
+ visible outside the PROC, so that ffi_closure_THISCALL can see it.
+
+2012-03-19 Alan Hourihane <alanh@fairlite.co.uk>
+
+ * src/m68k/ffi.c: Add MINT support.
+ * src/m68k/sysv.S: Ditto.
+
+2012-03-06 Chung-Lin Tang <cltang@codesourcery.com>
+
+ * src/arm/ffi.c (ffi_call): Add __ARM_EABI__ guard around call to
+ ffi_call_VFP().
+ (ffi_prep_closure_loc): Add __ARM_EABI__ guard around use of
+ ffi_closure_VFP.
+ * src/arm/sysv.S: Add __ARM_EABI__ guard around VFP code.
+
+2012-03-19 chennam <csit@axway.com>
+
+ * src/powerpc/ffi_darwin.c (ffi_prep_closure_loc): Fix AIX closure
+ support.
+
+2012-03-13 Kaz Kojima <kkojima@gcc.gnu.org>
+
+ * src/sh/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test,
+ just return FFI_BAD_ABI when things are wrong.
+ * src/sh64/ffi.c (ffi_prep_closure_loc): Ditto.
+
+2012-03-09 David Edelsohn <dje.gcc@gmail.com>
+
+ * src/powerpc/aix_closure.S (ffi_closure_ASM): Adjust for Darwin64
+ change to return value of ffi_closure_helper_DARWIN and load type
+ from return type.
+
+2012-03-03 H.J. Lu <hongjiu.lu@intel.com>
+
+ * src/x86/ffi64.c (ffi_call): Cast the return value to unsigned
+ long.
+ (ffi_prep_closure_loc): Cast to 64bit address in trampoline.
+ (ffi_closure_unix64_inner): Cast return pointer to unsigned long
+ first.
+
+ * src/x86/ffitarget.h (FFI_SIZEOF_ARG): Defined to 8 for x32.
+ (ffi_arg): Set to unsigned long long for x32.
+ (ffi_sarg): Set to long long for x32.
+
+2012-03-03 H.J. Lu <hongjiu.lu@intel.com>
+
+ * src/prep_cif.c (ffi_prep_cif_core): Properly check bad ABI.
+
+2012-03-03 Andoni Morales Alastruey <ylatuya@gmail.com>
+
+ * configure.ac: Add -no-undefined for both 32- and 64-bit x86
+ windows-like hosts.
+ * configure: Rebuilt.
+
+2012-02-27 Mikael Pettersson <mikpe@it.uu.se>
+
+ PR libffi/52223
+ * Makefile.am (FLAGS_TO_PASS): Define.
+ * Makefile.in: Regenerate.
+
+2012-02-23 Anthony Green <green@moxielogic.com>
+
+ * src/*/ffitarget.h: Ensure that users never include ffitarget.h
+ directly.
+
+2012-02-23 Kai Tietz <ktietz@redhat.com>
+
+ PR libffi/52221
+ * src/x86/ffi.c (ffi_closure_raw_THISCALL): New
+ prototype.
+ (ffi_prep_raw_closure_loc): Use ffi_closure_raw_THISCALL for
+ thiscall-convention.
+ (ffi_raw_call): Use ffi_prep_args_raw.
+ * src/x86/win32.S (ffi_closure_raw_THISCALL): Add
+ implementation for stub.
+
+2012-02-10 Kai Tietz <ktietz@redhat.com>
+
+	* configure.ac (AM_LTLDFLAGS): Add -no-undefined for x64
+ windows target.
+ * configure: Regenerated.
+
+2012-02-08 Kai Tietz <ktietz@redhat.com>
+
+	* src/prep_cif.c (ffi_prep_cif): Also allow FFI_THISCALL
+	for X86_WIN32.
+ * src/x86/ffi.c (ffi_closure_THISCALL): Add prototype.
+ (FFI_INIT_TRAMPOLINE_THISCALL): New trampoline code.
+ (ffi_prep_closure_loc): Add FFI_THISCALL support.
+ * src/x86/ffitarget.h (FFI_TRAMPOLINE_SIZE): Adjust size.
+ * src/x86/win32.S (ffi_closure_THISCALL): New closure code
+ for thiscall-calling convention.
+ * testsuite/libffi.call/closure_thiscall.c: New test.
+
+2012-01-28 Kai Tietz <ktietz@redhat.com>
+
+ * src/libffi/src/x86/ffi.c (ffi_call_win32): Add new
+	argument to the prototype to specify the calling convention.
+ (ffi_call): Add support for stdcall/thiscall convention.
+ (ffi_prep_args): Likewise.
+ (ffi_raw_call): Likewise.
+ * src/x86/ffitarget.h (ffi_abi): Add FFI_THISCALL and
+ FFI_FASTCALL.
+ * src/x86/win32.S (_ffi_call_win32): Add support for
+ fastcall/thiscall calling-convention calls.
+ * testsuite/libffi.call/fastthis1_win32.c: New test.
+ * testsuite/libffi.call/fastthis2_win32.c: New test.
+ * testsuite/libffi.call/fastthis3_win32.c: New test.
+ * testsuite/libffi.call/strlen2_win32.c: New test.
+ * testsuite/libffi.call/many2_win32.c: New test.
+ * testsuite/libffi.call/struct1_win32.c: New test.
+ * testsuite/libffi.call/struct2_win32.c: New test.
+
+2012-01-23 Uros Bizjak <ubizjak@gmail.com>
+
+ * src/alpha/ffi.c (ffi_prep_closure_loc): Check for bad ABI.
+
+2012-01-23 Anthony Green <green@moxielogic.com>
+ Chris Young <cdyoung@ntlworld.com>
+
+ * configure.ac: Add Amiga support.
+ * configure: Rebuilt.
+
+2012-01-23 Dmitry Nadezhin <dmitry.nadezhin@gmail.com>
+
+ * include/ffi_common.h (LIKELY, UNLIKELY): Fix definitions.
+
+2012-01-23 Andreas Schwab <schwab@linux-m68k.org>
+
+ * src/m68k/sysv.S (ffi_call_SYSV): Properly test for plain
+ mc68000. Test for __HAVE_68881__ in addition to __MC68881__.
+
+2012-01-19 Jakub Jelinek <jakub@redhat.com>
+
+ PR rtl-optimization/48496
+ * src/ia64/ffi.c (ffi_call): Fix up aliasing violations.
+
+2012-01-09 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * configure.ac (i?86-*-*): Set TARGET to X86_64.
+ * configure: Regenerate.
+
+2011-12-07 Andrew Pinski <apinski@cavium.com>
+
+ PR libffi/50051
+ * src/mips/n32.S: Add ".set mips4".
+
+2011-11-21 Andreas Tobler <andreast@fgznet.ch>
+
+ * configure: Regenerate.
+
+2011-11-12 David Gilbert <david.gilbert@linaro.org>
+
+ * doc/libffi.texi, include/ffi.h.in, include/ffi_common.h,
+ man/Makefile.am, man/ffi.3, man/ffi_prep_cif.3,
+ man/ffi_prep_cif_var.3, src/arm/ffi.c, src/arm/ffitarget.h,
+ src/cris/ffi.c, src/prep_cif.c,
+ testsuite/libffi.call/cls_double_va.c,
+ testsuite/libffi.call/cls_longdouble_va.c,
+ testsuite/libffi.call/float_va.c: Many changes to support variadic
+ function calls.
+
+2011-11-12 Kyle Moffett <Kyle.D.Moffett@boeing.com>
+
+ * src/powerpc/ffi.c, src/powerpc/ffitarget.h,
+ src/powerpc/ppc_closure.S, src/powerpc/sysv.S: Many changes for
+ softfloat powerpc variants.
+
+2011-11-12 Petr Salinger <Petr.Salinger@seznam.cz>
+
+ * configure.ac (FFI_EXEC_TRAMPOLINE_TABLE): Fix kfreebsd support.
+ * configure: Rebuilt.
+
+2011-11-12 Timothy Wall <twall@users.sf.net>
+
+ * src/arm/ffi.c (ffi_prep_args, ffi_prep_incoming_args_SYSV): Max
+ alignment of 4 for wince on ARM.
+
+2011-11-12 Kyle Moffett <Kyle.D.Moffett@boeing.com>
+ Anthony Green <green@moxielogic.com>
+
+ * src/ppc/sysv.S, src/ppc/ffi.c: Remove use of ppc string
+ instructions (not available on some cores, like the PPC440).
+
+2011-11-12 Kimura Wataru <kimuraw@i.nifty.jp>
+
+ * m4/ax_enable_builddir: Change from string comparison to numeric
+ comparison for wc output.
+ * configure.ac: Enable FFI_MMAP_EXEC_WRIT for darwin11 aka Mac OS
+ X 10.7.
+ * configure: Rebuilt.
+
+2011-11-12 Anthony Green <green@moxielogic.com>
+
+ * Makefile.am (AM_CCASFLAGS): Add -g option to build assembly
+ files with debug info.
+ * Makefile.in: Rebuilt.
+
+2011-11-12 Jasper Lievisse Adriaanse <jasper@openbsd.org>
+
+ * README: Update list of supported OpenBSD systems.
+
+2011-11-12 Anthony Green <green@moxielogic.com>
+
+ * libtool-version: Update.
+ * Makefile.am (nodist_libffi_la_SOURCES): Add src/debug.c if
+ FFI_DEBUG.
+ (libffi_la_SOURCES): Remove src/debug.c
+ (EXTRA_DIST): Add src/debug.c
+ * Makefile.in: Rebuilt.
+ * README: Update for 3.0.11.
+
+2011-11-10 Richard Henderson <rth@redhat.com>
+
+ * configure.ac (GCC_AS_CFI_PSEUDO_OP): Use it instead of inline check.
+ * configure, aclocal.m4: Rebuild.
+
+2011-09-04 Iain Sandoe <iains@gcc.gnu.org>
+
+ PR libffi/49594
+ * src/powerpc/darwin_closure.S (stubs): Make the stub binding
+ helper reference track the architecture pointer size.
+
+2011-08-25 Andrew Haley <aph@redhat.com>
+
+ * src/arm/ffi.c (FFI_INIT_TRAMPOLINE): Remove hard-coded assembly
+ instructions.
+ * src/arm/sysv.S (ffi_arm_trampoline): Put them here instead.
+
+2011-07-11 Andrew Haley <aph@redhat.com>
+
+ * src/arm/ffi.c (FFI_INIT_TRAMPOLINE): Clear icache.
+
+2011-06-29 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * testsuite/libffi.call/cls_double_va.c: Move PR number to comment.
+ * testsuite/libffi.call/cls_longdouble_va.c: Likewise.
+
+2011-06-29 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ PR libffi/46660
+ * testsuite/libffi.call/cls_double_va.c: xfail dg-output on
+ mips-sgi-irix6*.
+ * testsuite/libffi.call/cls_longdouble_va.c: Likewise.
+
+2011-06-14 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * testsuite/libffi.call/huge_struct.c (test_large_fn): Use PRIu8,
+ PRId8 instead of %hhu, %hhd.
+ * testsuite/libffi.call/ffitest.h [__alpha__ && __osf__] (PRId8,
+ PRIu8): Define.
+ [__sgi__] (PRId8, PRIu8): Define.
+
+2011-04-29 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * src/alpha/osf.S (UA_SI, FDE_ENCODING, FDE_ENCODE, FDE_ARANGE):
+ Define.
+ Use them to handle ELF vs. ECOFF differences.
+ [__osf__] (_GLOBAL__F_ffi_call_osf): Define.
+
+2011-03-30 Timothy Wall <twall@users.sf.net>
+
+ * src/powerpc/darwin.S: Fix unknown FDE encoding.
+ * src/powerpc/darwin_closure.S: ditto.
+
+2011-02-25 Anthony Green <green@moxielogic.com>
+
+ * src/powerpc/ffi.c (ffi_prep_closure_loc): Allow for more
+ 32-bit ABIs.
+
+2011-02-15 Anthony Green <green@moxielogic.com>
+
+	* m4/ax_cc_maxopt.m4: Don't use -malign-double or -ffast-math.
+ * configure: Rebuilt.
+
+2011-02-13 Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
+
+ * configure: Regenerate.
+
+2011-02-13 Anthony Green <green@moxielogic.com>
+
+ * include/ffi_common.h (UNLIKELY, LIKELY): Define.
+ * src/x86/ffi64.c (UNLIKELY, LIKELY): Remove definition.
+ * src/prep_cif.c (UNLIKELY, LIKELY): Remove definition.
+
+ * src/prep_cif.c (initialize_aggregate): Convert assertion into
+ FFI_BAD_TYPEDEF return. Initialize arg size and alignment to 0.
+
+ * src/pa/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test,
+ just return FFI_BAD_ABI when things are wrong.
+ * src/arm/ffi.c (ffi_prep_closure_loc): Ditto.
+ * src/powerpc/ffi.c (ffi_prep_closure_loc): Ditto.
+ * src/mips/ffi.c (ffi_prep_closure_loc): Ditto.
+ * src/ia64/ffi.c (ffi_prep_closure_loc): Ditto.
+ * src/avr32/ffi.c (ffi_prep_closure_loc): Ditto.
+
+2011-02-11 Anthony Green <green@moxielogic.com>
+
+ * src/sparc/ffi.c (ffi_prep_closure_loc): Don't ASSERT ABI test,
+ just return FFI_BAD_ABI when things are wrong.
+
+2012-02-11 Eric Botcazou <ebotcazou@adacore.com>
+
+ * src/sparc/v9.S (STACKFRAME): Bump to 176.
+
+2011-02-09 Stuart Shelton <srcshelton@gmail.com>
+
+ http://bugs.gentoo.org/show_bug.cgi?id=286911
+ * src/mips/ffitarget.h: Clean up error messages.
+ * src/java_raw_api.c (ffi_java_translate_args): Cast raw arg to
+ ffi_raw*.
+ * include/ffi.h.in: Add pragma for SGI compiler.
+
+2011-02-09 Anthony Green <green@moxielogic.com>
+
+ * configure.ac: Add powerpc64-*-darwin* support.
+
+2011-02-09 Anthony Green <green@moxielogic.com>
+
+ * README: Mention Interix.
+
+2011-02-09 Jonathan Callen <abcd@gentoo.org>
+
+ * configure.ac: Add Interix to win32/cygwin/mingw case.
+ * configure: Ditto.
+ * src/closures.c: Treat Interix like Cygwin, instead of as a
+ generic win32.
+
+2011-02-09 Anthony Green <green@moxielogic.com>
+
+ * testsuite/libffi.call/err_bad_typedef.c: Remove xfail.
+ * testsuite/libffi.call/err_bad_abi.c: Remove xfail.
+ * src/x86/ffi64.c (UNLIKELY, LIKELY): Define.
+ (ffi_prep_closure_loc): Check for bad ABI.
+ * src/prep_cif.c (UNLIKELY, LIKELY): Define.
+ (initialize_aggregate): Check for bad types.
+
+2011-02-09 Landon Fuller <landonf@plausible.coop>
+
+ * Makefile.am (EXTRA_DIST): Add build-ios.sh, src/arm/gentramp.sh,
+ src/arm/trampoline.S.
+	(nodist_libffi_la_SOURCES): Add src/arm/trampoline.S.
+ * configure.ac (FFI_EXEC_TRAMPOLINE_TABLE): Define.
+ * src/arm/ffi.c (ffi_trampoline_table)
+ (ffi_closure_trampoline_table_page, ffi_trampoline_table_entry)
+ (FFI_TRAMPOLINE_CODELOC_CONFIG, FFI_TRAMPOLINE_CONFIG_PAGE_OFFSET)
+ (FFI_TRAMPOLINE_COUNT, ffi_trampoline_lock, ffi_trampoline_tables)
+ (ffi_trampoline_table_alloc, ffi_closure_alloc, ffi_closure_free):
+ Define for FFI_EXEC_TRAMPOLINE_TABLE case (iOS).
+	(ffi_prep_closure_loc): Handle FFI_EXEC_TRAMPOLINE_TABLE case
+ separately.
+ * src/arm/sysv.S: Handle Apple iOS host.
+ * src/closures.c: Handle FFI_EXEC_TRAMPOLINE_TABLE case.
+ * build-ios.sh: New file.
+ * fficonfig.h.in, configure, Makefile.in: Rebuilt.
+ * README: Mention ARM iOS.
+
+2011-02-08 Oren Held <orenhe@il.ibm.com>
+
+ * src/dlmalloc.c (_STRUCT_MALLINFO): Define in order to avoid
+ redefinition of mallinfo on HP-UX.
+
+2011-02-08 Ginn Chen <ginn.chen@oracle.com>
+
+ * src/sparc/ffi.c (ffi_call): Make compatible with Solaris Studio
+ aggregate return ABI. Flush cache.
+ (ffi_prep_closure_loc): Flush cache.
+
+2011-02-11 Anthony Green <green@moxielogic.com>
+
+ From Tom Honermann <tom.honermann@oracle.com>:
+ * src/powerpc/aix.S (ffi_call_AIX): Support for xlc toolchain on
+ AIX. Declare .ffi_prep_args. Insert nops after branch
+ instructions so that the AIX linker can insert TOC reload
+ instructions.
+ * src/powerpc/aix_closure.S: Declare .ffi_closure_helper_DARWIN.
+
+2011-02-08 Ed <ed@kdtc.net>
+
+ * src/powerpc/asm.h: Fix grammar nit in comment.
+
+2011-02-08 Uli Link <ul.mcamafia@linkitup.de>
+
+ * include/ffi.h.in (FFI_64_BIT_MAX): Define and use.
+
+2011-02-09 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ PR libffi/46661
+ * testsuite/libffi.call/cls_pointer.c (main): Cast void * to
+ uintptr_t first.
+ * testsuite/libffi.call/cls_pointer_stack.c (main): Likewise.
+
+2011-02-08 Rafael Avila de Espindola <respindola@mozilla.com>
+
+ * configure.ac: Fix x86 test for pc related relocs.
+ * configure: Rebuilt.
+
+2011-02-07 Joel Sherrill <joel.sherrill@oarcorp.com>
+
+ * libffi/src/m68k/ffi.c: Add RTEMS support for cache flushing.
+ Handle case when CPU variant does not have long double support.
+ * libffi/src/m68k/sysv.S: Add support for mc68000, Coldfire,
+ and cores with soft floating point.
+
+2011-02-07 Joel Sherrill <joel.sherrill@oarcorp.com>
+
+ * configure.ac: Add mips*-*-rtems* support.
+ * configure: Regenerate.
+ * src/mips/ffitarget.h: Ensure needed constants are available
+ for targets which do not have sgidefs.h.
+
+2011-01-26 Dave Korn <dave.korn.cygwin@gmail.com>
+
+ PR target/40125
+ * configure.ac (AM_LTLDFLAGS): Add -bindir option for windows DLLs.
+ * configure: Regenerate.
+
+2010-12-18 Iain Sandoe <iains@gcc.gnu.org>
+
+ PR libffi/29152
+ PR libffi/42378
+ * src/powerpc/darwin_closure.S: Provide Darwin64 implementation,
+ update comments.
+ * src/powerpc/ffitarget.h (POWERPC_DARWIN64): New,
+ (FFI_TRAMPOLINE_SIZE): Update for Darwin64.
+ * src/powerpc/darwin.S: Provide Darwin64 implementation,
+ update comments.
+ * src/powerpc/ffi_darwin.c: Likewise.
+
+2010-12-06 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * configure.ac (libffi_cv_as_ascii_pseudo_op): Use double
+ backslashes.
+ (libffi_cv_as_string_pseudo_op): Likewise.
+ * configure: Regenerate.
+
+2010-12-03 Chung-Lin Tang <cltang@codesourcery.com>
+
+ * src/arm/sysv.S (ffi_closure_SYSV): Add UNWIND to .pad directive.
+ (ffi_closure_VFP): Same.
+ (ffi_call_VFP): Move down to before ffi_closure_VFP. Add '.fpu vfp'
+ directive.
+
+2010-12-01 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * testsuite/libffi.call/ffitest.h [__sgi] (PRId64, PRIu64): Define.
+ (PRIuPTR): Define.
+
+2010-11-29 Richard Henderson <rth@redhat.com>
+ Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * src/x86/sysv.S (FDE_ENCODING, FDE_ENCODE): Define.
+ (.eh_frame): Use FDE_ENCODING.
+ (.LASFDE1, .LASFDE2, LASFDE3): Simplify with FDE_ENCODE.
+
+2010-11-22 Jacek Caban <jacek@codeweavers.com>
+
+ * configure.ac: Check for symbol underscores on mingw-w64.
+ * configure: Rebuilt.
+ * src/x86/win64.S: Correctly access extern symbols in respect to
+ underscores.
+
+2010-11-15 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * testsuite/lib/libffi-dg.exp: Rename ...
+ * testsuite/lib/libffi.exp: ... to this.
+ * libffi/testsuite/libffi.call/call.exp: Don't load libffi-dg.exp.
+ * libffi/testsuite/libffi.special/special.exp: Likewise.
+
+2010-10-28 Chung-Lin Tang <cltang@codesourcery.com>
+
+ * src/arm/ffi.c (ffi_prep_args): Add VFP register argument handling
+ code, new parameter, and return value. Update comments.
+ (ffi_prep_cif_machdep): Add case for VFP struct return values. Add
+ call to layout_vfp_args().
+ (ffi_call_SYSV): Update declaration.
+ (ffi_call_VFP): New declaration.
+ (ffi_call): Add VFP struct return conditions. Call ffi_call_VFP()
+ when ABI is FFI_VFP.
+ (ffi_closure_VFP): New declaration.
+ (ffi_closure_SYSV_inner): Add new vfp_args parameter, update call to
+ ffi_prep_incoming_args_SYSV().
+ (ffi_prep_incoming_args_SYSV): Update parameters. Add VFP argument
+ case handling.
+ (ffi_prep_closure_loc): Pass ffi_closure_VFP to trampoline
+ construction under VFP hard-float.
+ (rec_vfp_type_p): New function.
+ (vfp_type_p): Same.
+ (place_vfp_arg): Same.
+ (layout_vfp_args): Same.
+ * src/arm/ffitarget.h (ffi_abi): Add FFI_VFP. Define FFI_DEFAULT_ABI
+ based on __ARM_PCS_VFP.
+ (FFI_EXTRA_CIF_FIELDS): Define for adding VFP hard-float specific
+ fields.
+ (FFI_TYPE_STRUCT_VFP_FLOAT): Define internally used type code.
+ (FFI_TYPE_STRUCT_VFP_DOUBLE): Same.
+ * src/arm/sysv.S (ffi_call_SYSV): Change call of ffi_prep_args() to
+ direct call. Move function pointer load upwards.
+ (ffi_call_VFP): New function.
+ (ffi_closure_VFP): Same.
+
+ * testsuite/lib/libffi-dg.exp (check-flags): New function.
+ (dg-skip-if): New function.
+ * testsuite/libffi.call/cls_double_va.c: Skip if target is arm*-*-*
+ and compiler options include -mfloat-abi=hard.
+ * testsuite/libffi.call/cls_longdouble_va.c: Same.
+
+2010-10-01 Jakub Jelinek <jakub@redhat.com>
+
+ PR libffi/45677
+ * src/x86/ffi64.c (ffi_prep_cif_machdep): Ensure cif->bytes is
+ a multiple of 8.
+ * testsuite/libffi.call/many2.c: New test.
+
+2010-08-20 Mark Wielaard <mjw@redhat.com>
+
+ * src/closures.c (open_temp_exec_file_mnt): Check if getmntent_r
+ returns NULL.
+
+2010-08-09 Andreas Tobler <andreast@fgznet.ch>
+
+ * configure.ac: Add target powerpc64-*-freebsd*.
+ * configure: Regenerate.
+ * testsuite/libffi.call/cls_align_longdouble_split.c: Pass
+ -mlong-double-128 only to linux targets.
+ * testsuite/libffi.call/cls_align_longdouble_split2.c: Likewise.
+ * testsuite/libffi.call/cls_longdouble.c: Likewise.
+ * testsuite/libffi.call/huge_struct.c: Likewise.
+
+2010-08-05 Dan Witte <dwitte@mozilla.com>
+
+ * Makefile.am: Pass FFI_DEBUG define to msvcc.sh for linking to the
+ debug CRT when --enable-debug is given.
+ * configure.ac: Define it.
+ * msvcc.sh: Translate -g and -DFFI_DEBUG appropriately.
+
+2010-08-04 Dan Witte <dwitte@mozilla.com>
+
+ * src/x86/ffitarget.h: Add X86_ANY define for all x86/x86_64
+ platforms.
+ * src/x86/ffi.c: Remove redundant ifdef checks.
+ * src/prep_cif.c: Push stack space computation into src/x86/ffi.c
+ for X86_ANY so return value space doesn't get added twice.
+
+2010-08-03 Neil Rashbrooke <neil@parkwaycc.co.uk>
+
+ * msvcc.sh: Don't pass -safeseh to ml64 because behavior is buggy.
+
+2010-07-22 Dan Witte <dwitte@mozilla.com>
+
+ * src/*/ffitarget.h: Make FFI_LAST_ABI one past the last valid ABI.
+ * src/prep_cif.c: Fix ABI assertion.
+ * src/cris/ffi.c: Ditto.
+
+2010-07-10 Evan Phoenix <evan@fallingsnow.net>
+
+ * src/closures.c (selinux_enabled_check): Fix strncmp usage bug.
+
+2010-07-07 Dan Horák <dan@danny.cz>
+
+ * include/ffi.h.in: Protect #define with #ifndef.
+ * src/powerpc/ffitarget.h: Ditto.
+ * src/s390/ffitarget.h: Ditto.
+ * src/sparc/ffitarget.h: Ditto.
+
+2010-07-07 Neil Roberts <neil@linux.intel.com>
+
+ * src/x86/sysv.S (ffi_call_SYSV): Align the stack pointer to
+ 16-bytes.
+
+2010-07-02 Jakub Jelinek <jakub@redhat.com>
+
+ * Makefile.am (AM_MAKEFLAGS): Pass also mandir to submakes.
+ * Makefile.in: Regenerated.
+
+2010-05-19 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * configure.ac (libffi_cv_as_x86_pcrel): Check for illegal in as
+ output, too.
+ (libffi_cv_as_ascii_pseudo_op): Check for .ascii.
+ (libffi_cv_as_string_pseudo_op): Check for .string.
+ * configure: Regenerate.
+ * fficonfig.h.in: Regenerate.
+ * src/x86/sysv.S (.eh_frame): Use .ascii, .string or error.
+
+2010-05-11 Dan Witte <dwitte@mozilla.com>
+
+	* doc/libffi.texi: Document previous change.
+
+2010-05-11 Makoto Kato <m_kato@ga2.so-net.ne.jp>
+
+ * src/x86/ffi.c (ffi_call): Don't copy structs passed by value.
+
+2010-05-05 Michael Kohler <michaelkohler@live.com>
+
+ * src/dlmalloc.c (dlfree): Fix spelling.
+ * src/ia64/ffi.c (ffi_prep_cif_machdep): Ditto.
+ * configure.ac: Ditto.
+ * configure: Rebuilt.
+
+2010-04-13 Dan Witte <dwitte@mozilla.com>
+
+ * msvcc.sh: Build with -W3 instead of -Wall.
+ * src/powerpc/ffi_darwin.c: Remove build warnings.
+ * src/x86/ffi.c: Ditto.
+ * src/x86/ffitarget.h: Ditto.
+
+2010-04-12 Dan Witte <dwitte@mozilla.com>
+ Walter Meinl <wuno@lsvw.de>
+
+ * configure.ac: Add OS/2 support.
+ * configure: Rebuilt.
+ * src/closures.c: Ditto.
+ * src/dlmalloc.c: Ditto.
+ * src/x86/win32.S: Ditto.
+
+2010-04-07 Jakub Jelinek <jakub@redhat.com>
+
+ * testsuite/libffi.call/err_bad_abi.c: Remove unused args variable.
+
+2010-04-02 Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
+
+ * Makefile.in: Regenerate.
+ * aclocal.m4: Regenerate.
+ * include/Makefile.in: Regenerate.
+ * man/Makefile.in: Regenerate.
+ * testsuite/Makefile.in: Regenerate.
+
+2010-03-30 Dan Witte <dwitte@mozilla.com>
+
+ * msvcc.sh: Disable build warnings.
+ * README (tested): Clarify windows build procedure.
+
+2010-03-15 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * configure.ac (libffi_cv_as_x86_64_unwind_section_type): New test.
+ * configure: Regenerate.
+ * fficonfig.h.in: Regenerate.
+ * libffi/src/x86/unix64.S (.eh_frame)
+ [HAVE_AS_X86_64_UNWIND_SECTION_TYPE]: Use @unwind section type.
+
2010-03-14 Matthias Klose <doko@ubuntu.com>
* src/x86/ffi64.c: Fix typo in comment.
* src/x86/ffi.c: Use /* ... */ comment style.
+2010-02-24 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
+
+ * doc/libffi.texi (The Closure API): Fix typo.
+ * doc/libffi.info: Remove.
+
+2010-02-15 Matthias Klose <doko@ubuntu.com>
+
+ * src/arm/sysv.S (__ARM_ARCH__): Define for processor
+ __ARM_ARCH_7EM__.
+
+2010-01-15 Anthony Green <green@redhat.com>
+
+ * README: Add notes on building with Microsoft Visual C++.
+
+2010-01-15 Daniel Witte <dwitte@mozilla.com>
+
+ * msvcc.sh: New file.
+
+ * src/x86/win32.S: Port assembly routines to MSVC and #ifdef.
+ * src/x86/ffi.c: Tweak function declaration and remove excess
+ parens.
+ * include/ffi.h.in: Add __declspec(align(8)) to typedef struct
+ ffi_closure.
+
+ * src/x86/ffi.c: Merge ffi_call_SYSV and ffi_call_STDCALL into new
+ function ffi_call_win32 on X86_WIN32.
+ * src/x86/win32.S (ffi_call_SYSV): Rename to ffi_call_win32.
+ (ffi_call_STDCALL): Remove.
+
+ * src/prep_cif.c (ffi_prep_cif): Move stack space allocation code
+ to ffi_prep_cif_machdep for x86.
+ * src/x86/ffi.c (ffi_prep_cif_machdep): To here.
+
+2010-01-15 Oliver Kiddle <okiddle@yahoo.co.uk>
+
+ * src/x86/ffitarget.h (ffi_abi): Check for __i386 and __amd64 for
+ Sun Studio compiler compatibility.
+
+2010-01-12 Conrad Irwin <conrad.irwin@gmail.com>
+
+ * doc/libffi.texi: Add closure example.
+
2010-01-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
PR libffi/40701
@@ -148,6 +1500,13 @@
* src/pa/ffi.c (ffi_closure_inner_pa32): Handle FFI_TYPE_LONGDOUBLE
type on HP-UX.
+2012-02-13 Kai Tietz <ktietz@redhat.com>
+
+ PR libffi/52221
+ * src/x86/ffi.c (ffi_prep_raw_closure_loc): Add thiscall
+ support for X86_WIN32.
+ (FFI_INIT_TRAMPOLINE_THISCALL): Fix displacement.
+
2009-12-11 Eric Botcazou <ebotcazou@adacore.com>
* src/sparc/ffi.c (ffi_closure_sparc_inner_v9): Properly align 'long
@@ -322,6 +1681,11 @@
* man/Makefile.in: Regenerate.
* testsuite/Makefile.in: Regenerate.
+2011-08-22 Jasper Lievisse Adriaanse <jasper@openbsd.org>
+
+ * configure.ac: Add OpenBSD/hppa and OpenBSD/powerpc support.
+ * configure: Rebuilt.
+
2009-07-30 Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
* configure.ac (_AC_ARG_VAR_PRECIOUS): Use m4_rename_force.
diff --git a/Modules/_ctypes/libffi/ChangeLog.libffi b/Modules/_ctypes/libffi/ChangeLog.libffi
index 5272f33..49ba8da 100644
--- a/Modules/_ctypes/libffi/ChangeLog.libffi
+++ b/Modules/_ctypes/libffi/ChangeLog.libffi
@@ -1,35 +1,6 @@
-2010-01-15 Anthony Green <green@redhat.com>
+2011-02-08 Andreas Tobler <andreast@fgznet.ch>
- * README: Add notes on building with Microsoft Visual C++.
-
-2010-01-15 Daniel Witte <dwitte@mozilla.com>
-
- * msvcc.sh: New file.
-
- * src/x86/win32.S: Port assembly routines to MSVC and #ifdef.
- * src/x86/ffi.c: Tweak function declaration and remove excess
- parens.
- * include/ffi.h.in: Add __declspec(align(8)) to typedef struct
- ffi_closure.
-
- * src/x86/ffi.c: Merge ffi_call_SYSV and ffi_call_STDCALL into new
- function ffi_call_win32 on X86_WIN32.
- * src/x86/win32.S (ffi_call_SYSV): Rename to ffi_call_win32.
- (ffi_call_STDCALL): Remove.
-
- * src/prep_cif.c (ffi_prep_cif): Move stack space allocation code
- to ffi_prep_cif_machdep for x86.
- * src/x86/ffi.c (ffi_prep_cif_machdep): To here.
-
-2010-01-15 Oliver Kiddle <okiddle@yahoo.co.uk>
-
- * src/x86/ffitarget.h (ffi_abi): Check for __i386 and __amd64 for
- Sun Studio compiler compatibility.
-
-2010-01-12 Conrad Irwin <conrad.irwin@gmail.com>
-
- * doc/libffi.texi: Add closure example.
- * doc/libffi.info: Rebuilt.
+ * testsuite/lib/libffi.exp: Tweak for stand-alone mode.
2009-12-25 Samuli Suominen <ssuominen@gentoo.org>
@@ -603,8 +574,8 @@
* Makefile.am, include/Makefile.am: Move headers to
libffi_la_SOURCES for new automake.
* Makefile.in, include/Makefile.in: Rebuilt.
-
- * testsuite/lib/wrapper.exp: Copied from gcc tree to allow for
+
+ * testsuite/lib/wrapper.exp: Copied from gcc tree to allow for
execution outside of gcc tree.
* testsuite/lib/target-libpath.exp: Ditto.
diff --git a/Modules/_ctypes/libffi/LICENSE b/Modules/_ctypes/libffi/LICENSE
index ec2fd69..aa60342 100644
--- a/Modules/_ctypes/libffi/LICENSE
+++ b/Modules/_ctypes/libffi/LICENSE
@@ -1,4 +1,4 @@
-libffi - Copyright (c) 1996-2009 Anthony Green, Red Hat, Inc and others.
+libffi - Copyright (c) 1996-2012 Anthony Green, Red Hat, Inc and others.
See source files for details.
Permission is hereby granted, free of charge, to any person obtaining
@@ -9,8 +9,8 @@ distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/Modules/_ctypes/libffi/Makefile.am b/Modules/_ctypes/libffi/Makefile.am
index f47dd1a..bf0156f 100644
--- a/Modules/_ctypes/libffi/Makefile.am
+++ b/Modules/_ctypes/libffi/Makefile.am
@@ -2,37 +2,50 @@
AUTOMAKE_OPTIONS = foreign subdir-objects
+ACLOCAL_AMFLAGS = -I m4
+
SUBDIRS = include testsuite man
-EXTRA_DIST = LICENSE ChangeLog.v1 ChangeLog.libgcj configure.host \
- src/alpha/ffi.c src/alpha/osf.S src/alpha/ffitarget.h \
- src/arm/ffi.c src/arm/sysv.S src/arm/ffitarget.h \
- src/avr32/ffi.c src/avr32/sysv.S src/avr32/ffitarget.h \
- src/cris/ffi.c src/cris/sysv.S src/cris/ffitarget.h \
- src/ia64/ffi.c src/ia64/ffitarget.h src/ia64/ia64_flags.h \
- src/ia64/unix.S \
- src/mips/ffi.c src/mips/n32.S src/mips/o32.S \
- src/mips/ffitarget.h \
- src/m32r/ffi.c src/m32r/sysv.S src/m32r/ffitarget.h \
- src/m68k/ffi.c src/m68k/sysv.S src/m68k/ffitarget.h \
- src/powerpc/ffi.c src/powerpc/sysv.S \
- src/powerpc/linux64.S src/powerpc/linux64_closure.S \
- src/powerpc/ppc_closure.S src/powerpc/asm.h \
- src/powerpc/aix.S src/powerpc/darwin.S \
- src/powerpc/aix_closure.S src/powerpc/darwin_closure.S \
- src/powerpc/ffi_darwin.c src/powerpc/ffitarget.h \
- src/s390/ffi.c src/s390/sysv.S src/s390/ffitarget.h \
- src/sh/ffi.c src/sh/sysv.S src/sh/ffitarget.h \
- src/sh64/ffi.c src/sh64/sysv.S src/sh64/ffitarget.h \
- src/sparc/v8.S src/sparc/v9.S src/sparc/ffitarget.h \
- src/sparc/ffi.c src/x86/darwin64.S \
- src/x86/ffi.c src/x86/sysv.S src/x86/win32.S src/x86/win64.S \
- src/x86/darwin.S src/x86/freebsd.S \
- src/x86/ffi64.c src/x86/unix64.S src/x86/ffitarget.h \
- src/pa/ffitarget.h src/pa/ffi.c src/pa/linux.S src/pa/hpux32.S \
- src/frv/ffi.c src/frv/eabi.S src/frv/ffitarget.h src/dlmalloc.c \
- libtool-version ChangeLog.libffi m4/libtool.m4 \
- m4/lt~obsolete.m4 m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4
+EXTRA_DIST = LICENSE ChangeLog.v1 ChangeLog.libgcj configure.host \
+ src/aarch64/ffi.c src/aarch64/ffitarget.h src/aarch64/sysv.S \
+ build-ios.sh src/alpha/ffi.c src/alpha/osf.S \
+ src/alpha/ffitarget.h src/arm/ffi.c src/arm/sysv.S \
+ src/arm/ffitarget.h src/avr32/ffi.c src/avr32/sysv.S \
+ src/avr32/ffitarget.h src/cris/ffi.c src/cris/sysv.S \
+ src/cris/ffitarget.h src/ia64/ffi.c src/ia64/ffitarget.h \
+ src/ia64/ia64_flags.h src/ia64/unix.S src/mips/ffi.c \
+ src/mips/n32.S src/mips/o32.S src/metag/ffi.c \
+ src/metag/ffitarget.h src/metag/sysv.S src/moxie/ffi.c \
+ src/moxie/ffitarget.h src/moxie/eabi.S src/mips/ffitarget.h \
+ src/m32r/ffi.c src/m32r/sysv.S src/m32r/ffitarget.h \
+ src/m68k/ffi.c src/m68k/sysv.S src/m68k/ffitarget.h \
+ src/microblaze/ffi.c src/microblaze/sysv.S \
+ src/microblaze/ffitarget.h src/powerpc/ffi.c \
+ src/powerpc/sysv.S src/powerpc/linux64.S \
+ src/powerpc/linux64_closure.S src/powerpc/ppc_closure.S \
+ src/powerpc/asm.h src/powerpc/aix.S src/powerpc/darwin.S \
+ src/powerpc/aix_closure.S src/powerpc/darwin_closure.S \
+ src/powerpc/ffi_darwin.c src/powerpc/ffitarget.h \
+ src/s390/ffi.c src/s390/sysv.S src/s390/ffitarget.h \
+ src/sh/ffi.c src/sh/sysv.S src/sh/ffitarget.h src/sh64/ffi.c \
+ src/sh64/sysv.S src/sh64/ffitarget.h src/sparc/v8.S \
+ src/sparc/v9.S src/sparc/ffitarget.h src/sparc/ffi.c \
+ src/x86/darwin64.S src/x86/ffi.c src/x86/sysv.S \
+ src/x86/win32.S src/x86/darwin.S src/x86/win64.S \
+ src/x86/freebsd.S src/x86/ffi64.c src/x86/unix64.S \
+ src/x86/ffitarget.h src/pa/ffitarget.h src/pa/ffi.c \
+ src/pa/linux.S src/pa/hpux32.S src/frv/ffi.c src/bfin/ffi.c \
+ src/bfin/ffitarget.h src/bfin/sysv.S src/frv/eabi.S \
+ src/frv/ffitarget.h src/dlmalloc.c src/tile/ffi.c \
+ src/tile/ffitarget.h src/tile/tile.S libtool-version \
+ src/xtensa/ffitarget.h src/xtensa/ffi.c src/xtensa/sysv.S \
+ ChangeLog.libffi m4/libtool.m4 m4/lt~obsolete.m4 \
+ m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 \
+ m4/ltversion.m4 src/arm/gentramp.sh src/debug.c msvcc.sh \
+ generate-ios-source-and-headers.py \
+ generate-osx-source-and-headers.py \
+ libffi.xcodeproj/project.pbxproj src/arm/trampoline.S \
+ libtool-ldflags
info_TEXINFOS = doc/libffi.texi
@@ -69,6 +82,7 @@ AM_MAKEFLAGS = \
"exec_prefix=$(exec_prefix)" \
"infodir=$(infodir)" \
"libdir=$(libdir)" \
+ "mandir=$(mandir)" \
"prefix=$(prefix)" \
"AR=$(AR)" \
"AS=$(AS)" \
@@ -79,14 +93,15 @@ AM_MAKEFLAGS = \
"RANLIB=$(RANLIB)" \
"DESTDIR=$(DESTDIR)"
-MAKEOVERRIDES=
+# Subdir rules rely on $(FLAGS_TO_PASS)
+FLAGS_TO_PASS = $(AM_MAKEFLAGS)
-ACLOCAL_AMFLAGS=$(ACLOCAL_AMFLAGS) -I m4
+MAKEOVERRIDES=
-lib_LTLIBRARIES = libffi.la
+toolexeclib_LTLIBRARIES = libffi.la
noinst_LTLIBRARIES = libffi_convenience.la
-libffi_la_SOURCES = src/debug.c src/prep_cif.c src/types.c \
+libffi_la_SOURCES = src/prep_cif.c src/types.c \
src/raw_api.c src/java_raw_api.c src/closures.c
pkgconfigdir = $(libdir)/pkgconfig
@@ -94,9 +109,16 @@ pkgconfig_DATA = libffi.pc
nodist_libffi_la_SOURCES =
+if FFI_DEBUG
+nodist_libffi_la_SOURCES += src/debug.c
+endif
+
if MIPS
nodist_libffi_la_SOURCES += src/mips/ffi.c src/mips/o32.S src/mips/n32.S
endif
+if BFIN
+nodist_libffi_la_SOURCES += src/bfin/ffi.c src/bfin/sysv.S
+endif
if X86
nodist_libffi_la_SOURCES += src/x86/ffi.c src/x86/sysv.S
endif
@@ -127,6 +149,12 @@ endif
if M68K
nodist_libffi_la_SOURCES += src/m68k/ffi.c src/m68k/sysv.S
endif
+if MOXIE
+nodist_libffi_la_SOURCES += src/moxie/ffi.c src/moxie/eabi.S
+endif
+if MICROBLAZE
+nodist_libffi_la_SOURCES += src/microblaze/ffi.c src/microblaze/sysv.S
+endif
if POWERPC
nodist_libffi_la_SOURCES += src/powerpc/ffi.c src/powerpc/sysv.S src/powerpc/ppc_closure.S src/powerpc/linux64.S src/powerpc/linux64_closure.S
endif
@@ -139,8 +167,14 @@ endif
if POWERPC_FREEBSD
nodist_libffi_la_SOURCES += src/powerpc/ffi.c src/powerpc/sysv.S src/powerpc/ppc_closure.S
endif
+if AARCH64
+nodist_libffi_la_SOURCES += src/aarch64/sysv.S src/aarch64/ffi.c
+endif
if ARM
nodist_libffi_la_SOURCES += src/arm/sysv.S src/arm/ffi.c
+if FFI_EXEC_TRAMPOLINE_TABLE
+nodist_libffi_la_SOURCES += src/arm/trampoline.S
+endif
endif
if AVR32
nodist_libffi_la_SOURCES += src/avr32/sysv.S src/avr32/ffi.c
@@ -169,18 +203,23 @@ endif
if PA_HPUX
nodist_libffi_la_SOURCES += src/pa/hpux32.S src/pa/ffi.c
endif
+if TILE
+nodist_libffi_la_SOURCES += src/tile/tile.S src/tile/ffi.c
+endif
+if XTENSA
+nodist_libffi_la_SOURCES += src/xtensa/sysv.S src/xtensa/ffi.c
+endif
+if METAG
+nodist_libffi_la_SOURCES += src/metag/sysv.S src/metag/ffi.c
+endif
libffi_convenience_la_SOURCES = $(libffi_la_SOURCES)
nodist_libffi_convenience_la_SOURCES = $(nodist_libffi_la_SOURCES)
-AM_CFLAGS = -Wall -g -fexceptions
+LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/libtool-ldflags $(LDFLAGS))
-libffi_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(LTLDFLAGS) $(AM_LTLDFLAGS)
+libffi_la_LDFLAGS = -no-undefined -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(LTLDFLAGS) $(AM_LTLDFLAGS)
AM_CPPFLAGS = -I. -I$(top_srcdir)/include -Iinclude -I$(top_srcdir)/src
AM_CCASFLAGS = $(AM_CPPFLAGS)
-# No install-html or install-pdf support in automake yet
-.PHONY: install-html install-pdf
-install-html:
-install-pdf:
diff --git a/Modules/_ctypes/libffi/Makefile.in b/Modules/_ctypes/libffi/Makefile.in
index 49afcc1..4b6abe5 100644
--- a/Modules/_ctypes/libffi/Makefile.in
+++ b/Modules/_ctypes/libffi/Makefile.in
@@ -1,9 +1,8 @@
-# Makefile.in generated by automake 1.11 from Makefile.am.
+# Makefile.in generated by automake 1.12.2 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
-# Inc.
+# Copyright (C) 1994-2012 Free Software Foundation, Inc.
+
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -17,6 +16,23 @@
VPATH = @srcdir@
+am__make_dryrun = \
+ { \
+ am__dry=no; \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \
+ | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
+ *) \
+ for am__flg in $$MAKEFLAGS; do \
+ case $$am__flg in \
+ *=*|--*) ;; \
+ *n*) am__dry=yes; break;; \
+ esac; \
+ done;; \
+ esac; \
+ test $$am__dry = yes; \
+ }
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -36,31 +52,40 @@ POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
target_triplet = @target@
-@MIPS_TRUE@am__append_1 = src/mips/ffi.c src/mips/o32.S src/mips/n32.S
-@X86_TRUE@am__append_2 = src/x86/ffi.c src/x86/sysv.S
-@X86_FREEBSD_TRUE@am__append_3 = src/x86/ffi.c src/x86/freebsd.S
-@X86_WIN32_TRUE@am__append_4 = src/x86/ffi.c src/x86/win32.S
-@X86_WIN64_TRUE@am__append_5 = src/x86/ffi.c src/x86/win64.S
-@X86_DARWIN_TRUE@am__append_6 = src/x86/ffi.c src/x86/darwin.S src/x86/ffi64.c src/x86/darwin64.S
-@SPARC_TRUE@am__append_7 = src/sparc/ffi.c src/sparc/v8.S src/sparc/v9.S
-@ALPHA_TRUE@am__append_8 = src/alpha/ffi.c src/alpha/osf.S
-@IA64_TRUE@am__append_9 = src/ia64/ffi.c src/ia64/unix.S
-@M32R_TRUE@am__append_10 = src/m32r/sysv.S src/m32r/ffi.c
-@M68K_TRUE@am__append_11 = src/m68k/ffi.c src/m68k/sysv.S
-@POWERPC_TRUE@am__append_12 = src/powerpc/ffi.c src/powerpc/sysv.S src/powerpc/ppc_closure.S src/powerpc/linux64.S src/powerpc/linux64_closure.S
-@POWERPC_AIX_TRUE@am__append_13 = src/powerpc/ffi_darwin.c src/powerpc/aix.S src/powerpc/aix_closure.S
-@POWERPC_DARWIN_TRUE@am__append_14 = src/powerpc/ffi_darwin.c src/powerpc/darwin.S src/powerpc/darwin_closure.S
-@POWERPC_FREEBSD_TRUE@am__append_15 = src/powerpc/ffi.c src/powerpc/sysv.S src/powerpc/ppc_closure.S
-@ARM_TRUE@am__append_16 = src/arm/sysv.S src/arm/ffi.c
-@AVR32_TRUE@am__append_17 = src/avr32/sysv.S src/avr32/ffi.c
-@LIBFFI_CRIS_TRUE@am__append_18 = src/cris/sysv.S src/cris/ffi.c
-@FRV_TRUE@am__append_19 = src/frv/eabi.S src/frv/ffi.c
-@S390_TRUE@am__append_20 = src/s390/sysv.S src/s390/ffi.c
-@X86_64_TRUE@am__append_21 = src/x86/ffi64.c src/x86/unix64.S src/x86/ffi.c src/x86/sysv.S
-@SH_TRUE@am__append_22 = src/sh/sysv.S src/sh/ffi.c
-@SH64_TRUE@am__append_23 = src/sh64/sysv.S src/sh64/ffi.c
-@PA_LINUX_TRUE@am__append_24 = src/pa/linux.S src/pa/ffi.c
-@PA_HPUX_TRUE@am__append_25 = src/pa/hpux32.S src/pa/ffi.c
+@FFI_DEBUG_TRUE@am__append_1 = src/debug.c
+@MIPS_TRUE@am__append_2 = src/mips/ffi.c src/mips/o32.S src/mips/n32.S
+@BFIN_TRUE@am__append_3 = src/bfin/ffi.c src/bfin/sysv.S
+@X86_TRUE@am__append_4 = src/x86/ffi.c src/x86/sysv.S
+@X86_FREEBSD_TRUE@am__append_5 = src/x86/ffi.c src/x86/freebsd.S
+@X86_WIN32_TRUE@am__append_6 = src/x86/ffi.c src/x86/win32.S
+@X86_WIN64_TRUE@am__append_7 = src/x86/ffi.c src/x86/win64.S
+@X86_DARWIN_TRUE@am__append_8 = src/x86/ffi.c src/x86/darwin.S src/x86/ffi64.c src/x86/darwin64.S
+@SPARC_TRUE@am__append_9 = src/sparc/ffi.c src/sparc/v8.S src/sparc/v9.S
+@ALPHA_TRUE@am__append_10 = src/alpha/ffi.c src/alpha/osf.S
+@IA64_TRUE@am__append_11 = src/ia64/ffi.c src/ia64/unix.S
+@M32R_TRUE@am__append_12 = src/m32r/sysv.S src/m32r/ffi.c
+@M68K_TRUE@am__append_13 = src/m68k/ffi.c src/m68k/sysv.S
+@MOXIE_TRUE@am__append_14 = src/moxie/ffi.c src/moxie/eabi.S
+@MICROBLAZE_TRUE@am__append_15 = src/microblaze/ffi.c src/microblaze/sysv.S
+@POWERPC_TRUE@am__append_16 = src/powerpc/ffi.c src/powerpc/sysv.S src/powerpc/ppc_closure.S src/powerpc/linux64.S src/powerpc/linux64_closure.S
+@POWERPC_AIX_TRUE@am__append_17 = src/powerpc/ffi_darwin.c src/powerpc/aix.S src/powerpc/aix_closure.S
+@POWERPC_DARWIN_TRUE@am__append_18 = src/powerpc/ffi_darwin.c src/powerpc/darwin.S src/powerpc/darwin_closure.S
+@POWERPC_FREEBSD_TRUE@am__append_19 = src/powerpc/ffi.c src/powerpc/sysv.S src/powerpc/ppc_closure.S
+@AARCH64_TRUE@am__append_20 = src/aarch64/sysv.S src/aarch64/ffi.c
+@ARM_TRUE@am__append_21 = src/arm/sysv.S src/arm/ffi.c
+@ARM_TRUE@@FFI_EXEC_TRAMPOLINE_TABLE_TRUE@am__append_22 = src/arm/trampoline.S
+@AVR32_TRUE@am__append_23 = src/avr32/sysv.S src/avr32/ffi.c
+@LIBFFI_CRIS_TRUE@am__append_24 = src/cris/sysv.S src/cris/ffi.c
+@FRV_TRUE@am__append_25 = src/frv/eabi.S src/frv/ffi.c
+@S390_TRUE@am__append_26 = src/s390/sysv.S src/s390/ffi.c
+@X86_64_TRUE@am__append_27 = src/x86/ffi64.c src/x86/unix64.S src/x86/ffi.c src/x86/sysv.S
+@SH_TRUE@am__append_28 = src/sh/sysv.S src/sh/ffi.c
+@SH64_TRUE@am__append_29 = src/sh64/sysv.S src/sh64/ffi.c
+@PA_LINUX_TRUE@am__append_30 = src/pa/linux.S src/pa/ffi.c
+@PA_HPUX_TRUE@am__append_31 = src/pa/hpux32.S src/pa/ffi.c
+@TILE_TRUE@am__append_32 = src/tile/tile.S src/tile/ffi.c
+@XTENSA_TRUE@am__append_33 = src/xtensa/sysv.S src/xtensa/ffi.c
+@METAG_TRUE@am__append_34 = src/metag/sysv.S src/metag/ffi.c
subdir = .
DIST_COMMON = README $(am__configure_deps) $(srcdir)/Makefile.am \
$(srcdir)/Makefile.in $(srcdir)/doc/stamp-vti \
@@ -69,7 +94,19 @@ DIST_COMMON = README $(am__configure_deps) $(srcdir)/Makefile.am \
compile config.guess config.sub depcomp install-sh ltmain.sh \
mdate-sh missing texinfo.tex
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
+am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \
+ $(top_srcdir)/m4/ax_append_flag.m4 \
+ $(top_srcdir)/m4/ax_cc_maxopt.m4 \
+ $(top_srcdir)/m4/ax_cflags_warn_all.m4 \
+ $(top_srcdir)/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/m4/ax_compiler_vendor.m4 \
+ $(top_srcdir)/m4/ax_configure_args.m4 \
+ $(top_srcdir)/m4/ax_enable_builddir.m4 \
+ $(top_srcdir)/m4/ax_gcc_archflag.m4 \
+ $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
@@ -100,51 +137,67 @@ am__nobase_list = $(am__nobase_strip_setup); \
am__base_list = \
sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
-am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(infodir)" \
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
+am__installdirs = "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(infodir)" \
"$(DESTDIR)$(pkgconfigdir)"
-LTLIBRARIES = $(lib_LTLIBRARIES) $(noinst_LTLIBRARIES)
+LTLIBRARIES = $(noinst_LTLIBRARIES) $(toolexeclib_LTLIBRARIES)
libffi_la_LIBADD =
am__dirstamp = $(am__leading_dot)dirstamp
-am_libffi_la_OBJECTS = src/debug.lo src/prep_cif.lo src/types.lo \
- src/raw_api.lo src/java_raw_api.lo src/closures.lo
-@MIPS_TRUE@am__objects_1 = src/mips/ffi.lo src/mips/o32.lo \
+am_libffi_la_OBJECTS = src/prep_cif.lo src/types.lo src/raw_api.lo \
+ src/java_raw_api.lo src/closures.lo
+@FFI_DEBUG_TRUE@am__objects_1 = src/debug.lo
+@MIPS_TRUE@am__objects_2 = src/mips/ffi.lo src/mips/o32.lo \
@MIPS_TRUE@ src/mips/n32.lo
-@X86_TRUE@am__objects_2 = src/x86/ffi.lo src/x86/sysv.lo
-@X86_FREEBSD_TRUE@am__objects_3 = src/x86/ffi.lo src/x86/freebsd.lo
-@X86_WIN32_TRUE@am__objects_4 = src/x86/ffi.lo src/x86/win32.lo
-@X86_WIN64_TRUE@am__objects_5 = src/x86/ffi.lo src/x86/win64.lo
-@X86_DARWIN_TRUE@am__objects_6 = src/x86/ffi.lo src/x86/darwin.lo \
+@BFIN_TRUE@am__objects_3 = src/bfin/ffi.lo src/bfin/sysv.lo
+@X86_TRUE@am__objects_4 = src/x86/ffi.lo src/x86/sysv.lo
+@X86_FREEBSD_TRUE@am__objects_5 = src/x86/ffi.lo src/x86/freebsd.lo
+@X86_WIN32_TRUE@am__objects_6 = src/x86/ffi.lo src/x86/win32.lo
+@X86_WIN64_TRUE@am__objects_7 = src/x86/ffi.lo src/x86/win64.lo
+@X86_DARWIN_TRUE@am__objects_8 = src/x86/ffi.lo src/x86/darwin.lo \
@X86_DARWIN_TRUE@ src/x86/ffi64.lo src/x86/darwin64.lo
-@SPARC_TRUE@am__objects_7 = src/sparc/ffi.lo src/sparc/v8.lo \
+@SPARC_TRUE@am__objects_9 = src/sparc/ffi.lo src/sparc/v8.lo \
@SPARC_TRUE@ src/sparc/v9.lo
-@ALPHA_TRUE@am__objects_8 = src/alpha/ffi.lo src/alpha/osf.lo
-@IA64_TRUE@am__objects_9 = src/ia64/ffi.lo src/ia64/unix.lo
-@M32R_TRUE@am__objects_10 = src/m32r/sysv.lo src/m32r/ffi.lo
-@M68K_TRUE@am__objects_11 = src/m68k/ffi.lo src/m68k/sysv.lo
-@POWERPC_TRUE@am__objects_12 = src/powerpc/ffi.lo src/powerpc/sysv.lo \
+@ALPHA_TRUE@am__objects_10 = src/alpha/ffi.lo src/alpha/osf.lo
+@IA64_TRUE@am__objects_11 = src/ia64/ffi.lo src/ia64/unix.lo
+@M32R_TRUE@am__objects_12 = src/m32r/sysv.lo src/m32r/ffi.lo
+@M68K_TRUE@am__objects_13 = src/m68k/ffi.lo src/m68k/sysv.lo
+@MOXIE_TRUE@am__objects_14 = src/moxie/ffi.lo src/moxie/eabi.lo
+@MICROBLAZE_TRUE@am__objects_15 = src/microblaze/ffi.lo \
+@MICROBLAZE_TRUE@ src/microblaze/sysv.lo
+@POWERPC_TRUE@am__objects_16 = src/powerpc/ffi.lo src/powerpc/sysv.lo \
@POWERPC_TRUE@ src/powerpc/ppc_closure.lo \
@POWERPC_TRUE@ src/powerpc/linux64.lo \
@POWERPC_TRUE@ src/powerpc/linux64_closure.lo
-@POWERPC_AIX_TRUE@am__objects_13 = src/powerpc/ffi_darwin.lo \
+@POWERPC_AIX_TRUE@am__objects_17 = src/powerpc/ffi_darwin.lo \
@POWERPC_AIX_TRUE@ src/powerpc/aix.lo \
@POWERPC_AIX_TRUE@ src/powerpc/aix_closure.lo
-@POWERPC_DARWIN_TRUE@am__objects_14 = src/powerpc/ffi_darwin.lo \
+@POWERPC_DARWIN_TRUE@am__objects_18 = src/powerpc/ffi_darwin.lo \
@POWERPC_DARWIN_TRUE@ src/powerpc/darwin.lo \
@POWERPC_DARWIN_TRUE@ src/powerpc/darwin_closure.lo
-@POWERPC_FREEBSD_TRUE@am__objects_15 = src/powerpc/ffi.lo \
+@POWERPC_FREEBSD_TRUE@am__objects_19 = src/powerpc/ffi.lo \
@POWERPC_FREEBSD_TRUE@ src/powerpc/sysv.lo \
@POWERPC_FREEBSD_TRUE@ src/powerpc/ppc_closure.lo
-@ARM_TRUE@am__objects_16 = src/arm/sysv.lo src/arm/ffi.lo
-@AVR32_TRUE@am__objects_17 = src/avr32/sysv.lo src/avr32/ffi.lo
-@LIBFFI_CRIS_TRUE@am__objects_18 = src/cris/sysv.lo src/cris/ffi.lo
-@FRV_TRUE@am__objects_19 = src/frv/eabi.lo src/frv/ffi.lo
-@S390_TRUE@am__objects_20 = src/s390/sysv.lo src/s390/ffi.lo
-@X86_64_TRUE@am__objects_21 = src/x86/ffi64.lo src/x86/unix64.lo \
+@AARCH64_TRUE@am__objects_20 = src/aarch64/sysv.lo src/aarch64/ffi.lo
+@ARM_TRUE@am__objects_21 = src/arm/sysv.lo src/arm/ffi.lo
+@ARM_TRUE@@FFI_EXEC_TRAMPOLINE_TABLE_TRUE@am__objects_22 = src/arm/trampoline.lo
+@AVR32_TRUE@am__objects_23 = src/avr32/sysv.lo src/avr32/ffi.lo
+@LIBFFI_CRIS_TRUE@am__objects_24 = src/cris/sysv.lo src/cris/ffi.lo
+@FRV_TRUE@am__objects_25 = src/frv/eabi.lo src/frv/ffi.lo
+@S390_TRUE@am__objects_26 = src/s390/sysv.lo src/s390/ffi.lo
+@X86_64_TRUE@am__objects_27 = src/x86/ffi64.lo src/x86/unix64.lo \
@X86_64_TRUE@ src/x86/ffi.lo src/x86/sysv.lo
-@SH_TRUE@am__objects_22 = src/sh/sysv.lo src/sh/ffi.lo
-@SH64_TRUE@am__objects_23 = src/sh64/sysv.lo src/sh64/ffi.lo
-@PA_LINUX_TRUE@am__objects_24 = src/pa/linux.lo src/pa/ffi.lo
-@PA_HPUX_TRUE@am__objects_25 = src/pa/hpux32.lo src/pa/ffi.lo
+@SH_TRUE@am__objects_28 = src/sh/sysv.lo src/sh/ffi.lo
+@SH64_TRUE@am__objects_29 = src/sh64/sysv.lo src/sh64/ffi.lo
+@PA_LINUX_TRUE@am__objects_30 = src/pa/linux.lo src/pa/ffi.lo
+@PA_HPUX_TRUE@am__objects_31 = src/pa/hpux32.lo src/pa/ffi.lo
+@TILE_TRUE@am__objects_32 = src/tile/tile.lo src/tile/ffi.lo
+@XTENSA_TRUE@am__objects_33 = src/xtensa/sysv.lo src/xtensa/ffi.lo
+@METAG_TRUE@am__objects_34 = src/metag/sysv.lo src/metag/ffi.lo
nodist_libffi_la_OBJECTS = $(am__objects_1) $(am__objects_2) \
$(am__objects_3) $(am__objects_4) $(am__objects_5) \
$(am__objects_6) $(am__objects_7) $(am__objects_8) \
@@ -153,17 +206,20 @@ nodist_libffi_la_OBJECTS = $(am__objects_1) $(am__objects_2) \
$(am__objects_15) $(am__objects_16) $(am__objects_17) \
$(am__objects_18) $(am__objects_19) $(am__objects_20) \
$(am__objects_21) $(am__objects_22) $(am__objects_23) \
- $(am__objects_24) $(am__objects_25)
+ $(am__objects_24) $(am__objects_25) $(am__objects_26) \
+ $(am__objects_27) $(am__objects_28) $(am__objects_29) \
+ $(am__objects_30) $(am__objects_31) $(am__objects_32) \
+ $(am__objects_33) $(am__objects_34)
libffi_la_OBJECTS = $(am_libffi_la_OBJECTS) \
$(nodist_libffi_la_OBJECTS)
libffi_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
$(libffi_la_LDFLAGS) $(LDFLAGS) -o $@
libffi_convenience_la_LIBADD =
-am__objects_26 = src/debug.lo src/prep_cif.lo src/types.lo \
- src/raw_api.lo src/java_raw_api.lo src/closures.lo
-am_libffi_convenience_la_OBJECTS = $(am__objects_26)
-am__objects_27 = $(am__objects_1) $(am__objects_2) $(am__objects_3) \
+am__objects_35 = src/prep_cif.lo src/types.lo src/raw_api.lo \
+ src/java_raw_api.lo src/closures.lo
+am_libffi_convenience_la_OBJECTS = $(am__objects_35)
+am__objects_36 = $(am__objects_1) $(am__objects_2) $(am__objects_3) \
$(am__objects_4) $(am__objects_5) $(am__objects_6) \
$(am__objects_7) $(am__objects_8) $(am__objects_9) \
$(am__objects_10) $(am__objects_11) $(am__objects_12) \
@@ -171,8 +227,11 @@ am__objects_27 = $(am__objects_1) $(am__objects_2) $(am__objects_3) \
$(am__objects_16) $(am__objects_17) $(am__objects_18) \
$(am__objects_19) $(am__objects_20) $(am__objects_21) \
$(am__objects_22) $(am__objects_23) $(am__objects_24) \
- $(am__objects_25)
-nodist_libffi_convenience_la_OBJECTS = $(am__objects_27)
+ $(am__objects_25) $(am__objects_26) $(am__objects_27) \
+ $(am__objects_28) $(am__objects_29) $(am__objects_30) \
+ $(am__objects_31) $(am__objects_32) $(am__objects_33) \
+ $(am__objects_34)
+nodist_libffi_convenience_la_OBJECTS = $(am__objects_36)
libffi_convenience_la_OBJECTS = $(am_libffi_convenience_la_OBJECTS) \
$(nodist_libffi_convenience_la_OBJECTS)
DEFAULT_INCLUDES = -I.@am__isrc@
@@ -216,22 +275,31 @@ RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
install-pdf-recursive install-ps-recursive install-recursive \
installcheck-recursive installdirs-recursive pdf-recursive \
ps-recursive uninstall-recursive
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
DATA = $(pkgconfig_DATA)
RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
distclean-recursive maintainer-clean-recursive
AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
$(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \
- distdir dist dist-all distcheck
+ cscope distdir dist dist-all distcheck
ETAGS = etags
CTAGS = ctags
+CSCOPE = cscope
DIST_SUBDIRS = $(SUBDIRS)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
distdir = $(PACKAGE)-$(VERSION)
top_distdir = $(distdir)
am__remove_distdir = \
- { test ! -d "$(distdir)" \
- || { find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \
- && rm -fr "$(distdir)"; }; }
+ if test -d "$(distdir)"; then \
+ find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \
+ && rm -rf "$(distdir)" \
+ || { sleep 5 && rm -rf "$(distdir)"; }; \
+ else :; fi
+am__post_remove_distdir = $(am__remove_distdir)
am__relativize = \
dir0=`pwd`; \
sed_first='s,^\([^/]*\)/.*$$,\1,'; \
@@ -259,7 +327,10 @@ am__relativize = \
reldir="$$dir2"
DIST_ARCHIVES = $(distdir).tar.gz
GZIP_ENV = --best
+DIST_TARGETS = dist-gzip
distuninstallcheck_listfiles = find . -type f -print
+am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \
+ | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$'
distcleancheck_listfiles = find . -type f -print
ACLOCAL = @ACLOCAL@
ALLOCA = @ALLOCA@
@@ -282,6 +353,7 @@ CPPFLAGS = @CPPFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
@@ -289,6 +361,7 @@ ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
+FFI_EXEC_TRAMPOLINE_TABLE = @FFI_EXEC_TRAMPOLINE_TABLE@
FGREP = @FGREP@
GREP = @GREP@
HAVE_LONG_DOUBLE = @HAVE_LONG_DOUBLE@
@@ -307,6 +380,7 @@ LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
MAINT = @MAINT@
MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
MKDIR_P = @MKDIR_P@
NM = @NM@
NMEDIT = @NMEDIT@
@@ -319,8 +393,10 @@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
+PRTDIAG = @PRTDIAG@
RANLIB = @RANLIB@
SED = @SED@
SET_MAKE = @SET_MAKE@
@@ -333,6 +409,7 @@ abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
@@ -340,6 +417,7 @@ am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
+ax_enable_builddir_sed = @ax_enable_builddir_sed@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
@@ -365,7 +443,6 @@ libdir = @libdir@
libexecdir = @libexecdir@
localedir = @localedir@
localstatedir = @localstatedir@
-lt_ECHO = @lt_ECHO@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
@@ -376,6 +453,7 @@ psdir = @psdir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
+sys_symbol_underscore = @sys_symbol_underscore@
sysconfdir = @sysconfdir@
target = @target@
target_alias = @target_alias@
@@ -388,36 +466,48 @@ top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
AUTOMAKE_OPTIONS = foreign subdir-objects
+ACLOCAL_AMFLAGS = -I m4
SUBDIRS = include testsuite man
-EXTRA_DIST = LICENSE ChangeLog.v1 ChangeLog.libgcj configure.host \
- src/alpha/ffi.c src/alpha/osf.S src/alpha/ffitarget.h \
- src/arm/ffi.c src/arm/sysv.S src/arm/ffitarget.h \
- src/avr32/ffi.c src/avr32/sysv.S src/avr32/ffitarget.h \
- src/cris/ffi.c src/cris/sysv.S src/cris/ffitarget.h \
- src/ia64/ffi.c src/ia64/ffitarget.h src/ia64/ia64_flags.h \
- src/ia64/unix.S \
- src/mips/ffi.c src/mips/n32.S src/mips/o32.S \
- src/mips/ffitarget.h \
- src/m32r/ffi.c src/m32r/sysv.S src/m32r/ffitarget.h \
- src/m68k/ffi.c src/m68k/sysv.S src/m68k/ffitarget.h \
- src/powerpc/ffi.c src/powerpc/sysv.S \
- src/powerpc/linux64.S src/powerpc/linux64_closure.S \
- src/powerpc/ppc_closure.S src/powerpc/asm.h \
- src/powerpc/aix.S src/powerpc/darwin.S \
- src/powerpc/aix_closure.S src/powerpc/darwin_closure.S \
- src/powerpc/ffi_darwin.c src/powerpc/ffitarget.h \
- src/s390/ffi.c src/s390/sysv.S src/s390/ffitarget.h \
- src/sh/ffi.c src/sh/sysv.S src/sh/ffitarget.h \
- src/sh64/ffi.c src/sh64/sysv.S src/sh64/ffitarget.h \
- src/sparc/v8.S src/sparc/v9.S src/sparc/ffitarget.h \
- src/sparc/ffi.c src/x86/darwin64.S \
- src/x86/ffi.c src/x86/sysv.S src/x86/win32.S src/x86/win64.S \
- src/x86/darwin.S src/x86/freebsd.S \
- src/x86/ffi64.c src/x86/unix64.S src/x86/ffitarget.h \
- src/pa/ffitarget.h src/pa/ffi.c src/pa/linux.S src/pa/hpux32.S \
- src/frv/ffi.c src/frv/eabi.S src/frv/ffitarget.h src/dlmalloc.c \
- libtool-version ChangeLog.libffi m4/libtool.m4 \
- m4/lt~obsolete.m4 m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4
+EXTRA_DIST = LICENSE ChangeLog.v1 ChangeLog.libgcj configure.host \
+ src/aarch64/ffi.c src/aarch64/ffitarget.h src/aarch64/sysv.S \
+ build-ios.sh src/alpha/ffi.c src/alpha/osf.S \
+ src/alpha/ffitarget.h src/arm/ffi.c src/arm/sysv.S \
+ src/arm/ffitarget.h src/avr32/ffi.c src/avr32/sysv.S \
+ src/avr32/ffitarget.h src/cris/ffi.c src/cris/sysv.S \
+ src/cris/ffitarget.h src/ia64/ffi.c src/ia64/ffitarget.h \
+ src/ia64/ia64_flags.h src/ia64/unix.S src/mips/ffi.c \
+ src/mips/n32.S src/mips/o32.S src/metag/ffi.c \
+ src/metag/ffitarget.h src/metag/sysv.S src/moxie/ffi.c \
+ src/moxie/ffitarget.h src/moxie/eabi.S src/mips/ffitarget.h \
+ src/m32r/ffi.c src/m32r/sysv.S src/m32r/ffitarget.h \
+ src/m68k/ffi.c src/m68k/sysv.S src/m68k/ffitarget.h \
+ src/microblaze/ffi.c src/microblaze/sysv.S \
+ src/microblaze/ffitarget.h src/powerpc/ffi.c \
+ src/powerpc/sysv.S src/powerpc/linux64.S \
+ src/powerpc/linux64_closure.S src/powerpc/ppc_closure.S \
+ src/powerpc/asm.h src/powerpc/aix.S src/powerpc/darwin.S \
+ src/powerpc/aix_closure.S src/powerpc/darwin_closure.S \
+ src/powerpc/ffi_darwin.c src/powerpc/ffitarget.h \
+ src/s390/ffi.c src/s390/sysv.S src/s390/ffitarget.h \
+ src/sh/ffi.c src/sh/sysv.S src/sh/ffitarget.h src/sh64/ffi.c \
+ src/sh64/sysv.S src/sh64/ffitarget.h src/sparc/v8.S \
+ src/sparc/v9.S src/sparc/ffitarget.h src/sparc/ffi.c \
+ src/x86/darwin64.S src/x86/ffi.c src/x86/sysv.S \
+ src/x86/win32.S src/x86/darwin.S src/x86/win64.S \
+ src/x86/freebsd.S src/x86/ffi64.c src/x86/unix64.S \
+ src/x86/ffitarget.h src/pa/ffitarget.h src/pa/ffi.c \
+ src/pa/linux.S src/pa/hpux32.S src/frv/ffi.c src/bfin/ffi.c \
+ src/bfin/ffitarget.h src/bfin/sysv.S src/frv/eabi.S \
+ src/frv/ffitarget.h src/dlmalloc.c src/tile/ffi.c \
+ src/tile/ffitarget.h src/tile/tile.S libtool-version \
+ src/xtensa/ffitarget.h src/xtensa/ffi.c src/xtensa/sysv.S \
+ ChangeLog.libffi m4/libtool.m4 m4/lt~obsolete.m4 \
+ m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 \
+ m4/ltversion.m4 src/arm/gentramp.sh src/debug.c msvcc.sh \
+ generate-ios-source-and-headers.py \
+ generate-osx-source-and-headers.py \
+ libffi.xcodeproj/project.pbxproj src/arm/trampoline.S \
+ libtool-ldflags
info_TEXINFOS = doc/libffi.texi
@@ -448,6 +538,7 @@ AM_MAKEFLAGS = \
"exec_prefix=$(exec_prefix)" \
"infodir=$(infodir)" \
"libdir=$(libdir)" \
+ "mandir=$(mandir)" \
"prefix=$(prefix)" \
"AR=$(AR)" \
"AS=$(AS)" \
@@ -458,11 +549,13 @@ AM_MAKEFLAGS = \
"RANLIB=$(RANLIB)" \
"DESTDIR=$(DESTDIR)"
+
+# Subdir rules rely on $(FLAGS_TO_PASS)
+FLAGS_TO_PASS = $(AM_MAKEFLAGS)
MAKEOVERRIDES =
-ACLOCAL_AMFLAGS = $(ACLOCAL_AMFLAGS) -I m4
-lib_LTLIBRARIES = libffi.la
+toolexeclib_LTLIBRARIES = libffi.la
noinst_LTLIBRARIES = libffi_convenience.la
-libffi_la_SOURCES = src/debug.c src/prep_cif.c src/types.c \
+libffi_la_SOURCES = src/prep_cif.c src/types.c \
src/raw_api.c src/java_raw_api.c src/closures.c
pkgconfigdir = $(libdir)/pkgconfig
@@ -475,11 +568,14 @@ nodist_libffi_la_SOURCES = $(am__append_1) $(am__append_2) \
$(am__append_15) $(am__append_16) $(am__append_17) \
$(am__append_18) $(am__append_19) $(am__append_20) \
$(am__append_21) $(am__append_22) $(am__append_23) \
- $(am__append_24) $(am__append_25)
+ $(am__append_24) $(am__append_25) $(am__append_26) \
+ $(am__append_27) $(am__append_28) $(am__append_29) \
+ $(am__append_30) $(am__append_31) $(am__append_32) \
+ $(am__append_33) $(am__append_34)
libffi_convenience_la_SOURCES = $(libffi_la_SOURCES)
nodist_libffi_convenience_la_SOURCES = $(nodist_libffi_la_SOURCES)
-AM_CFLAGS = -Wall -g -fexceptions
-libffi_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(AM_LTLDFLAGS)
+LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/libtool-ldflags $(LDFLAGS))
+libffi_la_LDFLAGS = -no-undefined -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(LTLDFLAGS) $(AM_LTLDFLAGS)
AM_CPPFLAGS = -I. -I$(top_srcdir)/include -Iinclude -I$(top_srcdir)/src
AM_CCASFLAGS = $(AM_CPPFLAGS)
all: fficonfig.h
@@ -487,7 +583,7 @@ all: fficonfig.h
.SUFFIXES:
.SUFFIXES: .S .c .dvi .lo .o .obj .ps
-am--refresh:
+am--refresh: Makefile
@:
$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
@@ -523,10 +619,8 @@ $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
$(am__aclocal_m4_deps):
fficonfig.h: stamp-h1
- @if test ! -f $@; then \
- rm -f stamp-h1; \
- $(MAKE) $(AM_MAKEFLAGS) stamp-h1; \
- else :; fi
+ @if test ! -f $@; then rm -f stamp-h1; else :; fi
+ @if test ! -f $@; then $(MAKE) $(AM_MAKEFLAGS) stamp-h1; else :; fi
stamp-h1: $(srcdir)/fficonfig.h.in $(top_builddir)/config.status
@rm -f stamp-h1
@@ -540,58 +634,63 @@ distclean-hdr:
-rm -f fficonfig.h stamp-h1
libffi.pc: $(top_builddir)/config.status $(srcdir)/libffi.pc.in
cd $(top_builddir) && $(SHELL) ./config.status $@
-install-libLTLIBRARIES: $(lib_LTLIBRARIES)
+
+clean-noinstLTLIBRARIES:
+ -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+ @list='$(noinst_LTLIBRARIES)'; \
+ locs=`for p in $$list; do echo $$p; done | \
+ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+ sort -u`; \
+ test -z "$$locs" || { \
+ echo rm -f $${locs}; \
+ rm -f $${locs}; \
+ }
+install-toolexeclibLTLIBRARIES: $(toolexeclib_LTLIBRARIES)
@$(NORMAL_INSTALL)
- test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)"
- @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
+ @list='$(toolexeclib_LTLIBRARIES)'; test -n "$(toolexeclibdir)" || list=; \
list2=; for p in $$list; do \
if test -f $$p; then \
list2="$$list2 $$p"; \
else :; fi; \
done; \
test -z "$$list2" || { \
- echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \
- $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \
+ echo " $(MKDIR_P) '$(DESTDIR)$(toolexeclibdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(toolexeclibdir)" || exit 1; \
+ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(toolexeclibdir)'"; \
+ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(toolexeclibdir)"; \
}
-uninstall-libLTLIBRARIES:
+uninstall-toolexeclibLTLIBRARIES:
@$(NORMAL_UNINSTALL)
- @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
+ @list='$(toolexeclib_LTLIBRARIES)'; test -n "$(toolexeclibdir)" || list=; \
for p in $$list; do \
$(am__strip_dir) \
- echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \
- $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \
- done
-
-clean-libLTLIBRARIES:
- -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES)
- @list='$(lib_LTLIBRARIES)'; for p in $$list; do \
- dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
- test "$$dir" != "$$p" || dir=.; \
- echo "rm -f \"$${dir}/so_locations\""; \
- rm -f "$${dir}/so_locations"; \
+ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(toolexeclibdir)/$$f'"; \
+ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(toolexeclibdir)/$$f"; \
done
-clean-noinstLTLIBRARIES:
- -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
- @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \
- dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
- test "$$dir" != "$$p" || dir=.; \
- echo "rm -f \"$${dir}/so_locations\""; \
- rm -f "$${dir}/so_locations"; \
- done
+clean-toolexeclibLTLIBRARIES:
+ -test -z "$(toolexeclib_LTLIBRARIES)" || rm -f $(toolexeclib_LTLIBRARIES)
+ @list='$(toolexeclib_LTLIBRARIES)'; \
+ locs=`for p in $$list; do echo $$p; done | \
+ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+ sort -u`; \
+ test -z "$$locs" || { \
+ echo rm -f $${locs}; \
+ rm -f $${locs}; \
+ }
src/$(am__dirstamp):
@$(MKDIR_P) src
@: > src/$(am__dirstamp)
src/$(DEPDIR)/$(am__dirstamp):
@$(MKDIR_P) src/$(DEPDIR)
@: > src/$(DEPDIR)/$(am__dirstamp)
-src/debug.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp)
src/prep_cif.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp)
src/types.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp)
src/raw_api.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp)
src/java_raw_api.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp)
src/closures.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp)
+src/debug.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp)
src/mips/$(am__dirstamp):
@$(MKDIR_P) src/mips
@: > src/mips/$(am__dirstamp)
@@ -604,6 +703,16 @@ src/mips/o32.lo: src/mips/$(am__dirstamp) \
src/mips/$(DEPDIR)/$(am__dirstamp)
src/mips/n32.lo: src/mips/$(am__dirstamp) \
src/mips/$(DEPDIR)/$(am__dirstamp)
+src/bfin/$(am__dirstamp):
+ @$(MKDIR_P) src/bfin
+ @: > src/bfin/$(am__dirstamp)
+src/bfin/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) src/bfin/$(DEPDIR)
+ @: > src/bfin/$(DEPDIR)/$(am__dirstamp)
+src/bfin/ffi.lo: src/bfin/$(am__dirstamp) \
+ src/bfin/$(DEPDIR)/$(am__dirstamp)
+src/bfin/sysv.lo: src/bfin/$(am__dirstamp) \
+ src/bfin/$(DEPDIR)/$(am__dirstamp)
src/x86/$(am__dirstamp):
@$(MKDIR_P) src/x86
@: > src/x86/$(am__dirstamp)
@@ -678,6 +787,26 @@ src/m68k/ffi.lo: src/m68k/$(am__dirstamp) \
src/m68k/$(DEPDIR)/$(am__dirstamp)
src/m68k/sysv.lo: src/m68k/$(am__dirstamp) \
src/m68k/$(DEPDIR)/$(am__dirstamp)
+src/moxie/$(am__dirstamp):
+ @$(MKDIR_P) src/moxie
+ @: > src/moxie/$(am__dirstamp)
+src/moxie/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) src/moxie/$(DEPDIR)
+ @: > src/moxie/$(DEPDIR)/$(am__dirstamp)
+src/moxie/ffi.lo: src/moxie/$(am__dirstamp) \
+ src/moxie/$(DEPDIR)/$(am__dirstamp)
+src/moxie/eabi.lo: src/moxie/$(am__dirstamp) \
+ src/moxie/$(DEPDIR)/$(am__dirstamp)
+src/microblaze/$(am__dirstamp):
+ @$(MKDIR_P) src/microblaze
+ @: > src/microblaze/$(am__dirstamp)
+src/microblaze/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) src/microblaze/$(DEPDIR)
+ @: > src/microblaze/$(DEPDIR)/$(am__dirstamp)
+src/microblaze/ffi.lo: src/microblaze/$(am__dirstamp) \
+ src/microblaze/$(DEPDIR)/$(am__dirstamp)
+src/microblaze/sysv.lo: src/microblaze/$(am__dirstamp) \
+ src/microblaze/$(DEPDIR)/$(am__dirstamp)
src/powerpc/$(am__dirstamp):
@$(MKDIR_P) src/powerpc
@: > src/powerpc/$(am__dirstamp)
@@ -704,6 +833,16 @@ src/powerpc/darwin.lo: src/powerpc/$(am__dirstamp) \
src/powerpc/$(DEPDIR)/$(am__dirstamp)
src/powerpc/darwin_closure.lo: src/powerpc/$(am__dirstamp) \
src/powerpc/$(DEPDIR)/$(am__dirstamp)
+src/aarch64/$(am__dirstamp):
+ @$(MKDIR_P) src/aarch64
+ @: > src/aarch64/$(am__dirstamp)
+src/aarch64/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) src/aarch64/$(DEPDIR)
+ @: > src/aarch64/$(DEPDIR)/$(am__dirstamp)
+src/aarch64/sysv.lo: src/aarch64/$(am__dirstamp) \
+ src/aarch64/$(DEPDIR)/$(am__dirstamp)
+src/aarch64/ffi.lo: src/aarch64/$(am__dirstamp) \
+ src/aarch64/$(DEPDIR)/$(am__dirstamp)
src/arm/$(am__dirstamp):
@$(MKDIR_P) src/arm
@: > src/arm/$(am__dirstamp)
@@ -714,6 +853,8 @@ src/arm/sysv.lo: src/arm/$(am__dirstamp) \
src/arm/$(DEPDIR)/$(am__dirstamp)
src/arm/ffi.lo: src/arm/$(am__dirstamp) \
src/arm/$(DEPDIR)/$(am__dirstamp)
+src/arm/trampoline.lo: src/arm/$(am__dirstamp) \
+ src/arm/$(DEPDIR)/$(am__dirstamp)
src/avr32/$(am__dirstamp):
@$(MKDIR_P) src/avr32
@: > src/avr32/$(am__dirstamp)
@@ -786,125 +927,91 @@ src/pa/linux.lo: src/pa/$(am__dirstamp) \
src/pa/ffi.lo: src/pa/$(am__dirstamp) src/pa/$(DEPDIR)/$(am__dirstamp)
src/pa/hpux32.lo: src/pa/$(am__dirstamp) \
src/pa/$(DEPDIR)/$(am__dirstamp)
-libffi.la: $(libffi_la_OBJECTS) $(libffi_la_DEPENDENCIES)
- $(libffi_la_LINK) -rpath $(libdir) $(libffi_la_OBJECTS) $(libffi_la_LIBADD) $(LIBS)
-libffi_convenience.la: $(libffi_convenience_la_OBJECTS) $(libffi_convenience_la_DEPENDENCIES)
+src/tile/$(am__dirstamp):
+ @$(MKDIR_P) src/tile
+ @: > src/tile/$(am__dirstamp)
+src/tile/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) src/tile/$(DEPDIR)
+ @: > src/tile/$(DEPDIR)/$(am__dirstamp)
+src/tile/tile.lo: src/tile/$(am__dirstamp) \
+ src/tile/$(DEPDIR)/$(am__dirstamp)
+src/tile/ffi.lo: src/tile/$(am__dirstamp) \
+ src/tile/$(DEPDIR)/$(am__dirstamp)
+src/xtensa/$(am__dirstamp):
+ @$(MKDIR_P) src/xtensa
+ @: > src/xtensa/$(am__dirstamp)
+src/xtensa/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) src/xtensa/$(DEPDIR)
+ @: > src/xtensa/$(DEPDIR)/$(am__dirstamp)
+src/xtensa/sysv.lo: src/xtensa/$(am__dirstamp) \
+ src/xtensa/$(DEPDIR)/$(am__dirstamp)
+src/xtensa/ffi.lo: src/xtensa/$(am__dirstamp) \
+ src/xtensa/$(DEPDIR)/$(am__dirstamp)
+src/metag/$(am__dirstamp):
+ @$(MKDIR_P) src/metag
+ @: > src/metag/$(am__dirstamp)
+src/metag/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) src/metag/$(DEPDIR)
+ @: > src/metag/$(DEPDIR)/$(am__dirstamp)
+src/metag/sysv.lo: src/metag/$(am__dirstamp) \
+ src/metag/$(DEPDIR)/$(am__dirstamp)
+src/metag/ffi.lo: src/metag/$(am__dirstamp) \
+ src/metag/$(DEPDIR)/$(am__dirstamp)
+libffi.la: $(libffi_la_OBJECTS) $(libffi_la_DEPENDENCIES) $(EXTRA_libffi_la_DEPENDENCIES)
+ $(libffi_la_LINK) -rpath $(toolexeclibdir) $(libffi_la_OBJECTS) $(libffi_la_LIBADD) $(LIBS)
+libffi_convenience.la: $(libffi_convenience_la_OBJECTS) $(libffi_convenience_la_DEPENDENCIES) $(EXTRA_libffi_convenience_la_DEPENDENCIES)
$(LINK) $(libffi_convenience_la_OBJECTS) $(libffi_convenience_la_LIBADD) $(LIBS)
mostlyclean-compile:
-rm -f *.$(OBJEXT)
- -rm -f src/alpha/ffi.$(OBJEXT)
- -rm -f src/alpha/ffi.lo
- -rm -f src/alpha/osf.$(OBJEXT)
- -rm -f src/alpha/osf.lo
- -rm -f src/arm/ffi.$(OBJEXT)
- -rm -f src/arm/ffi.lo
- -rm -f src/arm/sysv.$(OBJEXT)
- -rm -f src/arm/sysv.lo
- -rm -f src/avr32/ffi.$(OBJEXT)
- -rm -f src/avr32/ffi.lo
- -rm -f src/avr32/sysv.$(OBJEXT)
- -rm -f src/avr32/sysv.lo
- -rm -f src/closures.$(OBJEXT)
- -rm -f src/closures.lo
- -rm -f src/cris/ffi.$(OBJEXT)
- -rm -f src/cris/ffi.lo
- -rm -f src/cris/sysv.$(OBJEXT)
- -rm -f src/cris/sysv.lo
- -rm -f src/debug.$(OBJEXT)
- -rm -f src/debug.lo
- -rm -f src/frv/eabi.$(OBJEXT)
- -rm -f src/frv/eabi.lo
- -rm -f src/frv/ffi.$(OBJEXT)
- -rm -f src/frv/ffi.lo
- -rm -f src/ia64/ffi.$(OBJEXT)
- -rm -f src/ia64/ffi.lo
- -rm -f src/ia64/unix.$(OBJEXT)
- -rm -f src/ia64/unix.lo
- -rm -f src/java_raw_api.$(OBJEXT)
- -rm -f src/java_raw_api.lo
- -rm -f src/m32r/ffi.$(OBJEXT)
- -rm -f src/m32r/ffi.lo
- -rm -f src/m32r/sysv.$(OBJEXT)
- -rm -f src/m32r/sysv.lo
- -rm -f src/m68k/ffi.$(OBJEXT)
- -rm -f src/m68k/ffi.lo
- -rm -f src/m68k/sysv.$(OBJEXT)
- -rm -f src/m68k/sysv.lo
- -rm -f src/mips/ffi.$(OBJEXT)
- -rm -f src/mips/ffi.lo
- -rm -f src/mips/n32.$(OBJEXT)
- -rm -f src/mips/n32.lo
- -rm -f src/mips/o32.$(OBJEXT)
- -rm -f src/mips/o32.lo
- -rm -f src/pa/ffi.$(OBJEXT)
- -rm -f src/pa/ffi.lo
- -rm -f src/pa/hpux32.$(OBJEXT)
- -rm -f src/pa/hpux32.lo
- -rm -f src/pa/linux.$(OBJEXT)
- -rm -f src/pa/linux.lo
- -rm -f src/powerpc/aix.$(OBJEXT)
- -rm -f src/powerpc/aix.lo
- -rm -f src/powerpc/aix_closure.$(OBJEXT)
- -rm -f src/powerpc/aix_closure.lo
- -rm -f src/powerpc/darwin.$(OBJEXT)
- -rm -f src/powerpc/darwin.lo
- -rm -f src/powerpc/darwin_closure.$(OBJEXT)
- -rm -f src/powerpc/darwin_closure.lo
- -rm -f src/powerpc/ffi.$(OBJEXT)
- -rm -f src/powerpc/ffi.lo
- -rm -f src/powerpc/ffi_darwin.$(OBJEXT)
- -rm -f src/powerpc/ffi_darwin.lo
- -rm -f src/powerpc/linux64.$(OBJEXT)
- -rm -f src/powerpc/linux64.lo
- -rm -f src/powerpc/linux64_closure.$(OBJEXT)
- -rm -f src/powerpc/linux64_closure.lo
- -rm -f src/powerpc/ppc_closure.$(OBJEXT)
- -rm -f src/powerpc/ppc_closure.lo
- -rm -f src/powerpc/sysv.$(OBJEXT)
- -rm -f src/powerpc/sysv.lo
- -rm -f src/prep_cif.$(OBJEXT)
- -rm -f src/prep_cif.lo
- -rm -f src/raw_api.$(OBJEXT)
- -rm -f src/raw_api.lo
- -rm -f src/s390/ffi.$(OBJEXT)
- -rm -f src/s390/ffi.lo
- -rm -f src/s390/sysv.$(OBJEXT)
- -rm -f src/s390/sysv.lo
- -rm -f src/sh/ffi.$(OBJEXT)
- -rm -f src/sh/ffi.lo
- -rm -f src/sh/sysv.$(OBJEXT)
- -rm -f src/sh/sysv.lo
- -rm -f src/sh64/ffi.$(OBJEXT)
- -rm -f src/sh64/ffi.lo
- -rm -f src/sh64/sysv.$(OBJEXT)
- -rm -f src/sh64/sysv.lo
- -rm -f src/sparc/ffi.$(OBJEXT)
- -rm -f src/sparc/ffi.lo
- -rm -f src/sparc/v8.$(OBJEXT)
- -rm -f src/sparc/v8.lo
- -rm -f src/sparc/v9.$(OBJEXT)
- -rm -f src/sparc/v9.lo
- -rm -f src/types.$(OBJEXT)
- -rm -f src/types.lo
- -rm -f src/x86/darwin.$(OBJEXT)
- -rm -f src/x86/darwin.lo
- -rm -f src/x86/darwin64.$(OBJEXT)
- -rm -f src/x86/darwin64.lo
- -rm -f src/x86/ffi.$(OBJEXT)
- -rm -f src/x86/ffi.lo
- -rm -f src/x86/ffi64.$(OBJEXT)
- -rm -f src/x86/ffi64.lo
- -rm -f src/x86/freebsd.$(OBJEXT)
- -rm -f src/x86/freebsd.lo
- -rm -f src/x86/sysv.$(OBJEXT)
- -rm -f src/x86/sysv.lo
- -rm -f src/x86/unix64.$(OBJEXT)
- -rm -f src/x86/unix64.lo
- -rm -f src/x86/win32.$(OBJEXT)
- -rm -f src/x86/win32.lo
- -rm -f src/x86/win64.$(OBJEXT)
- -rm -f src/x86/win64.lo
+ -rm -f src/*.$(OBJEXT)
+ -rm -f src/*.lo
+ -rm -f src/aarch64/*.$(OBJEXT)
+ -rm -f src/aarch64/*.lo
+ -rm -f src/alpha/*.$(OBJEXT)
+ -rm -f src/alpha/*.lo
+ -rm -f src/arm/*.$(OBJEXT)
+ -rm -f src/arm/*.lo
+ -rm -f src/avr32/*.$(OBJEXT)
+ -rm -f src/avr32/*.lo
+ -rm -f src/bfin/*.$(OBJEXT)
+ -rm -f src/bfin/*.lo
+ -rm -f src/cris/*.$(OBJEXT)
+ -rm -f src/cris/*.lo
+ -rm -f src/frv/*.$(OBJEXT)
+ -rm -f src/frv/*.lo
+ -rm -f src/ia64/*.$(OBJEXT)
+ -rm -f src/ia64/*.lo
+ -rm -f src/m32r/*.$(OBJEXT)
+ -rm -f src/m32r/*.lo
+ -rm -f src/m68k/*.$(OBJEXT)
+ -rm -f src/m68k/*.lo
+ -rm -f src/metag/*.$(OBJEXT)
+ -rm -f src/metag/*.lo
+ -rm -f src/microblaze/*.$(OBJEXT)
+ -rm -f src/microblaze/*.lo
+ -rm -f src/mips/*.$(OBJEXT)
+ -rm -f src/mips/*.lo
+ -rm -f src/moxie/*.$(OBJEXT)
+ -rm -f src/moxie/*.lo
+ -rm -f src/pa/*.$(OBJEXT)
+ -rm -f src/pa/*.lo
+ -rm -f src/powerpc/*.$(OBJEXT)
+ -rm -f src/powerpc/*.lo
+ -rm -f src/s390/*.$(OBJEXT)
+ -rm -f src/s390/*.lo
+ -rm -f src/sh/*.$(OBJEXT)
+ -rm -f src/sh/*.lo
+ -rm -f src/sh64/*.$(OBJEXT)
+ -rm -f src/sh64/*.lo
+ -rm -f src/sparc/*.$(OBJEXT)
+ -rm -f src/sparc/*.lo
+ -rm -f src/tile/*.$(OBJEXT)
+ -rm -f src/tile/*.lo
+ -rm -f src/x86/*.$(OBJEXT)
+ -rm -f src/x86/*.lo
+ -rm -f src/xtensa/*.$(OBJEXT)
+ -rm -f src/xtensa/*.lo
distclean-compile:
-rm -f *.tab.c
@@ -915,12 +1022,17 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/prep_cif.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/raw_api.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/types.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/aarch64/$(DEPDIR)/ffi.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/aarch64/$(DEPDIR)/sysv.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/alpha/$(DEPDIR)/ffi.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/alpha/$(DEPDIR)/osf.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/arm/$(DEPDIR)/ffi.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/arm/$(DEPDIR)/sysv.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/arm/$(DEPDIR)/trampoline.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/avr32/$(DEPDIR)/ffi.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/avr32/$(DEPDIR)/sysv.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/bfin/$(DEPDIR)/ffi.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/bfin/$(DEPDIR)/sysv.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/cris/$(DEPDIR)/ffi.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/cris/$(DEPDIR)/sysv.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/frv/$(DEPDIR)/eabi.Plo@am__quote@
@@ -931,9 +1043,15 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@src/m32r/$(DEPDIR)/sysv.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/m68k/$(DEPDIR)/ffi.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/m68k/$(DEPDIR)/sysv.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/metag/$(DEPDIR)/ffi.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/metag/$(DEPDIR)/sysv.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/microblaze/$(DEPDIR)/ffi.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/microblaze/$(DEPDIR)/sysv.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/mips/$(DEPDIR)/ffi.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/mips/$(DEPDIR)/n32.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/mips/$(DEPDIR)/o32.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/moxie/$(DEPDIR)/eabi.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/moxie/$(DEPDIR)/ffi.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/pa/$(DEPDIR)/ffi.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/pa/$(DEPDIR)/hpux32.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/pa/$(DEPDIR)/linux.Plo@am__quote@
@@ -956,6 +1074,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@src/sparc/$(DEPDIR)/ffi.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/sparc/$(DEPDIR)/v8.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/sparc/$(DEPDIR)/v9.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/tile/$(DEPDIR)/ffi.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/tile/$(DEPDIR)/tile.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/x86/$(DEPDIR)/darwin.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/x86/$(DEPDIR)/darwin64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/x86/$(DEPDIR)/ffi.Plo@am__quote@
@@ -965,6 +1085,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@src/x86/$(DEPDIR)/unix64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/x86/$(DEPDIR)/win32.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@src/x86/$(DEPDIR)/win64.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/xtensa/$(DEPDIR)/ffi.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@src/xtensa/$(DEPDIR)/sysv.Plo@am__quote@
.S.o:
@am__fastdepCCAS_TRUE@ depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\
@@ -1020,22 +1142,29 @@ mostlyclean-libtool:
clean-libtool:
-rm -rf .libs _libs
-rm -rf src/.libs src/_libs
+ -rm -rf src/aarch64/.libs src/aarch64/_libs
-rm -rf src/alpha/.libs src/alpha/_libs
-rm -rf src/arm/.libs src/arm/_libs
-rm -rf src/avr32/.libs src/avr32/_libs
+ -rm -rf src/bfin/.libs src/bfin/_libs
-rm -rf src/cris/.libs src/cris/_libs
-rm -rf src/frv/.libs src/frv/_libs
-rm -rf src/ia64/.libs src/ia64/_libs
-rm -rf src/m32r/.libs src/m32r/_libs
-rm -rf src/m68k/.libs src/m68k/_libs
+ -rm -rf src/metag/.libs src/metag/_libs
+ -rm -rf src/microblaze/.libs src/microblaze/_libs
-rm -rf src/mips/.libs src/mips/_libs
+ -rm -rf src/moxie/.libs src/moxie/_libs
-rm -rf src/pa/.libs src/pa/_libs
-rm -rf src/powerpc/.libs src/powerpc/_libs
-rm -rf src/s390/.libs src/s390/_libs
-rm -rf src/sh/.libs src/sh/_libs
-rm -rf src/sh64/.libs src/sh64/_libs
-rm -rf src/sparc/.libs src/sparc/_libs
+ -rm -rf src/tile/.libs src/tile/_libs
-rm -rf src/x86/.libs src/x86/_libs
+ -rm -rf src/xtensa/.libs src/xtensa/_libs
distclean-libtool:
-rm -f libtool config.lt
@@ -1068,12 +1197,12 @@ $(srcdir)/doc/libffi.info: doc/libffi.texi $(srcdir)/doc/version.texi
doc/libffi.dvi: doc/libffi.texi $(srcdir)/doc/version.texi doc/$(am__dirstamp)
TEXINPUTS="$(am__TEXINFO_TEX_DIR)$(PATH_SEPARATOR)$$TEXINPUTS" \
MAKEINFO='$(MAKEINFO) $(AM_MAKEINFOFLAGS) $(MAKEINFOFLAGS) -I doc -I $(srcdir)/doc' \
- $(TEXI2DVI) -o $@ `test -f 'doc/libffi.texi' || echo '$(srcdir)/'`doc/libffi.texi
+ $(TEXI2DVI) --clean -o $@ `test -f 'doc/libffi.texi' || echo '$(srcdir)/'`doc/libffi.texi
doc/libffi.pdf: doc/libffi.texi $(srcdir)/doc/version.texi doc/$(am__dirstamp)
TEXINPUTS="$(am__TEXINFO_TEX_DIR)$(PATH_SEPARATOR)$$TEXINPUTS" \
MAKEINFO='$(MAKEINFO) $(AM_MAKEINFOFLAGS) $(MAKEINFOFLAGS) -I doc -I $(srcdir)/doc' \
- $(TEXI2PDF) -o $@ `test -f 'doc/libffi.texi' || echo '$(srcdir)/'`doc/libffi.texi
+ $(TEXI2PDF) --clean -o $@ `test -f 'doc/libffi.texi' || echo '$(srcdir)/'`doc/libffi.texi
doc/libffi.html: doc/libffi.texi $(srcdir)/doc/version.texi doc/$(am__dirstamp)
rm -rf $(@:.html=.htp)
@@ -1110,7 +1239,7 @@ maintainer-clean-vti:
@MAINTAINER_MODE_TRUE@ -rm -f $(srcdir)/doc/stamp-vti $(srcdir)/doc/version.texi
.dvi.ps:
TEXINPUTS="$(am__TEXINFO_TEX_DIR)$(PATH_SEPARATOR)$$TEXINPUTS" \
- $(DVIPS) -o $@ $<
+ $(DVIPS) -o $@ $<
uninstall-dvi-am:
@$(NORMAL_UNINSTALL)
@@ -1132,9 +1261,7 @@ uninstall-html-am:
uninstall-info-am:
@$(PRE_UNINSTALL)
- @if test -d '$(DESTDIR)$(infodir)' && \
- (install-info --version && \
- install-info --version 2>&1 | sed 1q | grep -i -v debian) >/dev/null 2>&1; then \
+ @if test -d '$(DESTDIR)$(infodir)' && $(am__can_run_installinfo); then \
list='$(INFO_DEPS)'; \
for file in $$list; do \
relfile=`echo "$$file" | sed 's|^.*/||'`; \
@@ -1206,8 +1333,11 @@ maintainer-clean-aminfo:
done
install-pkgconfigDATA: $(pkgconfig_DATA)
@$(NORMAL_INSTALL)
- test -z "$(pkgconfigdir)" || $(MKDIR_P) "$(DESTDIR)$(pkgconfigdir)"
@list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pkgconfigdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pkgconfigdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -1221,18 +1351,16 @@ uninstall-pkgconfigDATA:
@$(NORMAL_UNINSTALL)
@list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- test -n "$$files" || exit 0; \
- echo " ( cd '$(DESTDIR)$(pkgconfigdir)' && rm -f" $$files ")"; \
- cd "$(DESTDIR)$(pkgconfigdir)" && rm -f $$files
+ dir='$(DESTDIR)$(pkgconfigdir)'; $(am__uninstall_files_from_dir)
# This directory's subdirectories are mostly independent; you can cd
-# into them and run `make' without going through this Makefile.
-# To change the values of `make' variables: instead of editing Makefiles,
-# (1) if the variable is set in `config.status', edit `config.status'
-# (which will cause the Makefiles to be regenerated when you run `make');
-# (2) otherwise, pass the desired values on the `make' command line.
-$(RECURSIVE_TARGETS):
- @failcom='exit 1'; \
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+# (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(RECURSIVE_TARGETS) $(RECURSIVE_CLEAN_TARGETS):
+ @fail= failcom='exit 1'; \
for f in x $$MAKEFLAGS; do \
case $$f in \
*=* | --[!k]*);; \
@@ -1241,7 +1369,11 @@ $(RECURSIVE_TARGETS):
done; \
dot_seen=no; \
target=`echo $@ | sed s/-recursive//`; \
- list='$(SUBDIRS)'; for subdir in $$list; do \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ for subdir in $$list; do \
echo "Making $$target in $$subdir"; \
if test "$$subdir" = "."; then \
dot_seen=yes; \
@@ -1255,37 +1387,6 @@ $(RECURSIVE_TARGETS):
if test "$$dot_seen" = "no"; then \
$(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
fi; test -z "$$fail"
-
-$(RECURSIVE_CLEAN_TARGETS):
- @failcom='exit 1'; \
- for f in x $$MAKEFLAGS; do \
- case $$f in \
- *=* | --[!k]*);; \
- *k*) failcom='fail=yes';; \
- esac; \
- done; \
- dot_seen=no; \
- case "$@" in \
- distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
- *) list='$(SUBDIRS)' ;; \
- esac; \
- rev=''; for subdir in $$list; do \
- if test "$$subdir" = "."; then :; else \
- rev="$$subdir $$rev"; \
- fi; \
- done; \
- rev="$$rev ."; \
- target=`echo $@ | sed s/-recursive//`; \
- for subdir in $$rev; do \
- echo "Making $$target in $$subdir"; \
- if test "$$subdir" = "."; then \
- local_target="$$target-am"; \
- else \
- local_target="$$target"; \
- fi; \
- ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
- || eval $$failcom; \
- done && test -z "$$fail"
tags-recursive:
list='$(SUBDIRS)'; for subdir in $$list; do \
test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
@@ -1294,6 +1395,10 @@ ctags-recursive:
list='$(SUBDIRS)'; for subdir in $$list; do \
test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
done
+cscopelist-recursive:
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) cscopelist); \
+ done
ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
@@ -1357,8 +1462,32 @@ GTAGS:
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
+cscope: cscope.files
+ test ! -s cscope.files \
+ || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS)
+
+clean-cscope:
+ -rm -f cscope.files
+
+cscope.files: clean-cscope cscopelist-recursive cscopelist
+
+cscopelist: cscopelist-recursive $(HEADERS) $(SOURCES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+ -rm -f cscope.out cscope.in.out cscope.po.out cscope.files
distdir: $(DISTFILES)
$(am__remove_distdir)
@@ -1394,13 +1523,10 @@ distdir: $(DISTFILES)
done
@list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
if test "$$subdir" = .; then :; else \
- test -d "$(distdir)/$$subdir" \
- || $(MKDIR_P) "$(distdir)/$$subdir" \
- || exit 1; \
- fi; \
- done
- @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
- if test "$$subdir" = .; then :; else \
+ $(am__make_dryrun) \
+ || test -d "$(distdir)/$$subdir" \
+ || $(MKDIR_P) "$(distdir)/$$subdir" \
+ || exit 1; \
dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
$(am__relativize); \
new_distdir=$$reldir; \
@@ -1424,43 +1550,44 @@ distdir: $(DISTFILES)
top_distdir="$(top_distdir)" distdir="$(distdir)" \
dist-info
-test -n "$(am__skip_mode_fix)" \
- || find "$(distdir)" -type d ! -perm -777 -exec chmod a+rwx {} \; -o \
+ || find "$(distdir)" -type d ! -perm -755 \
+ -exec chmod u+rwx,go+rx {} \; -o \
! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \
! -type d ! -perm -400 -exec chmod a+r {} \; -o \
! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \
|| chmod -R a+r "$(distdir)"
dist-gzip: distdir
tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
- $(am__remove_distdir)
+ $(am__post_remove_distdir)
dist-bzip2: distdir
- tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2
- $(am__remove_distdir)
+ tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2
+ $(am__post_remove_distdir)
-dist-lzma: distdir
- tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma
- $(am__remove_distdir)
+dist-lzip: distdir
+ tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz
+ $(am__post_remove_distdir)
dist-xz: distdir
- tardir=$(distdir) && $(am__tar) | xz -c >$(distdir).tar.xz
- $(am__remove_distdir)
+ tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz
+ $(am__post_remove_distdir)
dist-tarZ: distdir
tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
- $(am__remove_distdir)
+ $(am__post_remove_distdir)
dist-shar: distdir
shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz
- $(am__remove_distdir)
+ $(am__post_remove_distdir)
dist-zip: distdir
-rm -f $(distdir).zip
zip -rq $(distdir).zip $(distdir)
- $(am__remove_distdir)
+ $(am__post_remove_distdir)
-dist dist-all: distdir
- tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
- $(am__remove_distdir)
+dist dist-all:
+ $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:'
+ $(am__post_remove_distdir)
# This target untars the dist file and tries a VPATH configuration. Then
# it guarantees that the distribution is self-contained by making another
@@ -1468,21 +1595,21 @@ dist dist-all: distdir
distcheck: dist
case '$(DIST_ARCHIVES)' in \
*.tar.gz*) \
- GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(am__untar) ;;\
+ GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\
*.tar.bz2*) \
- bunzip2 -c $(distdir).tar.bz2 | $(am__untar) ;;\
- *.tar.lzma*) \
- unlzma -c $(distdir).tar.lzma | $(am__untar) ;;\
+ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\
+ *.tar.lz*) \
+ lzip -dc $(distdir).tar.lz | $(am__untar) ;;\
*.tar.xz*) \
xz -dc $(distdir).tar.xz | $(am__untar) ;;\
*.tar.Z*) \
uncompress -c $(distdir).tar.Z | $(am__untar) ;;\
*.shar.gz*) \
- GZIP=$(GZIP_ENV) gunzip -c $(distdir).shar.gz | unshar ;;\
+ GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\
*.zip*) \
unzip $(distdir).zip ;;\
esac
- chmod -R a-w $(distdir); chmod a+w $(distdir)
+ chmod -R a-w $(distdir); chmod u+w $(distdir)
mkdir $(distdir)/_build
mkdir $(distdir)/_inst
chmod a-w $(distdir)
@@ -1492,6 +1619,7 @@ distcheck: dist
&& am__cwd=`pwd` \
&& $(am__cd) $(distdir)/_build \
&& ../configure --srcdir=.. --prefix="$$dc_install_base" \
+ $(AM_DISTCHECK_CONFIGURE_FLAGS) \
$(DISTCHECK_CONFIGURE_FLAGS) \
&& $(MAKE) $(AM_MAKEFLAGS) \
&& $(MAKE) $(AM_MAKEFLAGS) dvi \
@@ -1515,13 +1643,21 @@ distcheck: dist
&& $(MAKE) $(AM_MAKEFLAGS) distcleancheck \
&& cd "$$am__cwd" \
|| exit 1
- $(am__remove_distdir)
+ $(am__post_remove_distdir)
@(echo "$(distdir) archives ready for distribution: "; \
list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \
sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x'
distuninstallcheck:
- @$(am__cd) '$(distuninstallcheck_dir)' \
- && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \
+ @test -n '$(distuninstallcheck_dir)' || { \
+ echo 'ERROR: trying to run $@ with an empty' \
+ '$$(distuninstallcheck_dir)' >&2; \
+ exit 1; \
+ }; \
+ $(am__cd) '$(distuninstallcheck_dir)' || { \
+ echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \
+ exit 1; \
+ }; \
+ test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \
|| { echo "ERROR: files left after uninstall:" ; \
if test -n "$(DESTDIR)"; then \
echo " (check DESTDIR support)"; \
@@ -1542,7 +1678,7 @@ check: check-recursive
all-am: Makefile $(INFO_DEPS) $(LTLIBRARIES) $(DATA) fficonfig.h
installdirs: installdirs-recursive
installdirs-am:
- for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(infodir)" "$(DESTDIR)$(pkgconfigdir)"; do \
+ for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(infodir)" "$(DESTDIR)$(pkgconfigdir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-recursive
@@ -1555,10 +1691,15 @@ install-am: all-am
installcheck: installcheck-recursive
install-strip:
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- `test -z '$(STRIP)' || \
- echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
mostlyclean-generic:
clean-generic:
@@ -1569,12 +1710,16 @@ distclean-generic:
-rm -f doc/$(am__dirstamp)
-rm -f src/$(DEPDIR)/$(am__dirstamp)
-rm -f src/$(am__dirstamp)
+ -rm -f src/aarch64/$(DEPDIR)/$(am__dirstamp)
+ -rm -f src/aarch64/$(am__dirstamp)
-rm -f src/alpha/$(DEPDIR)/$(am__dirstamp)
-rm -f src/alpha/$(am__dirstamp)
-rm -f src/arm/$(DEPDIR)/$(am__dirstamp)
-rm -f src/arm/$(am__dirstamp)
-rm -f src/avr32/$(DEPDIR)/$(am__dirstamp)
-rm -f src/avr32/$(am__dirstamp)
+ -rm -f src/bfin/$(DEPDIR)/$(am__dirstamp)
+ -rm -f src/bfin/$(am__dirstamp)
-rm -f src/cris/$(DEPDIR)/$(am__dirstamp)
-rm -f src/cris/$(am__dirstamp)
-rm -f src/frv/$(DEPDIR)/$(am__dirstamp)
@@ -1585,8 +1730,14 @@ distclean-generic:
-rm -f src/m32r/$(am__dirstamp)
-rm -f src/m68k/$(DEPDIR)/$(am__dirstamp)
-rm -f src/m68k/$(am__dirstamp)
+ -rm -f src/metag/$(DEPDIR)/$(am__dirstamp)
+ -rm -f src/metag/$(am__dirstamp)
+ -rm -f src/microblaze/$(DEPDIR)/$(am__dirstamp)
+ -rm -f src/microblaze/$(am__dirstamp)
-rm -f src/mips/$(DEPDIR)/$(am__dirstamp)
-rm -f src/mips/$(am__dirstamp)
+ -rm -f src/moxie/$(DEPDIR)/$(am__dirstamp)
+ -rm -f src/moxie/$(am__dirstamp)
-rm -f src/pa/$(DEPDIR)/$(am__dirstamp)
-rm -f src/pa/$(am__dirstamp)
-rm -f src/powerpc/$(DEPDIR)/$(am__dirstamp)
@@ -1599,20 +1750,25 @@ distclean-generic:
-rm -f src/sh64/$(am__dirstamp)
-rm -f src/sparc/$(DEPDIR)/$(am__dirstamp)
-rm -f src/sparc/$(am__dirstamp)
+ -rm -f src/tile/$(DEPDIR)/$(am__dirstamp)
+ -rm -f src/tile/$(am__dirstamp)
-rm -f src/x86/$(DEPDIR)/$(am__dirstamp)
-rm -f src/x86/$(am__dirstamp)
+ -rm -f src/xtensa/$(DEPDIR)/$(am__dirstamp)
+ -rm -f src/xtensa/$(am__dirstamp)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-recursive
-clean-am: clean-aminfo clean-generic clean-libLTLIBRARIES \
- clean-libtool clean-noinstLTLIBRARIES mostlyclean-am
+clean-am: clean-aminfo clean-generic clean-libtool \
+ clean-noinstLTLIBRARIES clean-toolexeclibLTLIBRARIES \
+ mostlyclean-am
distclean: distclean-recursive
-rm -f $(am__CONFIG_DISTCLEAN_FILES)
- -rm -rf src/$(DEPDIR) src/alpha/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/mips/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/x86/$(DEPDIR)
+ -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/bfin/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/metag/$(DEPDIR) src/microblaze/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/tile/$(DEPDIR) src/x86/$(DEPDIR) src/xtensa/$(DEPDIR)
-rm -f Makefile
distclean-am: clean-am distclean-compile distclean-generic \
distclean-hdr distclean-libtool distclean-tags
@@ -1635,8 +1791,11 @@ install-dvi: install-dvi-recursive
install-dvi-am: $(DVIS)
@$(NORMAL_INSTALL)
- test -z "$(dvidir)" || $(MKDIR_P) "$(DESTDIR)$(dvidir)"
@list='$(DVIS)'; test -n "$(dvidir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(dvidir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(dvidir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -1645,22 +1804,28 @@ install-dvi-am: $(DVIS)
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(dvidir)'"; \
$(INSTALL_DATA) $$files "$(DESTDIR)$(dvidir)" || exit $$?; \
done
-install-exec-am: install-libLTLIBRARIES
+install-exec-am: install-toolexeclibLTLIBRARIES
+
+install-html: install-html-recursive
install-html-am: $(HTMLS)
@$(NORMAL_INSTALL)
- test -z "$(htmldir)" || $(MKDIR_P) "$(DESTDIR)$(htmldir)"
@list='$(HTMLS)'; list2=; test -n "$(htmldir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(htmldir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(htmldir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p" || test -d "$$p"; then d=; else d="$(srcdir)/"; fi; \
$(am__strip_dir) \
- if test -d "$$d$$p"; then \
+ d2=$$d$$p; \
+ if test -d "$$d2"; then \
echo " $(MKDIR_P) '$(DESTDIR)$(htmldir)/$$f'"; \
$(MKDIR_P) "$(DESTDIR)$(htmldir)/$$f" || exit 1; \
- echo " $(INSTALL_DATA) '$$d$$p'/* '$(DESTDIR)$(htmldir)/$$f'"; \
- $(INSTALL_DATA) "$$d$$p"/* "$(DESTDIR)$(htmldir)/$$f" || exit $$?; \
+ echo " $(INSTALL_DATA) '$$d2'/* '$(DESTDIR)$(htmldir)/$$f'"; \
+ $(INSTALL_DATA) "$$d2"/* "$(DESTDIR)$(htmldir)/$$f" || exit $$?; \
else \
- list2="$$list2 $$d$$p"; \
+ list2="$$list2 $$d2"; \
fi; \
done; \
test -z "$$list2" || { echo "$$list2" | $(am__base_list) | \
@@ -1672,9 +1837,12 @@ install-info: install-info-recursive
install-info-am: $(INFO_DEPS)
@$(NORMAL_INSTALL)
- test -z "$(infodir)" || $(MKDIR_P) "$(DESTDIR)$(infodir)"
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
list='$(INFO_DEPS)'; test -n "$(infodir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(infodir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(infodir)" || exit 1; \
+ fi; \
for file in $$list; do \
case $$file in \
$(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
@@ -1692,8 +1860,7 @@ install-info-am: $(INFO_DEPS)
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(infodir)'"; \
$(INSTALL_DATA) $$files "$(DESTDIR)$(infodir)" || exit $$?; done
@$(POST_INSTALL)
- @if (install-info --version && \
- install-info --version 2>&1 | sed 1q | grep -i -v debian) >/dev/null 2>&1; then \
+ @if $(am__can_run_installinfo); then \
list='$(INFO_DEPS)'; test -n "$(infodir)" || list=; \
for file in $$list; do \
relfile=`echo "$$file" | sed 's|^.*/||'`; \
@@ -1703,10 +1870,15 @@ install-info-am: $(INFO_DEPS)
else : ; fi
install-man:
+install-pdf: install-pdf-recursive
+
install-pdf-am: $(PDFS)
@$(NORMAL_INSTALL)
- test -z "$(pdfdir)" || $(MKDIR_P) "$(DESTDIR)$(pdfdir)"
@list='$(PDFS)'; test -n "$(pdfdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(pdfdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(pdfdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -1718,8 +1890,11 @@ install-ps: install-ps-recursive
install-ps-am: $(PSS)
@$(NORMAL_INSTALL)
- test -z "$(psdir)" || $(MKDIR_P) "$(DESTDIR)$(psdir)"
@list='$(PSS)'; test -n "$(psdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(psdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(psdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -1732,7 +1907,7 @@ installcheck-am:
maintainer-clean: maintainer-clean-recursive
-rm -f $(am__CONFIG_DISTCLEAN_FILES)
-rm -rf $(top_srcdir)/autom4te.cache
- -rm -rf src/$(DEPDIR) src/alpha/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/mips/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/x86/$(DEPDIR)
+ -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/bfin/$(DEPDIR) src/cris/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/metag/$(DEPDIR) src/microblaze/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/tile/$(DEPDIR) src/x86/$(DEPDIR) src/xtensa/$(DEPDIR)
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-aminfo \
maintainer-clean-generic maintainer-clean-vti
@@ -1751,40 +1926,38 @@ ps: ps-recursive
ps-am: $(PSS)
uninstall-am: uninstall-dvi-am uninstall-html-am uninstall-info-am \
- uninstall-libLTLIBRARIES uninstall-pdf-am \
- uninstall-pkgconfigDATA uninstall-ps-am
+ uninstall-pdf-am uninstall-pkgconfigDATA uninstall-ps-am \
+ uninstall-toolexeclibLTLIBRARIES
.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) all \
- ctags-recursive install-am install-strip tags-recursive
+ cscopelist-recursive ctags-recursive install-am install-strip \
+ tags-recursive
.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \
all all-am am--refresh check check-am clean clean-aminfo \
- clean-generic clean-libLTLIBRARIES clean-libtool \
- clean-noinstLTLIBRARIES ctags ctags-recursive dist dist-all \
- dist-bzip2 dist-gzip dist-info dist-lzma dist-shar dist-tarZ \
- dist-xz dist-zip distcheck distclean distclean-compile \
- distclean-generic distclean-hdr distclean-libtool \
- distclean-tags distcleancheck distdir distuninstallcheck dvi \
- dvi-am html html-am info info-am install install-am \
- install-data install-data-am install-dvi install-dvi-am \
- install-exec install-exec-am install-html install-html-am \
- install-info install-info-am install-libLTLIBRARIES \
- install-man install-pdf install-pdf-am install-pkgconfigDATA \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs installdirs-am maintainer-clean \
- maintainer-clean-aminfo maintainer-clean-generic \
- maintainer-clean-vti mostlyclean mostlyclean-aminfo \
- mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
- mostlyclean-vti pdf pdf-am ps ps-am tags tags-recursive \
- uninstall uninstall-am uninstall-dvi-am uninstall-html-am \
- uninstall-info-am uninstall-libLTLIBRARIES uninstall-pdf-am \
- uninstall-pkgconfigDATA uninstall-ps-am
-
-
-# No install-html or install-pdf support in automake yet
-.PHONY: install-html install-pdf
-install-html:
-install-pdf:
+ clean-cscope clean-generic clean-libtool \
+ clean-noinstLTLIBRARIES clean-toolexeclibLTLIBRARIES cscope \
+ cscopelist cscopelist-recursive ctags ctags-recursive dist \
+ dist-all dist-bzip2 dist-gzip dist-info dist-lzip dist-shar \
+ dist-tarZ dist-xz dist-zip distcheck distclean \
+ distclean-compile distclean-generic distclean-hdr \
+ distclean-libtool distclean-tags distcleancheck distdir \
+ distuninstallcheck dvi dvi-am html html-am info info-am \
+ install install-am install-data install-data-am install-dvi \
+ install-dvi-am install-exec install-exec-am install-html \
+ install-html-am install-info install-info-am install-man \
+ install-pdf install-pdf-am install-pkgconfigDATA install-ps \
+ install-ps-am install-strip install-toolexeclibLTLIBRARIES \
+ installcheck installcheck-am installdirs installdirs-am \
+ maintainer-clean maintainer-clean-aminfo \
+ maintainer-clean-generic maintainer-clean-vti mostlyclean \
+ mostlyclean-aminfo mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool mostlyclean-vti pdf pdf-am ps ps-am tags \
+ tags-recursive uninstall uninstall-am uninstall-dvi-am \
+ uninstall-html-am uninstall-info-am uninstall-pdf-am \
+ uninstall-pkgconfigDATA uninstall-ps-am \
+ uninstall-toolexeclibLTLIBRARIES
+
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
diff --git a/Modules/_ctypes/libffi/README b/Modules/_ctypes/libffi/README
index 167de42..19156fe 100644
--- a/Modules/_ctypes/libffi/README
+++ b/Modules/_ctypes/libffi/README
@@ -1,7 +1,7 @@
Status
======
-libffi-3.0.10 was released on XXXXXXXXXX, 2010. Check the libffi web
+libffi-3.0.13 was released on March 17, 2013. Check the libffi web
page for updates: <URL:http://sourceware.org/libffi/>.
@@ -43,46 +43,70 @@ Libffi has been ported to many different platforms.
For specific configuration details and testing status, please
refer to the wiki page here:
- http://www.moxielogic.org/wiki/index.php?title=Libffi_3.0.10
+ http://www.moxielogic.org/wiki/index.php?title=Libffi_3.0.13
At the time of release, the following basic configurations have been
tested:
-|--------------+------------------|
-| Architecture | Operating System |
-|--------------+------------------|
-| Alpha | Linux |
-| Alpha | Tru64 |
-| ARM | Linux |
-| AVR32 | Linux |
-| HPPA | HPUX |
-| IA-64 | Linux |
-| MIPS | IRIX |
-| MIPS | Linux |
-| MIPS64 | Linux |
-| PowerPC | Linux |
-| PowerPC | Mac OSX |
-| PowerPC | FreeBSD |
-| PowerPC64 | Linux |
-| S390 | Linux |
-| S390X | Linux |
-| SPARC | Linux |
-| SPARC | Solaris |
-| SPARC64 | Linux |
-| SPARC64 | FreeBSD |
-| X86 | FreeBSD |
-| X86 | kFreeBSD |
-| X86 | Linux |
-| X86 | Mac OSX |
-| X86 | OpenBSD |
-| X86 | Solaris |
-| X86 | Windows/Cygwin |
-| X86 | Windows/MingW |
-| X86-64 | FreeBSD |
-| X86-64 | Linux |
-| X86-64 | OpenBSD |
-| X86-64 | Windows/MingW |
-|--------------+------------------|
+|-----------------+------------------+-------------------------|
+| Architecture | Operating System | Compiler |
+|-----------------+------------------+-------------------------|
+| AArch64 | Linux | GCC |
+| Alpha | Linux | GCC |
+| Alpha | Tru64 | GCC |
+| ARM | Linux | GCC |
+| ARM | iOS | GCC |
+| AVR32 | Linux | GCC |
+| Blackfin | uClinux | GCC |
+| HPPA | HPUX | GCC |
+| IA-64 | Linux | GCC |
+| M68K | FreeMiNT | GCC |
+| M68K | Linux | GCC |
+| M68K | RTEMS | GCC |
+| Meta | Linux | GCC |
+| MicroBlaze | Linux | GCC |
+| MIPS | IRIX | GCC |
+| MIPS | Linux | GCC |
+| MIPS | RTEMS | GCC |
+| MIPS64 | Linux | GCC |
+| Moxie           | Bare metal       | GCC                     |
+| PowerPC 32-bit | AIX | IBM XL C |
+| PowerPC 64-bit | AIX | IBM XL C |
+| PowerPC | AMIGA | GCC |
+| PowerPC | Linux | GCC |
+| PowerPC | Mac OSX | GCC |
+| PowerPC | FreeBSD | GCC |
+| PowerPC 64-bit | FreeBSD | GCC |
+| PowerPC 64-bit | Linux | GCC |
+| S390 | Linux | GCC |
+| S390X | Linux | GCC |
+| SPARC | Linux | GCC |
+| SPARC | Solaris | GCC |
+| SPARC | Solaris | Oracle Solaris Studio C |
+| SPARC64 | Linux | GCC |
+| SPARC64 | FreeBSD | GCC |
+| SPARC64 | Solaris | Oracle Solaris Studio C |
+| TILE-Gx/TILEPro | Linux | GCC |
+| X86 | FreeBSD | GCC |
+| X86 | GNU HURD | GCC |
+| X86 | Interix | GCC |
+| X86 | kFreeBSD | GCC |
+| X86 | Linux | GCC |
+| X86 | Mac OSX | GCC |
+| X86 | OpenBSD | GCC |
+| X86 | OS/2 | GCC |
+| X86 | Solaris | GCC |
+| X86 | Solaris | Oracle Solaris Studio C |
+| X86 | Windows/Cygwin | GCC |
+| X86 | Windows/MingW | GCC |
+| X86-64 | FreeBSD | GCC |
+| X86-64 | Linux | GCC |
+| X86-64 | Linux/x32 | GCC |
+| X86-64 | OpenBSD | GCC |
+| X86-64 | Solaris | Oracle Solaris Studio C |
+| X86-64 | Windows/MingW | GCC |
+| Xtensa | Linux | GCC |
+|-----------------+------------------+-------------------------|
Please send additional platform test results to
libffi-discuss@sourceware.org and feel free to update the wiki page
@@ -113,14 +137,20 @@ It's also possible to build libffi on Windows platforms with
Microsoft's Visual C++ compiler. In this case, use the msvcc.sh
wrapper script during configuration like so:
-path/to/configure --enable-shared --enable-static \
- CC=path/to/msvcc.sh LD=link \
- CPP=\"cl -nologo -EP\"
+path/to/configure CC=path/to/msvcc.sh LD=link CPP=\"cl -nologo -EP\"
+
+For 64-bit Windows builds, use CC="path/to/msvcc.sh -m64".
+You may also need to specify --build appropriately. When building with MSVC
+under a MingW environment, you may need to remove the line in configure
+that sets 'fix_srcfile_path' to a 'cygpath' command. ('cygpath' is not
+present in MingW, and is not required when using MingW-style paths.)
+
+For iOS builds, the 'libffi.xcodeproj' Xcode project is available.
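
As a rough sketch of the 64-bit MSVC case described above (the
x86_64-w64-mingw32 --build triple is only an assumption here and may
need to be adjusted for your environment), the configure invocation
might look like:

  path/to/configure --build=x86_64-w64-mingw32 \
      CC="path/to/msvcc.sh -m64" LD=link CPP="cl -nologo -EP"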
Configure has many other options. Use "configure --help" to see them all.
Once configure has finished, type "make". Note that you must be using
-GNU make. You can ftp GNU make from prep.ai.mit.edu:/pub/gnu.
+GNU make. You can ftp GNU make from ftp.gnu.org:/pub/gnu/make .
To ensure that libffi is working as advertised, type "make check".
This will require that you have DejaGNU installed.
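
On a Unix-like host, the steps above amount to something like the
following sketch (the install prefix is an assumption, and any
cross-compilation or MSVC options from the previous section would be
added to the configure line):

  ./configure --prefix=/usr/local
  make
  make check      # requires DejaGNU, as noted above
  make install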
@@ -133,11 +163,53 @@ History
See the ChangeLog files for details.
-3.0.10 ???-??-??
- Fix the N64 build on mips-sgi-irix6.5.
- Testsuite fixes for Tru64 Unix.
+3.0.13 Mar-17-13
+ Add Meta support.
+ Add missing Moxie bits.
+ Fix stack alignment bug on 32-bit x86.
+ Build fix for m68000 targets.
+ Build fix for soft-float Power targets.
+ Fix the install dir location for some platforms when building
+ with GCC (OS X, Solaris).
+ Fix Cygwin regression.
+
+3.0.12 Feb-11-13
+ Add Moxie support.
+ Add AArch64 support.
+ Add Blackfin support.
+ Add TILE-Gx/TILEPro support.
+ Add MicroBlaze support.
+ Add Xtensa support.
+ Add support for PaX enabled kernels with MPROTECT.
+ Add support for native vendor compilers on
+ Solaris and AIX.
+ Work around LLVM/GCC interoperability issue on x86_64.
+
+3.0.11 Apr-11-12
+ Lots of build fixes.
+ Add support for variadic functions (ffi_prep_cif_var).
+ Add Linux/x32 support.
+ Add thiscall, fastcall and MSVC cdecl support on Windows.
+ Add Amiga and newer MacOS support.
+ Add m68k FreeMiNT support.
+ Integration with iOS' xcode build tools.
+ Fix Octeon and MC68881 support.
+ Fix code pessimizations.
+
+3.0.10 Aug-23-11
+ Add support for Apple's iOS.
+ Add support for ARM VFP ABI.
+ Add RTEMS support for MIPS and M68K.
+ Fix instruction cache clearing problems on
+ ARM and SPARC.
+ Fix the N64 build on mips-sgi-irix6.5.
Enable builds with Microsoft's compiler.
- Enable x86 builds with Sun's compiler.
+ Enable x86 builds with Oracle's Solaris compiler.
+ Fix support for calling code compiled with Oracle's Sparc
+ Solaris compiler.
+ Testsuite fixes for Tru64 Unix.
+ Additional platform support.
3.0.9 Dec-31-09
Add AVR32 and win64 ports. Add ARM softfp support.
@@ -282,15 +354,19 @@ Thorup.
Major processor architecture ports were contributed by the following
developers:
+aarch64 Marcus Shawcroft, James Greenhalgh
alpha Richard Henderson
arm Raffaele Sena
+blackfin Alexandre Keunecke I. de Mendonca
cris Simon Posnjak, Hans-Peter Nilsson
frv Anthony Green
ia64 Hans Boehm
m32r Kazuhiro Inaoka
m68k Andreas Schwab
+microblaze Nathan Rossi
mips Anthony Green, Casey Marshall
mips64 David Daney
+moxie Anthony Green
pa Randolph Chung, Dave Anglin, Andreas Tobler
powerpc Geoffrey Keating, Andreas Tobler,
David Edelsohn, John Hornkvist
@@ -299,8 +375,10 @@ s390 Gerhard Tonn, Ulrich Weigand
sh Kaz Kojima
sh64 Kaz Kojima
sparc Anthony Green, Gordon Irlam
+tile-gx/tilepro Walter Lee
x86 Anthony Green, Jon Beniston
x86-64 Bo Thorsen
+xtensa Chris Zankel
Jesper Skov and Andrew Haley both did more than their fair share of
stepping through the code and tracking down bugs.
@@ -318,5 +396,6 @@ Alex Oliva solved the executable page problem for SElinux.
The list above is almost certainly incomplete and inaccurate. I'm
happy to make corrections or additions upon request.
-If you have a problem, or have found a bug, please send a note to
-green@redhat.com.
+If you have a problem, or have found a bug, please send a note to the
+author at green@moxielogic.com, or the project mailing list at
+libffi-discuss@sourceware.org.
diff --git a/Modules/_ctypes/libffi/aclocal.m4 b/Modules/_ctypes/libffi/aclocal.m4
index c8e526f..c3ab272 100644
--- a/Modules/_ctypes/libffi/aclocal.m4
+++ b/Modules/_ctypes/libffi/aclocal.m4
@@ -1,7 +1,7 @@
-# generated automatically by aclocal 1.11.1 -*- Autoconf -*-
+# generated automatically by aclocal 1.12.2 -*- Autoconf -*-
+
+# Copyright (C) 1996-2012 Free Software Foundation, Inc.
-# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
-# 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -13,28 +13,848 @@
m4_ifndef([AC_AUTOCONF_VERSION],
[m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
-m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.65],,
-[m4_warning([this file was generated for autoconf 2.65.
+m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],,
+[m4_warning([this file was generated for autoconf 2.69.
You have another version of autoconf. It may work, but is not guaranteed to.
If you have problems, you may need to regenerate the build system entirely.
-To do so, use the procedure documented by the package, typically `autoreconf'.])])
+To do so, use the procedure documented by the package, typically 'autoreconf'.])])
+
+# ltdl.m4 - Configure ltdl for the target system. -*-Autoconf-*-
+#
+# Copyright (C) 1999-2006, 2007, 2008, 2011 Free Software Foundation, Inc.
+# Written by Thomas Tanner, 1999
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# serial 18 LTDL_INIT
+
+# LT_CONFIG_LTDL_DIR(DIRECTORY, [LTDL-MODE])
+# ------------------------------------------
+# DIRECTORY contains the libltdl sources. It is okay to call this
+# function multiple times, as long as the same DIRECTORY is always given.
+AC_DEFUN([LT_CONFIG_LTDL_DIR],
+[AC_BEFORE([$0], [LTDL_INIT])
+_$0($*)
+])# LT_CONFIG_LTDL_DIR
+
+# We break this out into a separate macro, so that we can call it safely
+# internally without being caught accidentally by the sed scan in libtoolize.
+m4_defun([_LT_CONFIG_LTDL_DIR],
+[dnl remove trailing slashes
+m4_pushdef([_ARG_DIR], m4_bpatsubst([$1], [/*$]))
+m4_case(_LTDL_DIR,
+ [], [dnl only set lt_ltdl_dir if _ARG_DIR is not simply `.'
+ m4_if(_ARG_DIR, [.],
+ [],
+ [m4_define([_LTDL_DIR], _ARG_DIR)
+ _LT_SHELL_INIT([lt_ltdl_dir=']_ARG_DIR['])])],
+ [m4_if(_ARG_DIR, _LTDL_DIR,
+ [],
+ [m4_fatal([multiple libltdl directories: `]_LTDL_DIR[', `]_ARG_DIR['])])])
+m4_popdef([_ARG_DIR])
+])# _LT_CONFIG_LTDL_DIR
+
+# Initialise:
+m4_define([_LTDL_DIR], [])
+
+
+# _LT_BUILD_PREFIX
+# ----------------
+# If Autoconf is new enough, expand to `${top_build_prefix}', otherwise
+# to `${top_builddir}/'.
+m4_define([_LT_BUILD_PREFIX],
+[m4_ifdef([AC_AUTOCONF_VERSION],
+ [m4_if(m4_version_compare(m4_defn([AC_AUTOCONF_VERSION]), [2.62]),
+ [-1], [m4_ifdef([_AC_HAVE_TOP_BUILD_PREFIX],
+ [${top_build_prefix}],
+ [${top_builddir}/])],
+ [${top_build_prefix}])],
+ [${top_builddir}/])[]dnl
+])
+
+
+# LTDL_CONVENIENCE
+# ----------------
+# sets LIBLTDL to the link flags for the libltdl convenience library and
+# LTDLINCL to the include flags for the libltdl header and adds
+# --enable-ltdl-convenience to the configure arguments. Note that
+# AC_CONFIG_SUBDIRS is not called here. LIBLTDL will be prefixed with
+# '${top_build_prefix}' if available, otherwise with '${top_builddir}/',
+# and LTDLINCL will be prefixed with '${top_srcdir}/' (note the single
+# quotes!). If your package is not flat and you're not using automake,
+# define top_build_prefix, top_builddir, and top_srcdir appropriately
+# in your Makefiles.
+AC_DEFUN([LTDL_CONVENIENCE],
+[AC_BEFORE([$0], [LTDL_INIT])dnl
+dnl Although the argument is deprecated and no longer documented,
+dnl LTDL_CONVENIENCE used to take a DIRECTORY argument, if we have one
+dnl here make sure it is the same as any other declaration of libltdl's
+dnl location! This also ensures lt_ltdl_dir is set when configure.ac is
+dnl not yet using an explicit LT_CONFIG_LTDL_DIR.
+m4_ifval([$1], [_LT_CONFIG_LTDL_DIR([$1])])dnl
+_$0()
+])# LTDL_CONVENIENCE
+
+# AC_LIBLTDL_CONVENIENCE accepted a directory argument in older libtools,
+# now we have LT_CONFIG_LTDL_DIR:
+AU_DEFUN([AC_LIBLTDL_CONVENIENCE],
+[_LT_CONFIG_LTDL_DIR([m4_default([$1], [libltdl])])
+_LTDL_CONVENIENCE])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBLTDL_CONVENIENCE], [])
+
+
+# _LTDL_CONVENIENCE
+# -----------------
+# Code shared by LTDL_CONVENIENCE and LTDL_INIT([convenience]).
+m4_defun([_LTDL_CONVENIENCE],
+[case $enable_ltdl_convenience in
+ no) AC_MSG_ERROR([this package needs a convenience libltdl]) ;;
+ "") enable_ltdl_convenience=yes
+ ac_configure_args="$ac_configure_args --enable-ltdl-convenience" ;;
+esac
+LIBLTDL='_LT_BUILD_PREFIX'"${lt_ltdl_dir+$lt_ltdl_dir/}libltdlc.la"
+LTDLDEPS=$LIBLTDL
+LTDLINCL='-I${top_srcdir}'"${lt_ltdl_dir+/$lt_ltdl_dir}"
+
+AC_SUBST([LIBLTDL])
+AC_SUBST([LTDLDEPS])
+AC_SUBST([LTDLINCL])
+
+# For backwards non-gettext consistent compatibility...
+INCLTDL="$LTDLINCL"
+AC_SUBST([INCLTDL])
+])# _LTDL_CONVENIENCE
+
+
+# LTDL_INSTALLABLE
+# ----------------
+# sets LIBLTDL to the link flags for the libltdl installable library
+# and LTDLINCL to the include flags for the libltdl header and adds
+# --enable-ltdl-install to the configure arguments. Note that
+# AC_CONFIG_SUBDIRS is not called from here. If an installed libltdl
+# is not found, LIBLTDL will be prefixed with '${top_build_prefix}' if
+# available, otherwise with '${top_builddir}/', and LTDLINCL will be
+# prefixed with '${top_srcdir}/' (note the single quotes!). If your
+# package is not flat and you're not using automake, define top_build_prefix,
+# top_builddir, and top_srcdir appropriately in your Makefiles.
+# In the future, this macro may have to be called after LT_INIT.
+AC_DEFUN([LTDL_INSTALLABLE],
+[AC_BEFORE([$0], [LTDL_INIT])dnl
+dnl Although the argument is deprecated and no longer documented,
+dnl LTDL_INSTALLABLE used to take a DIRECTORY argument, if we have one
+dnl here make sure it is the same as any other declaration of libltdl's
+dnl location! This also ensures lt_ltdl_dir is set when configure.ac is
+dnl not yet using an explicit LT_CONFIG_LTDL_DIR.
+m4_ifval([$1], [_LT_CONFIG_LTDL_DIR([$1])])dnl
+_$0()
+])# LTDL_INSTALLABLE
+
+# AC_LIBLTDL_INSTALLABLE accepted a directory argument in older libtools,
+# now we have LT_CONFIG_LTDL_DIR:
+AU_DEFUN([AC_LIBLTDL_INSTALLABLE],
+[_LT_CONFIG_LTDL_DIR([m4_default([$1], [libltdl])])
+_LTDL_INSTALLABLE])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBLTDL_INSTALLABLE], [])
+
+
+# _LTDL_INSTALLABLE
+# -----------------
+# Code shared by LTDL_INSTALLABLE and LTDL_INIT([installable]).
+m4_defun([_LTDL_INSTALLABLE],
+[if test -f $prefix/lib/libltdl.la; then
+ lt_save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="-L$prefix/lib $LDFLAGS"
+ AC_CHECK_LIB([ltdl], [lt_dlinit], [lt_lib_ltdl=yes])
+ LDFLAGS="$lt_save_LDFLAGS"
+ if test x"${lt_lib_ltdl-no}" = xyes; then
+ if test x"$enable_ltdl_install" != xyes; then
+ # Don't overwrite $prefix/lib/libltdl.la without --enable-ltdl-install
+ AC_MSG_WARN([not overwriting libltdl at $prefix, force with `--enable-ltdl-install'])
+ enable_ltdl_install=no
+ fi
+ elif test x"$enable_ltdl_install" = xno; then
+ AC_MSG_WARN([libltdl not installed, but installation disabled])
+ fi
+fi
+
+# If configure.ac declared an installable ltdl, and the user didn't override
+# with --disable-ltdl-install, we will install the shipped libltdl.
+case $enable_ltdl_install in
+ no) ac_configure_args="$ac_configure_args --enable-ltdl-install=no"
+ LIBLTDL="-lltdl"
+ LTDLDEPS=
+ LTDLINCL=
+ ;;
+ *) enable_ltdl_install=yes
+ ac_configure_args="$ac_configure_args --enable-ltdl-install"
+ LIBLTDL='_LT_BUILD_PREFIX'"${lt_ltdl_dir+$lt_ltdl_dir/}libltdl.la"
+ LTDLDEPS=$LIBLTDL
+ LTDLINCL='-I${top_srcdir}'"${lt_ltdl_dir+/$lt_ltdl_dir}"
+ ;;
+esac
+
+AC_SUBST([LIBLTDL])
+AC_SUBST([LTDLDEPS])
+AC_SUBST([LTDLINCL])
+
+# For backwards non-gettext consistent compatibility...
+INCLTDL="$LTDLINCL"
+AC_SUBST([INCLTDL])
+])# LTDL_INSTALLABLE
+
+
+# _LTDL_MODE_DISPATCH
+# -------------------
+m4_define([_LTDL_MODE_DISPATCH],
+[dnl If _LTDL_DIR is `.', then we are configuring libltdl itself:
+m4_if(_LTDL_DIR, [],
+ [],
+ dnl if _LTDL_MODE was not set already, the default value is `subproject':
+ [m4_case(m4_default(_LTDL_MODE, [subproject]),
+ [subproject], [AC_CONFIG_SUBDIRS(_LTDL_DIR)
+ _LT_SHELL_INIT([lt_dlopen_dir="$lt_ltdl_dir"])],
+ [nonrecursive], [_LT_SHELL_INIT([lt_dlopen_dir="$lt_ltdl_dir"; lt_libobj_prefix="$lt_ltdl_dir/"])],
+ [recursive], [],
+ [m4_fatal([unknown libltdl mode: ]_LTDL_MODE)])])dnl
+dnl Be careful not to expand twice:
+m4_define([$0], [])
+])# _LTDL_MODE_DISPATCH
+
+
+# _LT_LIBOBJ(MODULE_NAME)
+# -----------------------
+# Like AC_LIBOBJ, except that MODULE_NAME goes into _LT_LIBOBJS instead
+# of into LIBOBJS.
+AC_DEFUN([_LT_LIBOBJ], [
+ m4_pattern_allow([^_LT_LIBOBJS$])
+ _LT_LIBOBJS="$_LT_LIBOBJS $1.$ac_objext"
+])# _LT_LIBOBJS
+
+
+# LTDL_INIT([OPTIONS])
+# --------------------
+# Clients of libltdl can use this macro to allow the installer to
+# choose between a shipped copy of the ltdl sources or a preinstalled
+# version of the library. If the shipped ltdl sources are not in a
+# subdirectory named libltdl, the directory name must be given by
+# LT_CONFIG_LTDL_DIR.
+AC_DEFUN([LTDL_INIT],
+[dnl Parse OPTIONS
+_LT_SET_OPTIONS([$0], [$1])
+
+dnl We need to keep our own list of libobjs separate from our parent project,
+dnl and the easiest way to do that is redefine the AC_LIBOBJs macro while
+dnl we look for our own LIBOBJs.
+m4_pushdef([AC_LIBOBJ], m4_defn([_LT_LIBOBJ]))
+m4_pushdef([AC_LIBSOURCES])
+
+dnl If not otherwise defined, default to the 1.5.x compatible subproject mode:
+m4_if(_LTDL_MODE, [],
+ [m4_define([_LTDL_MODE], m4_default([$2], [subproject]))
+ m4_if([-1], [m4_bregexp(_LTDL_MODE, [\(subproject\|\(non\)?recursive\)])],
+ [m4_fatal([unknown libltdl mode: ]_LTDL_MODE)])])
+
+AC_ARG_WITH([included_ltdl],
+ [AS_HELP_STRING([--with-included-ltdl],
+ [use the GNU ltdl sources included here])])
+
+if test "x$with_included_ltdl" != xyes; then
+ # We are not being forced to use the included libltdl sources, so
+ # decide whether there is a useful installed version we can use.
+ AC_CHECK_HEADER([ltdl.h],
+ [AC_CHECK_DECL([lt_dlinterface_register],
+ [AC_CHECK_LIB([ltdl], [lt_dladvise_preload],
+ [with_included_ltdl=no],
+ [with_included_ltdl=yes])],
+ [with_included_ltdl=yes],
+ [AC_INCLUDES_DEFAULT
+ #include <ltdl.h>])],
+ [with_included_ltdl=yes],
+ [AC_INCLUDES_DEFAULT]
+ )
+fi
+
+dnl If neither LT_CONFIG_LTDL_DIR, LTDL_CONVENIENCE nor LTDL_INSTALLABLE
+dnl was called yet, then for old times' sake, we assume libltdl is in an
+dnl eponymous directory:
+AC_PROVIDE_IFELSE([LT_CONFIG_LTDL_DIR], [], [_LT_CONFIG_LTDL_DIR([libltdl])])
+
+AC_ARG_WITH([ltdl_include],
+ [AS_HELP_STRING([--with-ltdl-include=DIR],
+ [use the ltdl headers installed in DIR])])
+
+if test -n "$with_ltdl_include"; then
+ if test -f "$with_ltdl_include/ltdl.h"; then :
+ else
+ AC_MSG_ERROR([invalid ltdl include directory: `$with_ltdl_include'])
+ fi
+else
+ with_ltdl_include=no
+fi
+
+AC_ARG_WITH([ltdl_lib],
+ [AS_HELP_STRING([--with-ltdl-lib=DIR],
+ [use the libltdl.la installed in DIR])])
+
+if test -n "$with_ltdl_lib"; then
+ if test -f "$with_ltdl_lib/libltdl.la"; then :
+ else
+ AC_MSG_ERROR([invalid ltdl library directory: `$with_ltdl_lib'])
+ fi
+else
+ with_ltdl_lib=no
+fi
+
+case ,$with_included_ltdl,$with_ltdl_include,$with_ltdl_lib, in
+ ,yes,no,no,)
+ m4_case(m4_default(_LTDL_TYPE, [convenience]),
+ [convenience], [_LTDL_CONVENIENCE],
+ [installable], [_LTDL_INSTALLABLE],
+ [m4_fatal([unknown libltdl build type: ]_LTDL_TYPE)])
+ ;;
+ ,no,no,no,)
+ # If the included ltdl is not to be used, then use the
+ # preinstalled libltdl we found.
+ AC_DEFINE([HAVE_LTDL], [1],
+ [Define this if a modern libltdl is already installed])
+ LIBLTDL=-lltdl
+ LTDLDEPS=
+ LTDLINCL=
+ ;;
+ ,no*,no,*)
+ AC_MSG_ERROR([`--with-ltdl-include' and `--with-ltdl-lib' options must be used together])
+ ;;
+ *) with_included_ltdl=no
+ LIBLTDL="-L$with_ltdl_lib -lltdl"
+ LTDLDEPS=
+ LTDLINCL="-I$with_ltdl_include"
+ ;;
+esac
+INCLTDL="$LTDLINCL"
+
+# Report our decision...
+AC_MSG_CHECKING([where to find libltdl headers])
+AC_MSG_RESULT([$LTDLINCL])
+AC_MSG_CHECKING([where to find libltdl library])
+AC_MSG_RESULT([$LIBLTDL])
+
+_LTDL_SETUP
+
+dnl restore autoconf definition.
+m4_popdef([AC_LIBOBJ])
+m4_popdef([AC_LIBSOURCES])
+
+AC_CONFIG_COMMANDS_PRE([
+ _ltdl_libobjs=
+ _ltdl_ltlibobjs=
+ if test -n "$_LT_LIBOBJS"; then
+ # Remove the extension.
+ _lt_sed_drop_objext='s/\.o$//;s/\.obj$//'
+ for i in `for i in $_LT_LIBOBJS; do echo "$i"; done | sed "$_lt_sed_drop_objext" | sort -u`; do
+ _ltdl_libobjs="$_ltdl_libobjs $lt_libobj_prefix$i.$ac_objext"
+ _ltdl_ltlibobjs="$_ltdl_ltlibobjs $lt_libobj_prefix$i.lo"
+ done
+ fi
+ AC_SUBST([ltdl_LIBOBJS], [$_ltdl_libobjs])
+ AC_SUBST([ltdl_LTLIBOBJS], [$_ltdl_ltlibobjs])
+])
+
+# Only expand once:
+m4_define([LTDL_INIT])
+])# LTDL_INIT
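[Editor's note: the comments above describe the client-side contract of LTDL_INIT but never show a call site. The fragment below is a minimal configure.ac sketch, not taken from this tree; the package name, version, directory layout and the choice of the `convenience' option are illustrative assumptions.]

  # configure.ac of a hypothetical package shipping libltdl in ./libltdl
  AC_INIT([mypkg], [1.0])              dnl name/version are made up
  AM_INIT_AUTOMAKE
  AC_CONFIG_HEADERS([config.h])        dnl ltdl.h picks up the first config header
  LT_CONFIG_LTDL_DIR([libltdl])        dnl where the shipped ltdl sources live
  LT_INIT([dlopen])
  LTDL_INIT([convenience])             dnl or [installable]; see _LTDL_CONVENIENCE above
  AC_CONFIG_FILES([Makefile])
  AC_OUTPUT

A Makefile.am would then typically put $(LTDLINCL) into AM_CPPFLAGS and $(LIBLTDL) into LDADD, and installers can still override the decision with --with-included-ltdl, or point at an external copy via --with-ltdl-include/--with-ltdl-lib, exactly as the case statement above handles.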
+
+# Old names:
+AU_DEFUN([AC_LIB_LTDL], [LTDL_INIT($@)])
+AU_DEFUN([AC_WITH_LTDL], [LTDL_INIT($@)])
+AU_DEFUN([LT_WITH_LTDL], [LTDL_INIT($@)])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIB_LTDL], [])
+dnl AC_DEFUN([AC_WITH_LTDL], [])
+dnl AC_DEFUN([LT_WITH_LTDL], [])
+
+
+# _LTDL_SETUP
+# -----------
+# Perform all the checks necessary for compilation of the ltdl objects
+# -- including compiler checks and header checks. This is a public
+# interface mainly for the benefit of libltdl's own configure.ac, most
+# other users should call LTDL_INIT instead.
+AC_DEFUN([_LTDL_SETUP],
+[AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([LT_SYS_MODULE_EXT])dnl
+AC_REQUIRE([LT_SYS_MODULE_PATH])dnl
+AC_REQUIRE([LT_SYS_DLSEARCH_PATH])dnl
+AC_REQUIRE([LT_LIB_DLLOAD])dnl
+AC_REQUIRE([LT_SYS_SYMBOL_USCORE])dnl
+AC_REQUIRE([LT_FUNC_DLSYM_USCORE])dnl
+AC_REQUIRE([LT_SYS_DLOPEN_DEPLIBS])dnl
+AC_REQUIRE([gl_FUNC_ARGZ])dnl
+
+m4_require([_LT_CHECK_OBJDIR])dnl
+m4_require([_LT_HEADER_DLFCN])dnl
+m4_require([_LT_CHECK_DLPREOPEN])dnl
+m4_require([_LT_DECL_SED])dnl
+
+dnl Don't require this, or it will be expanded earlier than the code
+dnl that sets the variables it relies on:
+_LT_ENABLE_INSTALL
+
+dnl _LTDL_MODE specific code must be called at least once:
+_LTDL_MODE_DISPATCH
+
+# In order that ltdl.c can compile, find out the first AC_CONFIG_HEADERS
+# the user used. This is so that ltdl.h can pick up the parent project's
+# config.h file. The first file in AC_CONFIG_HEADERS must contain the
+# definitions required by ltdl.c.
+# FIXME: Remove use of undocumented AC_LIST_HEADERS (2.59 compatibility).
+AC_CONFIG_COMMANDS_PRE([dnl
+m4_pattern_allow([^LT_CONFIG_H$])dnl
+m4_ifset([AH_HEADER],
+ [LT_CONFIG_H=AH_HEADER],
+ [m4_ifset([AC_LIST_HEADERS],
+ [LT_CONFIG_H=`echo "AC_LIST_HEADERS" | $SED 's,^[[ ]]*,,;s,[[ :]].*$,,'`],
+ [])])])
+AC_SUBST([LT_CONFIG_H])
+
+AC_CHECK_HEADERS([unistd.h dl.h sys/dl.h dld.h mach-o/dyld.h dirent.h],
+ [], [], [AC_INCLUDES_DEFAULT])
+
+AC_CHECK_FUNCS([closedir opendir readdir], [], [AC_LIBOBJ([lt__dirent])])
+AC_CHECK_FUNCS([strlcat strlcpy], [], [AC_LIBOBJ([lt__strl])])
+
+m4_pattern_allow([LT_LIBEXT])dnl
+AC_DEFINE_UNQUOTED([LT_LIBEXT],["$libext"],[The archive extension])
+
+name=
+eval "lt_libprefix=\"$libname_spec\""
+m4_pattern_allow([LT_LIBPREFIX])dnl
+AC_DEFINE_UNQUOTED([LT_LIBPREFIX],["$lt_libprefix"],[The archive prefix])
+
+name=ltdl
+eval "LTDLOPEN=\"$libname_spec\""
+AC_SUBST([LTDLOPEN])
+])# _LTDL_SETUP
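[Editor's note: the last few assignments above derive the ltdl library name from libtool's libname_spec by clearing or setting $name before the eval. A quick illustration, assuming the common value libname_spec='lib$name' (the real value is whatever libtool's probe found, not something fixed by this diff):]

  libname_spec='lib$name'                              # assumed typical value
  name=;     eval "lt_libprefix=\"$libname_spec\""     # lt_libprefix=lib
  name=ltdl; eval "LTDLOPEN=\"$libname_spec\""         # LTDLOPEN=libltdl

So LT_LIBPREFIX ends up as the plain archive prefix ("lib" here) and LTDLOPEN as the prefixed library name ("libltdl").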
+
+
+# _LT_ENABLE_INSTALL
+# ------------------
+m4_define([_LT_ENABLE_INSTALL],
+[AC_ARG_ENABLE([ltdl-install],
+ [AS_HELP_STRING([--enable-ltdl-install], [install libltdl])])
+
+case ,${enable_ltdl_install},${enable_ltdl_convenience} in
+ *yes*) ;;
+ *) enable_ltdl_convenience=yes ;;
+esac
+
+m4_ifdef([AM_CONDITIONAL],
+[AM_CONDITIONAL(INSTALL_LTDL, test x"${enable_ltdl_install-no}" != xno)
+ AM_CONDITIONAL(CONVENIENCE_LTDL, test x"${enable_ltdl_convenience-no}" != xno)])
+])# _LT_ENABLE_INSTALL
+
+
+# LT_SYS_DLOPEN_DEPLIBS
+# ---------------------
+AC_DEFUN([LT_SYS_DLOPEN_DEPLIBS],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_CACHE_CHECK([whether deplibs are loaded by dlopen],
+ [lt_cv_sys_dlopen_deplibs],
+ [# PORTME does your system automatically load deplibs for dlopen?
+ # or its logical equivalent (e.g. shl_load for HP-UX < 11)
+ # For now, we just catch OSes we know something about -- in the
+ # future, we'll try to test this programmatically.
+ lt_cv_sys_dlopen_deplibs=unknown
+ case $host_os in
+ aix3*|aix4.1.*|aix4.2.*)
+ # Unknown whether this is true for these versions of AIX, but
+ # we want this `case' here to explicitly catch those versions.
+ lt_cv_sys_dlopen_deplibs=unknown
+ ;;
+ aix[[4-9]]*)
+ lt_cv_sys_dlopen_deplibs=yes
+ ;;
+ amigaos*)
+ case $host_cpu in
+ powerpc)
+ lt_cv_sys_dlopen_deplibs=no
+ ;;
+ esac
+ ;;
+ darwin*)
+ # Assuming the user has installed a libdl from somewhere, this is true.
+ # If you are looking for one, see http://www.opendarwin.org/projects/dlcompat
+ lt_cv_sys_dlopen_deplibs=yes
+ ;;
+ freebsd* | dragonfly*)
+ lt_cv_sys_dlopen_deplibs=yes
+ ;;
+ gnu* | linux* | k*bsd*-gnu | kopensolaris*-gnu)
+ # GNU and its variants, using gnu ld.so (Glibc)
+ lt_cv_sys_dlopen_deplibs=yes
+ ;;
+ hpux10*|hpux11*)
+ lt_cv_sys_dlopen_deplibs=yes
+ ;;
+ interix*)
+ lt_cv_sys_dlopen_deplibs=yes
+ ;;
+ irix[[12345]]*|irix6.[[01]]*)
+ # Catch all versions of IRIX before 6.2, and indicate that we don't
+ # know how it worked for any of those versions.
+ lt_cv_sys_dlopen_deplibs=unknown
+ ;;
+ irix*)
+ # The case above catches anything before 6.2, and it's known that
+ # at 6.2 and later dlopen does load deplibs.
+ lt_cv_sys_dlopen_deplibs=yes
+ ;;
+ netbsd*)
+ lt_cv_sys_dlopen_deplibs=yes
+ ;;
+ openbsd*)
+ lt_cv_sys_dlopen_deplibs=yes
+ ;;
+ osf[[1234]]*)
+ # dlopen did load deplibs (at least at 4.x), but until the 5.x series,
+ # it did *not* use an RPATH in a shared library to find objects the
+ # library depends on, so we explicitly say `no'.
+ lt_cv_sys_dlopen_deplibs=no
+ ;;
+ osf5.0|osf5.0a|osf5.1)
+ # dlopen *does* load deplibs and with the right loader patch applied
+ # it even uses RPATH in a shared library to search for shared objects
+ # that the library depends on, but there's no easy way to know if that
+ # patch is installed. Since this is the case, all we can really
+ # say is unknown -- it depends on the patch being installed. If
+ # it is, this changes to `yes'. Without it, it would be `no'.
+ lt_cv_sys_dlopen_deplibs=unknown
+ ;;
+ osf*)
+ # the two cases above should catch all versions of osf <= 5.1. Read
+ # the comments above for what we know about them.
+ # At > 5.1, deplibs are loaded *and* any RPATH in a shared library
+ # is used to find them so we can finally say `yes'.
+ lt_cv_sys_dlopen_deplibs=yes
+ ;;
+ qnx*)
+ lt_cv_sys_dlopen_deplibs=yes
+ ;;
+ solaris*)
+ lt_cv_sys_dlopen_deplibs=yes
+ ;;
+ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+ lt_cv_sys_dlopen_deplibs=yes
+ ;;
+ esac
+ ])
+if test "$lt_cv_sys_dlopen_deplibs" != yes; then
+ AC_DEFINE([LTDL_DLOPEN_DEPLIBS], [1],
+ [Define if the OS needs help to load dependent libraries for dlopen().])
+fi
+])# LT_SYS_DLOPEN_DEPLIBS
+
+# Old name:
+AU_ALIAS([AC_LTDL_SYS_DLOPEN_DEPLIBS], [LT_SYS_DLOPEN_DEPLIBS])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LTDL_SYS_DLOPEN_DEPLIBS], [])
+
+
+# LT_SYS_MODULE_EXT
+# -----------------
+AC_DEFUN([LT_SYS_MODULE_EXT],
+[m4_require([_LT_SYS_DYNAMIC_LINKER])dnl
+AC_CACHE_CHECK([which extension is used for runtime loadable modules],
+ [libltdl_cv_shlibext],
+[
+module=yes
+eval libltdl_cv_shlibext=$shrext_cmds
+module=no
+eval libltdl_cv_shrext=$shrext_cmds
+ ])
+if test -n "$libltdl_cv_shlibext"; then
+ m4_pattern_allow([LT_MODULE_EXT])dnl
+ AC_DEFINE_UNQUOTED([LT_MODULE_EXT], ["$libltdl_cv_shlibext"],
+ [Define to the extension used for runtime loadable modules, say, ".so".])
+fi
+if test "$libltdl_cv_shrext" != "$libltdl_cv_shlibext"; then
+ m4_pattern_allow([LT_SHARED_EXT])dnl
+ AC_DEFINE_UNQUOTED([LT_SHARED_EXT], ["$libltdl_cv_shrext"],
+ [Define to the shared library suffix, say, ".dylib".])
+fi
+])# LT_SYS_MODULE_EXT
+
+# Old name:
+AU_ALIAS([AC_LTDL_SHLIBEXT], [LT_SYS_MODULE_EXT])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LTDL_SHLIBEXT], [])
+
+
+# LT_SYS_MODULE_PATH
+# ------------------
+AC_DEFUN([LT_SYS_MODULE_PATH],
+[m4_require([_LT_SYS_DYNAMIC_LINKER])dnl
+AC_CACHE_CHECK([which variable specifies run-time module search path],
+ [lt_cv_module_path_var], [lt_cv_module_path_var="$shlibpath_var"])
+if test -n "$lt_cv_module_path_var"; then
+ m4_pattern_allow([LT_MODULE_PATH_VAR])dnl
+ AC_DEFINE_UNQUOTED([LT_MODULE_PATH_VAR], ["$lt_cv_module_path_var"],
+ [Define to the name of the environment variable that determines the run-time module search path.])
+fi
+])# LT_SYS_MODULE_PATH
+
+# Old name:
+AU_ALIAS([AC_LTDL_SHLIBPATH], [LT_SYS_MODULE_PATH])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LTDL_SHLIBPATH], [])
+
+
+# LT_SYS_DLSEARCH_PATH
+# --------------------
+AC_DEFUN([LT_SYS_DLSEARCH_PATH],
+[m4_require([_LT_SYS_DYNAMIC_LINKER])dnl
+AC_CACHE_CHECK([for the default library search path],
+ [lt_cv_sys_dlsearch_path],
+ [lt_cv_sys_dlsearch_path="$sys_lib_dlsearch_path_spec"])
+if test -n "$lt_cv_sys_dlsearch_path"; then
+ sys_dlsearch_path=
+ for dir in $lt_cv_sys_dlsearch_path; do
+ if test -z "$sys_dlsearch_path"; then
+ sys_dlsearch_path="$dir"
+ else
+ sys_dlsearch_path="$sys_dlsearch_path$PATH_SEPARATOR$dir"
+ fi
+ done
+ m4_pattern_allow([LT_DLSEARCH_PATH])dnl
+ AC_DEFINE_UNQUOTED([LT_DLSEARCH_PATH], ["$sys_dlsearch_path"],
+ [Define to the system default library search path.])
+fi
+])# LT_SYS_DLSEARCH_PATH
+
+# Old name:
+AU_ALIAS([AC_LTDL_SYSSEARCHPATH], [LT_SYS_DLSEARCH_PATH])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LTDL_SYSSEARCHPATH], [])
+
+
+# _LT_CHECK_DLPREOPEN
+# -------------------
+m4_defun([_LT_CHECK_DLPREOPEN],
+[m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl
+AC_CACHE_CHECK([whether libtool supports -dlopen/-dlpreopen],
+ [libltdl_cv_preloaded_symbols],
+ [if test -n "$lt_cv_sys_global_symbol_pipe"; then
+ libltdl_cv_preloaded_symbols=yes
+ else
+ libltdl_cv_preloaded_symbols=no
+ fi
+ ])
+if test x"$libltdl_cv_preloaded_symbols" = xyes; then
+ AC_DEFINE([HAVE_PRELOADED_SYMBOLS], [1],
+ [Define if libtool can extract symbol lists from object files.])
+fi
+])# _LT_CHECK_DLPREOPEN
+
+
+# LT_LIB_DLLOAD
+# -------------
+AC_DEFUN([LT_LIB_DLLOAD],
+[m4_pattern_allow([^LT_DLLOADERS$])
+LT_DLLOADERS=
+AC_SUBST([LT_DLLOADERS])
+
+AC_LANG_PUSH([C])
+
+LIBADD_DLOPEN=
+AC_SEARCH_LIBS([dlopen], [dl],
+ [AC_DEFINE([HAVE_LIBDL], [1],
+ [Define if you have the libdl library or equivalent.])
+ if test "$ac_cv_search_dlopen" != "none required" ; then
+ LIBADD_DLOPEN="-ldl"
+ fi
+ libltdl_cv_lib_dl_dlopen="yes"
+ LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dlopen.la"],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([[#if HAVE_DLFCN_H
+# include <dlfcn.h>
+#endif
+ ]], [[dlopen(0, 0);]])],
+ [AC_DEFINE([HAVE_LIBDL], [1],
+ [Define if you have the libdl library or equivalent.])
+ libltdl_cv_func_dlopen="yes"
+ LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dlopen.la"],
+ [AC_CHECK_LIB([svld], [dlopen],
+ [AC_DEFINE([HAVE_LIBDL], [1],
+ [Define if you have the libdl library or equivalent.])
+ LIBADD_DLOPEN="-lsvld" libltdl_cv_func_dlopen="yes"
+ LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dlopen.la"])])])
+if test x"$libltdl_cv_func_dlopen" = xyes || test x"$libltdl_cv_lib_dl_dlopen" = xyes
+then
+ lt_save_LIBS="$LIBS"
+ LIBS="$LIBS $LIBADD_DLOPEN"
+ AC_CHECK_FUNCS([dlerror])
+ LIBS="$lt_save_LIBS"
+fi
+AC_SUBST([LIBADD_DLOPEN])
+
+LIBADD_SHL_LOAD=
+AC_CHECK_FUNC([shl_load],
+ [AC_DEFINE([HAVE_SHL_LOAD], [1],
+ [Define if you have the shl_load function.])
+ LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}shl_load.la"],
+ [AC_CHECK_LIB([dld], [shl_load],
+ [AC_DEFINE([HAVE_SHL_LOAD], [1],
+ [Define if you have the shl_load function.])
+ LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}shl_load.la"
+ LIBADD_SHL_LOAD="-ldld"])])
+AC_SUBST([LIBADD_SHL_LOAD])
+
+case $host_os in
+darwin[[1567]].*)
+# We only want this for pre-Mac OS X 10.4.
+ AC_CHECK_FUNC([_dyld_func_lookup],
+ [AC_DEFINE([HAVE_DYLD], [1],
+ [Define if you have the _dyld_func_lookup function.])
+ LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dyld.la"])
+ ;;
+beos*)
+ LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}load_add_on.la"
+ ;;
+cygwin* | mingw* | os2* | pw32*)
+ AC_CHECK_DECLS([cygwin_conv_path], [], [], [[#include <sys/cygwin.h>]])
+ LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}loadlibrary.la"
+ ;;
+esac
+
+AC_CHECK_LIB([dld], [dld_link],
+ [AC_DEFINE([HAVE_DLD], [1],
+ [Define if you have the GNU dld library.])
+ LT_DLLOADERS="$LT_DLLOADERS ${lt_dlopen_dir+$lt_dlopen_dir/}dld_link.la"])
+AC_SUBST([LIBADD_DLD_LINK])
+
+m4_pattern_allow([^LT_DLPREOPEN$])
+LT_DLPREOPEN=
+if test -n "$LT_DLLOADERS"
+then
+ for lt_loader in $LT_DLLOADERS; do
+ LT_DLPREOPEN="$LT_DLPREOPEN-dlpreopen $lt_loader "
+ done
+ AC_DEFINE([HAVE_LIBDLLOADER], [1],
+ [Define if libdlloader will be built on this platform])
+fi
+AC_SUBST([LT_DLPREOPEN])
+
+dnl This isn't used anymore, but set it for backwards compatibility
+LIBADD_DL="$LIBADD_DLOPEN $LIBADD_SHL_LOAD"
+AC_SUBST([LIBADD_DL])
+
+AC_LANG_POP
+])# LT_LIB_DLLOAD
+
+# Old name:
+AU_ALIAS([AC_LTDL_DLLIB], [LT_LIB_DLLOAD])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LTDL_DLLIB], [])
+
+
+# LT_SYS_SYMBOL_USCORE
+# --------------------
+# does the compiler prefix global symbols with an underscore?
+AC_DEFUN([LT_SYS_SYMBOL_USCORE],
+[m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl
+AC_CACHE_CHECK([for _ prefix in compiled symbols],
+ [lt_cv_sys_symbol_underscore],
+ [lt_cv_sys_symbol_underscore=no
+ cat > conftest.$ac_ext <<_LT_EOF
+void nm_test_func(){}
+int main(){nm_test_func;return 0;}
+_LT_EOF
+ if AC_TRY_EVAL(ac_compile); then
+ # Now try to grab the symbols.
+ ac_nlist=conftest.nm
+ if AC_TRY_EVAL(NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $ac_nlist) && test -s "$ac_nlist"; then
+ # See whether the symbols have a leading underscore.
+ if grep '^. _nm_test_func' "$ac_nlist" >/dev/null; then
+ lt_cv_sys_symbol_underscore=yes
+ else
+ if grep '^. nm_test_func ' "$ac_nlist" >/dev/null; then
+ :
+ else
+ echo "configure: cannot find nm_test_func in $ac_nlist" >&AS_MESSAGE_LOG_FD
+ fi
+ fi
+ else
+ echo "configure: cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD
+ fi
+ else
+ echo "configure: failed program was:" >&AS_MESSAGE_LOG_FD
+ cat conftest.c >&AS_MESSAGE_LOG_FD
+ fi
+ rm -rf conftest*
+ ])
+ sys_symbol_underscore=$lt_cv_sys_symbol_underscore
+ AC_SUBST([sys_symbol_underscore])
+])# LT_SYS_SYMBOL_USCORE
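[Editor's note: the probe above simply compiles nm_test_func() and greps the symbol table. As a rough illustration, the two possible outcomes look something like this (the exact nm output format varies by platform and is not part of this diff):]

  $ nm conftest.o | grep nm_test_func
  0000000000000000 T _nm_test_func      # leading underscore, e.g. Mach-O/a.out -> lt_cv_sys_symbol_underscore=yes
  $ nm conftest.o | grep nm_test_func
  0000000000000000 T nm_test_func       # no prefix, typical ELF -> lt_cv_sys_symbol_underscore=no

LT_FUNC_DLSYM_USCORE below only matters in the first case, where it checks whether dlsym() callers must add the underscore themselves.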
+
+# Old name:
+AU_ALIAS([AC_LTDL_SYMBOL_USCORE], [LT_SYS_SYMBOL_USCORE])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LTDL_SYMBOL_USCORE], [])
+
+
+# LT_FUNC_DLSYM_USCORE
+# --------------------
+AC_DEFUN([LT_FUNC_DLSYM_USCORE],
+[AC_REQUIRE([LT_SYS_SYMBOL_USCORE])dnl
+if test x"$lt_cv_sys_symbol_underscore" = xyes; then
+ if test x"$libltdl_cv_func_dlopen" = xyes ||
+ test x"$libltdl_cv_lib_dl_dlopen" = xyes ; then
+ AC_CACHE_CHECK([whether we have to add an underscore for dlsym],
+ [libltdl_cv_need_uscore],
+ [libltdl_cv_need_uscore=unknown
+ save_LIBS="$LIBS"
+ LIBS="$LIBS $LIBADD_DLOPEN"
+ _LT_TRY_DLOPEN_SELF(
+ [libltdl_cv_need_uscore=no], [libltdl_cv_need_uscore=yes],
+ [], [libltdl_cv_need_uscore=cross])
+ LIBS="$save_LIBS"
+ ])
+ fi
+fi
+
+if test x"$libltdl_cv_need_uscore" = xyes; then
+ AC_DEFINE([NEED_USCORE], [1],
+ [Define if dlsym() requires a leading underscore in symbol names.])
+fi
+])# LT_FUNC_DLSYM_USCORE
-# Copyright (C) 2002, 2003, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+# Old name:
+AU_ALIAS([AC_LTDL_DLSYM_USCORE], [LT_FUNC_DLSYM_USCORE])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LTDL_DLSYM_USCORE], [])
+
+# Copyright (C) 2002-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
+# serial 8
+
# AM_AUTOMAKE_VERSION(VERSION)
# ----------------------------
# Automake X.Y traces this macro to ensure aclocal.m4 has been
# generated from the m4 files accompanying Automake X.Y.
# (This private macro should not be called outside this file.)
AC_DEFUN([AM_AUTOMAKE_VERSION],
-[am__api_version='1.11'
+[am__api_version='1.12'
dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
dnl require some minimum version. Point them to the right macro.
-m4_if([$1], [1.11.1], [],
+m4_if([$1], [1.12.2], [],
[AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
])
@@ -50,14 +870,14 @@ m4_define([_AM_AUTOCONF_VERSION], [])
# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced.
# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
-[AM_AUTOMAKE_VERSION([1.11.1])dnl
+[AM_AUTOMAKE_VERSION([1.12.2])dnl
m4_ifndef([AC_AUTOCONF_VERSION],
[m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
# Figure out how to run the assembler. -*- Autoconf -*-
-# Copyright (C) 2001, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+# Copyright (C) 2001-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -79,15 +899,17 @@ _AM_IF_OPTION([no-dependencies],, [_AM_DEPENDENCIES([CCAS])])dnl
# AM_AUX_DIR_EXPAND -*- Autoconf -*-
-# Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc.
+# Copyright (C) 2001-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
+# serial 2
+
# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets
-# $ac_aux_dir to `$srcdir/foo'. In other projects, it is set to
-# `$srcdir', `$srcdir/..', or `$srcdir/../..'.
+# $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to
+# '$srcdir', '$srcdir/..', or '$srcdir/../..'.
#
# Of course, Automake must honor this variable whenever it calls a
# tool from the auxiliary directory. The problem is that $srcdir (and
@@ -106,7 +928,7 @@ _AM_IF_OPTION([no-dependencies],, [_AM_DEPENDENCIES([CCAS])])dnl
#
# The reason of the latter failure is that $top_srcdir and $ac_aux_dir
# are both prefixed by $srcdir. In an in-source build this is usually
-# harmless because $srcdir is `.', but things will broke when you
+# harmless because $srcdir is '.', but things will break when you
# start a VPATH build or use an absolute $srcdir.
#
# So we could use something similar to $top_srcdir/$ac_aux_dir/missing,
@@ -132,22 +954,21 @@ am_aux_dir=`cd $ac_aux_dir && pwd`
# AM_CONDITIONAL -*- Autoconf -*-
-# Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005, 2006, 2008
-# Free Software Foundation, Inc.
+# Copyright (C) 1997-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 9
+# serial 10
# AM_CONDITIONAL(NAME, SHELL-CONDITION)
# -------------------------------------
# Define a conditional.
AC_DEFUN([AM_CONDITIONAL],
-[AC_PREREQ(2.52)dnl
- ifelse([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])],
- [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl
+[AC_PREREQ([2.52])dnl
+ m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])],
+ [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl
AC_SUBST([$1_TRUE])dnl
AC_SUBST([$1_FALSE])dnl
_AM_SUBST_NOTMAKE([$1_TRUE])dnl
@@ -166,16 +987,15 @@ AC_CONFIG_COMMANDS_PRE(
Usually this means the macro was only invoked conditionally.]])
fi])])
-# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2009
-# Free Software Foundation, Inc.
+# Copyright (C) 1999-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 10
+# serial 17
-# There are a few dirty hacks below to avoid letting `AC_PROG_CC' be
+# There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be
# written in clear, in which case automake, when reading aclocal.m4,
# will think it sees a *use*, and therefore will trigger all its
# C support machinery. Also note that it means that autoscan, seeing
@@ -185,7 +1005,7 @@ fi])])
# _AM_DEPENDENCIES(NAME)
# ----------------------
# See how the compiler implements dependency checking.
-# NAME is "CC", "CXX", "GCJ", or "OBJC".
+# NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GCJ".
# We try a few techniques and use that to set a single cache variable.
#
# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was
@@ -198,12 +1018,13 @@ AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl
AC_REQUIRE([AM_MAKE_INCLUDE])dnl
AC_REQUIRE([AM_DEP_TRACK])dnl
-ifelse([$1], CC, [depcc="$CC" am_compiler_list=],
- [$1], CXX, [depcc="$CXX" am_compiler_list=],
- [$1], OBJC, [depcc="$OBJC" am_compiler_list='gcc3 gcc'],
- [$1], UPC, [depcc="$UPC" am_compiler_list=],
- [$1], GCJ, [depcc="$GCJ" am_compiler_list='gcc3 gcc'],
- [depcc="$$1" am_compiler_list=])
+m4_if([$1], [CC], [depcc="$CC" am_compiler_list=],
+ [$1], [CXX], [depcc="$CXX" am_compiler_list=],
+ [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'],
+ [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'],
+ [$1], [UPC], [depcc="$UPC" am_compiler_list=],
+ [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'],
+ [depcc="$$1" am_compiler_list=])
AC_CACHE_CHECK([dependency style of $depcc],
[am_cv_$1_dependencies_compiler_type],
@@ -211,8 +1032,9 @@ AC_CACHE_CHECK([dependency style of $depcc],
# We make a subdir and do the tests there. Otherwise we can end up
# making bogus files that we don't know about and never remove. For
# instance it was reported that on HP-UX the gcc test will end up
- # making a dummy file named `D' -- because `-MD' means `put the output
- # in D'.
+ # making a dummy file named 'D' -- because '-MD' means "put the output
+ # in D".
+ rm -rf conftest.dir
mkdir conftest.dir
# Copy depcomp to subdir because otherwise we won't find it if we're
# using a relative directory.
@@ -251,16 +1073,16 @@ AC_CACHE_CHECK([dependency style of $depcc],
: > sub/conftest.c
for i in 1 2 3 4 5 6; do
echo '#include "conftst'$i'.h"' >> sub/conftest.c
- # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
- # Solaris 8's {/usr,}/bin/sh.
- touch sub/conftst$i.h
+ # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
+ # Solaris 10 /bin/sh.
+ echo '/* dummy */' > sub/conftst$i.h
done
echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
- # We check with `-c' and `-o' for the sake of the "dashmstdout"
+ # We check with '-c' and '-o' for the sake of the "dashmstdout"
# mode. It turns out that the SunPro C++ compiler does not properly
- # handle `-M -o', and we need to detect this. Also, some Intel
- # versions had trouble with output in subdirs
+ # handle '-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs.
am__obj=sub/conftest.${OBJEXT-o}
am__minus_obj="-o $am__obj"
case $depmode in
@@ -269,16 +1091,16 @@ AC_CACHE_CHECK([dependency style of $depcc],
test "$am__universal" = false || continue
;;
nosideeffect)
- # after this tag, mechanisms are not by side-effect, so they'll
- # only be used when explicitly requested
+ # After this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested.
if test "x$enable_dependency_tracking" = xyes; then
continue
else
break
fi
;;
- msvisualcpp | msvcmsys)
- # This compiler won't grok `-c -o', but also, the minuso test has
+ msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+ # This compiler won't grok '-c -o', but also, the minuso test has
# not run yet. These depmodes are late enough in the game, and
# so weak that their functioning should not be impacted.
am__obj=conftest.${OBJEXT-o}
@@ -326,7 +1148,7 @@ AM_CONDITIONAL([am__fastdep$1], [
# AM_SET_DEPDIR
# -------------
# Choose a directory name for dependency files.
-# This macro is AC_REQUIREd in _AM_DEPENDENCIES
+# This macro is AC_REQUIREd in _AM_DEPENDENCIES.
AC_DEFUN([AM_SET_DEPDIR],
[AC_REQUIRE([AM_SET_LEADING_DOT])dnl
AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl
@@ -336,28 +1158,34 @@ AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl
# AM_DEP_TRACK
# ------------
AC_DEFUN([AM_DEP_TRACK],
-[AC_ARG_ENABLE(dependency-tracking,
-[ --disable-dependency-tracking speeds up one-time build
- --enable-dependency-tracking do not reject slow dependency extractors])
+[AC_ARG_ENABLE([dependency-tracking], [dnl
+AS_HELP_STRING(
+ [--enable-dependency-tracking],
+ [do not reject slow dependency extractors])
+AS_HELP_STRING(
+ [--disable-dependency-tracking],
+ [speeds up one-time build])])
if test "x$enable_dependency_tracking" != xno; then
am_depcomp="$ac_aux_dir/depcomp"
AMDEPBACKSLASH='\'
+ am__nodep='_no'
fi
AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno])
AC_SUBST([AMDEPBACKSLASH])dnl
_AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl
+AC_SUBST([am__nodep])dnl
+_AM_SUBST_NOTMAKE([am__nodep])dnl
])
# Generate code to set up dependency tracking. -*- Autoconf -*-
-# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008
-# Free Software Foundation, Inc.
+# Copyright (C) 1999-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-#serial 5
+# serial 6
# _AM_OUTPUT_DEPENDENCY_COMMANDS
# ------------------------------
@@ -376,7 +1204,7 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
# Strip MF so we end up with the name of the file.
mf=`echo "$mf" | sed -e 's/:.*$//'`
# Check whether this is an Automake generated Makefile or not.
- # We used to match only the files named `Makefile.in', but
+ # We used to match only the files named 'Makefile.in', but
# some people rename them; so instead we look at the file content.
# Grep'ing the first line is not enough: some people post-process
# each Makefile.in and add a new line on top of each file to say so.
@@ -388,21 +1216,19 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
continue
fi
# Extract the definition of DEPDIR, am__include, and am__quote
- # from the Makefile without running `make'.
+ # from the Makefile without running 'make'.
DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
test -z "$DEPDIR" && continue
am__include=`sed -n 's/^am__include = //p' < "$mf"`
test -z "am__include" && continue
am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
- # When using ansi2knr, U may be empty or an underscore; expand it
- U=`sed -n 's/^U = //p' < "$mf"`
# Find all dependency output files, they are included files with
# $(DEPDIR) in their names. We invoke sed twice because it is the
# simplest approach to changing $(DEPDIR) to its actual value in the
# expansion.
for file in `sed -n "
s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
- sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do
+ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do
# Make sure the directory exists.
test -f "$dirpart/$file" && continue
fdir=`AS_DIRNAME(["$file"])`
@@ -420,7 +1246,7 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
# This macro should only be invoked once -- use via AC_REQUIRE.
#
# This code is only required when automatic dependency tracking
-# is enabled. FIXME. This creates each `.P' file that we will
+# is enabled. FIXME. This creates each '.P' file that we will
# need in order to bootstrap the dependency handling code.
AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
[AC_CONFIG_COMMANDS([depfiles],
@@ -430,14 +1256,13 @@ AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
# Do all the work for Automake. -*- Autoconf -*-
-# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
-# 2005, 2006, 2008, 2009 Free Software Foundation, Inc.
+# Copyright (C) 1996-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 16
+# serial 19
# This macro actually does too much. Some checks are only needed if
# your package does certain things. But this isn't really a big deal.
@@ -483,31 +1308,41 @@ AC_SUBST([CYGPATH_W])
# Define the identity of the package.
dnl Distinguish between old-style and new-style calls.
m4_ifval([$2],
-[m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl
+[AC_DIAGNOSE([obsolete],
+[$0: two- and three-argument forms are deprecated. For more info, see:
+http://www.gnu.org/software/automake/manual/automake.html#Modernize-AM_INIT_AUTOMAKE-invocation])
+m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl
AC_SUBST([PACKAGE], [$1])dnl
AC_SUBST([VERSION], [$2])],
[_AM_SET_OPTIONS([$1])dnl
dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT.
-m4_if(m4_ifdef([AC_PACKAGE_NAME], 1)m4_ifdef([AC_PACKAGE_VERSION], 1), 11,,
+m4_if(
+ m4_ifdef([AC_PACKAGE_NAME], [ok]):m4_ifdef([AC_PACKAGE_VERSION], [ok]),
+ [ok:ok],,
[m4_fatal([AC_INIT should be called with package and version arguments])])dnl
AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl
AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl
_AM_IF_OPTION([no-define],,
-[AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package])
- AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package])])dnl
+[AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package])
+ AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl
# Some tools Automake needs.
AC_REQUIRE([AM_SANITY_CHECK])dnl
AC_REQUIRE([AC_ARG_PROGRAM])dnl
-AM_MISSING_PROG(ACLOCAL, aclocal-${am__api_version})
-AM_MISSING_PROG(AUTOCONF, autoconf)
-AM_MISSING_PROG(AUTOMAKE, automake-${am__api_version})
-AM_MISSING_PROG(AUTOHEADER, autoheader)
-AM_MISSING_PROG(MAKEINFO, makeinfo)
+AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}])
+AM_MISSING_PROG([AUTOCONF], [autoconf])
+AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}])
+AM_MISSING_PROG([AUTOHEADER], [autoheader])
+AM_MISSING_PROG([MAKEINFO], [makeinfo])
AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl
-AC_REQUIRE([AM_PROG_MKDIR_P])dnl
+AC_REQUIRE([AC_PROG_MKDIR_P])dnl
+# For better backward compatibility. To be removed once Automake 1.9.x
+# dies out for good. For more background, see:
+# <http://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
+# <http://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
+AC_SUBST([mkdir_p], ['$(MKDIR_P)'])
# We need awk for the "check" target. The system "awk" is bad on
# some platforms.
AC_REQUIRE([AC_PROG_AWK])dnl
@@ -518,28 +1353,35 @@ _AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])],
[_AM_PROG_TAR([v7])])])
_AM_IF_OPTION([no-dependencies],,
[AC_PROVIDE_IFELSE([AC_PROG_CC],
- [_AM_DEPENDENCIES(CC)],
- [define([AC_PROG_CC],
- defn([AC_PROG_CC])[_AM_DEPENDENCIES(CC)])])dnl
+ [_AM_DEPENDENCIES([CC])],
+ [m4_define([AC_PROG_CC],
+ m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl
AC_PROVIDE_IFELSE([AC_PROG_CXX],
- [_AM_DEPENDENCIES(CXX)],
- [define([AC_PROG_CXX],
- defn([AC_PROG_CXX])[_AM_DEPENDENCIES(CXX)])])dnl
+ [_AM_DEPENDENCIES([CXX])],
+ [m4_define([AC_PROG_CXX],
+ m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl
AC_PROVIDE_IFELSE([AC_PROG_OBJC],
- [_AM_DEPENDENCIES(OBJC)],
- [define([AC_PROG_OBJC],
- defn([AC_PROG_OBJC])[_AM_DEPENDENCIES(OBJC)])])dnl
+ [_AM_DEPENDENCIES([OBJC])],
+ [m4_define([AC_PROG_OBJC],
+ m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl
+dnl Support for Objective C++ was only introduced in Autoconf 2.65,
+dnl but we still cater to Autoconf 2.62.
+m4_ifdef([AC_PROG_OBJCXX],
+[AC_PROVIDE_IFELSE([AC_PROG_OBJCXX],
+ [_AM_DEPENDENCIES([OBJCXX])],
+ [m4_define([AC_PROG_OBJCXX],
+ m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])])dnl
])
_AM_IF_OPTION([silent-rules], [AC_REQUIRE([AM_SILENT_RULES])])dnl
-dnl The `parallel-tests' driver may need to know about EXEEXT, so add the
-dnl `am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This macro
+dnl The 'parallel-tests' driver may need to know about EXEEXT, so add the
+dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This macro
dnl is hooked onto _AC_COMPILER_EXEEXT early, see below.
AC_CONFIG_COMMANDS_PRE(dnl
[m4_provide_if([_AM_COMPILER_EXEEXT],
[AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl
])
-dnl Hook into `_AC_COMPILER_EXEEXT' early to learn its expansion. Do not
+dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not
dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further
dnl mangled by Autoconf and run in a shell conditional statement.
m4_define([_AC_COMPILER_EXEEXT],
@@ -567,12 +1409,14 @@ for _am_header in $config_headers :; do
done
echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])
-# Copyright (C) 2001, 2003, 2005, 2008 Free Software Foundation, Inc.
+# Copyright (C) 2001-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
+# serial 8
+
# AM_PROG_INSTALL_SH
# ------------------
# Define $install_sh.
@@ -586,9 +1430,9 @@ if test x"${install_sh}" != xset; then
install_sh="\${SHELL} $am_aux_dir/install-sh"
esac
fi
-AC_SUBST(install_sh)])
+AC_SUBST([install_sh])])
-# Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+# Copyright (C) 2003-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -612,20 +1456,19 @@ AC_SUBST([am__leading_dot])])
# Add --enable-maintainer-mode option to configure. -*- Autoconf -*-
# From Jim Meyering
-# Copyright (C) 1996, 1998, 2000, 2001, 2002, 2003, 2004, 2005, 2008
-# Free Software Foundation, Inc.
+# Copyright (C) 1996-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 5
+# serial 7
# AM_MAINTAINER_MODE([DEFAULT-MODE])
# ----------------------------------
# Control maintainer-specific portions of Makefiles.
-# Default is to disable them, unless `enable' is passed literally.
-# For symmetry, `disable' may be passed as well. Anyway, the user
+# Default is to disable them, unless 'enable' is passed literally.
+# For symmetry, 'disable' may be passed as well. Anyway, the user
# can override the default with the --enable/--disable switch.
AC_DEFUN([AM_MAINTAINER_MODE],
[m4_case(m4_default([$1], [disable]),
@@ -633,13 +1476,14 @@ AC_DEFUN([AM_MAINTAINER_MODE],
[disable], [m4_define([am_maintainer_other], [enable])],
[m4_define([am_maintainer_other], [enable])
m4_warn([syntax], [unexpected argument to AM@&t@_MAINTAINER_MODE: $1])])
-AC_MSG_CHECKING([whether to am_maintainer_other maintainer-specific portions of Makefiles])
+AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles])
dnl maintainer-mode's default is 'disable' unless 'enable' is passed
AC_ARG_ENABLE([maintainer-mode],
-[ --][am_maintainer_other][-maintainer-mode am_maintainer_other make rules and dependencies not useful
- (and sometimes confusing) to the casual installer],
- [USE_MAINTAINER_MODE=$enableval],
- [USE_MAINTAINER_MODE=]m4_if(am_maintainer_other, [enable], [no], [yes]))
+ [AS_HELP_STRING([--]am_maintainer_other[-maintainer-mode],
+ am_maintainer_other[ make rules and dependencies not useful
+ (and sometimes confusing) to the casual installer])],
+ [USE_MAINTAINER_MODE=$enableval],
+ [USE_MAINTAINER_MODE=]m4_if(am_maintainer_other, [enable], [no], [yes]))
AC_MSG_RESULT([$USE_MAINTAINER_MODE])
AM_CONDITIONAL([MAINTAINER_MODE], [test $USE_MAINTAINER_MODE = yes])
MAINT=$MAINTAINER_MODE_TRUE
@@ -651,13 +1495,13 @@ AU_DEFUN([jm_MAINTAINER_MODE], [AM_MAINTAINER_MODE])
# Check to see how 'make' treats includes. -*- Autoconf -*-
-# Copyright (C) 2001, 2002, 2003, 2005, 2009 Free Software Foundation, Inc.
+# Copyright (C) 2001-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 4
+# serial 5
# AM_MAKE_INCLUDE()
# -----------------
@@ -676,7 +1520,7 @@ am__quote=
_am_result=none
# First try GNU make style include.
echo "include confinc" > confmf
-# Ignore all kinds of additional output from `make'.
+# Ignore all kinds of additional output from 'make'.
case `$am_make -s -f confmf 2> /dev/null` in #(
*the\ am__doit\ target*)
am__include=include
@@ -701,8 +1545,7 @@ AC_MSG_RESULT([$_am_result])
rm -f confinc confmf
])
-# Copyright (C) 1999, 2000, 2001, 2003, 2004, 2005, 2008
-# Free Software Foundation, Inc.
+# Copyright (C) 1999-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -738,14 +1581,13 @@ m4_define([AC_PROG_CC],
# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*-
-# Copyright (C) 1997, 1999, 2000, 2001, 2003, 2004, 2005, 2008
-# Free Software Foundation, Inc.
+# Copyright (C) 1997-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 6
+# serial 7
# AM_MISSING_PROG(NAME, PROGRAM)
# ------------------------------
@@ -775,45 +1617,19 @@ if eval "$MISSING --run true"; then
am_missing_run="$MISSING --run "
else
am_missing_run=
- AC_MSG_WARN([`missing' script is too old or missing])
+ AC_MSG_WARN(['missing' script is too old or missing])
fi
])
-# Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
-#
-# This file is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# AM_PROG_MKDIR_P
-# ---------------
-# Check for `mkdir -p'.
-AC_DEFUN([AM_PROG_MKDIR_P],
-[AC_PREREQ([2.60])dnl
-AC_REQUIRE([AC_PROG_MKDIR_P])dnl
-dnl Automake 1.8 to 1.9.6 used to define mkdir_p. We now use MKDIR_P,
-dnl while keeping a definition of mkdir_p for backward compatibility.
-dnl @MKDIR_P@ is magic: AC_OUTPUT adjusts its value for each Makefile.
-dnl However we cannot define mkdir_p as $(MKDIR_P) for the sake of
-dnl Makefile.ins that do not define MKDIR_P, so we do our own
-dnl adjustment using top_builddir (which is defined more often than
-dnl MKDIR_P).
-AC_SUBST([mkdir_p], ["$MKDIR_P"])dnl
-case $mkdir_p in
- [[\\/$]]* | ?:[[\\/]]*) ;;
- */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;;
-esac
-])
-
# Helper functions for option handling. -*- Autoconf -*-
-# Copyright (C) 2001, 2002, 2003, 2005, 2008 Free Software Foundation, Inc.
+# Copyright (C) 2001-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 4
+# serial 6
# _AM_MANGLE_OPTION(NAME)
# -----------------------
@@ -821,13 +1637,13 @@ AC_DEFUN([_AM_MANGLE_OPTION],
[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])])
# _AM_SET_OPTION(NAME)
-# ------------------------------
+# --------------------
# Set option NAME. Presently that only means defining a flag for this option.
AC_DEFUN([_AM_SET_OPTION],
-[m4_define(_AM_MANGLE_OPTION([$1]), 1)])
+[m4_define(_AM_MANGLE_OPTION([$1]), [1])])
# _AM_SET_OPTIONS(OPTIONS)
-# ----------------------------------
+# ------------------------
# OPTIONS is a space-separated list of Automake options.
AC_DEFUN([_AM_SET_OPTIONS],
[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])])
@@ -840,22 +1656,18 @@ AC_DEFUN([_AM_IF_OPTION],
# Check to make sure that the build environment is sane. -*- Autoconf -*-
-# Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005, 2008
-# Free Software Foundation, Inc.
+# Copyright (C) 1996-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 5
+# serial 9
# AM_SANITY_CHECK
# ---------------
AC_DEFUN([AM_SANITY_CHECK],
[AC_MSG_CHECKING([whether build environment is sane])
-# Just in case
-sleep 1
-echo timestamp > conftest.file
# Reject unsafe characters in $srcdir or the absolute working directory
# name. Accept space and tab only in the latter.
am_lf='
@@ -866,32 +1678,40 @@ case `pwd` in
esac
case $srcdir in
*[[\\\"\#\$\&\'\`$am_lf\ \ ]]*)
- AC_MSG_ERROR([unsafe srcdir value: `$srcdir']);;
+ AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);;
esac
-# Do `set' in a subshell so we don't clobber the current shell's
+# Do 'set' in a subshell so we don't clobber the current shell's
# arguments. Must try -L first in case configure is actually a
# symlink; some systems play weird games with the mod time of symlinks
# (eg FreeBSD returns the mod time of the symlink's containing
# directory).
if (
- set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
- if test "$[*]" = "X"; then
- # -L didn't work.
- set X `ls -t "$srcdir/configure" conftest.file`
- fi
- rm -f conftest.file
- if test "$[*]" != "X $srcdir/configure conftest.file" \
- && test "$[*]" != "X conftest.file $srcdir/configure"; then
-
- # If neither matched, then we have a broken ls. This can happen
- # if, for instance, CONFIG_SHELL is bash and it inherits a
- # broken ls alias from the environment. This has actually
- # happened. Such a system could not be considered "sane".
- AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken
-alias in your environment])
- fi
-
+ am_has_slept=no
+ for am_try in 1 2; do
+ echo "timestamp, slept: $am_has_slept" > conftest.file
+ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+ if test "$[*]" = "X"; then
+ # -L didn't work.
+ set X `ls -t "$srcdir/configure" conftest.file`
+ fi
+ if test "$[*]" != "X $srcdir/configure conftest.file" \
+ && test "$[*]" != "X conftest.file $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken
+ alias in your environment])
+ fi
+ if test "$[2]" = conftest.file || test $am_try -eq 2; then
+ break
+ fi
+ # Just in case.
+ sleep 1
+ am_has_slept=yes
+ done
test "$[2]" = conftest.file
)
then
@@ -901,43 +1721,61 @@ else
AC_MSG_ERROR([newly created file is older than distributed files!
Check your system clock])
fi
-AC_MSG_RESULT(yes)])
+AC_MSG_RESULT([yes])
+# If we didn't sleep, we still need to ensure time stamps of config.status and
+# generated files are strictly newer.
+am_sleep_pid=
+if grep 'slept: no' conftest.file >/dev/null 2>&1; then
+ ( sleep 1 ) &
+ am_sleep_pid=$!
+fi
+AC_CONFIG_COMMANDS_PRE(
+ [AC_MSG_CHECKING([that generated files are newer than configure])
+ if test -n "$am_sleep_pid"; then
+ # Hide warnings about reused PIDs.
+ wait $am_sleep_pid 2>/dev/null
+ fi
+ AC_MSG_RESULT([done])])
+rm -f conftest.file
+])
-# Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc.
+# Copyright (C) 2001-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
+# serial 2
+
# AM_PROG_INSTALL_STRIP
# ---------------------
-# One issue with vendor `install' (even GNU) is that you can't
+# One issue with vendor 'install' (even GNU) is that you can't
# specify the program used to strip binaries. This is especially
# annoying in cross-compiling environments, where the build's strip
# is unlikely to handle the host's binaries.
# Fortunately install-sh will honor a STRIPPROG variable, so we
-# always use install-sh in `make install-strip', and initialize
+# always use install-sh in "make install-strip", and initialize
# STRIPPROG with the value of the STRIP variable (set by the user).
AC_DEFUN([AM_PROG_INSTALL_STRIP],
[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
-# Installed binaries are usually stripped using `strip' when the user
-# run `make install-strip'. However `strip' might not be the right
+# Installed binaries are usually stripped using 'strip' when the user
+# run "make install-strip". However 'strip' might not be the right
# tool to use in cross-compilation environments, therefore Automake
-# will honor the `STRIP' environment variable to overrule this program.
-dnl Don't test for $cross_compiling = yes, because it might be `maybe'.
+# will honor the 'STRIP' environment variable to overrule this program.
+dnl Don't test for $cross_compiling = yes, because it might be 'maybe'.
if test "$cross_compiling" != no; then
AC_CHECK_TOOL([STRIP], [strip], :)
fi
INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
AC_SUBST([INSTALL_STRIP_PROGRAM])])
-# Copyright (C) 2006, 2008 Free Software Foundation, Inc.
+# Copyright (C) 2006-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 2
+# serial 3
# _AM_SUBST_NOTMAKE(VARIABLE)
# ---------------------------
@@ -946,24 +1784,24 @@ AC_SUBST([INSTALL_STRIP_PROGRAM])])
AC_DEFUN([_AM_SUBST_NOTMAKE])
# AM_SUBST_NOTMAKE(VARIABLE)
-# ---------------------------
+# --------------------------
# Public sister of _AM_SUBST_NOTMAKE.
AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
# Check how to create a tarball. -*- Autoconf -*-
-# Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+# Copyright (C) 2004-2012 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
-# serial 2
+# serial 3
# _AM_PROG_TAR(FORMAT)
# --------------------
# Check how to create a tarball in format FORMAT.
-# FORMAT should be one of `v7', `ustar', or `pax'.
+# FORMAT should be one of 'v7', 'ustar', or 'pax'.
#
# Substitute a variable $(am__tar) that is a command
# writing to stdout a FORMAT-tarball containing the directory
@@ -974,10 +1812,11 @@ AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
# a tarball read from stdin.
# $(am__untar) < result.tar
AC_DEFUN([_AM_PROG_TAR],
-[# Always define AMTAR for backward compatibility.
-AM_MISSING_PROG([AMTAR], [tar])
+[# Always define AMTAR for backward compatibility. Yes, it's still used
+# in the wild :-( We should find a proper way to deprecate it ...
+AC_SUBST([AMTAR], ['$${TAR-tar}'])
m4_if([$1], [v7],
- [am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -'],
+ [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'],
[m4_case([$1], [ustar],, [pax],,
[m4_fatal([Unknown tar format])])
AC_MSG_CHECKING([how to create a $1 tar archive])
@@ -985,7 +1824,7 @@ AC_MSG_CHECKING([how to create a $1 tar archive])
_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none'
_am_tools=${am_cv_prog_tar_$1-$_am_tools}
# Do not fold the above two line into one, because Tru64 sh and
-# Solaris sh will not grok spaces in the rhs of `-'.
+# Solaris sh will not grok spaces in the rhs of '-'.
for _am_tool in $_am_tools
do
case $_am_tool in
@@ -1046,6 +1885,16 @@ AC_SUBST([am__tar])
AC_SUBST([am__untar])
]) # _AM_PROG_TAR
+m4_include([m4/asmcfi.m4])
+m4_include([m4/ax_append_flag.m4])
+m4_include([m4/ax_cc_maxopt.m4])
+m4_include([m4/ax_cflags_warn_all.m4])
+m4_include([m4/ax_check_compile_flag.m4])
+m4_include([m4/ax_compiler_vendor.m4])
+m4_include([m4/ax_configure_args.m4])
+m4_include([m4/ax_enable_builddir.m4])
+m4_include([m4/ax_gcc_archflag.m4])
+m4_include([m4/ax_gcc_x86_cpuid.m4])
m4_include([m4/libtool.m4])
m4_include([m4/ltoptions.m4])
m4_include([m4/ltsugar.m4])
diff --git a/Modules/_ctypes/libffi/build-ios.sh b/Modules/_ctypes/libffi/build-ios.sh
new file mode 100755
index 0000000..3dea242
--- /dev/null
+++ b/Modules/_ctypes/libffi/build-ios.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+
+PLATFORM_IOS=/Developer/Platforms/iPhoneOS.platform/
+PLATFORM_IOS_SIM=/Developer/Platforms/iPhoneSimulator.platform/
+SDK_IOS_VERSION="4.2"
+MIN_IOS_VERSION="3.0"
+OUTPUT_DIR="universal-ios"
+
+build_target () {
+ local platform=$1
+ local sdk=$2
+ local arch=$3
+ local triple=$4
+ local builddir=$5
+
+ mkdir -p "${builddir}"
+ pushd "${builddir}"
+ export CC="${platform}"/Developer/usr/bin/gcc-4.2
+ export CFLAGS="-arch ${arch} -isysroot ${sdk} -miphoneos-version-min=${MIN_IOS_VERSION}"
+ ../configure --host=${triple} && make
+ popd
+}
+
+# Build all targets
+build_target "${PLATFORM_IOS}" "${PLATFORM_IOS}/Developer/SDKs/iPhoneOS${SDK_IOS_VERSION}.sdk/" armv6 arm-apple-darwin10 armv6-ios
+build_target "${PLATFORM_IOS}" "${PLATFORM_IOS}/Developer/SDKs/iPhoneOS${SDK_IOS_VERSION}.sdk/" armv7 arm-apple-darwin10 armv7-ios
+build_target "${PLATFORM_IOS_SIM}" "${PLATFORM_IOS_SIM}/Developer/SDKs/iPhoneSimulator${SDK_IOS_VERSION}.sdk/" i386 i386-apple-darwin10 i386-ios-sim
+
+# Create universal output directories
+mkdir -p "${OUTPUT_DIR}"
+mkdir -p "${OUTPUT_DIR}/include"
+mkdir -p "${OUTPUT_DIR}/include/armv6"
+mkdir -p "${OUTPUT_DIR}/include/armv7"
+mkdir -p "${OUTPUT_DIR}/include/i386"
+
+# Create the universal binary
+lipo -create armv6-ios/.libs/libffi.a armv7-ios/.libs/libffi.a i386-ios-sim/.libs/libffi.a -output "${OUTPUT_DIR}/libffi.a"
+
+# Copy in the headers
+copy_headers () {
+ local src=$1
+ local dest=$2
+
+ # Fix non-relative header reference
+ sed 's/<ffitarget.h>/"ffitarget.h"/' < "${src}/include/ffi.h" > "${dest}/ffi.h"
+ cp "${src}/include/ffitarget.h" "${dest}"
+}
+
+copy_headers armv6-ios "${OUTPUT_DIR}/include/armv6"
+copy_headers armv7-ios "${OUTPUT_DIR}/include/armv7"
+copy_headers i386-ios-sim "${OUTPUT_DIR}/include/i386"
+
+# Create top-level header
+(
+cat << EOF
+#ifdef __arm__
+ #include <arm/arch.h>
+ #ifdef _ARM_ARCH_6
+ #include "include/armv6/ffi.h"
+ #elif _ARM_ARCH_7
+ #include "include/armv7/ffi.h"
+ #endif
+#elif defined(__i386__)
+ #include "include/i386/ffi.h"
+#endif
+EOF
+) > "${OUTPUT_DIR}/ffi.h"
diff --git a/Modules/_ctypes/libffi/compile b/Modules/_ctypes/libffi/compile
index 1b1d232..c0096a7 100755
--- a/Modules/_ctypes/libffi/compile
+++ b/Modules/_ctypes/libffi/compile
@@ -1,9 +1,10 @@
#! /bin/sh
# Wrapper for compilers which do not understand `-c -o'.
-scriptversion=2005-05-14.22
+scriptversion=2009-10-06.20; # UTC
-# Copyright (C) 1999, 2000, 2003, 2004, 2005 Free Software Foundation, Inc.
+# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2009 Free Software
+# Foundation, Inc.
# Written by Tom Tromey <tromey@cygnus.com>.
#
# This program is free software; you can redistribute it and/or modify
@@ -17,8 +18,7 @@ scriptversion=2005-05-14.22
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
@@ -103,13 +103,13 @@ if test -z "$ofile" || test -z "$cfile"; then
fi
# Name of file we expect compiler to create.
-cofile=`echo "$cfile" | sed -e 's|^.*/||' -e 's/\.c$/.o/'`
+cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'`
# Create the lock directory.
-# Note: use `[/.-]' here to ensure that we don't use the same name
+# Note: use `[/\\:.-]' here to ensure that we don't use the same name
# that we are using for the .o file. Also, base the name on the expected
# object file name, since that is what matters with a parallel build.
-lockdir=`echo "$cofile" | sed -e 's|[/.-]|_|g'`.d
+lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d
while true; do
if mkdir "$lockdir" >/dev/null 2>&1; then
break
@@ -124,9 +124,9 @@ trap "rmdir '$lockdir'; exit 1" 1 2 15
ret=$?
if test -f "$cofile"; then
- mv "$cofile" "$ofile"
+ test "$cofile" = "$ofile" || mv "$cofile" "$ofile"
elif test -f "${cofile}bj"; then
- mv "${cofile}bj" "$ofile"
+ test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile"
fi
rmdir "$lockdir"
@@ -138,5 +138,6 @@ exit $ret
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
-# time-stamp-end: "$"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
# End:
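Annotation (not part of the patch): the compile-wrapper update mainly makes the script safe for DOS-style paths — the object-name sed now strips backslash directories and drive letters, the lock-directory name maps backslashes and colons as well, and the final mv is skipped when the compiler already wrote the object under its target name. For context, automake substitutes this wrapper only for compilers that cannot combine -c with -o; invocation looks roughly like:

    ./compile cc -c -o sub/foo.o foo.c    # runs `cc -c foo.c`, then moves foo.o to sub/foo.o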
diff --git a/Modules/_ctypes/libffi/config.guess b/Modules/_ctypes/libffi/config.guess
index d53e309..1804e9f 100755
--- a/Modules/_ctypes/libffi/config.guess
+++ b/Modules/_ctypes/libffi/config.guess
@@ -1,14 +1,14 @@
#! /bin/sh
# Attempt to guess a canonical system name.
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
-# Free Software Foundation, Inc.
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013 Free Software Foundation, Inc.
-timestamp='2009-11-19'
+timestamp='2012-12-29'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
+# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
@@ -17,26 +17,22 @@ timestamp='2009-11-19'
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
-# 02110-1301, USA.
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-
-# Originally written by Per Bothner. Please send patches (context
-# diff format) to <config-patches@gnu.org> and include a ChangeLog
-# entry.
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
#
-# This script attempts to guess a canonical system name similar to
-# config.sub. If it succeeds, it prints the system name on stdout, and
-# exits with 0. Otherwise, it exits with 1.
+# Originally written by Per Bothner.
#
# You can get the latest version of this script from:
# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+#
+# Please send patches with a ChangeLog entry to config-patches@gnu.org.
+
me=`echo "$0" | sed -e 's,.*/,,'`
@@ -56,8 +52,9 @@ version="\
GNU config.guess ($timestamp)
Originally written by Per Bothner.
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
-2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
+2012, 2013 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -144,7 +141,7 @@ UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
*:NetBSD:*:*)
# NetBSD (nbsd) targets should (where applicable) match one or
- # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
# *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
# switched to ELF, *-*-netbsd* would select the old
# object file format. This provides both forward
@@ -180,7 +177,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
fi
;;
*)
- os=netbsd
+ os=netbsd
;;
esac
# The OS release
@@ -201,6 +198,10 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
echo "${machine}-${os}${release}"
exit ;;
+ *:Bitrig:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE}
+ exit ;;
*:OpenBSD:*:*)
UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
@@ -223,7 +224,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
;;
*5.*)
- UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
;;
esac
# According to Compaq, /usr/sbin/psrinfo has been available on
@@ -269,7 +270,10 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
# A Xn.n version is an unreleased experimental baselevel.
# 1.2 uses "1.2" for uname -r.
echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- exit ;;
+ # Reset EXIT trap before exiting to avoid spurious non-zero exit code.
+ exitcode=$?
+ trap '' 0
+ exit $exitcode ;;
Alpha\ *:Windows_NT*:*)
# How do we know it's Interix rather than the generic POSIX subsystem?
# Should we change UNAME_MACHINE based on the output of uname instead
@@ -295,12 +299,12 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
echo s390-ibm-zvmoe
exit ;;
*:OS400:*:*)
- echo powerpc-ibm-os400
+ echo powerpc-ibm-os400
exit ;;
arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
echo arm-acorn-riscix${UNAME_RELEASE}
exit ;;
- arm:riscos:*:*|arm:RISCOS:*:*)
+ arm*:riscos:*:*|arm*:RISCOS:*:*)
echo arm-unknown-riscos
exit ;;
SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
@@ -333,6 +337,9 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
exit ;;
+ i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
+ echo i386-pc-auroraux${UNAME_RELEASE}
+ exit ;;
i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
eval $set_cc_for_build
SUN_ARCH="i386"
@@ -391,23 +398,23 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
# MiNT. But MiNT is downward compatible to TOS, so this should
# be no problem.
atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
+ echo m68k-atari-mint${UNAME_RELEASE}
exit ;;
atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
+ exit ;;
*falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
+ echo m68k-atari-mint${UNAME_RELEASE}
exit ;;
milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
- echo m68k-milan-mint${UNAME_RELEASE}
- exit ;;
+ echo m68k-milan-mint${UNAME_RELEASE}
+ exit ;;
hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
- echo m68k-hades-mint${UNAME_RELEASE}
- exit ;;
+ echo m68k-hades-mint${UNAME_RELEASE}
+ exit ;;
*:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
- echo m68k-unknown-mint${UNAME_RELEASE}
- exit ;;
+ echo m68k-unknown-mint${UNAME_RELEASE}
+ exit ;;
m68k:machten:*:*)
echo m68k-apple-machten${UNAME_RELEASE}
exit ;;
@@ -477,8 +484,8 @@ EOF
echo m88k-motorola-sysv3
exit ;;
AViiON:dgux:*:*)
- # DG/UX returns AViiON for all architectures
- UNAME_PROCESSOR=`/usr/bin/uname -p`
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
then
if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
@@ -491,7 +498,7 @@ EOF
else
echo i586-dg-dgux${UNAME_RELEASE}
fi
- exit ;;
+ exit ;;
M88*:DolphinOS:*:*) # DolphinOS (SVR3)
echo m88k-dolphin-sysv3
exit ;;
@@ -548,7 +555,7 @@ EOF
echo rs6000-ibm-aix3.2
fi
exit ;;
- *:AIX:*:[456])
+ *:AIX:*:[4567])
IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
IBM_ARCH=rs6000
@@ -591,52 +598,52 @@ EOF
9000/[678][0-9][0-9])
if [ -x /usr/bin/getconf ]; then
sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
- sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
- case "${sc_cpu_version}" in
- 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
- 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
- 532) # CPU_PA_RISC2_0
- case "${sc_kernel_bits}" in
- 32) HP_ARCH="hppa2.0n" ;;
- 64) HP_ARCH="hppa2.0w" ;;
+ sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+ case "${sc_cpu_version}" in
+ 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "${sc_kernel_bits}" in
+ 32) HP_ARCH="hppa2.0n" ;;
+ 64) HP_ARCH="hppa2.0w" ;;
'') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
- esac ;;
- esac
+ esac ;;
+ esac
fi
if [ "${HP_ARCH}" = "" ]; then
eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
+ sed 's/^ //' << EOF >$dummy.c
- #define _HPUX_SOURCE
- #include <stdlib.h>
- #include <unistd.h>
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
- int main ()
- {
- #if defined(_SC_KERNEL_BITS)
- long bits = sysconf(_SC_KERNEL_BITS);
- #endif
- long cpu = sysconf (_SC_CPU_VERSION);
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
- switch (cpu)
- {
- case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
- case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
- case CPU_PA_RISC2_0:
- #if defined(_SC_KERNEL_BITS)
- switch (bits)
- {
- case 64: puts ("hppa2.0w"); break;
- case 32: puts ("hppa2.0n"); break;
- default: puts ("hppa2.0"); break;
- } break;
- #else /* !defined(_SC_KERNEL_BITS) */
- puts ("hppa2.0"); break;
- #endif
- default: puts ("hppa1.0"); break;
- }
- exit (0);
- }
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
EOF
(CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
test -z "$HP_ARCH" && HP_ARCH=hppa
@@ -727,22 +734,22 @@ EOF
exit ;;
C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
echo c1-convex-bsd
- exit ;;
+ exit ;;
C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
if getsysinfo -f scalar_acc
then echo c32-convex-bsd
else echo c2-convex-bsd
fi
- exit ;;
+ exit ;;
C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
echo c34-convex-bsd
- exit ;;
+ exit ;;
C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
echo c38-convex-bsd
- exit ;;
+ exit ;;
C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
echo c4-convex-bsd
- exit ;;
+ exit ;;
CRAY*Y-MP:*:*:*)
echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
exit ;;
@@ -766,14 +773,14 @@ EOF
exit ;;
F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
- echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
- exit ;;
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
5000:UNIX_System_V:4.*:*)
- FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
- echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+ echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
exit ;;
i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
@@ -785,30 +792,35 @@ EOF
echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
exit ;;
*:FreeBSD:*:*)
- case ${UNAME_MACHINE} in
- pc98)
- echo i386-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ case ${UNAME_PROCESSOR} in
amd64)
echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
*)
- echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
esac
exit ;;
i*:CYGWIN*:*)
echo ${UNAME_MACHINE}-pc-cygwin
exit ;;
+ *:MINGW64*:*)
+ echo ${UNAME_MACHINE}-pc-mingw64
+ exit ;;
*:MINGW*:*)
echo ${UNAME_MACHINE}-pc-mingw32
exit ;;
+ i*:MSYS*:*)
+ echo ${UNAME_MACHINE}-pc-msys
+ exit ;;
i*:windows32*:*)
- # uname -m includes "-pc" on this system.
- echo ${UNAME_MACHINE}-mingw32
+ # uname -m includes "-pc" on this system.
+ echo ${UNAME_MACHINE}-mingw32
exit ;;
i*:PW*:*)
echo ${UNAME_MACHINE}-pc-pw32
exit ;;
*:Interix*:*)
- case ${UNAME_MACHINE} in
+ case ${UNAME_MACHINE} in
x86)
echo i586-pc-interix${UNAME_RELEASE}
exit ;;
@@ -854,6 +866,13 @@ EOF
i*86:Minix:*:*)
echo ${UNAME_MACHINE}-pc-minix
exit ;;
+ aarch64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ aarch64_be:Linux:*:*)
+ UNAME_MACHINE=aarch64_be
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
alpha:Linux:*:*)
case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
EV5) UNAME_MACHINE=alphaev5 ;;
@@ -863,7 +882,7 @@ EOF
EV6) UNAME_MACHINE=alphaev6 ;;
EV67) UNAME_MACHINE=alphaev67 ;;
EV68*) UNAME_MACHINE=alphaev68 ;;
- esac
+ esac
objdump --private-headers /bin/sh | grep -q ld.so.1
if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
@@ -875,20 +894,29 @@ EOF
then
echo ${UNAME_MACHINE}-unknown-linux-gnu
else
- echo ${UNAME_MACHINE}-unknown-linux-gnueabi
+ if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_PCS_VFP
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-gnueabi
+ else
+ echo ${UNAME_MACHINE}-unknown-linux-gnueabihf
+ fi
fi
exit ;;
avr32*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
cris:Linux:*:*)
- echo cris-axis-linux-gnu
+ echo ${UNAME_MACHINE}-axis-linux-gnu
exit ;;
crisv32:Linux:*:*)
- echo crisv32-axis-linux-gnu
+ echo ${UNAME_MACHINE}-axis-linux-gnu
exit ;;
frv:Linux:*:*)
- echo frv-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ hexagon:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
i*86:Linux:*:*)
LIBC=gnu
@@ -930,7 +958,7 @@ EOF
test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
;;
or32:Linux:*:*)
- echo or32-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
padre:Linux:*:*)
echo sparc-unknown-linux-gnu
@@ -956,7 +984,7 @@ EOF
echo ${UNAME_MACHINE}-ibm-linux
exit ;;
sh64*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
sh*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
@@ -964,14 +992,17 @@ EOF
sparc:Linux:*:* | sparc64:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
+ tile*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
vax:Linux:*:*)
echo ${UNAME_MACHINE}-dec-linux-gnu
exit ;;
x86_64:Linux:*:*)
- echo x86_64-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
xtensa*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
i*86:DYNIX/ptx:4*:*)
# ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
@@ -980,11 +1011,11 @@ EOF
echo i386-sequent-sysv4
exit ;;
i*86:UNIX_SV:4.2MP:2.*)
- # Unixware is an offshoot of SVR4, but it has its own version
- # number series starting with 2...
- # I am not positive that other SVR4 systems won't match this,
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
# I just have to hope. -- rms.
- # Use sysv4.2uw... so that sysv4* matches it.
+ # Use sysv4.2uw... so that sysv4* matches it.
echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
exit ;;
i*86:OS/2:*:*)
@@ -1016,7 +1047,7 @@ EOF
fi
exit ;;
i*86:*:5:[678]*)
- # UnixWare 7.x, OpenUNIX and OpenServer 6.
+ # UnixWare 7.x, OpenUNIX and OpenServer 6.
case `/bin/uname -X | grep "^Machine"` in
*486*) UNAME_MACHINE=i486 ;;
*Pentium) UNAME_MACHINE=i586 ;;
@@ -1044,13 +1075,13 @@ EOF
exit ;;
pc:*:*:*)
# Left here for compatibility:
- # uname -m prints for DJGPP always 'pc', but it prints nothing about
- # the processor, so we play safe by assuming i586.
+ # uname -m prints for DJGPP always 'pc', but it prints nothing about
+ # the processor, so we play safe by assuming i586.
# Note: whatever this is, it MUST be the same as what config.sub
# prints for the "djgpp" host, or else GDB configury will decide that
# this is a cross-build.
echo i586-pc-msdosdjgpp
- exit ;;
+ exit ;;
Intel:Mach:3*:*)
echo i386-pc-mach3
exit ;;
@@ -1085,8 +1116,8 @@ EOF
/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
&& { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4; exit; } ;;
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4; exit; } ;;
NCR*:*:4.2:* | MPRAS*:*:4.2:*)
OS_REL='.3'
test -r /etc/.relid \
@@ -1129,10 +1160,10 @@ EOF
echo ns32k-sni-sysv
fi
exit ;;
- PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
- # says <Richard.M.Bartel@ccMail.Census.GOV>
- echo i586-unisys-sysv4
- exit ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit ;;
*:UNIX_System_V:4*:FTX*)
# From Gerald Hewes <hewes@openmarket.com>.
# How about differentiating between stratus architectures? -djm
@@ -1158,11 +1189,11 @@ EOF
exit ;;
R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
if [ -d /usr/nec ]; then
- echo mips-nec-sysv${UNAME_RELEASE}
+ echo mips-nec-sysv${UNAME_RELEASE}
else
- echo mips-unknown-sysv${UNAME_RELEASE}
+ echo mips-unknown-sysv${UNAME_RELEASE}
fi
- exit ;;
+ exit ;;
BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
echo powerpc-be-beos
exit ;;
@@ -1175,6 +1206,9 @@ EOF
BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
echo i586-pc-haiku
exit ;;
+ x86_64:Haiku:*:*)
+ echo x86_64-unknown-haiku
+ exit ;;
SX-4:SUPER-UX:*:*)
echo sx4-nec-superux${UNAME_RELEASE}
exit ;;
@@ -1227,7 +1261,10 @@ EOF
*:QNX:*:4*)
echo i386-pc-qnx
exit ;;
- NSE-?:NONSTOP_KERNEL:*:*)
+ NEO-?:NONSTOP_KERNEL:*:*)
+ echo neo-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSE-*:NONSTOP_KERNEL:*:*)
echo nse-tandem-nsk${UNAME_RELEASE}
exit ;;
NSR-?:NONSTOP_KERNEL:*:*)
@@ -1272,13 +1309,13 @@ EOF
echo pdp10-unknown-its
exit ;;
SEI:*:*:SEIUX)
- echo mips-sei-seiux${UNAME_RELEASE}
+ echo mips-sei-seiux${UNAME_RELEASE}
exit ;;
*:DragonFly:*:*)
echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
exit ;;
*:*VMS:*:*)
- UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
case "${UNAME_MACHINE}" in
A*) echo alpha-dec-vms ; exit ;;
I*) echo ia64-dec-vms ; exit ;;
@@ -1296,11 +1333,11 @@ EOF
i*86:AROS:*:*)
echo ${UNAME_MACHINE}-pc-aros
exit ;;
+ x86_64:VMkernel:*:*)
+ echo ${UNAME_MACHINE}-unknown-esx
+ exit ;;
esac
-#echo '(No uname command or uname output not recognized.)' 1>&2
-#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
-
eval $set_cc_for_build
cat >$dummy.c <<EOF
#ifdef _SEQUENT_
@@ -1318,11 +1355,11 @@ main ()
#include <sys/param.h>
printf ("m68k-sony-newsos%s\n",
#ifdef NEWSOS4
- "4"
+ "4"
#else
- ""
+ ""
#endif
- ); exit (0);
+ ); exit (0);
#endif
#endif
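Annotation (not part of the patch): besides new targets (aarch64, Bitrig, MINGW64/MSYS, x86_64 Haiku, VMkernel/ESX), the config.guess refresh above teaches the arm*:Linux case to distinguish the soft-float and hard-float EABIs by asking the build compiler whether __ARM_PCS_VFP is defined: the token survives preprocessing only when it is undefined. Roughly the same probe, runnable by hand:

    echo __ARM_PCS_VFP | ${CC:-cc} -E - | grep -q __ARM_PCS_VFP \
        && echo gnueabi || echo gnueabihf   # token survives -> soft-float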
diff --git a/Modules/_ctypes/libffi/config.sub b/Modules/_ctypes/libffi/config.sub
index 17c9145..802a224 100755
--- a/Modules/_ctypes/libffi/config.sub
+++ b/Modules/_ctypes/libffi/config.sub
@@ -1,38 +1,33 @@
#! /bin/sh
# Configuration validation subroutine script.
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
-# Free Software Foundation, Inc.
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013 Free Software Foundation, Inc.
-timestamp='2009-11-07'
+timestamp='2012-12-29'
-# This file is (in principle) common to ALL GNU software.
-# The presence of a machine in this file suggests that SOME GNU software
-# can handle that machine. It does not imply ALL GNU software can.
-#
-# This file is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
-# 02110-1301, USA.
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
-# Please send patches to <config-patches@gnu.org>. Submit a context
-# diff and a properly formatted GNU ChangeLog entry.
+# Please send patches with a ChangeLog entry to config-patches@gnu.org.
#
# Configuration subroutine to validate and canonicalize a configuration type.
# Supply the specified configuration type as an argument.
@@ -75,8 +70,9 @@ Report bugs and patches to <config-patches@gnu.org>."
version="\
GNU config.sub ($timestamp)
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
-2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
+2012, 2013 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -123,13 +119,18 @@ esac
# Here we must recognize all the valid KERNEL-OS combinations.
maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
case $maybe_os in
- nto-qnx* | linux-gnu* | linux-dietlibc | linux-newlib* | linux-uclibc* | \
- uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | \
+ nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
+ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
+ knetbsd*-gnu* | netbsd*-gnu* | \
kopensolaris*-gnu* | \
storm-chaos* | os2-emx* | rtmk-nova*)
os=-$maybe_os
basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
;;
+ android-linux)
+ os=-linux-android
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
+ ;;
*)
basic_machine=`echo $1 | sed 's/-[^-]*$//'`
if [ $basic_machine != $1 ]
@@ -152,12 +153,12 @@ case $os in
-convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
-c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
-harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
- -apple | -axis | -knuth | -cray | -microblaze)
+ -apple | -axis | -knuth | -cray | -microblaze*)
os=
basic_machine=$1
;;
- -bluegene*)
- os=-cnk
+ -bluegene*)
+ os=-cnk
;;
-sim | -cisco | -oki | -wec | -winbond)
os=
@@ -173,10 +174,10 @@ case $os in
os=-chorusos
basic_machine=$1
;;
- -chorusrdb)
- os=-chorusrdb
+ -chorusrdb)
+ os=-chorusrdb
basic_machine=$1
- ;;
+ ;;
-hiux*)
os=-hiuxwe2
;;
@@ -221,6 +222,12 @@ case $os in
-isc*)
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
;;
+ -lynx*178)
+ os=-lynxos178
+ ;;
+ -lynx*5)
+ os=-lynxos5
+ ;;
-lynx*)
os=-lynxos
;;
@@ -245,20 +252,27 @@ case $basic_machine in
# Some are omitted here because they have special meanings below.
1750a | 580 \
| a29k \
+ | aarch64 | aarch64_be \
| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
| am33_2.0 \
- | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
+ | arc \
+ | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \
+ | avr | avr32 \
+ | be32 | be64 \
| bfin \
| c4x | clipper \
| d10v | d30v | dlx | dsp16xx \
+ | epiphany \
| fido | fr30 | frv \
| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+ | hexagon \
| i370 | i860 | i960 | ia64 \
| ip2k | iq2000 \
+ | le32 | le64 \
| lm32 \
| m32c | m32r | m32rle | m68000 | m68k | m88k \
- | maxq | mb | microblaze | mcore | mep | metag \
+ | maxq | mb | microblaze | microblazeel | mcore | mep | metag \
| mips | mipsbe | mipseb | mipsel | mipsle \
| mips16 \
| mips64 | mips64el \
@@ -281,29 +295,39 @@ case $basic_machine in
| moxie \
| mt \
| msp430 \
+ | nds32 | nds32le | nds32be \
| nios | nios2 \
| ns16k | ns32k \
+ | open8 \
| or32 \
| pdp10 | pdp11 | pj | pjl \
- | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
+ | powerpc | powerpc64 | powerpc64le | powerpcle \
| pyramid \
- | rx \
+ | rl78 | rx \
| score \
| sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
| sh64 | sh64le \
| sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
| sparcv8 | sparcv9 | sparcv9b | sparcv9v \
- | spu | strongarm \
- | tahoe | thumb | tic4x | tic80 | tron \
+ | spu \
+ | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
| ubicom32 \
- | v850 | v850e \
+ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
| we32k \
- | x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \
+ | x86 | xc16x | xstormy16 | xtensa \
| z8k | z80)
basic_machine=$basic_machine-unknown
;;
- m6811 | m68hc11 | m6812 | m68hc12 | picochip)
- # Motorola 68HC11/12.
+ c54x)
+ basic_machine=tic54x-unknown
+ ;;
+ c55x)
+ basic_machine=tic55x-unknown
+ ;;
+ c6x)
+ basic_machine=tic6x-unknown
+ ;;
+ m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip)
basic_machine=$basic_machine-unknown
os=-none
;;
@@ -313,6 +337,21 @@ case $basic_machine in
basic_machine=mt-unknown
;;
+ strongarm | thumb | xscale)
+ basic_machine=arm-unknown
+ ;;
+ xgate)
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ xscaleeb)
+ basic_machine=armeb-unknown
+ ;;
+
+ xscaleel)
+ basic_machine=armel-unknown
+ ;;
+
# We use `pc' rather than `unknown'
# because (1) that's what they normally are, and
# (2) the word "unknown" tends to confuse beginning users.
@@ -327,25 +366,30 @@ case $basic_machine in
# Recognize the basic CPU types with company name.
580-* \
| a29k-* \
+ | aarch64-* | aarch64_be-* \
| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
| alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
| arm-* | armbe-* | armle-* | armeb-* | armv*-* \
| avr-* | avr32-* \
+ | be32-* | be64-* \
| bfin-* | bs2000-* \
- | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \
+ | c[123]* | c30-* | [cjt]90-* | c4x-* \
| clipper-* | craynv-* | cydra-* \
| d10v-* | d30v-* | dlx-* \
| elxsi-* \
| f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
| h8300-* | h8500-* \
| hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+ | hexagon-* \
| i*86-* | i860-* | i960-* | ia64-* \
| ip2k-* | iq2000-* \
+ | le32-* | le64-* \
| lm32-* \
| m32c-* | m32r-* | m32rle-* \
| m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
- | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \
+ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
+ | microblaze-* | microblazeel-* \
| mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
| mips16-* \
| mips64-* | mips64el-* \
@@ -367,25 +411,29 @@ case $basic_machine in
| mmix-* \
| mt-* \
| msp430-* \
+ | nds32-* | nds32le-* | nds32be-* \
| nios-* | nios2-* \
| none-* | np1-* | ns16k-* | ns32k-* \
+ | open8-* \
| orion-* \
| pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
- | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
+ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
| pyramid-* \
- | romp-* | rs6000-* | rx-* \
+ | rl78-* | romp-* | rs6000-* | rx-* \
| sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
| shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
| sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
| sparclite-* \
- | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \
- | tahoe-* | thumb-* \
- | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* | tile-* \
+ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \
+ | tahoe-* \
+ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+ | tile*-* \
| tron-* \
| ubicom32-* \
- | v850-* | v850e-* | vax-* \
+ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
+ | vax-* \
| we32k-* \
- | x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \
+ | x86-* | x86_64-* | xc16x-* | xps100-* \
| xstormy16-* | xtensa*-* \
| ymp-* \
| z8k-* | z80-*)
@@ -410,7 +458,7 @@ case $basic_machine in
basic_machine=a29k-amd
os=-udi
;;
- abacus)
+ abacus)
basic_machine=abacus-unknown
;;
adobe68k)
@@ -480,11 +528,20 @@ case $basic_machine in
basic_machine=powerpc-ibm
os=-cnk
;;
+ c54x-*)
+ basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c55x-*)
+ basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c6x-*)
+ basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
c90)
basic_machine=c90-cray
os=-unicos
;;
- cegcc)
+ cegcc)
basic_machine=arm-unknown
os=-cegcc
;;
@@ -516,7 +573,7 @@ case $basic_machine in
basic_machine=craynv-cray
os=-unicosmp
;;
- cr16)
+ cr16 | cr16-*)
basic_machine=cr16-unknown
os=-elf
;;
@@ -674,7 +731,6 @@ case $basic_machine in
i370-ibm* | ibm*)
basic_machine=i370-ibm
;;
-# I'm not sure what "Sysv32" means. Should this be sysv3.2?
i*86v32)
basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
os=-sysv32
@@ -732,9 +788,13 @@ case $basic_machine in
basic_machine=ns32k-utek
os=-sysv
;;
- microblaze)
+ microblaze*)
basic_machine=microblaze-xilinx
;;
+ mingw64)
+ basic_machine=x86_64-pc
+ os=-mingw64
+ ;;
mingw32)
basic_machine=i386-pc
os=-mingw32
@@ -771,10 +831,18 @@ case $basic_machine in
ms1-*)
basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
;;
+ msys)
+ basic_machine=i386-pc
+ os=-msys
+ ;;
mvs)
basic_machine=i370-ibm
os=-mvs
;;
+ nacl)
+ basic_machine=le32-unknown
+ os=-nacl
+ ;;
ncr3000)
basic_machine=i486-ncr
os=-sysv4
@@ -839,6 +907,12 @@ case $basic_machine in
np1)
basic_machine=np1-gould
;;
+ neo-tandem)
+ basic_machine=neo-tandem
+ ;;
+ nse-tandem)
+ basic_machine=nse-tandem
+ ;;
nsr-tandem)
basic_machine=nsr-tandem
;;
@@ -921,9 +995,10 @@ case $basic_machine in
;;
power) basic_machine=power-ibm
;;
- ppc) basic_machine=powerpc-unknown
+ ppc | ppcbe) basic_machine=powerpc-unknown
;;
- ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ppc-* | ppcbe-*)
+ basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
;;
ppcle | powerpclittle | ppc-le | powerpc-little)
basic_machine=powerpcle-unknown
@@ -948,7 +1023,11 @@ case $basic_machine in
basic_machine=i586-unknown
os=-pw32
;;
- rdos)
+ rdos | rdos64)
+ basic_machine=x86_64-pc
+ os=-rdos
+ ;;
+ rdos32)
basic_machine=i386-pc
os=-rdos
;;
@@ -1017,6 +1096,9 @@ case $basic_machine in
basic_machine=i860-stratus
os=-sysv4
;;
+ strongarm-* | thumb-*)
+ basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
sun2)
basic_machine=m68000-sun
;;
@@ -1073,20 +1155,8 @@ case $basic_machine in
basic_machine=t90-cray
os=-unicos
;;
- tic54x | c54x*)
- basic_machine=tic54x-unknown
- os=-coff
- ;;
- tic55x | c55x*)
- basic_machine=tic55x-unknown
- os=-coff
- ;;
- tic6x | c6x*)
- basic_machine=tic6x-unknown
- os=-coff
- ;;
tile*)
- basic_machine=tile-unknown
+ basic_machine=$basic_machine-unknown
os=-linux-gnu
;;
tx39)
@@ -1156,6 +1226,9 @@ case $basic_machine in
xps | xps100)
basic_machine=xps100-honeywell
;;
+ xscale-* | xscalee[bl]-*)
+ basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
+ ;;
ymp)
basic_machine=ymp-cray
os=-unicos
@@ -1253,9 +1326,12 @@ esac
if [ x"$os" != x"" ]
then
case $os in
- # First match some system type aliases
- # that might get confused with valid system types.
+ # First match some system type aliases
+ # that might get confused with valid system types.
# -solaris* is a basic system type, with this one exception.
+ -auroraux)
+ os=-auroraux
+ ;;
-solaris1 | -solaris1.*)
os=`echo $os | sed -e 's|solaris1|sunos4|'`
;;
@@ -1277,21 +1353,22 @@ case $os in
# -sysv* is not here because it comes later, after sysvr4.
-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
| -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
- | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
- | -kopensolaris* \
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
+ | -sym* | -kopensolaris* \
| -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
| -aos* | -aros* \
| -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
| -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
| -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
- | -openbsd* | -solidbsd* \
+ | -bitrig* | -openbsd* | -solidbsd* \
| -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
| -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
| -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
| -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
| -chorusos* | -chorusrdb* | -cegcc* \
- | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
- | -mingw32* | -linux-gnu* | -linux-newlib* | -linux-uclibc* \
+ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
+ | -linux-newlib* | -linux-musl* | -linux-uclibc* \
| -uxpv* | -beos* | -mpeix* | -udk* \
| -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
| -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
@@ -1338,7 +1415,7 @@ case $os in
-opened*)
os=-openedition
;;
- -os400*)
+ -os400*)
os=-os400
;;
-wince*)
@@ -1387,7 +1464,7 @@ case $os in
-sinix*)
os=-sysv4
;;
- -tpf*)
+ -tpf*)
os=-tpf
;;
-triton*)
@@ -1432,6 +1509,8 @@ case $os in
-dicos*)
os=-dicos
;;
+ -nacl*)
+ ;;
-none)
;;
*)
@@ -1454,10 +1533,10 @@ else
# system, and we'll never get to this point.
case $basic_machine in
- score-*)
+ score-*)
os=-elf
;;
- spu-*)
+ spu-*)
os=-elf
;;
*-acorn)
@@ -1469,8 +1548,20 @@ case $basic_machine in
arm*-semi)
os=-aout
;;
- c4x-* | tic4x-*)
- os=-coff
+ c4x-* | tic4x-*)
+ os=-coff
+ ;;
+ hexagon-*)
+ os=-elf
+ ;;
+ tic54x-*)
+ os=-coff
+ ;;
+ tic55x-*)
+ os=-coff
+ ;;
+ tic6x-*)
+ os=-coff
;;
# This must come before the *-dec entry.
pdp10-*)
@@ -1490,14 +1581,11 @@ case $basic_machine in
;;
m68000-sun)
os=-sunos3
- # This also exists in the configure program, but was not the
- # default.
- # os=-sunos4
;;
m68*-cisco)
os=-aout
;;
- mep-*)
+ mep-*)
os=-elf
;;
mips*-cisco)
@@ -1524,7 +1612,7 @@ case $basic_machine in
*-ibm)
os=-aix
;;
- *-knuth)
+ *-knuth)
os=-mmixware
;;
*-wec)
diff --git a/Modules/_ctypes/libffi/configure b/Modules/_ctypes/libffi/configure
index c5c70b1..8230830 100755
--- a/Modules/_ctypes/libffi/configure
+++ b/Modules/_ctypes/libffi/configure
@@ -1,13 +1,11 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.65 for libffi 3.0.10rc0.
+# Generated by GNU Autoconf 2.69 for libffi 3.0.13.
#
-# Report bugs to <http://gcc.gnu.org/bugs.html>.
+# Report bugs to <http://github.com/atgreen/libffi/issues>.
#
#
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
-# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
-# Inc.
+# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
#
#
# This configure script is free software; the Free Software Foundation
@@ -91,6 +89,7 @@ fi
IFS=" "" $as_nl"
# Find who we are. Look in the path if we contain no directory separator.
+as_myself=
case $0 in #((
*[\\/]* ) as_myself=$0 ;;
*) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
@@ -135,6 +134,31 @@ export LANGUAGE
# CDPATH.
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+# Use a proper internal environment variable to ensure we don't fall
+ # into an infinite loop, continuously re-executing ourselves.
+ if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then
+ _as_can_reexec=no; export _as_can_reexec;
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+as_fn_exit 255
+ fi
+ # We don't want this to propagate to other subprocesses.
+ { _as_can_reexec=; unset _as_can_reexec;}
if test "x$CONFIG_SHELL" = x; then
as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
emulate sh
@@ -168,11 +192,20 @@ if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
else
exitcode=1; echo positional parameters were not saved.
fi
-test x\$exitcode = x0 || exit 1"
+test x\$exitcode = x0 || exit 1
+test -x / || exit 1"
as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1
+
+ test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || (
+ ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+ ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO
+ ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO
+ PATH=/empty FPATH=/empty; export PATH FPATH
+ test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\
+ || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1
test \$(( 1 + 1 )) = 2 || exit 1"
if (eval "$as_required") 2>/dev/null; then :
as_have_required=yes
@@ -213,14 +246,25 @@ IFS=$as_save_IFS
if test "x$CONFIG_SHELL" != x; then :
- # We cannot yet assume a decent shell, so we have to provide a
- # neutralization value for shells without unset; and this also
- # works around shells that cannot unset nonexistent variables.
- BASH_ENV=/dev/null
- ENV=/dev/null
- (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
- export CONFIG_SHELL
- exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"}
+ export CONFIG_SHELL
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+exit 255
fi
if test x$as_have_required = xno; then :
@@ -231,8 +275,8 @@ fi
$as_echo "$0: be upgraded to zsh 4.3.4 or later."
else
$as_echo "$0: Please tell bug-autoconf@gnu.org and
-$0: http://gcc.gnu.org/bugs.html about your system,
-$0: including any error possibly output before this
+$0: http://github.com/atgreen/libffi/issues about your
+$0: system, including any error possibly output before this
$0: message. Then install a modern shell, or manually run
$0: the script under such a shell if you do have one."
fi
@@ -319,10 +363,18 @@ $as_echo X"$as_dir" |
test -d "$as_dir" && break
done
test -z "$as_dirs" || eval "mkdir $as_dirs"
- } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
} # as_fn_mkdir_p
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
# as_fn_append VAR VALUE
# ----------------------
# Append the text in VALUE to the end of the definition contained in VAR. Take
@@ -359,19 +411,19 @@ else
fi # as_fn_arith
-# as_fn_error ERROR [LINENO LOG_FD]
-# ---------------------------------
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
-# script with status $?, using 1 if that was 0.
+# script with STATUS, using 1 if that was 0.
as_fn_error ()
{
- as_status=$?; test $as_status -eq 0 && as_status=1
- if test "$3"; then
- as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
- $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
fi
- $as_echo "$as_me: error: $1" >&2
+ $as_echo "$as_me: error: $2" >&2
as_fn_exit $as_status
} # as_fn_error
@@ -444,6 +496,10 @@ as_cr_alnum=$as_cr_Letters$as_cr_digits
chmod +x "$as_me.lineno" ||
{ $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+ # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
+ # already done that, so ensure we don't try to do so again and fall
+ # in an infinite loop. This has already happened in practice.
+ _as_can_reexec=no; export _as_can_reexec
# Don't try to exec as it changes $[0], causing all sort of problems
# (the dirname of $[0] is not the place where we might find the
# original and so on. Autoconf is especially sensitive to this).
@@ -478,16 +534,16 @@ if (echo >conf$$.file) 2>/dev/null; then
# ... but there are two gotchas:
# 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
# 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
- # In both cases, we have to default to `cp -p'.
+ # In both cases, we have to default to `cp -pR'.
ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
elif ln conf$$.file conf$$ 2>/dev/null; then
as_ln_s=ln
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
rmdir conf$$.dir 2>/dev/null
@@ -499,28 +555,8 @@ else
as_mkdir_p=false
fi
-if test -x / >/dev/null 2>&1; then
- as_test_x='test -x'
-else
- if ls -dL / >/dev/null 2>&1; then
- as_ls_L_option=L
- else
- as_ls_L_option=
- fi
- as_test_x='
- eval sh -c '\''
- if test -d "$1"; then
- test -d "$1/.";
- else
- case $1 in #(
- -*)set "./$1";;
- esac;
- case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
- ???[sx]*):;;*)false;;esac;fi
- '\'' sh
- '
-fi
-as_executable_p=$as_test_x
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
# Sed expression to map a string onto a valid CPP name.
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
@@ -528,161 +564,14 @@ as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
# Sed expression to map a string onto a valid variable name.
as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
-
-
-# Check that we are running under the correct shell.
SHELL=${CONFIG_SHELL-/bin/sh}
-case X$lt_ECHO in
-X*--fallback-echo)
- # Remove one level of quotation (which was required for Make).
- ECHO=`echo "$lt_ECHO" | sed 's,\\\\\$\\$0,'$0','`
- ;;
-esac
-
-ECHO=${lt_ECHO-echo}
-if test "X$1" = X--no-reexec; then
- # Discard the --no-reexec flag, and continue.
- shift
-elif test "X$1" = X--fallback-echo; then
- # Avoid inline document here, it may be left over
- :
-elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' ; then
- # Yippee, $ECHO works!
- :
-else
- # Restart under the correct shell.
- exec $SHELL "$0" --no-reexec ${1+"$@"}
-fi
-
-if test "X$1" = X--fallback-echo; then
- # used as fallback echo
- shift
- cat <<_LT_EOF
-$*
-_LT_EOF
- exit 0
-fi
-
-# The HP-UX ksh and POSIX shell print the target directory to stdout
-# if CDPATH is set.
-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
-
-if test -z "$lt_ECHO"; then
- if test "X${echo_test_string+set}" != Xset; then
- # find a string as large as possible, as long as the shell can cope with it
- for cmd in 'sed 50q "$0"' 'sed 20q "$0"' 'sed 10q "$0"' 'sed 2q "$0"' 'echo test'; do
- # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ...
- if { echo_test_string=`eval $cmd`; } 2>/dev/null &&
- { test "X$echo_test_string" = "X$echo_test_string"; } 2>/dev/null
- then
- break
- fi
- done
- fi
-
- if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' &&
- echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` &&
- test "X$echo_testing_string" = "X$echo_test_string"; then
- :
- else
- # The Solaris, AIX, and Digital Unix default echo programs unquote
- # backslashes. This makes it impossible to quote backslashes using
- # echo "$something" | sed 's/\\/\\\\/g'
- #
- # So, first we look for a working echo in the user's PATH.
-
- lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
- for dir in $PATH /usr/ucb; do
- IFS="$lt_save_ifs"
- if (test -f $dir/echo || test -f $dir/echo$ac_exeext) &&
- test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' &&
- echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` &&
- test "X$echo_testing_string" = "X$echo_test_string"; then
- ECHO="$dir/echo"
- break
- fi
- done
- IFS="$lt_save_ifs"
-
- if test "X$ECHO" = Xecho; then
- # We didn't find a better echo, so look for alternatives.
- if test "X`{ print -r '\t'; } 2>/dev/null`" = 'X\t' &&
- echo_testing_string=`{ print -r "$echo_test_string"; } 2>/dev/null` &&
- test "X$echo_testing_string" = "X$echo_test_string"; then
- # This shell has a builtin print -r that does the trick.
- ECHO='print -r'
- elif { test -f /bin/ksh || test -f /bin/ksh$ac_exeext; } &&
- test "X$CONFIG_SHELL" != X/bin/ksh; then
- # If we have ksh, try running configure again with it.
- ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh}
- export ORIGINAL_CONFIG_SHELL
- CONFIG_SHELL=/bin/ksh
- export CONFIG_SHELL
- exec $CONFIG_SHELL "$0" --no-reexec ${1+"$@"}
- else
- # Try using printf.
- ECHO='printf %s\n'
- if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' &&
- echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` &&
- test "X$echo_testing_string" = "X$echo_test_string"; then
- # Cool, printf works
- :
- elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` &&
- test "X$echo_testing_string" = 'X\t' &&
- echo_testing_string=`($ORIGINAL_CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
- test "X$echo_testing_string" = "X$echo_test_string"; then
- CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL
- export CONFIG_SHELL
- SHELL="$CONFIG_SHELL"
- export SHELL
- ECHO="$CONFIG_SHELL $0 --fallback-echo"
- elif echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo '\t') 2>/dev/null` &&
- test "X$echo_testing_string" = 'X\t' &&
- echo_testing_string=`($CONFIG_SHELL "$0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
- test "X$echo_testing_string" = "X$echo_test_string"; then
- ECHO="$CONFIG_SHELL $0 --fallback-echo"
- else
- # maybe with a smaller string...
- prev=:
-
- for cmd in 'echo test' 'sed 2q "$0"' 'sed 10q "$0"' 'sed 20q "$0"' 'sed 50q "$0"'; do
- if { test "X$echo_test_string" = "X`eval $cmd`"; } 2>/dev/null
- then
- break
- fi
- prev="$cmd"
- done
-
- if test "$prev" != 'sed 50q "$0"'; then
- echo_test_string=`eval $prev`
- export echo_test_string
- exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "$0" ${1+"$@"}
- else
- # Oops. We lost completely, so just stick with echo.
- ECHO=echo
- fi
- fi
- fi
- fi
- fi
-fi
-
-# Copy echo and quote the copy suitably for passing to libtool from
-# the Makefile, instead of quoting the original, which is used later.
-lt_ECHO=$ECHO
-if test "X$lt_ECHO" = "X$CONFIG_SHELL $0 --fallback-echo"; then
- lt_ECHO="$CONFIG_SHELL \\\$\$0 --fallback-echo"
-fi
-
-
-
test -n "$DJDIR" || exec 7<&0 </dev/null
exec 6>&1
# Name of the host.
-# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
+# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
# so uname gets run too.
ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
@@ -701,9 +590,9 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='libffi'
PACKAGE_TARNAME='libffi'
-PACKAGE_VERSION='3.0.10rc0'
-PACKAGE_STRING='libffi 3.0.10rc0'
-PACKAGE_BUGREPORT='http://gcc.gnu.org/bugs.html'
+PACKAGE_VERSION='3.0.13'
+PACKAGE_STRING='libffi 3.0.13'
+PACKAGE_BUGREPORT='http://github.com/atgreen/libffi/issues'
PACKAGE_URL=''
# Factoring default headers for most tests.
@@ -748,10 +637,20 @@ LTLIBOBJS
LIBOBJS
toolexeclibdir
toolexecdir
+FFI_DEBUG_FALSE
+FFI_DEBUG_TRUE
TARGETDIR
TARGET
+FFI_EXEC_TRAMPOLINE_TABLE
+FFI_EXEC_TRAMPOLINE_TABLE_FALSE
+FFI_EXEC_TRAMPOLINE_TABLE_TRUE
+sys_symbol_underscore
HAVE_LONG_DOUBLE
ALLOCA
+XTENSA_FALSE
+XTENSA_TRUE
+TILE_FALSE
+TILE_TRUE
PA64_HPUX_FALSE
PA64_HPUX_TRUE
PA_HPUX_FALSE
@@ -774,6 +673,8 @@ AVR32_FALSE
AVR32_TRUE
ARM_FALSE
ARM_TRUE
+AARCH64_FALSE
+AARCH64_TRUE
POWERPC_FREEBSD_FALSE
POWERPC_FREEBSD_TRUE
POWERPC_DARWIN_FALSE
@@ -782,6 +683,12 @@ POWERPC_AIX_FALSE
POWERPC_AIX_TRUE
POWERPC_FALSE
POWERPC_TRUE
+MOXIE_FALSE
+MOXIE_TRUE
+METAG_FALSE
+METAG_TRUE
+MICROBLAZE_FALSE
+MICROBLAZE_TRUE
M68K_FALSE
M68K_TRUE
M32R_FALSE
@@ -802,6 +709,8 @@ X86_FALSE
X86_TRUE
SPARC_FALSE
SPARC_TRUE
+BFIN_FALSE
+BFIN_TRUE
MIPS_FALSE
MIPS_TRUE
AM_LTLDFLAGS
@@ -811,15 +720,18 @@ TESTSUBDIR_TRUE
MAINT
MAINTAINER_MODE_FALSE
MAINTAINER_MODE_TRUE
+PRTDIAG
CPP
OTOOL64
OTOOL
LIPO
NMEDIT
DSYMUTIL
-lt_ECHO
+MANIFEST_TOOL
RANLIB
+ac_ct_AR
AR
+DLLTOOL
OBJDUMP
LN_S
NM
@@ -839,6 +751,7 @@ CCAS
am__fastdepCC_FALSE
am__fastdepCC_TRUE
CCDEPMODE
+am__nodep
AMDEPBACKSLASH
AMDEP_FALSE
AMDEP_TRUE
@@ -875,6 +788,7 @@ am__isrc
INSTALL_DATA
INSTALL_SCRIPT
INSTALL_PROGRAM
+ax_enable_builddir_sed
target_os
target_vendor
target_cpu
@@ -928,14 +842,19 @@ SHELL'
ac_subst_files=''
ac_user_opts='
enable_option_checking
+enable_builddir
enable_dependency_tracking
enable_shared
enable_static
with_pic
enable_fast_install
with_gnu_ld
+with_sysroot
enable_libtool_lock
+enable_portable_binary
+with_gcc_arch
enable_maintainer_mode
+enable_pax_emutramp
enable_debug
enable_structs
enable_raw_api
@@ -1010,8 +929,9 @@ do
fi
case $ac_option in
- *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
- *) ac_optarg=yes ;;
+ *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+ *=) ac_optarg= ;;
+ *) ac_optarg=yes ;;
esac
# Accept the important Cygnus configure options, so we can diagnose typos.
@@ -1056,7 +976,7 @@ do
ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
# Reject names that are not valid shell variable names.
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
- as_fn_error "invalid feature name: $ac_useropt"
+ as_fn_error $? "invalid feature name: $ac_useropt"
ac_useropt_orig=$ac_useropt
ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
case $ac_user_opts in
@@ -1082,7 +1002,7 @@ do
ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
# Reject names that are not valid shell variable names.
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
- as_fn_error "invalid feature name: $ac_useropt"
+ as_fn_error $? "invalid feature name: $ac_useropt"
ac_useropt_orig=$ac_useropt
ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
case $ac_user_opts in
@@ -1286,7 +1206,7 @@ do
ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
# Reject names that are not valid shell variable names.
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
- as_fn_error "invalid package name: $ac_useropt"
+ as_fn_error $? "invalid package name: $ac_useropt"
ac_useropt_orig=$ac_useropt
ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
case $ac_user_opts in
@@ -1302,7 +1222,7 @@ do
ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
# Reject names that are not valid shell variable names.
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
- as_fn_error "invalid package name: $ac_useropt"
+ as_fn_error $? "invalid package name: $ac_useropt"
ac_useropt_orig=$ac_useropt
ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
case $ac_user_opts in
@@ -1332,8 +1252,8 @@ do
| --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
x_libraries=$ac_optarg ;;
- -*) as_fn_error "unrecognized option: \`$ac_option'
-Try \`$0 --help' for more information."
+ -*) as_fn_error $? "unrecognized option: \`$ac_option'
+Try \`$0 --help' for more information"
;;
*=*)
@@ -1341,7 +1261,7 @@ Try \`$0 --help' for more information."
# Reject names that are not valid shell variable names.
case $ac_envvar in #(
'' | [0-9]* | *[!_$as_cr_alnum]* )
- as_fn_error "invalid variable name: \`$ac_envvar'" ;;
+ as_fn_error $? "invalid variable name: \`$ac_envvar'" ;;
esac
eval $ac_envvar=\$ac_optarg
export $ac_envvar ;;
@@ -1351,7 +1271,7 @@ Try \`$0 --help' for more information."
$as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
$as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
- : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
+ : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}"
;;
esac
@@ -1359,13 +1279,13 @@ done
if test -n "$ac_prev"; then
ac_option=--`echo $ac_prev | sed 's/_/-/g'`
- as_fn_error "missing argument to $ac_option"
+ as_fn_error $? "missing argument to $ac_option"
fi
if test -n "$ac_unrecognized_opts"; then
case $enable_option_checking in
no) ;;
- fatal) as_fn_error "unrecognized options: $ac_unrecognized_opts" ;;
+ fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
*) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
esac
fi
@@ -1388,7 +1308,7 @@ do
[\\/$]* | ?:[\\/]* ) continue;;
NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
esac
- as_fn_error "expected an absolute directory name for --$ac_var: $ac_val"
+ as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val"
done
# There might be people who depend on the old broken behavior: `$host'
@@ -1402,8 +1322,6 @@ target=$target_alias
if test "x$host_alias" != x; then
if test "x$build_alias" = x; then
cross_compiling=maybe
- $as_echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
- If a cross compiler is detected then cross compile mode will be used." >&2
elif test "x$build_alias" != "x$host_alias"; then
cross_compiling=yes
fi
@@ -1418,9 +1336,9 @@ test "$silent" = yes && exec 6>/dev/null
ac_pwd=`pwd` && test -n "$ac_pwd" &&
ac_ls_di=`ls -di .` &&
ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
- as_fn_error "working directory cannot be determined"
+ as_fn_error $? "working directory cannot be determined"
test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
- as_fn_error "pwd does not report name of working directory"
+ as_fn_error $? "pwd does not report name of working directory"
# Find the source files, if location was not specified.
@@ -1459,11 +1377,11 @@ else
fi
if test ! -r "$srcdir/$ac_unique_file"; then
test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
- as_fn_error "cannot find sources ($ac_unique_file) in $srcdir"
+ as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir"
fi
ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
ac_abs_confdir=`(
- cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error "$ac_msg"
+ cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg"
pwd)`
# When building in place, set srcdir=.
if test "$ac_abs_confdir" = "$ac_pwd"; then
@@ -1489,7 +1407,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures libffi 3.0.10rc0 to adapt to many kinds of systems.
+\`configure' configures libffi 3.0.13 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1503,7 +1421,7 @@ Configuration:
--help=short display options specific to this package
--help=recursive display the short help of all the included packages
-V, --version display version information and exit
- -q, --quiet, --silent do not print \`checking...' messages
+ -q, --quiet, --silent do not print \`checking ...' messages
--cache-file=FILE cache test results in FILE [disabled]
-C, --config-cache alias for \`--cache-file=config.cache'
-n, --no-create do not create output files
@@ -1560,7 +1478,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of libffi 3.0.10rc0:";;
+ short | recursive ) echo "Configuration of libffi 3.0.13:";;
esac
cat <<\_ACEOF
@@ -1568,15 +1486,24 @@ Optional Features:
--disable-option-checking ignore unrecognized --enable/--with options
--disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
--enable-FEATURE[=ARG] include FEATURE [ARG=yes]
- --disable-dependency-tracking speeds up one-time build
- --enable-dependency-tracking do not reject slow dependency extractors
+ --disable-builddir disable automatic build in subdir of sources
+
+ --enable-dependency-tracking
+ do not reject slow dependency extractors
+ --disable-dependency-tracking
+ speeds up one-time build
--enable-shared[=PKGS] build shared libraries [default=yes]
--enable-static[=PKGS] build static libraries [default=yes]
--enable-fast-install[=PKGS]
optimize for fast installation [default=yes]
--disable-libtool-lock avoid locking (might break parallel builds)
- --enable-maintainer-mode enable make rules and dependencies not useful
- (and sometimes confusing) to the casual installer
+ --enable-portable-binary
+ disable compiler optimizations that would produce
+ unportable binaries
+ --enable-maintainer-mode
+ enable make rules and dependencies not useful (and
+ sometimes confusing) to the casual installer
+ --enable-pax_emutramp enable pax emulated trampolines, for we can't use PROT_EXEC
--enable-debug debugging mode
--disable-structs omit code for struct support
--disable-raw-api make the raw api unavailable
@@ -1585,9 +1512,13 @@ Optional Features:
Optional Packages:
--with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
--without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no)
- --with-pic try to use only PIC/non-PIC objects [default=use
+ --with-pic[=PKGS] try to use only PIC/non-PIC objects [default=use
both]
--with-gnu-ld assume the C compiler uses GNU ld [default=no]
+ --with-sysroot=DIR Search for dependent libraries within DIR
+ (or the compiler's sysroot if not specified).
+ --with-gcc-arch=<arch> use architecture <arch> for gcc -march/-mtune,
+ instead of guessing
Some influential environment variables:
CC C compiler command
@@ -1604,7 +1535,7 @@ Some influential environment variables:
Use these variables to override the choices made by `configure' or to help
it to find libraries and programs with nonstandard names/locations.
-Report bugs to <http://gcc.gnu.org/bugs.html>.
+Report bugs to <http://github.com/atgreen/libffi/issues>.
_ACEOF
ac_status=$?
fi
@@ -1667,10 +1598,10 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
-libffi configure 3.0.10rc0
-generated by GNU Autoconf 2.65
+libffi configure 3.0.13
+generated by GNU Autoconf 2.69
-Copyright (C) 2009 Free Software Foundation, Inc.
+Copyright (C) 2012 Free Software Foundation, Inc.
This configure script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it.
_ACEOF
@@ -1714,7 +1645,7 @@ sed 's/^/| /' conftest.$ac_ext >&5
ac_retval=1
fi
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
as_fn_set_status $ac_retval
} # ac_fn_c_try_compile
@@ -1746,7 +1677,7 @@ $as_echo "$ac_try_echo"; } >&5
test ! -s conftest.err
} && test -s conftest$ac_exeext && {
test "$cross_compiling" = yes ||
- $as_test_x conftest$ac_exeext
+ test -x conftest$ac_exeext
}; then :
ac_retval=0
else
@@ -1760,7 +1691,7 @@ fi
# interfere with the next link command; also delete a directory that is
# left behind by Apple's compiler. We do this before executing the actions.
rm -rf conftest.dSYM conftest_ipa8_conftest.oo
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
as_fn_set_status $ac_retval
} # ac_fn_c_try_link
@@ -1774,7 +1705,7 @@ ac_fn_c_check_header_compile ()
as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
+if eval \${$3+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -1792,7 +1723,7 @@ fi
eval ac_res=\$$3
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
$as_echo "$ac_res" >&6; }
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
} # ac_fn_c_check_header_compile
@@ -1817,7 +1748,7 @@ $as_echo "$ac_try_echo"; } >&5
mv -f conftest.er1 conftest.err
fi
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
- test $ac_status = 0; } >/dev/null && {
+ test $ac_status = 0; } > conftest.i && {
test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
test ! -s conftest.err
}; then :
@@ -1828,7 +1759,7 @@ sed 's/^/| /' conftest.$ac_ext >&5
ac_retval=1
fi
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
as_fn_set_status $ac_retval
} # ac_fn_c_try_cpp
@@ -1870,7 +1801,7 @@ sed 's/^/| /' conftest.$ac_ext >&5
ac_retval=$ac_status
fi
rm -rf conftest.dSYM conftest_ipa8_conftest.oo
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
as_fn_set_status $ac_retval
} # ac_fn_c_try_run
@@ -1883,7 +1814,7 @@ ac_fn_c_check_func ()
as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
+if eval \${$3+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -1938,103 +1869,10 @@ fi
eval ac_res=\$$3
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
$as_echo "$ac_res" >&6; }
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
} # ac_fn_c_check_func
-# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES
-# -------------------------------------------------------
-# Tests whether HEADER exists, giving a warning if it cannot be compiled using
-# the include files in INCLUDES and setting the cache variable VAR
-# accordingly.
-ac_fn_c_check_header_mongrel ()
-{
- as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
- if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
- $as_echo_n "(cached) " >&6
-fi
-eval ac_res=\$$3
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-else
- # Is the header compilable?
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5
-$as_echo_n "checking $2 usability... " >&6; }
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-$4
-#include <$2>
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
- ac_header_compiler=yes
-else
- ac_header_compiler=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5
-$as_echo "$ac_header_compiler" >&6; }
-
-# Is the header present?
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5
-$as_echo_n "checking $2 presence... " >&6; }
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-#include <$2>
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
- ac_header_preproc=yes
-else
- ac_header_preproc=no
-fi
-rm -f conftest.err conftest.$ac_ext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5
-$as_echo "$ac_header_preproc" >&6; }
-
-# So? What about this header?
-case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #((
- yes:no: )
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5
-$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;}
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
-$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
- ;;
- no:yes:* )
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5
-$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;}
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5
-$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;}
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5
-$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5
-$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;}
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
-$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
-( cat <<\_ASBOX
-## ------------------------------------------- ##
-## Report this to http://gcc.gnu.org/bugs.html ##
-## ------------------------------------------- ##
-_ASBOX
- ) | sed "s/^/$as_me: WARNING: /" >&2
- ;;
-esac
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
- $as_echo_n "(cached) " >&6
-else
- eval "$3=\$ac_header_compiler"
-fi
-eval ac_res=\$$3
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-fi
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-
-} # ac_fn_c_check_header_mongrel
-
# ac_fn_c_compute_int LINENO EXPR VAR INCLUDES
# --------------------------------------------
# Tries to find the compile-time value of EXPR in a program that includes
@@ -2052,7 +1890,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) >= 0)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2068,7 +1907,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) <= $ac_mid)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2094,7 +1934,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) < 0)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2110,7 +1951,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) >= $ac_mid)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2144,7 +1986,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) <= $ac_mid)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2208,16 +2051,161 @@ rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
rm -f conftest.val
fi
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
as_fn_set_status $ac_retval
} # ac_fn_c_compute_int
+
+# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES
+# -------------------------------------------------------
+# Tests whether HEADER exists, giving a warning if it cannot be compiled using
+# the include files in INCLUDES and setting the cache variable VAR
+# accordingly.
+ac_fn_c_check_header_mongrel ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ if eval \${$3+:} false; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+else
+ # Is the header compilable?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5
+$as_echo_n "checking $2 usability... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_header_compiler=yes
+else
+ ac_header_compiler=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5
+$as_echo "$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5
+$as_echo_n "checking $2 presence... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <$2>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+ ac_header_preproc=yes
+else
+ ac_header_preproc=no
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5
+$as_echo "$ac_header_preproc" >&6; }
+
+# So? What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #((
+ yes:no: )
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5
+$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+ ;;
+ no:yes:* )
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5
+$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5
+$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5
+$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5
+$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+( $as_echo "## ------------------------------------------------------ ##
+## Report this to http://github.com/atgreen/libffi/issues ##
+## ------------------------------------------------------ ##"
+ ) | sed "s/^/$as_me: WARNING: /" >&2
+ ;;
+esac
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ eval "$3=\$ac_header_compiler"
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+fi
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_header_mongrel
+
+# ac_fn_c_check_type LINENO TYPE VAR INCLUDES
+# -------------------------------------------
+# Tests whether TYPE exists after having included INCLUDES, setting cache
+# variable VAR accordingly.
+ac_fn_c_check_type ()
+{
+ as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ eval "$3=no"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+if (sizeof ($2))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+$4
+int
+main ()
+{
+if (sizeof (($2)))
+ return 0;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+ eval "$3=yes"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_type
cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by libffi $as_me 3.0.10rc0, which was
-generated by GNU Autoconf 2.65. Invocation command line was
+It was created by libffi $as_me 3.0.13, which was
+generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -2327,11 +2315,9 @@ trap 'exit_status=$?
{
echo
- cat <<\_ASBOX
-## ---------------- ##
+ $as_echo "## ---------------- ##
## Cache variables. ##
-## ---------------- ##
-_ASBOX
+## ---------------- ##"
echo
# The following way of writing the cache mishandles newlines in values,
(
@@ -2365,11 +2351,9 @@ $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
)
echo
- cat <<\_ASBOX
-## ----------------- ##
+ $as_echo "## ----------------- ##
## Output variables. ##
-## ----------------- ##
-_ASBOX
+## ----------------- ##"
echo
for ac_var in $ac_subst_vars
do
@@ -2382,11 +2366,9 @@ _ASBOX
echo
if test -n "$ac_subst_files"; then
- cat <<\_ASBOX
-## ------------------- ##
+ $as_echo "## ------------------- ##
## File substitutions. ##
-## ------------------- ##
-_ASBOX
+## ------------------- ##"
echo
for ac_var in $ac_subst_files
do
@@ -2400,11 +2382,9 @@ _ASBOX
fi
if test -s confdefs.h; then
- cat <<\_ASBOX
-## ----------- ##
+ $as_echo "## ----------- ##
## confdefs.h. ##
-## ----------- ##
-_ASBOX
+## ----------- ##"
echo
cat confdefs.h
echo
@@ -2459,7 +2439,12 @@ _ACEOF
ac_site_file1=NONE
ac_site_file2=NONE
if test -n "$CONFIG_SITE"; then
- ac_site_file1=$CONFIG_SITE
+ # We do not want a PATH search for config.site.
+ case $CONFIG_SITE in #((
+ -*) ac_site_file1=./$CONFIG_SITE;;
+ */*) ac_site_file1=$CONFIG_SITE;;
+ *) ac_site_file1=./$CONFIG_SITE;;
+ esac
elif test "x$prefix" != xNONE; then
ac_site_file1=$prefix/share/config.site
ac_site_file2=$prefix/etc/config.site
@@ -2474,7 +2459,11 @@ do
{ $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
$as_echo "$as_me: loading site script $ac_site_file" >&6;}
sed 's/^/| /' "$ac_site_file" >&5
- . "$ac_site_file"
+ . "$ac_site_file" \
+ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "failed to load site script $ac_site_file
+See \`config.log' for more details" "$LINENO" 5; }
fi
done
@@ -2550,7 +2539,7 @@ if $ac_cache_corrupted; then
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
{ $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
- as_fn_error "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
+ as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
fi
## -------------------- ##
## Main body of script. ##
@@ -2568,16 +2557,22 @@ ac_config_headers="$ac_config_headers fficonfig.h"
ac_aux_dir=
for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do
- for ac_t in install-sh install.sh shtool; do
- if test -f "$ac_dir/$ac_t"; then
- ac_aux_dir=$ac_dir
- ac_install_sh="$ac_aux_dir/$ac_t -c"
- break 2
- fi
- done
+ if test -f "$ac_dir/install-sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f "$ac_dir/install.sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ elif test -f "$ac_dir/shtool"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/shtool install -c"
+ break
+ fi
done
if test -z "$ac_aux_dir"; then
- as_fn_error "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5
+ as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5
fi
# These three variables are undocumented and unsupported,
@@ -2591,27 +2586,27 @@ ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
# Make sure we can run config.sub.
$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
- as_fn_error "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
+ as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5
$as_echo_n "checking build system type... " >&6; }
-if test "${ac_cv_build+set}" = set; then :
+if ${ac_cv_build+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_build_alias=$build_alias
test "x$ac_build_alias" = x &&
ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"`
test "x$ac_build_alias" = x &&
- as_fn_error "cannot guess build type; you must specify one" "$LINENO" 5
+ as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5
ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` ||
- as_fn_error "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5
+ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5
$as_echo "$ac_cv_build" >&6; }
case $ac_cv_build in
*-*-*) ;;
-*) as_fn_error "invalid value of canonical build" "$LINENO" 5;;
+*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;;
esac
build=$ac_cv_build
ac_save_IFS=$IFS; IFS='-'
@@ -2629,14 +2624,14 @@ case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5
$as_echo_n "checking host system type... " >&6; }
-if test "${ac_cv_host+set}" = set; then :
+if ${ac_cv_host+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "x$host_alias" = x; then
ac_cv_host=$ac_cv_build
else
ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` ||
- as_fn_error "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5
+ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5
fi
fi
@@ -2644,7 +2639,7 @@ fi
$as_echo "$ac_cv_host" >&6; }
case $ac_cv_host in
*-*-*) ;;
-*) as_fn_error "invalid value of canonical host" "$LINENO" 5;;
+*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;;
esac
host=$ac_cv_host
ac_save_IFS=$IFS; IFS='-'
@@ -2662,14 +2657,14 @@ case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking target system type" >&5
$as_echo_n "checking target system type... " >&6; }
-if test "${ac_cv_target+set}" = set; then :
+if ${ac_cv_target+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "x$target_alias" = x; then
ac_cv_target=$ac_cv_host
else
ac_cv_target=`$SHELL "$ac_aux_dir/config.sub" $target_alias` ||
- as_fn_error "$SHELL $ac_aux_dir/config.sub $target_alias failed" "$LINENO" 5
+ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $target_alias failed" "$LINENO" 5
fi
fi
@@ -2677,7 +2672,7 @@ fi
$as_echo "$ac_cv_target" >&6; }
case $ac_cv_target in
*-*-*) ;;
-*) as_fn_error "invalid value of canonical target" "$LINENO" 5;;
+*) as_fn_error $? "invalid value of canonical target" "$LINENO" 5;;
esac
target=$ac_cv_target
ac_save_IFS=$IFS; IFS='-'
@@ -2704,7 +2699,111 @@ target_alias=${target_alias-$host_alias}
. ${srcdir}/configure.host
-am__api_version='1.11'
+
+ # [$]@ is unsable in 2.60+ but earlier autoconf had no ac_configure_args
+ if test "${ac_configure_args+set}" != "set" ; then
+ ac_configure_args=
+ for ac_arg in ${1+"$@"}; do
+ ac_configure_args="$ac_configure_args '$ac_arg'"
+ done
+ fi
+
+# expand $ac_aux_dir to an absolute path
+am_aux_dir=`cd $ac_aux_dir && pwd`
+
+
+ax_enable_builddir="."
+# Check whether --enable-builddir was given.
+if test "${enable_builddir+set}" = set; then :
+ enableval=$enable_builddir; ax_enable_builddir="$enableval"
+else
+ ax_enable_builddir="auto"
+fi
+
+if test ".$ac_srcdir_defaulted" != ".no" ; then
+if test ".$srcdir" = ".." ; then
+ if test -f config.status ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: toplevel srcdir already configured... skipping subdir build" >&5
+$as_echo "$as_me: toplevel srcdir already configured... skipping subdir build" >&6;}
+ else
+ test ".$ax_enable_builddir" = "." && ax_enable_builddir="."
+ test ".$ax_enable_builddir" = ".no" && ax_enable_builddir="."
+ test ".$TARGET" = "." && TARGET="$target"
+ test ".$ax_enable_builddir" = ".auto" && ax_enable_builddir="$TARGET"
+ if test ".$ax_enable_builddir" != ".." ; then # we know where to go and
+ as_dir=$ax_enable_builddir; as_fn_mkdir_p
+ echo __.$ax_enable_builddir.__ > $ax_enable_builddir/conftest.tmp
+ cd $ax_enable_builddir
+ if grep __.$ax_enable_builddir.__ conftest.tmp >/dev/null 2>/dev/null ; then
+ rm conftest.tmp
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: continue configure in default builddir \"./$ax_enable_builddir\"" >&5
+$as_echo "continue configure in default builddir \"./$ax_enable_builddir\"" >&6; }
+ else
+ as_fn_error $? "could not change to default builddir \"./$ax_enable_builddir\"" "$LINENO" 5
+ fi
+ srcdir=`echo "$ax_enable_builddir" |
+ sed -e 's,^\./,,;s,[^/]$,&/,;s,[^/]*/,../,g;s,[/]$,,;'`
+ # going to restart from subdirectory location
+ test -f $srcdir/config.log && mv $srcdir/config.log .
+ test -f $srcdir/confdefs.h && mv $srcdir/confdefs.h .
+ test -f $srcdir/conftest.log && mv $srcdir/conftest.log .
+ test -f $srcdir/$cache_file && mv $srcdir/$cache_file .
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ....exec $SHELL $srcdir/$0 \"--srcdir=$srcdir\" \"--enable-builddir=$ax_enable_builddir\" ${1+\"$@\"}" >&5
+$as_echo "....exec $SHELL $srcdir/$0 \"--srcdir=$srcdir\" \"--enable-builddir=$ax_enable_builddir\" ${1+\"$@\"}" >&6; }
+ case "$0" in # restart
+ /\\*) eval $SHELL "'$0'" "'--srcdir=$srcdir'" "'--enable-builddir=$ax_enable_builddir'" $ac_configure_args ;;
+ *) eval $SHELL "'$srcdir/$0'" "'--srcdir=$srcdir'" "'--enable-builddir=$ax_enable_builddir'" $ac_configure_args ;;
+ esac ; exit $?
+ fi
+ fi
+fi fi
+test ".$ax_enable_builddir" = ".auto" && ax_enable_builddir="."
+# Extract the first word of "gsed sed", so it can be a program name with args.
+set dummy gsed sed; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_ax_enable_builddir_sed+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $ax_enable_builddir_sed in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_ax_enable_builddir_sed="$ax_enable_builddir_sed" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_ax_enable_builddir_sed="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ test -z "$ac_cv_path_ax_enable_builddir_sed" && ac_cv_path_ax_enable_builddir_sed="sed"
+ ;;
+esac
+fi
+ax_enable_builddir_sed=$ac_cv_path_ax_enable_builddir_sed
+if test -n "$ax_enable_builddir_sed"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_enable_builddir_sed" >&5
+$as_echo "$ax_enable_builddir_sed" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ax_enable_builddir_auxdir="$am_aux_dir"
+ac_config_commands="$ac_config_commands buildir"
+
+
+am__api_version='1.12'
# Find a good install program. We prefer a C program (faster),
# so one script is as good as another. But avoid the broken or
@@ -2723,7 +2822,7 @@ am__api_version='1.11'
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5
$as_echo_n "checking for a BSD-compatible install... " >&6; }
if test -z "$INSTALL"; then
-if test "${ac_cv_path_install+set}" = set; then :
+if ${ac_cv_path_install+:} false; then :
$as_echo_n "(cached) " >&6
else
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
@@ -2743,7 +2842,7 @@ case $as_dir/ in #((
# by default.
for ac_prog in ginstall scoinst install; do
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then
if test $ac_prog = install &&
grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
# AIX install. It has an incompatible calling convention.
@@ -2801,56 +2900,71 @@ test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5
$as_echo_n "checking whether build environment is sane... " >&6; }
-# Just in case
-sleep 1
-echo timestamp > conftest.file
# Reject unsafe characters in $srcdir or the absolute working directory
# name. Accept space and tab only in the latter.
am_lf='
'
case `pwd` in
*[\\\"\#\$\&\'\`$am_lf]*)
- as_fn_error "unsafe absolute working directory name" "$LINENO" 5;;
+ as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;;
esac
case $srcdir in
*[\\\"\#\$\&\'\`$am_lf\ \ ]*)
- as_fn_error "unsafe srcdir value: \`$srcdir'" "$LINENO" 5;;
+ as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;;
esac
-# Do `set' in a subshell so we don't clobber the current shell's
+# Do 'set' in a subshell so we don't clobber the current shell's
# arguments. Must try -L first in case configure is actually a
# symlink; some systems play weird games with the mod time of symlinks
# (eg FreeBSD returns the mod time of the symlink's containing
# directory).
if (
- set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
- if test "$*" = "X"; then
- # -L didn't work.
- set X `ls -t "$srcdir/configure" conftest.file`
- fi
- rm -f conftest.file
- if test "$*" != "X $srcdir/configure conftest.file" \
- && test "$*" != "X conftest.file $srcdir/configure"; then
-
- # If neither matched, then we have a broken ls. This can happen
- # if, for instance, CONFIG_SHELL is bash and it inherits a
- # broken ls alias from the environment. This has actually
- # happened. Such a system could not be considered "sane".
- as_fn_error "ls -t appears to fail. Make sure there is not a broken
-alias in your environment" "$LINENO" 5
- fi
-
+ am_has_slept=no
+ for am_try in 1 2; do
+ echo "timestamp, slept: $am_has_slept" > conftest.file
+ set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+ if test "$*" = "X"; then
+ # -L didn't work.
+ set X `ls -t "$srcdir/configure" conftest.file`
+ fi
+ if test "$*" != "X $srcdir/configure conftest.file" \
+ && test "$*" != "X conftest.file $srcdir/configure"; then
+
+ # If neither matched, then we have a broken ls. This can happen
+ # if, for instance, CONFIG_SHELL is bash and it inherits a
+ # broken ls alias from the environment. This has actually
+ # happened. Such a system could not be considered "sane".
+ as_fn_error $? "ls -t appears to fail. Make sure there is not a broken
+ alias in your environment" "$LINENO" 5
+ fi
+ if test "$2" = conftest.file || test $am_try -eq 2; then
+ break
+ fi
+ # Just in case.
+ sleep 1
+ am_has_slept=yes
+ done
test "$2" = conftest.file
)
then
# Ok.
:
else
- as_fn_error "newly created file is older than distributed files!
+ as_fn_error $? "newly created file is older than distributed files!
Check your system clock" "$LINENO" 5
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
+# If we didn't sleep, we still need to ensure time stamps of config.status and
+# generated files are strictly newer.
+am_sleep_pid=
+if grep 'slept: no' conftest.file >/dev/null 2>&1; then
+ ( sleep 1 ) &
+ am_sleep_pid=$!
+fi
+
+rm -f conftest.file
+
test "$program_prefix" != NONE &&
program_transform_name="s&^&$program_prefix&;$program_transform_name"
# Use a double $ so make ignores it.
@@ -2861,9 +2975,6 @@ test "$program_suffix" != NONE &&
ac_script='s/[\\$]/&&/g;s/;s,x,x,$//'
program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"`
-# expand $ac_aux_dir to an absolute path
-am_aux_dir=`cd $ac_aux_dir && pwd`
-
if test x"${MISSING+set}" != xset; then
case $am_aux_dir in
*\ * | *\ *)
@@ -2877,8 +2988,8 @@ if eval "$MISSING --run true"; then
am_missing_run="$MISSING --run "
else
am_missing_run=
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`missing' script is too old or missing" >&5
-$as_echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5
+$as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;}
fi
if test x"${install_sh}" != xset; then
@@ -2890,17 +3001,17 @@ if test x"${install_sh}" != xset; then
esac
fi
-# Installed binaries are usually stripped using `strip' when the user
-# run `make install-strip'. However `strip' might not be the right
+# Installed binaries are usually stripped using 'strip' when the user
+# run "make install-strip". However 'strip' might not be the right
# tool to use in cross-compilation environments, therefore Automake
-# will honor the `STRIP' environment variable to overrule this program.
+# will honor the 'STRIP' environment variable to overrule this program.
if test "$cross_compiling" != no; then
if test -n "$ac_tool_prefix"; then
# Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
set dummy ${ac_tool_prefix}strip; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_STRIP+set}" = set; then :
+if ${ac_cv_prog_STRIP+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$STRIP"; then
@@ -2912,7 +3023,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_STRIP="${ac_tool_prefix}strip"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -2940,7 +3051,7 @@ if test -z "$ac_cv_prog_STRIP"; then
set dummy strip; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then :
+if ${ac_cv_prog_ac_ct_STRIP+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_STRIP"; then
@@ -2952,7 +3063,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_STRIP="strip"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -2993,7 +3104,7 @@ INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5
$as_echo_n "checking for a thread-safe mkdir -p... " >&6; }
if test -z "$MKDIR_P"; then
- if test "${ac_cv_path_mkdir+set}" = set; then :
+ if ${ac_cv_path_mkdir+:} false; then :
$as_echo_n "(cached) " >&6
else
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
@@ -3003,7 +3114,7 @@ do
test -z "$as_dir" && as_dir=.
for ac_prog in mkdir gmkdir; do
for ac_exec_ext in '' $ac_executable_extensions; do
- { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; } || continue
+ as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue
case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #(
'mkdir (GNU coreutils) '* | \
'mkdir (coreutils) '* | \
@@ -3032,19 +3143,13 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5
$as_echo "$MKDIR_P" >&6; }
-mkdir_p="$MKDIR_P"
-case $mkdir_p in
- [\\/$]* | ?:[\\/]*) ;;
- */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;;
-esac
-
for ac_prog in gawk mawk nawk awk
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_AWK+set}" = set; then :
+if ${ac_cv_prog_AWK+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$AWK"; then
@@ -3056,7 +3161,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_AWK="$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3084,7 +3189,7 @@ done
$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
set x ${MAKE-make}
ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
-if { as_var=ac_cv_prog_make_${ac_make}_set; eval "test \"\${$as_var+set}\" = set"; }; then :
+if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then :
$as_echo_n "(cached) " >&6
else
cat >conftest.make <<\_ACEOF
@@ -3092,7 +3197,7 @@ SHELL = /bin/sh
all:
@echo '@@@%%%=$(MAKE)=@@@%%%'
_ACEOF
-# GNU make sometimes prints "make[1]: Entering...", which would confuse us.
+# GNU make sometimes prints "make[1]: Entering ...", which would confuse us.
case `${MAKE-make} -f conftest.make 2>/dev/null` in
*@@@%%%=?*=@@@%%%*)
eval ac_cv_prog_make_${ac_make}_set=yes;;
@@ -3126,7 +3231,7 @@ if test "`cd $srcdir && pwd`" != "`pwd`"; then
am__isrc=' -I$(srcdir)'
# test to see if srcdir already configured
if test -f $srcdir/config.status; then
- as_fn_error "source directory already configured; run \"make distclean\" there first" "$LINENO" 5
+ as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5
fi
fi
@@ -3142,7 +3247,7 @@ fi
# Define the identity of the package.
PACKAGE='libffi'
- VERSION='3.0.10rc0'
+ VERSION='3.0.13'
cat >>confdefs.h <<_ACEOF
@@ -3170,13 +3275,19 @@ AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"}
MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"}
+# For better backward compatibility. To be removed once Automake 1.9.x
+# dies out for good. For more background, see:
+# <http://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
+# <http://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
+mkdir_p='$(MKDIR_P)'
+
# We need awk for the "check" target. The system "awk" is bad on
# some platforms.
-# Always define AMTAR for backward compatibility.
+# Always define AMTAR for backward compatibility. Yes, it's still used
+# in the wild :-( We should find a proper way to deprecate it ...
+AMTAR='$${TAR-tar}'
-AMTAR=${AMTAR-"${am_missing_run}tar"}
-
-am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -'
+am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'
@@ -3187,9 +3298,12 @@ am__tar='${AMTAR} chof - "$$tardir"'; am__untar='${AMTAR} xf -'
# We must force CC to /not/ be precious variables; otherwise
# the wrong, non-multilib-adjusted value will be used in multilibs.
# As a side effect, we have to subst CFLAGS ourselves.
+# Also save and restore CFLAGS, since AC_PROG_CC will come up with
+# defaults of its own if none are provided.
+save_CFLAGS=$CFLAGS
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
@@ -3200,7 +3314,7 @@ if test -n "$ac_tool_prefix"; then
set dummy ${ac_tool_prefix}gcc; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
+if ${ac_cv_prog_CC+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$CC"; then
@@ -3212,7 +3326,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_CC="${ac_tool_prefix}gcc"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3240,7 +3354,7 @@ if test -z "$ac_cv_prog_CC"; then
set dummy gcc; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_CC+set}" = set; then :
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_CC"; then
@@ -3252,7 +3366,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_CC="gcc"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3293,7 +3407,7 @@ if test -z "$CC"; then
set dummy ${ac_tool_prefix}cc; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
+if ${ac_cv_prog_CC+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$CC"; then
@@ -3305,7 +3419,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_CC="${ac_tool_prefix}cc"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3333,7 +3447,7 @@ if test -z "$CC"; then
set dummy cc; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
+if ${ac_cv_prog_CC+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$CC"; then
@@ -3346,7 +3460,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
ac_prog_rejected=yes
continue
@@ -3392,7 +3506,7 @@ if test -z "$CC"; then
set dummy $ac_tool_prefix$ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
+if ${ac_cv_prog_CC+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$CC"; then
@@ -3404,7 +3518,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3436,7 +3550,7 @@ do
set dummy $ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_CC+set}" = set; then :
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_CC"; then
@@ -3448,7 +3562,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_CC="$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3490,8 +3604,8 @@ fi
test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "no acceptable C compiler found in \$PATH
-See \`config.log' for more details." "$LINENO" 5; }
+as_fn_error $? "no acceptable C compiler found in \$PATH
+See \`config.log' for more details" "$LINENO" 5; }
# Provide some information about the compiler.
$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
@@ -3605,9 +3719,8 @@ sed 's/^/| /' conftest.$ac_ext >&5
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-{ as_fn_set_status 77
-as_fn_error "C compiler cannot create executables
-See \`config.log' for more details." "$LINENO" 5; }; }
+as_fn_error 77 "C compiler cannot create executables
+See \`config.log' for more details" "$LINENO" 5; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
@@ -3649,8 +3762,8 @@ done
else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "cannot compute suffix of executables: cannot compile and link
-See \`config.log' for more details." "$LINENO" 5; }
+as_fn_error $? "cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details" "$LINENO" 5; }
fi
rm -f conftest conftest$ac_cv_exeext
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
@@ -3707,9 +3820,9 @@ $as_echo "$ac_try_echo"; } >&5
else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "cannot run C compiled programs.
+as_fn_error $? "cannot run C compiled programs.
If you meant to cross compile, use \`--host'.
-See \`config.log' for more details." "$LINENO" 5; }
+See \`config.log' for more details" "$LINENO" 5; }
fi
fi
fi
@@ -3720,7 +3833,7 @@ rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
ac_clean_files=$ac_clean_files_save
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
$as_echo_n "checking for suffix of object files... " >&6; }
-if test "${ac_cv_objext+set}" = set; then :
+if ${ac_cv_objext+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -3760,8 +3873,8 @@ sed 's/^/| /' conftest.$ac_ext >&5
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "cannot compute suffix of object files: cannot compile
-See \`config.log' for more details." "$LINENO" 5; }
+as_fn_error $? "cannot compute suffix of object files: cannot compile
+See \`config.log' for more details" "$LINENO" 5; }
fi
rm -f conftest.$ac_cv_objext conftest.$ac_ext
fi
@@ -3771,7 +3884,7 @@ OBJEXT=$ac_cv_objext
ac_objext=$OBJEXT
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
-if test "${ac_cv_c_compiler_gnu+set}" = set; then :
+if ${ac_cv_c_compiler_gnu+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -3808,7 +3921,7 @@ ac_test_CFLAGS=${CFLAGS+set}
ac_save_CFLAGS=$CFLAGS
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
$as_echo_n "checking whether $CC accepts -g... " >&6; }
-if test "${ac_cv_prog_cc_g+set}" = set; then :
+if ${ac_cv_prog_cc_g+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_save_c_werror_flag=$ac_c_werror_flag
@@ -3886,7 +3999,7 @@ else
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
-if test "${ac_cv_prog_cc_c89+set}" = set; then :
+if ${ac_cv_prog_cc_c89+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_cv_prog_cc_c89=no
@@ -3895,8 +4008,7 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
#include <stdarg.h>
#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
+struct stat;
/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
struct buf { int x; };
FILE * (*rcsopen) (struct buf *, struct stat *, int);
@@ -3999,7 +4111,7 @@ am__quote=
_am_result=none
# First try GNU make style include.
echo "include confinc" > confmf
-# Ignore all kinds of additional output from `make'.
+# Ignore all kinds of additional output from 'make'.
case `$am_make -s -f confmf 2> /dev/null` in #(
*the\ am__doit\ target*)
am__include=include
@@ -4032,6 +4144,7 @@ fi
if test "x$enable_dependency_tracking" != xno; then
am_depcomp="$ac_aux_dir/depcomp"
AMDEPBACKSLASH='\'
+ am__nodep='_no'
fi
if test "x$enable_dependency_tracking" != xno; then
AMDEP_TRUE=
@@ -4047,15 +4160,16 @@ depcc="$CC" am_compiler_list=
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
$as_echo_n "checking dependency style of $depcc... " >&6; }
-if test "${am_cv_CC_dependencies_compiler_type+set}" = set; then :
+if ${am_cv_CC_dependencies_compiler_type+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
# We make a subdir and do the tests there. Otherwise we can end up
# making bogus files that we don't know about and never remove. For
# instance it was reported that on HP-UX the gcc test will end up
- # making a dummy file named `D' -- because `-MD' means `put the output
- # in D'.
+ # making a dummy file named 'D' -- because '-MD' means "put the output
+ # in D".
+ rm -rf conftest.dir
mkdir conftest.dir
# Copy depcomp to subdir because otherwise we won't find it if we're
# using a relative directory.
@@ -4089,16 +4203,16 @@ else
: > sub/conftest.c
for i in 1 2 3 4 5 6; do
echo '#include "conftst'$i'.h"' >> sub/conftest.c
- # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
- # Solaris 8's {/usr,}/bin/sh.
- touch sub/conftst$i.h
+ # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
+ # Solaris 10 /bin/sh.
+ echo '/* dummy */' > sub/conftst$i.h
done
echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
- # We check with `-c' and `-o' for the sake of the "dashmstdout"
+ # We check with '-c' and '-o' for the sake of the "dashmstdout"
# mode. It turns out that the SunPro C++ compiler does not properly
- # handle `-M -o', and we need to detect this. Also, some Intel
- # versions had trouble with output in subdirs
+ # handle '-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs.
am__obj=sub/conftest.${OBJEXT-o}
am__minus_obj="-o $am__obj"
case $depmode in
@@ -4107,16 +4221,16 @@ else
test "$am__universal" = false || continue
;;
nosideeffect)
- # after this tag, mechanisms are not by side-effect, so they'll
- # only be used when explicitly requested
+ # After this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested.
if test "x$enable_dependency_tracking" = xyes; then
continue
else
break
fi
;;
- msvisualcpp | msvcmsys)
- # This compiler won't grok `-c -o', but also, the minuso test has
+ msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+ # This compiler won't grok '-c -o', but also, the minuso test has
# not run yet. These depmodes are late enough in the game, and
# so weak that their functioning should not be impacted.
am__obj=conftest.${OBJEXT-o}
@@ -4170,6 +4284,7 @@ else
fi
+CFLAGS=$save_CFLAGS
@@ -4186,15 +4301,16 @@ depcc="$CCAS" am_compiler_list=
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
$as_echo_n "checking dependency style of $depcc... " >&6; }
-if test "${am_cv_CCAS_dependencies_compiler_type+set}" = set; then :
+if ${am_cv_CCAS_dependencies_compiler_type+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
# We make a subdir and do the tests there. Otherwise we can end up
# making bogus files that we don't know about and never remove. For
# instance it was reported that on HP-UX the gcc test will end up
- # making a dummy file named `D' -- because `-MD' means `put the output
- # in D'.
+ # making a dummy file named 'D' -- because '-MD' means "put the output
+ # in D".
+ rm -rf conftest.dir
mkdir conftest.dir
# Copy depcomp to subdir because otherwise we won't find it if we're
# using a relative directory.
@@ -4226,16 +4342,16 @@ else
: > sub/conftest.c
for i in 1 2 3 4 5 6; do
echo '#include "conftst'$i'.h"' >> sub/conftest.c
- # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
- # Solaris 8's {/usr,}/bin/sh.
- touch sub/conftst$i.h
+ # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
+ # Solaris 10 /bin/sh.
+ echo '/* dummy */' > sub/conftst$i.h
done
echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
- # We check with `-c' and `-o' for the sake of the "dashmstdout"
+ # We check with '-c' and '-o' for the sake of the "dashmstdout"
# mode. It turns out that the SunPro C++ compiler does not properly
- # handle `-M -o', and we need to detect this. Also, some Intel
- # versions had trouble with output in subdirs
+ # handle '-M -o', and we need to detect this. Also, some Intel
+ # versions had trouble with output in subdirs.
am__obj=sub/conftest.${OBJEXT-o}
am__minus_obj="-o $am__obj"
case $depmode in
@@ -4244,16 +4360,16 @@ else
test "$am__universal" = false || continue
;;
nosideeffect)
- # after this tag, mechanisms are not by side-effect, so they'll
- # only be used when explicitly requested
+ # After this tag, mechanisms are not by side-effect, so they'll
+ # only be used when explicitly requested.
if test "x$enable_dependency_tracking" = xyes; then
continue
else
break
fi
;;
- msvisualcpp | msvcmsys)
- # This compiler won't grok `-c -o', but also, the minuso test has
+ msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+ # This compiler won't grok '-c -o', but also, the minuso test has
# not run yet. These depmodes are late enough in the game, and
# so weak that their functioning should not be impacted.
am__obj=conftest.${OBJEXT-o}
@@ -4316,7 +4432,7 @@ $as_echo_n "checking whether cc understands -c and -o together... " >&6; }
fi
set dummy $CC; ac_cc=`$as_echo "$2" |
sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'`
-if { as_var=ac_cv_prog_cc_${ac_cc}_c_o; eval "test \"\${$as_var+set}\" = set"; }; then :
+if eval \${ac_cv_prog_cc_${ac_cc}_c_o+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -4441,8 +4557,8 @@ esac
-macro_version='2.2.6'
-macro_revision='1.3012'
+macro_version='2.4.2'
+macro_revision='1.3337'
@@ -4458,9 +4574,78 @@ macro_revision='1.3012'
ltmain="$ac_aux_dir/ltmain.sh"
+# Backslashify metacharacters that are still active within
+# double-quoted strings.
+sed_quote_subst='s/\(["`$\\]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\(["`\\]\)/\\\1/g'
+
+# Sed substitution to delay expansion of an escaped shell variable in a
+# double_quote_subst'ed string.
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+
+# Sed substitution to delay expansion of an escaped single quote.
+delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g'
+
+# Sed substitution to avoid accidental globbing in evaled expressions
+no_glob_subst='s/\*/\\\*/g'
+
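The substitutions just defined backslashify the characters that stay special inside double quotes. A small illustration of sed_quote_subst in action, assuming a POSIX shell and sed; the sample string is made up:

  sed_quote_subst='s/\(["`$\\]\)/\\\1/g'
  s='say "hi" to $USER and `hostname`'
  printf '%s\n' "$s" | sed -e "$sed_quote_subst"
  # prints: say \"hi\" to \$USER and \`hostname\`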
+ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5
+$as_echo_n "checking how to print strings... " >&6; }
+# Test print first, because it will be a builtin if present.
+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \
+ test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then
+ ECHO='print -r --'
+elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then
+ ECHO='printf %s\n'
+else
+ # Use this function as a fallback that always works.
+ func_fallback_echo ()
+ {
+ eval 'cat <<_LTECHO_EOF
+$1
+_LTECHO_EOF'
+ }
+ ECHO='func_fallback_echo'
+fi
+
+# func_echo_all arg...
+# Invoke $ECHO with all args, space-separated.
+func_echo_all ()
+{
+ $ECHO ""
+}
+
+case "$ECHO" in
+ printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5
+$as_echo "printf" >&6; } ;;
+ print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5
+$as_echo "print -r" >&6; } ;;
+ *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5
+$as_echo "cat" >&6; } ;;
+esac
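The probe above prefers a print or printf builtin over echo, because echo may mangle the backslash-heavy strings libtool has to emit; the long $ECHO value built beforehand is the test payload. A rough sketch of the same check, assuming a POSIX shell and using a short stand-in string:

  t='\\\\ backslashes must survive \\\\'
  out=`printf %s "$t" 2>/dev/null`
  if test "X$out" = "X$t"; then
    echo "printf %s is safe here"
  else
    echo "fall back to the cat-based func_fallback_echo"
  fi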
+
+
+
+
+
+
+
+
+
+
+
+
+
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5
$as_echo_n "checking for a sed that does not truncate output... " >&6; }
-if test "${ac_cv_path_SED+set}" = set; then :
+if ${ac_cv_path_SED+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/
@@ -4480,7 +4665,7 @@ do
for ac_prog in sed gsed; do
for ac_exec_ext in '' $ac_executable_extensions; do
ac_path_SED="$as_dir/$ac_prog$ac_exec_ext"
- { test -f "$ac_path_SED" && $as_test_x "$ac_path_SED"; } || continue
+ as_fn_executable_p "$ac_path_SED" || continue
# Check for GNU ac_path_SED and select it if it is found.
# Check for GNU $ac_path_SED
case `"$ac_path_SED" --version 2>&1` in
@@ -4515,7 +4700,7 @@ esac
done
IFS=$as_save_IFS
if test -z "$ac_cv_path_SED"; then
- as_fn_error "no acceptable sed could be found in \$PATH" "$LINENO" 5
+ as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5
fi
else
ac_cv_path_SED=$SED
@@ -4542,7 +4727,7 @@ Xsed="$SED -e 1s/^X//"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
-if test "${ac_cv_path_GREP+set}" = set; then :
+if ${ac_cv_path_GREP+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -z "$GREP"; then
@@ -4556,7 +4741,7 @@ do
for ac_prog in grep ggrep; do
for ac_exec_ext in '' $ac_executable_extensions; do
ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
- { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue
+ as_fn_executable_p "$ac_path_GREP" || continue
# Check for GNU ac_path_GREP and select it if it is found.
# Check for GNU $ac_path_GREP
case `"$ac_path_GREP" --version 2>&1` in
@@ -4591,7 +4776,7 @@ esac
done
IFS=$as_save_IFS
if test -z "$ac_cv_path_GREP"; then
- as_fn_error "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
fi
else
ac_cv_path_GREP=$GREP
@@ -4605,7 +4790,7 @@ $as_echo "$ac_cv_path_GREP" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
$as_echo_n "checking for egrep... " >&6; }
-if test "${ac_cv_path_EGREP+set}" = set; then :
+if ${ac_cv_path_EGREP+:} false; then :
$as_echo_n "(cached) " >&6
else
if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
@@ -4622,7 +4807,7 @@ do
for ac_prog in egrep; do
for ac_exec_ext in '' $ac_executable_extensions; do
ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
- { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue
+ as_fn_executable_p "$ac_path_EGREP" || continue
# Check for GNU ac_path_EGREP and select it if it is found.
# Check for GNU $ac_path_EGREP
case `"$ac_path_EGREP" --version 2>&1` in
@@ -4657,7 +4842,7 @@ esac
done
IFS=$as_save_IFS
if test -z "$ac_cv_path_EGREP"; then
- as_fn_error "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
fi
else
ac_cv_path_EGREP=$EGREP
@@ -4672,7 +4857,7 @@ $as_echo "$ac_cv_path_EGREP" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5
$as_echo_n "checking for fgrep... " >&6; }
-if test "${ac_cv_path_FGREP+set}" = set; then :
+if ${ac_cv_path_FGREP+:} false; then :
$as_echo_n "(cached) " >&6
else
if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1
@@ -4689,7 +4874,7 @@ do
for ac_prog in fgrep; do
for ac_exec_ext in '' $ac_executable_extensions; do
ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext"
- { test -f "$ac_path_FGREP" && $as_test_x "$ac_path_FGREP"; } || continue
+ as_fn_executable_p "$ac_path_FGREP" || continue
# Check for GNU ac_path_FGREP and select it if it is found.
# Check for GNU $ac_path_FGREP
case `"$ac_path_FGREP" --version 2>&1` in
@@ -4724,7 +4909,7 @@ esac
done
IFS=$as_save_IFS
if test -z "$ac_cv_path_FGREP"; then
- as_fn_error "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+ as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
fi
else
ac_cv_path_FGREP=$FGREP
@@ -4803,7 +4988,7 @@ else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
$as_echo_n "checking for non-GNU ld... " >&6; }
fi
-if test "${lt_cv_path_LD+set}" = set; then :
+if ${lt_cv_path_LD+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -z "$LD"; then
@@ -4840,10 +5025,10 @@ else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi
-test -z "$LD" && as_fn_error "no acceptable ld found in \$PATH" "$LINENO" 5
+test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
-if test "${lt_cv_prog_gnu_ld+set}" = set; then :
+if ${lt_cv_prog_gnu_ld+:} false; then :
$as_echo_n "(cached) " >&6
else
# I'd rather use --version here, but apparently some GNU lds only accept -v.
@@ -4870,7 +5055,7 @@ with_gnu_ld=$lt_cv_prog_gnu_ld
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5
$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; }
-if test "${lt_cv_path_NM+set}" = set; then :
+if ${lt_cv_path_NM+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$NM"; then
@@ -4923,14 +5108,17 @@ if test "$lt_cv_path_NM" != "no"; then
NM="$lt_cv_path_NM"
else
# Didn't find any BSD compatible name lister, look for dumpbin.
- if test -n "$ac_tool_prefix"; then
- for ac_prog in "dumpbin -symbols" "link -dump -symbols"
+ if test -n "$DUMPBIN"; then :
+ # Let the user override the test.
+ else
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in dumpbin "link -dump"
do
# Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
set dummy $ac_tool_prefix$ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_DUMPBIN+set}" = set; then :
+if ${ac_cv_prog_DUMPBIN+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$DUMPBIN"; then
@@ -4942,7 +5130,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -4968,13 +5156,13 @@ fi
fi
if test -z "$DUMPBIN"; then
ac_ct_DUMPBIN=$DUMPBIN
- for ac_prog in "dumpbin -symbols" "link -dump -symbols"
+ for ac_prog in dumpbin "link -dump"
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_DUMPBIN+set}" = set; then :
+if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_DUMPBIN"; then
@@ -4986,7 +5174,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_DUMPBIN="$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -5023,6 +5211,15 @@ esac
fi
fi
+ case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in
+ *COFF*)
+ DUMPBIN="$DUMPBIN -symbols"
+ ;;
+ *)
+ DUMPBIN=:
+ ;;
+ esac
+ fi
if test "$DUMPBIN" != ":"; then
NM="$DUMPBIN"
@@ -5037,18 +5234,18 @@ test -z "$NM" && NM=nm
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5
$as_echo_n "checking the name lister ($NM) interface... " >&6; }
-if test "${lt_cv_nm_interface+set}" = set; then :
+if ${lt_cv_nm_interface+:} false; then :
$as_echo_n "(cached) " >&6
else
lt_cv_nm_interface="BSD nm"
echo "int some_variable = 0;" > conftest.$ac_ext
- (eval echo "\"\$as_me:5045: $ac_compile\"" >&5)
+ (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5)
(eval "$ac_compile" 2>conftest.err)
cat conftest.err >&5
- (eval echo "\"\$as_me:5048: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
+ (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
(eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
cat conftest.err >&5
- (eval echo "\"\$as_me:5051: output\"" >&5)
+ (eval echo "\"\$as_me:$LINENO: output\"" >&5)
cat conftest.out >&5
if $GREP 'External.*some_variable' conftest.out > /dev/null; then
lt_cv_nm_interface="MS dumpbin"
@@ -5072,7 +5269,7 @@ fi
# find the maximum length of command line arguments
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5
$as_echo_n "checking the maximum length of command line arguments... " >&6; }
-if test "${lt_cv_sys_max_cmd_len+set}" = set; then :
+if ${lt_cv_sys_max_cmd_len+:} false; then :
$as_echo_n "(cached) " >&6
else
i=0
@@ -5105,6 +5302,11 @@ else
lt_cv_sys_max_cmd_len=8192;
;;
+ mint*)
+ # On MiNT this can take a long time and run out of memory.
+ lt_cv_sys_max_cmd_len=8192;
+ ;;
+
amigaos*)
# On AmigaOS with pdksh, this test takes hours, literally.
# So we just punt and use a minimum line length of 8192.
@@ -5130,6 +5332,11 @@ else
lt_cv_sys_max_cmd_len=196608
;;
+ os2*)
+ # The test takes a long time on OS/2.
+ lt_cv_sys_max_cmd_len=8192
+ ;;
+
osf*)
# Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
# due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
@@ -5156,7 +5363,8 @@ else
;;
*)
lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
- if test -n "$lt_cv_sys_max_cmd_len"; then
+ if test -n "$lt_cv_sys_max_cmd_len" && \
+ test undefined != "$lt_cv_sys_max_cmd_len"; then
lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
else
@@ -5169,8 +5377,8 @@ else
# If test is not a shell built-in, we'll probably end up computing a
# maximum length that is only half of the actual maximum length, but
# we can't tell.
- while { test "X"`$SHELL $0 --fallback-echo "X$teststring$teststring" 2>/dev/null` \
- = "XX$teststring$teststring"; } >/dev/null 2>&1 &&
+ while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \
+ = "X$teststring$teststring"; } >/dev/null 2>&1 &&
test $i != 17 # 1/2 MB should be enough
do
i=`expr $i + 1`
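In the default '*)' branch above, the limit comes from getconf ARG_MAX trimmed to three quarters as a safety margin, and the new 'test undefined !=' guard skips systems where getconf prints the literal word "undefined"; the while loop only runs as an empirical fallback when getconf gives nothing usable. A hedged sketch of the getconf computation, assuming a POSIX system:

  len=`(getconf ARG_MAX) 2>/dev/null`
  if test -n "$len" && test undefined != "$len"; then
    len=`expr $len / 4`      # keep a 25% safety margin,
    len=`expr $len \* 3`     # exactly as the configure test does
    echo "usable command line length: $len"
  fi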
@@ -5212,8 +5420,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... " >&6;
# Try some XSI features
xsi_shell=no
( _lt_dummy="a/b/c"
- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \
- = c,a/b,, \
+ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \
+ = c,a/b,b/c, \
&& eval 'test $(( 1 + 1 )) -eq 2 \
&& test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \
&& xsi_shell=yes
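The parenthesised test above is what decides xsi_shell: it exercises suffix/prefix stripping, $((...)) arithmetic and ${#var}. A worked example of the parameter expansions involved, runnable in any XSI-capable shell:

  _lt_dummy="a/b/c"
  echo "${_lt_dummy##*/}"   # longest '*/' prefix removed  -> c
  echo "${_lt_dummy%/*}"    # shortest '/*' suffix removed -> a/b
  echo "${_lt_dummy#??}"    # first two characters removed -> b/c
  echo "${#_lt_dummy}"      # string length                -> 5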
@@ -5262,9 +5470,83 @@ esac
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5
+$as_echo_n "checking how to convert $build file names to $host format... " >&6; }
+if ${lt_cv_to_host_file_cmd+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $host in
+ *-*-mingw* )
+ case $build in
+ *-*-mingw* ) # actually msys
+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32
+ ;;
+ *-*-cygwin* )
+ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32
+ ;;
+ * ) # otherwise, assume *nix
+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32
+ ;;
+ esac
+ ;;
+ *-*-cygwin* )
+ case $build in
+ *-*-mingw* ) # actually msys
+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin
+ ;;
+ *-*-cygwin* )
+ lt_cv_to_host_file_cmd=func_convert_file_noop
+ ;;
+ * ) # otherwise, assume *nix
+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin
+ ;;
+ esac
+ ;;
+ * ) # unhandled hosts (and "normal" native builds)
+ lt_cv_to_host_file_cmd=func_convert_file_noop
+ ;;
+esac
+
+fi
+
+to_host_file_cmd=$lt_cv_to_host_file_cmd
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5
+$as_echo "$lt_cv_to_host_file_cmd" >&6; }
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5
+$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; }
+if ${lt_cv_to_tool_file_cmd+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ #assume ordinary cross tools, or native build.
+lt_cv_to_tool_file_cmd=func_convert_file_noop
+case $host in
+ *-*-mingw* )
+ case $build in
+ *-*-mingw* ) # actually msys
+ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32
+ ;;
+ esac
+ ;;
+esac
+
+fi
+
+to_tool_file_cmd=$lt_cv_to_tool_file_cmd
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5
+$as_echo "$lt_cv_to_tool_file_cmd" >&6; }
+
+
+
+
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5
$as_echo_n "checking for $LD option to reload object files... " >&6; }
-if test "${lt_cv_ld_reload_flag+set}" = set; then :
+if ${lt_cv_ld_reload_flag+:} false; then :
$as_echo_n "(cached) " >&6
else
lt_cv_ld_reload_flag='-r'
@@ -5278,6 +5560,11 @@ case $reload_flag in
esac
reload_cmds='$LD$reload_flag -o $output$reload_objs'
case $host_os in
+ cygwin* | mingw* | pw32* | cegcc*)
+ if test "$GCC" != yes; then
+ reload_cmds=false
+ fi
+ ;;
darwin*)
if test "$GCC" = yes; then
reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs'
@@ -5300,7 +5587,7 @@ if test -n "$ac_tool_prefix"; then
set dummy ${ac_tool_prefix}objdump; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_OBJDUMP+set}" = set; then :
+if ${ac_cv_prog_OBJDUMP+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$OBJDUMP"; then
@@ -5312,7 +5599,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -5340,7 +5627,7 @@ if test -z "$ac_cv_prog_OBJDUMP"; then
set dummy objdump; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_OBJDUMP+set}" = set; then :
+if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_OBJDUMP"; then
@@ -5352,7 +5639,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_OBJDUMP="objdump"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -5399,7 +5686,7 @@ test -z "$OBJDUMP" && OBJDUMP=objdump
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5
$as_echo_n "checking how to recognize dependent libraries... " >&6; }
-if test "${lt_cv_deplibs_check_method+set}" = set; then :
+if ${lt_cv_deplibs_check_method+:} false; then :
$as_echo_n "(cached) " >&6
else
lt_cv_file_magic_cmd='$MAGIC_CMD'
@@ -5441,16 +5728,18 @@ mingw* | pw32*)
# Base MSYS/MinGW do not provide the 'file' command needed by
# func_win32_libid shell function, so use a weaker test based on 'objdump',
# unless we find 'file', for example because we are cross-compiling.
- if ( file / ) >/dev/null 2>&1; then
+ # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin.
+ if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then
lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
lt_cv_file_magic_cmd='func_win32_libid'
else
- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?'
+ # Keep this pattern in sync with the one in func_win32_libid.
+ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)'
lt_cv_file_magic_cmd='$OBJDUMP -f'
fi
;;
-cegcc)
+cegcc*)
# use the weaker test based on 'objdump'. See mingw*.
lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?'
lt_cv_file_magic_cmd='$OBJDUMP -f'
@@ -5480,6 +5769,10 @@ gnu*)
lt_cv_deplibs_check_method=pass_all
;;
+haiku*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
hpux10.20* | hpux11*)
lt_cv_file_magic_cmd=/usr/bin/file
case $host_cpu in
@@ -5488,11 +5781,11 @@ hpux10.20* | hpux11*)
lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so
;;
hppa*64*)
- lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]'
+ lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'
lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl
;;
*)
- lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9].[0-9]) shared library'
+ lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library'
lt_cv_file_magic_test_file=/usr/lib/libc.sl
;;
esac
@@ -5513,8 +5806,8 @@ irix5* | irix6* | nonstopux*)
lt_cv_deplibs_check_method=pass_all
;;
-# This must be Linux ELF.
-linux* | k*bsd*-gnu)
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu)
lt_cv_deplibs_check_method=pass_all
;;
@@ -5595,6 +5888,21 @@ esac
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5
$as_echo "$lt_cv_deplibs_check_method" >&6; }
+
+file_magic_glob=
+want_nocaseglob=no
+if test "$build" = "$host"; then
+ case $host_os in
+ mingw* | pw32*)
+ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then
+ want_nocaseglob=yes
+ else
+ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"`
+ fi
+ ;;
+ esac
+fi
+
file_magic_cmd=$lt_cv_file_magic_cmd
deplibs_check_method=$lt_cv_deplibs_check_method
test -z "$deplibs_check_method" && deplibs_check_method=unknown
@@ -5610,12 +5918,166 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown
+
+
+
+
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args.
+set dummy ${ac_tool_prefix}dlltool; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_DLLTOOL+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$DLLTOOL"; then
+ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+DLLTOOL=$ac_cv_prog_DLLTOOL
+if test -n "$DLLTOOL"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5
+$as_echo "$DLLTOOL" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_DLLTOOL"; then
+ ac_ct_DLLTOOL=$DLLTOOL
+ # Extract the first word of "dlltool", so it can be a program name with args.
+set dummy dlltool; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_DLLTOOL"; then
+ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_DLLTOOL="dlltool"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL
+if test -n "$ac_ct_DLLTOOL"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5
+$as_echo "$ac_ct_DLLTOOL" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_DLLTOOL" = x; then
+ DLLTOOL="false"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ DLLTOOL=$ac_ct_DLLTOOL
+ fi
+else
+ DLLTOOL="$ac_cv_prog_DLLTOOL"
+fi
+
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5
+$as_echo_n "checking how to associate runtime and link libraries... " >&6; }
+if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_sharedlib_from_linklib_cmd='unknown'
+
+case $host_os in
+cygwin* | mingw* | pw32* | cegcc*)
+ # two different shell functions defined in ltmain.sh
+ # decide which to use based on capabilities of $DLLTOOL
+ case `$DLLTOOL --help 2>&1` in
+ *--identify-strict*)
+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib
+ ;;
+ *)
+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback
+ ;;
+ esac
+ ;;
+*)
+ # fallback: assume linklib IS sharedlib
+ lt_cv_sharedlib_from_linklib_cmd="$ECHO"
+ ;;
+esac
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5
+$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; }
+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd
+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO
+
+
+
+
+
+
+
+
if test -n "$ac_tool_prefix"; then
- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args.
-set dummy ${ac_tool_prefix}ar; ac_word=$2
+ for ac_prog in ar
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_AR+set}" = set; then :
+if ${ac_cv_prog_AR+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$AR"; then
@@ -5627,8 +6089,8 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
- ac_cv_prog_AR="${ac_tool_prefix}ar"
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_AR="$ac_tool_prefix$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
@@ -5648,14 +6110,18 @@ $as_echo "no" >&6; }
fi
+ test -n "$AR" && break
+ done
fi
-if test -z "$ac_cv_prog_AR"; then
+if test -z "$AR"; then
ac_ct_AR=$AR
- # Extract the first word of "ar", so it can be a program name with args.
-set dummy ar; ac_word=$2
+ for ac_prog in ar
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_AR+set}" = set; then :
+if ${ac_cv_prog_ac_ct_AR+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_AR"; then
@@ -5667,8 +6133,8 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
- ac_cv_prog_ac_ct_AR="ar"
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_AR="$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
@@ -5687,6 +6153,10 @@ else
$as_echo "no" >&6; }
fi
+
+ test -n "$ac_ct_AR" && break
+done
+
if test "x$ac_ct_AR" = x; then
AR="false"
else
@@ -5698,12 +6168,13 @@ ac_tool_warned=yes ;;
esac
AR=$ac_ct_AR
fi
-else
- AR="$ac_cv_prog_AR"
fi
-test -z "$AR" && AR=ar
-test -z "$AR_FLAGS" && AR_FLAGS=cru
+: ${AR=ar}
+: ${AR_FLAGS=cru}
+
+
+
@@ -5712,6 +6183,61 @@ test -z "$AR_FLAGS" && AR_FLAGS=cru
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5
+$as_echo_n "checking for archiver @FILE support... " >&6; }
+if ${lt_cv_ar_at_file+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_ar_at_file=no
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ echo conftest.$ac_objext > conftest.lst
+ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5'
+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5
+ (eval $lt_ar_try) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ if test "$ac_status" -eq 0; then
+ # Ensure the archiver fails upon bogus file names.
+ rm -f conftest.$ac_objext libconftest.a
+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5
+ (eval $lt_ar_try) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ if test "$ac_status" -ne 0; then
+ lt_cv_ar_at_file=@
+ fi
+ fi
+ rm -f conftest.* libconftest.a
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5
+$as_echo "$lt_cv_ar_at_file" >&6; }
+
+if test "x$lt_cv_ar_at_file" = xno; then
+ archiver_list_spec=
+else
+ archiver_list_spec=$lt_cv_ar_at_file
+fi
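The @FILE test above is deliberately two-sided: the first $AR run must succeed with a valid list file, and a second run after deleting the listed object must fail, which proves the '@' argument was read as a response file rather than taken literally. A reduced sketch of the same idea, assuming ar is available and conftest.o has already been compiled:

  echo conftest.o > conftest.lst
  if ar cru libconftest.a @conftest.lst 2>/dev/null; then
    rm -f conftest.o libconftest.a
    # the rerun must now fail, since the listed object is gone
    ar cru libconftest.a @conftest.lst 2>/dev/null || echo "archiver supports @FILE"
  fi
  rm -f conftest.lst libconftest.a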
+
+
+
+
@@ -5720,7 +6246,7 @@ if test -n "$ac_tool_prefix"; then
set dummy ${ac_tool_prefix}strip; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_STRIP+set}" = set; then :
+if ${ac_cv_prog_STRIP+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$STRIP"; then
@@ -5732,7 +6258,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_STRIP="${ac_tool_prefix}strip"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -5760,7 +6286,7 @@ if test -z "$ac_cv_prog_STRIP"; then
set dummy strip; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then :
+if ${ac_cv_prog_ac_ct_STRIP+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_STRIP"; then
@@ -5772,7 +6298,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_STRIP="strip"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -5819,7 +6345,7 @@ if test -n "$ac_tool_prefix"; then
set dummy ${ac_tool_prefix}ranlib; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_RANLIB+set}" = set; then :
+if ${ac_cv_prog_RANLIB+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$RANLIB"; then
@@ -5831,7 +6357,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -5859,7 +6385,7 @@ if test -z "$ac_cv_prog_RANLIB"; then
set dummy ranlib; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then :
+if ${ac_cv_prog_ac_ct_RANLIB+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_RANLIB"; then
@@ -5871,7 +6397,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_RANLIB="ranlib"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -5921,15 +6447,27 @@ old_postuninstall_cmds=
if test -n "$RANLIB"; then
case $host_os in
openbsd*)
- old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib"
+ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib"
;;
*)
- old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib"
+ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib"
;;
esac
- old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib"
+ old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib"
fi
+case $host_os in
+ darwin*)
+ lock_old_archive_extraction=yes ;;
+ *)
+ lock_old_archive_extraction=no ;;
+esac
+
+
+
+
+
+
@@ -5976,7 +6514,7 @@ compiler=$CC
# Check for command to grab the raw symbol name followed by C symbol from nm.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5
$as_echo_n "checking command to parse $NM output from $compiler object... " >&6; }
-if test "${lt_cv_sys_global_symbol_pipe+set}" = set; then :
+if ${lt_cv_sys_global_symbol_pipe+:} false; then :
$as_echo_n "(cached) " >&6
else
@@ -6037,8 +6575,8 @@ esac
lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
# Transform an extracted symbol line into symbol name and symbol address
-lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'"
-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'"
+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'"
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'"
# Handle CRLF in mingw tool chain
opt_cr=
@@ -6062,6 +6600,7 @@ for ac_symprfx in "" "_"; do
# which start with @ or ?.
lt_cv_sys_global_symbol_pipe="$AWK '"\
" {last_section=section; section=\$ 3};"\
+" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\
" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\
" \$ 0!~/External *\|/{next};"\
" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\
@@ -6074,6 +6613,7 @@ for ac_symprfx in "" "_"; do
else
lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
fi
+ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'"
# Check to see that the pipe works correctly.
pipe_works=no
@@ -6099,8 +6639,8 @@ _LT_EOF
test $ac_status = 0; }; then
# Now try to grab the symbols.
nlist=conftest.nm
- if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist\""; } >&5
- (eval $NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) 2>&5
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5
+ (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; } && test -s "$nlist"; then
@@ -6115,6 +6655,18 @@ _LT_EOF
if $GREP ' nm_test_var$' "$nlist" >/dev/null; then
if $GREP ' nm_test_func$' "$nlist" >/dev/null; then
cat <<_LT_EOF > conftest.$ac_ext
+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */
+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE)
+/* DATA imports from DLLs on WIN32 can't be const, because runtime
+ relocations are performed -- see ld's documentation on pseudo-relocs. */
+# define LT_DLSYM_CONST
+#elif defined(__osf__)
+/* This system does not cope well with relocations in const data. */
+# define LT_DLSYM_CONST
+#else
+# define LT_DLSYM_CONST const
+#endif
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -6126,7 +6678,7 @@ _LT_EOF
cat <<_LT_EOF >> conftest.$ac_ext
/* The mapping between symbol names and symbols. */
-const struct {
+LT_DLSYM_CONST struct {
const char *name;
void *address;
}
@@ -6152,8 +6704,8 @@ static const void *lt_preloaded_setup() {
_LT_EOF
# Now try linking the two files.
mv conftest.$ac_objext conftstm.$ac_objext
- lt_save_LIBS="$LIBS"
- lt_save_CFLAGS="$CFLAGS"
+ lt_globsym_save_LIBS=$LIBS
+ lt_globsym_save_CFLAGS=$CFLAGS
LIBS="conftstm.$ac_objext"
CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag"
if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
@@ -6163,8 +6715,8 @@ _LT_EOF
test $ac_status = 0; } && test -s conftest${ac_exeext}; then
pipe_works=yes
fi
- LIBS="$lt_save_LIBS"
- CFLAGS="$lt_save_CFLAGS"
+ LIBS=$lt_globsym_save_LIBS
+ CFLAGS=$lt_globsym_save_CFLAGS
else
echo "cannot find nm_test_func in $nlist" >&5
fi
@@ -6201,6 +6753,14 @@ else
$as_echo "ok" >&6; }
fi
+# Response file support.
+if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+ nm_file_list_spec='@'
+elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then
+ nm_file_list_spec='@'
+fi
+
+
@@ -6223,6 +6783,46 @@ fi
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5
+$as_echo_n "checking for sysroot... " >&6; }
+
+# Check whether --with-sysroot was given.
+if test "${with_sysroot+set}" = set; then :
+ withval=$with_sysroot;
+else
+ with_sysroot=no
+fi
+
+
+lt_sysroot=
+case ${with_sysroot} in #(
+ yes)
+ if test "$GCC" = yes; then
+ lt_sysroot=`$CC --print-sysroot 2>/dev/null`
+ fi
+ ;; #(
+ /*)
+ lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"`
+ ;; #(
+ no|'')
+ ;; #(
+ *)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_sysroot}" >&5
+$as_echo "${with_sysroot}" >&6; }
+ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5
+ ;;
+esac
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5
+$as_echo "${lt_sysroot:-no}" >&6; }
+
+
+
+
+
# Check whether --enable-libtool-lock was given.
if test "${enable_libtool_lock+set}" = set; then :
enableval=$enable_libtool_lock;
@@ -6254,7 +6854,7 @@ ia64-*-hpux*)
;;
*-*-irix6*)
# Find out which ABI we are using.
- echo '#line 6257 "configure"' > conftest.$ac_ext
+ echo '#line '$LINENO' "configure"' > conftest.$ac_ext
if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
(eval $ac_compile) 2>&5
ac_status=$?
@@ -6305,7 +6905,14 @@ s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
LD="${LD-ld} -m elf_i386_fbsd"
;;
x86_64-*linux*)
- LD="${LD-ld} -m elf_i386"
+ case `/usr/bin/file conftest.o` in
+ *x86-64*)
+ LD="${LD-ld} -m elf32_x86_64"
+ ;;
+ *)
+ LD="${LD-ld} -m elf_i386"
+ ;;
+ esac
;;
ppc64-*linux*|powerpc64-*linux*)
LD="${LD-ld} -m elf32ppclinux"
@@ -6348,7 +6955,7 @@ s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
CFLAGS="$CFLAGS -belf"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5
$as_echo_n "checking whether the C compiler needs -belf... " >&6; }
-if test "${lt_cv_cc_needs_belf+set}" = set; then :
+if ${lt_cv_cc_needs_belf+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_ext=c
@@ -6389,7 +6996,7 @@ $as_echo "$lt_cv_cc_needs_belf" >&6; }
CFLAGS="$SAVE_CFLAGS"
fi
;;
-sparc*-*solaris*)
+*-*solaris*)
# Find out which ABI we are using.
echo 'int i;' > conftest.$ac_ext
if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
@@ -6400,7 +7007,20 @@ sparc*-*solaris*)
case `/usr/bin/file conftest.o` in
*64-bit*)
case $lt_cv_prog_gnu_ld in
- yes*) LD="${LD-ld} -m elf64_sparc" ;;
+ yes*)
+ case $host in
+ i?86-*-solaris*)
+ LD="${LD-ld} -m elf_x86_64"
+ ;;
+ sparc*-*-solaris*)
+ LD="${LD-ld} -m elf64_sparc"
+ ;;
+ esac
+ # GNU ld 2.21 introduced _sol2 emulations. Use them if available.
+ if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then
+ LD="${LD-ld}_sol2"
+ fi
+ ;;
*)
if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then
LD="${LD-ld} -64"
@@ -6416,6 +7036,123 @@ esac
need_locks="$enable_libtool_lock"
+if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args.
+set dummy ${ac_tool_prefix}mt; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_MANIFEST_TOOL+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$MANIFEST_TOOL"; then
+ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL
+if test -n "$MANIFEST_TOOL"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5
+$as_echo "$MANIFEST_TOOL" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_MANIFEST_TOOL"; then
+ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL
+ # Extract the first word of "mt", so it can be a program name with args.
+set dummy mt; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_MANIFEST_TOOL"; then
+ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL
+if test -n "$ac_ct_MANIFEST_TOOL"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5
+$as_echo "$ac_ct_MANIFEST_TOOL" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_ct_MANIFEST_TOOL" = x; then
+ MANIFEST_TOOL=":"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL
+ fi
+else
+ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL"
+fi
+
+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5
+$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; }
+if ${lt_cv_path_mainfest_tool+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_path_mainfest_tool=no
+ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5
+ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out
+ cat conftest.err >&5
+ if $GREP 'Manifest Tool' conftest.out > /dev/null; then
+ lt_cv_path_mainfest_tool=yes
+ fi
+ rm -f conftest*
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5
+$as_echo "$lt_cv_path_mainfest_tool" >&6; }
+if test "x$lt_cv_path_mainfest_tool" != xyes; then
+ MANIFEST_TOOL=:
+fi
+
+
+
+
+
case $host_os in
rhapsody* | darwin*)
@@ -6424,7 +7161,7 @@ need_locks="$enable_libtool_lock"
set dummy ${ac_tool_prefix}dsymutil; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_DSYMUTIL+set}" = set; then :
+if ${ac_cv_prog_DSYMUTIL+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$DSYMUTIL"; then
@@ -6436,7 +7173,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -6464,7 +7201,7 @@ if test -z "$ac_cv_prog_DSYMUTIL"; then
set dummy dsymutil; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_DSYMUTIL+set}" = set; then :
+if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_DSYMUTIL"; then
@@ -6476,7 +7213,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_DSYMUTIL="dsymutil"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -6516,7 +7253,7 @@ fi
set dummy ${ac_tool_prefix}nmedit; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_NMEDIT+set}" = set; then :
+if ${ac_cv_prog_NMEDIT+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$NMEDIT"; then
@@ -6528,7 +7265,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -6556,7 +7293,7 @@ if test -z "$ac_cv_prog_NMEDIT"; then
set dummy nmedit; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_NMEDIT+set}" = set; then :
+if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_NMEDIT"; then
@@ -6568,7 +7305,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_NMEDIT="nmedit"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -6608,7 +7345,7 @@ fi
set dummy ${ac_tool_prefix}lipo; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_LIPO+set}" = set; then :
+if ${ac_cv_prog_LIPO+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$LIPO"; then
@@ -6620,7 +7357,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_LIPO="${ac_tool_prefix}lipo"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -6648,7 +7385,7 @@ if test -z "$ac_cv_prog_LIPO"; then
set dummy lipo; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_LIPO+set}" = set; then :
+if ${ac_cv_prog_ac_ct_LIPO+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_LIPO"; then
@@ -6660,7 +7397,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_LIPO="lipo"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -6700,7 +7437,7 @@ fi
set dummy ${ac_tool_prefix}otool; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_OTOOL+set}" = set; then :
+if ${ac_cv_prog_OTOOL+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$OTOOL"; then
@@ -6712,7 +7449,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_OTOOL="${ac_tool_prefix}otool"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -6740,7 +7477,7 @@ if test -z "$ac_cv_prog_OTOOL"; then
set dummy otool; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_OTOOL+set}" = set; then :
+if ${ac_cv_prog_ac_ct_OTOOL+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_OTOOL"; then
@@ -6752,7 +7489,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_OTOOL="otool"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -6792,7 +7529,7 @@ fi
set dummy ${ac_tool_prefix}otool64; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_OTOOL64+set}" = set; then :
+if ${ac_cv_prog_OTOOL64+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$OTOOL64"; then
@@ -6804,7 +7541,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -6832,7 +7569,7 @@ if test -z "$ac_cv_prog_OTOOL64"; then
set dummy otool64; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_OTOOL64+set}" = set; then :
+if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_OTOOL64"; then
@@ -6844,7 +7581,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_OTOOL64="otool64"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -6907,7 +7644,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5
$as_echo_n "checking for -single_module linker flag... " >&6; }
-if test "${lt_cv_apple_cc_single_mod+set}" = set; then :
+if ${lt_cv_apple_cc_single_mod+:} false; then :
$as_echo_n "(cached) " >&6
else
lt_cv_apple_cc_single_mod=no
@@ -6923,7 +7660,13 @@ else
$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
-dynamiclib -Wl,-single_module conftest.c 2>conftest.err
_lt_result=$?
- if test -f libconftest.dylib && test ! -s conftest.err && test $_lt_result = 0; then
+ # If there is a non-empty error log, and "single_module"
+ # appears in it, assume the flag caused a linker warning
+ if test -s conftest.err && $GREP single_module conftest.err; then
+ cat conftest.err >&5
+ # Otherwise, if the output was created with a 0 exit code from
+ # the compiler, it worked.
+ elif test -f libconftest.dylib && test $_lt_result -eq 0; then
lt_cv_apple_cc_single_mod=yes
else
cat conftest.err >&5
@@ -6934,9 +7677,10 @@ else
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5
$as_echo "$lt_cv_apple_cc_single_mod" >&6; }
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5
$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; }
-if test "${lt_cv_ld_exported_symbols_list+set}" = set; then :
+if ${lt_cv_ld_exported_symbols_list+:} false; then :
$as_echo_n "(cached) " >&6
else
lt_cv_ld_exported_symbols_list=no
@@ -6966,6 +7710,41 @@ rm -f core conftest.err conftest.$ac_objext \
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5
$as_echo "$lt_cv_ld_exported_symbols_list" >&6; }
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5
+$as_echo_n "checking for -force_load linker flag... " >&6; }
+if ${lt_cv_ld_force_load+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_ld_force_load=no
+ cat > conftest.c << _LT_EOF
+int forced_loaded() { return 2;}
+_LT_EOF
+ echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5
+ $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5
+ echo "$AR cru libconftest.a conftest.o" >&5
+ $AR cru libconftest.a conftest.o 2>&5
+ echo "$RANLIB libconftest.a" >&5
+ $RANLIB libconftest.a 2>&5
+ cat > conftest.c << _LT_EOF
+int main() { return 0;}
+_LT_EOF
+ echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5
+ $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err
+ _lt_result=$?
+ if test -s conftest.err && $GREP force_load conftest.err; then
+ cat conftest.err >&5
+ elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then
+ lt_cv_ld_force_load=yes
+ else
+ cat conftest.err >&5
+ fi
+ rm -f conftest.err libconftest.a conftest conftest.c
+ rm -rf conftest.dSYM
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5
+$as_echo "$lt_cv_ld_force_load" >&6; }
case $host_os in
rhapsody* | darwin1.[012])
_lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;;
@@ -6993,7 +7772,7 @@ $as_echo "$lt_cv_ld_exported_symbols_list" >&6; }
else
_lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}'
fi
- if test "$DSYMUTIL" != ":"; then
+ if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then
_lt_dsymutil='~$DSYMUTIL $lib || :'
else
_lt_dsymutil=
@@ -7013,7 +7792,7 @@ if test -n "$CPP" && test -d "$CPP"; then
CPP=
fi
if test -z "$CPP"; then
- if test "${ac_cv_prog_CPP+set}" = set; then :
+ if ${ac_cv_prog_CPP+:} false; then :
$as_echo_n "(cached) " >&6
else
# Double quotes because CPP needs to be expanded
@@ -7043,7 +7822,7 @@ else
# Broken: fails on valid input.
continue
fi
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.err conftest.i conftest.$ac_ext
# OK, works on sane cases. Now check whether nonexistent headers
# can be detected and how.
@@ -7059,11 +7838,11 @@ else
ac_preproc_ok=:
break
fi
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.err conftest.i conftest.$ac_ext
done
# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.i conftest.err conftest.$ac_ext
if $ac_preproc_ok; then :
break
fi
@@ -7102,7 +7881,7 @@ else
# Broken: fails on valid input.
continue
fi
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.err conftest.i conftest.$ac_ext
# OK, works on sane cases. Now check whether nonexistent headers
# can be detected and how.
@@ -7118,18 +7897,18 @@ else
ac_preproc_ok=:
break
fi
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.err conftest.i conftest.$ac_ext
done
# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.err conftest.$ac_ext
+rm -f conftest.i conftest.err conftest.$ac_ext
if $ac_preproc_ok; then :
else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "C preprocessor \"$CPP\" fails sanity check
-See \`config.log' for more details." "$LINENO" 5; }
+as_fn_error $? "C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details" "$LINENO" 5; }
fi
ac_ext=c
@@ -7141,7 +7920,7 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
$as_echo_n "checking for ANSI C header files... " >&6; }
-if test "${ac_cv_header_stdc+set}" = set; then :
+if ${ac_cv_header_stdc+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -7258,8 +8037,7 @@ do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
"
-eval as_val=\$$as_ac_Header
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
_ACEOF
@@ -7273,7 +8051,7 @@ for ac_header in dlfcn.h
do :
ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default
"
-if test "x$ac_cv_header_dlfcn_h" = x""yes; then :
+if test "x$ac_cv_header_dlfcn_h" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_DLFCN_H 1
_ACEOF
@@ -7284,6 +8062,8 @@ done
+
+
# Set options
@@ -7359,7 +8139,22 @@ fi
# Check whether --with-pic was given.
if test "${with_pic+set}" = set; then :
- withval=$with_pic; pic_mode="$withval"
+ withval=$with_pic; lt_p=${PACKAGE-default}
+ case $withval in
+ yes|no) pic_mode=$withval ;;
+ *)
+ pic_mode=default
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for lt_pkg in $withval; do
+ IFS="$lt_save_ifs"
+ if test "X$lt_pkg" = "X$lt_p"; then
+ pic_mode=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac
else
pic_mode=default
fi
@@ -7436,6 +8231,11 @@ LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+
+
+
+
+
test -z "$LN_S" && LN_S="ln -s"
@@ -7457,7 +8257,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5
$as_echo_n "checking for objdir... " >&6; }
-if test "${lt_cv_objdir+set}" = set; then :
+if ${lt_cv_objdir+:} false; then :
$as_echo_n "(cached) " >&6
else
rm -f .libs 2>/dev/null
@@ -7485,19 +8285,6 @@ _ACEOF
-
-
-
-
-
-
-
-
-
-
-
-
-
case $host_os in
aix3*)
# AIX sometimes has problems with the GCC collect2 program. For some
@@ -7510,23 +8297,6 @@ aix3*)
;;
esac
-# Sed substitution that helps us do robust quoting. It backslashifies
-# metacharacters that are still active within double-quoted strings.
-sed_quote_subst='s/\(["`$\\]\)/\\\1/g'
-
-# Same as above, but do not quote variable references.
-double_quote_subst='s/\(["`\\]\)/\\\1/g'
-
-# Sed substitution to delay expansion of an escaped shell variable in a
-# double_quote_subst'ed string.
-delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
-
-# Sed substitution to delay expansion of an escaped single quote.
-delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g'
-
-# Sed substitution to avoid accidental globbing in evaled expressions
-no_glob_subst='s/\*/\\\*/g'
-
# Global variables:
ofile=libtool
can_build_shared=yes
@@ -7555,7 +8325,7 @@ for cc_temp in $compiler""; do
*) break;;
esac
done
-cc_basename=`$ECHO "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"`
+cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
# Only perform the check for file, if the check method requires it
@@ -7565,7 +8335,7 @@ file_magic*)
if test "$file_magic_cmd" = '$MAGIC_CMD'; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5
$as_echo_n "checking for ${ac_tool_prefix}file... " >&6; }
-if test "${lt_cv_path_MAGIC_CMD+set}" = set; then :
+if ${lt_cv_path_MAGIC_CMD+:} false; then :
$as_echo_n "(cached) " >&6
else
case $MAGIC_CMD in
@@ -7631,7 +8401,7 @@ if test -z "$lt_cv_path_MAGIC_CMD"; then
if test -n "$ac_tool_prefix"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5
$as_echo_n "checking for file... " >&6; }
-if test "${lt_cv_path_MAGIC_CMD+set}" = set; then :
+if ${lt_cv_path_MAGIC_CMD+:} false; then :
$as_echo_n "(cached) " >&6
else
case $MAGIC_CMD in
@@ -7764,11 +8534,16 @@ if test -n "$compiler"; then
lt_prog_compiler_no_builtin_flag=
if test "$GCC" = yes; then
- lt_prog_compiler_no_builtin_flag=' -fno-builtin'
+ case $cc_basename in
+ nvcc*)
+ lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;;
+ *)
+ lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;;
+ esac
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5
$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; }
-if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then :
+if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then :
$as_echo_n "(cached) " >&6
else
lt_cv_prog_compiler_rtti_exceptions=no
@@ -7784,15 +8559,15 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:7787: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
- echo "$as_me:7791: \$? = $ac_status" >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
- $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
$SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
lt_cv_prog_compiler_rtti_exceptions=yes
@@ -7821,8 +8596,6 @@ fi
lt_prog_compiler_pic=
lt_prog_compiler_static=
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5
-$as_echo_n "checking for $compiler option to produce PIC... " >&6; }
if test "$GCC" = yes; then
lt_prog_compiler_wl='-Wl,'
@@ -7870,6 +8643,12 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; }
lt_prog_compiler_pic='-fno-common'
;;
+ haiku*)
+ # PIC is the default for Haiku.
+ # The "-static" flag exists, but is broken.
+ lt_prog_compiler_static=
+ ;;
+
hpux*)
# PIC is the default for 64-bit PA HP-UX, but not for 32-bit
# PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag
@@ -7912,6 +8691,15 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; }
lt_prog_compiler_pic='-fPIC'
;;
esac
+
+ case $cc_basename in
+ nvcc*) # Cuda Compiler Driver 2.2
+ lt_prog_compiler_wl='-Xlinker '
+ if test -n "$lt_prog_compiler_pic"; then
+ lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic"
+ fi
+ ;;
+ esac
else
# PORTME Check for flag to pass linker flags through the system compiler.
case $host_os in
@@ -7953,7 +8741,7 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; }
lt_prog_compiler_static='-non_shared'
;;
- linux* | k*bsd*-gnu)
+ linux* | k*bsd*-gnu | kopensolaris*-gnu)
case $cc_basename in
# old Intel for x86_64 which still supported -KPIC.
ecc*)
@@ -7974,7 +8762,13 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; }
lt_prog_compiler_pic='--shared'
lt_prog_compiler_static='--static'
;;
- pgcc* | pgf77* | pgf90* | pgf95*)
+ nagfor*)
+ # NAG Fortran compiler
+ lt_prog_compiler_wl='-Wl,-Wl,,'
+ lt_prog_compiler_pic='-PIC'
+ lt_prog_compiler_static='-Bstatic'
+ ;;
+ pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*)
# Portland Group compilers (*not* the Pentium gcc compiler,
# which looks to be a dead project)
lt_prog_compiler_wl='-Wl,'
@@ -7986,25 +8780,40 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; }
# All Alpha code is PIC.
lt_prog_compiler_static='-non_shared'
;;
- xl*)
- # IBM XL C 8.0/Fortran 10.1 on PPC
+ xl* | bgxl* | bgf* | mpixl*)
+ # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene
lt_prog_compiler_wl='-Wl,'
lt_prog_compiler_pic='-qpic'
lt_prog_compiler_static='-qstaticlink'
;;
*)
case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*)
+ # Sun Fortran 8.3 passes all unrecognized flags to the linker
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-Bstatic'
+ lt_prog_compiler_wl=''
+ ;;
+ *Sun\ F* | *Sun*Fortran*)
+ lt_prog_compiler_pic='-KPIC'
+ lt_prog_compiler_static='-Bstatic'
+ lt_prog_compiler_wl='-Qoption ld '
+ ;;
*Sun\ C*)
# Sun C 5.9
lt_prog_compiler_pic='-KPIC'
lt_prog_compiler_static='-Bstatic'
lt_prog_compiler_wl='-Wl,'
;;
- *Sun\ F*)
- # Sun Fortran 8.3 passes all unrecognized flags to the linker
- lt_prog_compiler_pic='-KPIC'
+ *Intel*\ [CF]*Compiler*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-fPIC'
+ lt_prog_compiler_static='-static'
+ ;;
+ *Portland\ Group*)
+ lt_prog_compiler_wl='-Wl,'
+ lt_prog_compiler_pic='-fpic'
lt_prog_compiler_static='-Bstatic'
- lt_prog_compiler_wl=''
;;
esac
;;
@@ -8036,7 +8845,7 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; }
lt_prog_compiler_pic='-KPIC'
lt_prog_compiler_static='-Bstatic'
case $cc_basename in
- f77* | f90* | f95*)
+ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*)
lt_prog_compiler_wl='-Qoption ld ';;
*)
lt_prog_compiler_wl='-Wl,';;
@@ -8093,13 +8902,17 @@ case $host_os in
lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC"
;;
esac
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5
-$as_echo "$lt_prog_compiler_pic" >&6; }
-
-
-
-
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5
+$as_echo_n "checking for $compiler option to produce PIC... " >&6; }
+if ${lt_cv_prog_compiler_pic+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5
+$as_echo "$lt_cv_prog_compiler_pic" >&6; }
+lt_prog_compiler_pic=$lt_cv_prog_compiler_pic
#
# Check to make sure the PIC flag actually works.
@@ -8107,7 +8920,7 @@ $as_echo "$lt_prog_compiler_pic" >&6; }
if test -n "$lt_prog_compiler_pic"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5
$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; }
-if test "${lt_cv_prog_compiler_pic_works+set}" = set; then :
+if ${lt_cv_prog_compiler_pic_works+:} false; then :
$as_echo_n "(cached) " >&6
else
lt_cv_prog_compiler_pic_works=no
@@ -8123,15 +8936,15 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:8126: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
- echo "$as_me:8130: \$? = $ac_status" >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
- $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
$SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
lt_cv_prog_compiler_pic_works=yes
@@ -8160,13 +8973,18 @@ fi
+
+
+
+
+
#
# Check to make sure the static flag actually works.
#
wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5
$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; }
-if test "${lt_cv_prog_compiler_static_works+set}" = set; then :
+if ${lt_cv_prog_compiler_static_works+:} false; then :
$as_echo_n "(cached) " >&6
else
lt_cv_prog_compiler_static_works=no
@@ -8179,7 +8997,7 @@ else
if test -s conftest.err; then
# Append any errors to the config.log.
cat conftest.err 1>&5
- $ECHO "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp
+ $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
$SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
if diff conftest.exp conftest.er2 >/dev/null; then
lt_cv_prog_compiler_static_works=yes
@@ -8209,7 +9027,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
-if test "${lt_cv_prog_compiler_c_o+set}" = set; then :
+if ${lt_cv_prog_compiler_c_o+:} false; then :
$as_echo_n "(cached) " >&6
else
lt_cv_prog_compiler_c_o=no
@@ -8228,16 +9046,16 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:8231: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
- echo "$as_me:8235: \$? = $ac_status" >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings
- $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
$SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
lt_cv_prog_compiler_c_o=yes
@@ -8264,7 +9082,7 @@ $as_echo "$lt_cv_prog_compiler_c_o" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
-if test "${lt_cv_prog_compiler_c_o+set}" = set; then :
+if ${lt_cv_prog_compiler_c_o+:} false; then :
$as_echo_n "(cached) " >&6
else
lt_cv_prog_compiler_c_o=no
@@ -8283,16 +9101,16 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:8286: $lt_compile\"" >&5)
+ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
- echo "$as_me:8290: \$? = $ac_status" >&5
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings
- $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
$SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
lt_cv_prog_compiler_c_o=yes
@@ -8358,7 +9176,6 @@ $as_echo_n "checking whether the $compiler linker ($LD) supports shared librarie
hardcode_direct=no
hardcode_direct_absolute=no
hardcode_libdir_flag_spec=
- hardcode_libdir_flag_spec_ld=
hardcode_libdir_separator=
hardcode_minus_L=no
hardcode_shlibpath_var=unsupported
@@ -8405,7 +9222,33 @@ $as_echo_n "checking whether the $compiler linker ($LD) supports shared librarie
esac
ld_shlibs=yes
+
+ # On some targets, GNU ld is compatible enough with the native linker
+ # that we're better off using the native interface for both.
+ lt_use_gnu_ld_interface=no
if test "$with_gnu_ld" = yes; then
+ case $host_os in
+ aix*)
+ # The AIX port of GNU ld has always aspired to compatibility
+ # with the native linker. However, as the warning in the GNU ld
+ # block says, versions before 2.19.5* couldn't really create working
+ # shared libraries, regardless of the interface used.
+ case `$LD -v 2>&1` in
+ *\ \(GNU\ Binutils\)\ 2.19.5*) ;;
+ *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;;
+ *\ \(GNU\ Binutils\)\ [3-9]*) ;;
+ *)
+ lt_use_gnu_ld_interface=yes
+ ;;
+ esac
+ ;;
+ *)
+ lt_use_gnu_ld_interface=yes
+ ;;
+ esac
+ fi
+
+ if test "$lt_use_gnu_ld_interface" = yes; then
# If archive_cmds runs LD, not CC, wlarc should be empty
wlarc='${wl}'
@@ -8423,6 +9266,7 @@ $as_echo_n "checking whether the $compiler linker ($LD) supports shared librarie
fi
supports_anon_versioning=no
case `$LD -v 2>&1` in
+ *GNU\ gold*) supports_anon_versioning=yes ;;
*\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11
*\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
*\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
@@ -8438,11 +9282,12 @@ $as_echo_n "checking whether the $compiler linker ($LD) supports shared librarie
ld_shlibs=no
cat <<_LT_EOF 1>&2
-*** Warning: the GNU linker, at least up to release 2.9.1, is reported
+*** Warning: the GNU linker, at least up to release 2.19, is reported
*** to be unable to reliably create shared libraries on AIX.
*** Therefore, libtool is disabling shared libraries support. If you
-*** really care for shared libraries, you may want to modify your PATH
-*** so that a non-GNU linker is found, and then restart.
+*** really care for shared libraries, you may want to install binutils
+*** 2.20 or above, or modify your PATH so that a non-GNU linker is found.
+*** You will then need to restart the configuration process.
_LT_EOF
fi
@@ -8478,10 +9323,12 @@ _LT_EOF
# _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless,
# as there is no search path for DLLs.
hardcode_libdir_flag_spec='-L$libdir'
+ export_dynamic_flag_spec='${wl}--export-all-symbols'
allow_undefined_flag=unsupported
always_export_symbols=no
enable_shared_with_static_runtimes=yes
- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols'
+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols'
+ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'
if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
@@ -8499,6 +9346,11 @@ _LT_EOF
fi
;;
+ haiku*)
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ link_all_deplibs=yes
+ ;;
+
interix[3-9]*)
hardcode_direct=no
hardcode_shlibpath_var=no
@@ -8514,7 +9366,7 @@ _LT_EOF
archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
;;
- gnu* | linux* | tpf* | k*bsd*-gnu)
+ gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu)
tmp_diet=no
if test "$host_os" = linux-dietlibc; then
case $cc_basename in
@@ -8524,15 +9376,16 @@ _LT_EOF
if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
&& test "$tmp_diet" = no
then
- tmp_addflag=
+ tmp_addflag=' $pic_flag'
tmp_sharedflag='-shared'
case $cc_basename,$host_cpu in
pgcc*) # Portland Group C compiler
- whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
tmp_addflag=' $pic_flag'
;;
- pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers
- whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ pgf77* | pgf90* | pgf95* | pgfortran*)
+ # Portland Group f77 and f90 compilers
+ whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
tmp_addflag=' $pic_flag -Mnomain' ;;
ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64
tmp_addflag=' -i_dynamic' ;;
@@ -8543,13 +9396,17 @@ _LT_EOF
lf95*) # Lahey Fortran 8.1
whole_archive_flag_spec=
tmp_sharedflag='--shared' ;;
- xl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below)
+ xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below)
tmp_sharedflag='-qmkshrobj'
tmp_addflag= ;;
+ nvcc*) # Cuda Compiler Driver 2.2
+ whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ compiler_needs_object=yes
+ ;;
esac
case `$CC -V 2>&1 | sed 5q` in
*Sun\ C*) # Sun C 5.9
- whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
compiler_needs_object=yes
tmp_sharedflag='-G' ;;
*Sun\ F*) # Sun Fortran 8.3
@@ -8565,17 +9422,16 @@ _LT_EOF
fi
case $cc_basename in
- xlf*)
+ xlf* | bgf* | bgxlf* | mpixlf*)
# IBM XL Fortran 10.1 on PPC cannot create shared libs itself
whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive'
- hardcode_libdir_flag_spec=
- hardcode_libdir_flag_spec_ld='-rpath $libdir'
- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib'
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib'
if test "x$supports_anon_versioning" = xyes; then
archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~
cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
echo "local: *; };" >> $output_objdir/$libname.ver~
- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
+ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
fi
;;
esac
@@ -8589,8 +9445,8 @@ _LT_EOF
archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
wlarc=
else
- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
fi
;;
@@ -8608,8 +9464,8 @@ _LT_EOF
_LT_EOF
elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
else
ld_shlibs=no
fi
@@ -8655,8 +9511,8 @@ _LT_EOF
*)
if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
else
ld_shlibs=no
fi
@@ -8696,8 +9552,10 @@ _LT_EOF
else
# If we're using GNU nm, then we don't want the "-C" option.
# -C means demangle to AIX nm, but means don't demangle with GNU nm
+ # Also, AIX nm treats weak defined symbols like other global
+ # defined symbols, whereas GNU nm marks them as "W".
if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
- export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
else
export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
fi
@@ -8784,7 +9642,13 @@ _LT_EOF
allow_undefined_flag='-berok'
# Determine the default libpath from the value encoded in an
# empty executable.
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ if test "${lt_cv_aix_libpath+set}" = set; then
+ aix_libpath=$lt_cv_aix_libpath
+else
+ if ${lt_cv_aix_libpath_+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
int
@@ -8797,25 +9661,32 @@ main ()
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
-lt_aix_libpath_sed='
- /Import File Strings/,/^$/ {
- /^0/ {
- s/^0 *\(.*\)$/\1/
- p
- }
- }'
-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
-# Check for a 64-bit object if we didn't find anything.
-if test -z "$aix_libpath"; then
- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
-fi
+ lt_aix_libpath_sed='
+ /Import File Strings/,/^$/ {
+ /^0/ {
+ s/^0 *\([^ ]*\) *$/\1/
+ p
+ }
+ }'
+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ # Check for a 64-bit object if we didn't find anything.
+ if test -z "$lt_cv_aix_libpath_"; then
+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ fi
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
+ if test -z "$lt_cv_aix_libpath_"; then
+ lt_cv_aix_libpath_="/usr/lib:/lib"
+ fi
+
+fi
+
+ aix_libpath=$lt_cv_aix_libpath_
+fi
hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
- archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+ archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
else
if test "$host_cpu" = ia64; then
hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib'
@@ -8824,7 +9695,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
else
# Determine the default libpath from the value encoded in an
# empty executable.
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ if test "${lt_cv_aix_libpath+set}" = set; then
+ aix_libpath=$lt_cv_aix_libpath
+else
+ if ${lt_cv_aix_libpath_+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
int
@@ -8837,30 +9714,42 @@ main ()
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
-lt_aix_libpath_sed='
- /Import File Strings/,/^$/ {
- /^0/ {
- s/^0 *\(.*\)$/\1/
- p
- }
- }'
-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
-# Check for a 64-bit object if we didn't find anything.
-if test -z "$aix_libpath"; then
- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
-fi
+ lt_aix_libpath_sed='
+ /Import File Strings/,/^$/ {
+ /^0/ {
+ s/^0 *\([^ ]*\) *$/\1/
+ p
+ }
+ }'
+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ # Check for a 64-bit object if we didn't find anything.
+ if test -z "$lt_cv_aix_libpath_"; then
+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ fi
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
+ if test -z "$lt_cv_aix_libpath_"; then
+ lt_cv_aix_libpath_="/usr/lib:/lib"
+ fi
+
+fi
+
+ aix_libpath=$lt_cv_aix_libpath_
+fi
hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
# Warning - without using the other run time loading flags,
# -berok will link without error, but may produce a broken library.
no_undefined_flag=' ${wl}-bernotok'
allow_undefined_flag=' ${wl}-berok'
- # Exported symbols can be pulled into shared objects from archives
- whole_archive_flag_spec='$convenience'
+ if test "$with_gnu_ld" = yes; then
+ # We only use this code for GNU lds that support --whole-archive.
+ whole_archive_flag_spec='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+ else
+ # Exported symbols can be pulled into shared objects from archives
+ whole_archive_flag_spec='$convenience'
+ fi
archive_cmds_need_lc=yes
# This is similar to how AIX traditionally builds its shared libraries.
archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
@@ -8892,20 +9781,64 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
# Microsoft Visual C++.
# hardcode_libdir_flag_spec is actually meaningless, as there is
# no search path for DLLs.
- hardcode_libdir_flag_spec=' '
- allow_undefined_flag=unsupported
- # Tell ltmain to make .lib files, not .a files.
- libext=lib
- # Tell ltmain to make .dll files, not .so files.
- shrext_cmds=".dll"
- # FIXME: Setting linknames here is a bad hack.
- archive_cmds='$CC -o $lib $libobjs $compiler_flags `$ECHO "X$deplibs" | $Xsed -e '\''s/ -lc$//'\''` -link -dll~linknames='
- # The linker will automatically build a .lib file if we build a DLL.
- old_archive_from_new_cmds='true'
- # FIXME: Should let the user specify the lib program.
- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs'
- fix_srcfile_path='`cygpath -w "$srcfile"`'
- enable_shared_with_static_runtimes=yes
+ case $cc_basename in
+ cl*)
+ # Native MSVC
+ hardcode_libdir_flag_spec=' '
+ allow_undefined_flag=unsupported
+ always_export_symbols=yes
+ file_list_spec='@'
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # Tell ltmain to make .dll files, not .so files.
+ shrext_cmds=".dll"
+ # FIXME: Setting linknames here is a bad hack.
+ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames='
+ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp;
+ else
+ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp;
+ fi~
+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+ linknames='
+ # The linker will not automatically build a static lib if we build a DLL.
+ # _LT_TAGVAR(old_archive_from_new_cmds, )='true'
+ enable_shared_with_static_runtimes=yes
+ exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols'
+ # Don't use ranlib
+ old_postinstall_cmds='chmod 644 $oldlib'
+ postlink_cmds='lt_outputfile="@OUTPUT@"~
+ lt_tool_outputfile="@TOOL_OUTPUT@"~
+ case $lt_outputfile in
+ *.exe|*.EXE) ;;
+ *)
+ lt_outputfile="$lt_outputfile.exe"
+ lt_tool_outputfile="$lt_tool_outputfile.exe"
+ ;;
+ esac~
+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then
+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+ $RM "$lt_outputfile.manifest";
+ fi'
+ ;;
+ *)
+ # Assume MSVC wrapper
+ hardcode_libdir_flag_spec=' '
+ allow_undefined_flag=unsupported
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # Tell ltmain to make .dll files, not .so files.
+ shrext_cmds=".dll"
+ # FIXME: Setting linknames here is a bad hack.
+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames='
+ # The linker will automatically build a .lib file if we build a DLL.
+ old_archive_from_new_cmds='true'
+ # FIXME: Should let the user specify the lib program.
+ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs'
+ enable_shared_with_static_runtimes=yes
+ ;;
+ esac
;;
darwin* | rhapsody*)
@@ -8915,7 +9848,12 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
hardcode_direct=no
hardcode_automatic=yes
hardcode_shlibpath_var=unsupported
- whole_archive_flag_spec=''
+ if test "$lt_cv_ld_force_load" = "yes"; then
+ whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+
+ else
+ whole_archive_flag_spec=''
+ fi
link_all_deplibs=yes
allow_undefined_flag="$_lt_dar_allow_undefined"
case $cc_basename in
@@ -8923,7 +9861,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
*) _lt_dar_can_shared=$GCC ;;
esac
if test "$_lt_dar_can_shared" = "yes"; then
- output_verbose_link_cmd=echo
+ output_verbose_link_cmd=func_echo_all
archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}"
module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}"
archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}"
@@ -8941,10 +9879,6 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
hardcode_shlibpath_var=no
;;
- freebsd1*)
- ld_shlibs=no
- ;;
-
# FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
# support. Future versions do this automatically, but an explicit c++rt0.o
# does not break anything, and helps significantly (at the cost of a little
@@ -8957,7 +9891,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
;;
# Unfortunately, older versions of FreeBSD 2 do not have this feature.
- freebsd2*)
+ freebsd2.*)
archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
hardcode_direct=yes
hardcode_minus_L=yes
@@ -8966,7 +9900,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
# FreeBSD 3 and greater uses gcc -shared to do shared libraries.
freebsd* | dragonfly*)
- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags'
+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
hardcode_libdir_flag_spec='-R$libdir'
hardcode_direct=yes
hardcode_shlibpath_var=no
@@ -8974,7 +9908,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
hpux9*)
if test "$GCC" = yes; then
- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
else
archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
fi
@@ -8989,14 +9923,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
;;
hpux10*)
- if test "$GCC" = yes -a "$with_gnu_ld" = no; then
- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ if test "$GCC" = yes && test "$with_gnu_ld" = no; then
+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
else
archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
fi
if test "$with_gnu_ld" = no; then
hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
- hardcode_libdir_flag_spec_ld='+b $libdir'
hardcode_libdir_separator=:
hardcode_direct=yes
hardcode_direct_absolute=yes
@@ -9008,16 +9941,16 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
;;
hpux11*)
- if test "$GCC" = yes -a "$with_gnu_ld" = no; then
+ if test "$GCC" = yes && test "$with_gnu_ld" = no; then
case $host_cpu in
hppa*64*)
archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
;;
ia64*)
- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
;;
*)
- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
;;
esac
else
@@ -9029,7 +9962,46 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
;;
*)
- archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+
+ # Older versions of the 11.00 compiler do not understand -b yet
+ # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does)
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5
+$as_echo_n "checking if $CC understands -b... " >&6; }
+if ${lt_cv_prog_compiler__b+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_prog_compiler__b=no
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS -b"
+ echo "$lt_simple_link_test_code" > conftest.$ac_ext
+ if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+ # The linker can only warn and ignore the option if not recognized
+ # So say no if there are warnings
+ if test -s conftest.err; then
+ # Append any errors to the config.log.
+ cat conftest.err 1>&5
+ $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+ if diff conftest.exp conftest.er2 >/dev/null; then
+ lt_cv_prog_compiler__b=yes
+ fi
+ else
+ lt_cv_prog_compiler__b=yes
+ fi
+ fi
+ $RM -r conftest*
+ LDFLAGS="$save_LDFLAGS"
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5
+$as_echo "$lt_cv_prog_compiler__b" >&6; }
+
+if test x"$lt_cv_prog_compiler__b" = xyes; then
+ archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+else
+ archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+fi
+
;;
esac
fi
@@ -9057,26 +10029,39 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
irix5* | irix6* | nonstopux*)
if test "$GCC" = yes; then
- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
# Try to use the -exported_symbol ld option, if it does not
# work, assume that -exports_file does not work either and
# implicitly export all symbols.
- save_LDFLAGS="$LDFLAGS"
- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null"
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ # This should be the same for all languages, so no per-tag cache variable.
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5
+$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; }
+if ${lt_cv_irix_exported_symbol+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
-int foo(void) {}
+int foo (void) { return 0; }
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib'
-
+ lt_cv_irix_exported_symbol=yes
+else
+ lt_cv_irix_exported_symbol=no
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
- LDFLAGS="$save_LDFLAGS"
+ LDFLAGS="$save_LDFLAGS"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5
+$as_echo "$lt_cv_irix_exported_symbol" >&6; }
+ if test "$lt_cv_irix_exported_symbol" = yes; then
+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib'
+ fi
else
- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib'
+ archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+ archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib'
fi
archive_cmds_need_lc='no'
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
@@ -9138,17 +10123,17 @@ rm -f core conftest.err conftest.$ac_objext \
hardcode_libdir_flag_spec='-L$libdir'
hardcode_minus_L=yes
allow_undefined_flag=unsupported
- archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$ECHO DATA >> $output_objdir/$libname.def~$ECHO " SINGLE NONSHARED" >> $output_objdir/$libname.def~$ECHO EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
+ archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def'
;;
osf3*)
if test "$GCC" = yes; then
allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
else
allow_undefined_flag=' -expect_unresolved \*'
- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
fi
archive_cmds_need_lc='no'
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
@@ -9158,13 +10143,13 @@ rm -f core conftest.err conftest.$ac_objext \
osf4* | osf5*) # as osf3* with the addition of -msym flag
if test "$GCC" = yes; then
allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
else
allow_undefined_flag=' -expect_unresolved \*'
- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+ archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~
- $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp'
+ $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp'
# Both c and cxx compiler support -rpath directly
hardcode_libdir_flag_spec='-rpath $libdir'
@@ -9177,9 +10162,9 @@ rm -f core conftest.err conftest.$ac_objext \
no_undefined_flag=' -z defs'
if test "$GCC" = yes; then
wlarc='${wl}'
- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
else
case `$CC -V 2>&1` in
*"Compilers 5.0"*)
@@ -9367,44 +10352,50 @@ x|xyes)
# to ld, don't add -lc before -lgcc.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5
$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; }
- $RM conftest*
- echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+if ${lt_cv_archive_cmds_need_lc+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ $RM conftest*
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
- if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
(eval $ac_compile) 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; } 2>conftest.err; then
- soname=conftest
- lib=conftest
- libobjs=conftest.$ac_objext
- deplibs=
- wl=$lt_prog_compiler_wl
- pic_flag=$lt_prog_compiler_pic
- compiler_flags=-v
- linker_flags=-v
- verstring=
- output_objdir=.
- libname=conftest
- lt_save_allow_undefined_flag=$allow_undefined_flag
- allow_undefined_flag=
- if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5
+ soname=conftest
+ lib=conftest
+ libobjs=conftest.$ac_objext
+ deplibs=
+ wl=$lt_prog_compiler_wl
+ pic_flag=$lt_prog_compiler_pic
+ compiler_flags=-v
+ linker_flags=-v
+ verstring=
+ output_objdir=.
+ libname=conftest
+ lt_save_allow_undefined_flag=$allow_undefined_flag
+ allow_undefined_flag=
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5
(eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }
- then
- archive_cmds_need_lc=no
- else
- archive_cmds_need_lc=yes
- fi
- allow_undefined_flag=$lt_save_allow_undefined_flag
- else
- cat conftest.err 1>&5
- fi
- $RM conftest*
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $archive_cmds_need_lc" >&5
-$as_echo "$archive_cmds_need_lc" >&6; }
+ then
+ lt_cv_archive_cmds_need_lc=no
+ else
+ lt_cv_archive_cmds_need_lc=yes
+ fi
+ allow_undefined_flag=$lt_save_allow_undefined_flag
+ else
+ cat conftest.err 1>&5
+ fi
+ $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5
+$as_echo "$lt_cv_archive_cmds_need_lc" >&6; }
+ archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc
;;
esac
fi
@@ -9562,11 +10553,6 @@ esac
-
-
-
-
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5
$as_echo_n "checking dynamic linker characteristics... " >&6; }
@@ -9575,16 +10561,23 @@ if test "$GCC" = yes; then
darwin*) lt_awk_arg="/^libraries:/,/LR/" ;;
*) lt_awk_arg="/^libraries:/" ;;
esac
- lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e "s,=/,/,g"`
- if $ECHO "$lt_search_path_spec" | $GREP ';' >/dev/null ; then
+ case $host_os in
+ mingw* | cegcc*) lt_sed_strip_eq="s,=\([A-Za-z]:\),\1,g" ;;
+ *) lt_sed_strip_eq="s,=/,/,g" ;;
+ esac
+ lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq`
+ case $lt_search_path_spec in
+ *\;*)
# if the path contains ";" then we assume it to be the separator
# otherwise default to the standard path separator (i.e. ":") - it is
# assumed that no part of a normal pathname contains ";" but that should
# okay in the real world where ";" in dirpaths is itself problematic.
- lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e 's/;/ /g'`
- else
- lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
- fi
+ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'`
+ ;;
+ *)
+ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"`
+ ;;
+ esac
# Ok, now we have the path, separated by spaces, we can step through it
# and add multilib dir if necessary.
lt_tmp_lt_search_path_spec=
@@ -9597,7 +10590,7 @@ if test "$GCC" = yes; then
lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path"
fi
done
- lt_search_path_spec=`$ECHO $lt_tmp_lt_search_path_spec | awk '
+ lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk '
BEGIN {RS=" "; FS="/|\n";} {
lt_foo="";
lt_count=0;
@@ -9617,7 +10610,13 @@ BEGIN {RS=" "; FS="/|\n";} {
if (lt_foo != "") { lt_freq[lt_foo]++; }
if (lt_freq[lt_foo] == 1) { print lt_foo; }
}'`
- sys_lib_search_path_spec=`$ECHO $lt_search_path_spec`
+ # AWK program above erroneously prepends '/' to C:/dos/paths
+ # for these hosts.
+ case $host_os in
+ mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\
+ $SED 's,/\([A-Za-z]:\),\1,g'` ;;
+ esac
+ sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP`
else
sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
fi
@@ -9643,7 +10642,7 @@ need_version=unknown
case $host_os in
aix3*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a'
shlibpath_var=LIBPATH
@@ -9652,7 +10651,7 @@ aix3*)
;;
aix[4-9]*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
need_lib_prefix=no
need_version=no
hardcode_into_libs=yes
@@ -9705,7 +10704,7 @@ amigaos*)
m68k)
library_names_spec='$libname.ixlibrary $libname.a'
# Create ${libname}_ixlibrary.a entries in /sys/libs.
- finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$ECHO "X$lib" | $Xsed -e '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
;;
esac
;;
@@ -9717,7 +10716,7 @@ beos*)
;;
bsdi[45]*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
need_version=no
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
soname_spec='${libname}${release}${shared_ext}$major'
@@ -9736,8 +10735,9 @@ cygwin* | mingw* | pw32* | cegcc*)
need_version=no
need_lib_prefix=no
- case $GCC,$host_os in
- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*)
+ case $GCC,$cc_basename in
+ yes,*)
+ # gcc
library_names_spec='$libname.dll.a'
# DLL is installed to $(libdir)/../bin by postinstall_cmds
postinstall_cmds='base_file=`basename \${file}`~
@@ -9758,36 +10758,83 @@ cygwin* | mingw* | pw32* | cegcc*)
cygwin*)
# Cygwin DLLs use 'cyg' prefix rather than 'lib'
soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
- sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib"
+
+ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"
;;
mingw* | cegcc*)
# MinGW DLLs use traditional 'lib' prefix
soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
- sys_lib_search_path_spec=`$CC -print-search-dirs | $GREP "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"`
- if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then
- # It is most probably a Windows format PATH printed by
- # mingw gcc, but we are running on Cygwin. Gcc prints its search
- # path with ; separators, and with drive letters. We can handle the
- # drive letters (cygwin fileutils understands them), so leave them,
- # especially as we might pass files found there to a mingw objdump,
- # which wouldn't understand a cygwinified path. Ahh.
- sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
- else
- sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
- fi
;;
pw32*)
# pw32 DLLs use 'pw' prefix rather than 'lib'
library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
;;
esac
+ dynamic_linker='Win32 ld.exe'
+ ;;
+
+ *,cl*)
+ # Native MSVC
+ libname_spec='$name'
+ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+ library_names_spec='${libname}.dll.lib'
+
+ case $build_os in
+ mingw*)
+ sys_lib_search_path_spec=
+ lt_save_ifs=$IFS
+ IFS=';'
+ for lt_path in $LIB
+ do
+ IFS=$lt_save_ifs
+ # Let DOS variable expansion print the short 8.3 style file name.
+ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
+ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
+ done
+ IFS=$lt_save_ifs
+ # Convert to MSYS style.
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'`
+ ;;
+ cygwin*)
+ # Convert to unix form, then to dos form, then back to unix form
+ # but this time dos style (no spaces!) so that the unix form looks
+ # like /cygdrive/c/PROGRA~1:/cygdr...
+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
+ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
+ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+ ;;
+ *)
+ sys_lib_search_path_spec="$LIB"
+ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then
+ # It is most probably a Windows format PATH.
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+ else
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+ fi
+ # FIXME: find the short name or the path components, as spaces are
+ # common. (e.g. "Program Files" -> "PROGRA~1")
+ ;;
+ esac
+
+ # DLL is installed to $(libdir)/../bin by postinstall_cmds
+ postinstall_cmds='base_file=`basename \${file}`~
+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+ dldir=$destdir/`dirname \$dlpath`~
+ test -d \$dldir || mkdir -p \$dldir~
+ $install_prog $dir/$dlname \$dldir/$dlname'
+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+ dlpath=$dir/\$dldll~
+ $RM \$dlpath'
+ shlibpath_overrides_runpath=yes
+ dynamic_linker='Win32 link.exe'
;;
*)
+ # Assume MSVC wrapper
library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib'
+ dynamic_linker='Win32 ld.exe'
;;
esac
- dynamic_linker='Win32 ld.exe'
# FIXME: first we should search . and the directory the executable is in
shlibpath_var=PATH
;;
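For the native MSVC branch above, a rough sketch of the cygwin-side LIB conversion (the LIB value and the short 8.3 names are assumptions for illustration; only the cygpath calls and the final sed come from the script):

  demo_LIB='C:\Program Files\Microsoft SDK\Lib;C:\tools\lib'
  p=`cygpath --path --unix "$demo_LIB"`          # /cygdrive/c/Program Files/...:/cygdrive/c/tools/lib
  p=`cygpath --path --dos "$p" 2>/dev/null`      # C:\PROGRA~1\...;C:\tools\lib (8.3 names, no spaces)
  p=`cygpath --path --unix "$p" | sed 's/:/ /g'` # /cygdrive/c/PROGRA~1/... /cygdrive/c/tools/lib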
@@ -9808,7 +10855,7 @@ darwin* | rhapsody*)
;;
dgux*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
need_lib_prefix=no
need_version=no
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext'
@@ -9816,10 +10863,6 @@ dgux*)
shlibpath_var=LD_LIBRARY_PATH
;;
-freebsd1*)
- dynamic_linker=no
- ;;
-
freebsd* | dragonfly*)
# DragonFly does not have aout. When/if they implement a new
# versioning mechanism, adjust this.
@@ -9827,7 +10870,7 @@ freebsd* | dragonfly*)
objformat=`/usr/bin/objformat`
else
case $host_os in
- freebsd[123]*) objformat=aout ;;
+ freebsd[23].*) objformat=aout ;;
*) objformat=elf ;;
esac
fi
@@ -9845,7 +10888,7 @@ freebsd* | dragonfly*)
esac
shlibpath_var=LD_LIBRARY_PATH
case $host_os in
- freebsd2*)
+ freebsd2.*)
shlibpath_overrides_runpath=yes
;;
freebsd3.[01]* | freebsdelf3.[01]*)
@@ -9865,12 +10908,26 @@ freebsd* | dragonfly*)
;;
gnu*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
need_lib_prefix=no
need_version=no
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
soname_spec='${libname}${release}${shared_ext}$major'
shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+
+haiku*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ dynamic_linker="$host_os runtime_loader"
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
hardcode_into_libs=yes
;;
@@ -9916,12 +10973,14 @@ hpux9* | hpux10* | hpux11*)
soname_spec='${libname}${release}${shared_ext}$major'
;;
esac
- # HP-UX runs *really* slowly unless shared libraries are mode 555.
+ # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
postinstall_cmds='chmod 555 $lib'
+ # or fails outright, so override atomically:
+ install_override_mode=555
;;
interix[3-9]*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
need_lib_prefix=no
need_version=no
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
@@ -9937,7 +10996,7 @@ irix5* | irix6* | nonstopux*)
nonstopux*) version_type=nonstopux ;;
*)
if test "$lt_cv_prog_gnu_ld" = yes; then
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
else
version_type=irix
fi ;;
@@ -9974,9 +11033,9 @@ linux*oldld* | linux*aout* | linux*coff*)
dynamic_linker=no
;;
-# This must be Linux ELF.
-linux* | k*bsd*-gnu)
- version_type=linux
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu)
+ version_type=linux # correct to gnu/linux during the next big refactor
need_lib_prefix=no
need_version=no
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
@@ -9984,12 +11043,17 @@ linux* | k*bsd*-gnu)
finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
shlibpath_var=LD_LIBRARY_PATH
shlibpath_overrides_runpath=no
+
# Some binutils ld are patched to set DT_RUNPATH
- save_LDFLAGS=$LDFLAGS
- save_libdir=$libdir
- eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \
- LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\""
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ if ${lt_cv_shlibpath_overrides_runpath+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_shlibpath_overrides_runpath=no
+ save_LDFLAGS=$LDFLAGS
+ save_libdir=$libdir
+ eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \
+ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\""
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
int
@@ -10002,13 +11066,17 @@ main ()
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then :
- shlibpath_overrides_runpath=yes
+ lt_cv_shlibpath_overrides_runpath=yes
fi
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
- LDFLAGS=$save_LDFLAGS
- libdir=$save_libdir
+ LDFLAGS=$save_LDFLAGS
+ libdir=$save_libdir
+
+fi
+
+ shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
# This implies no fast_install, which is unacceptable.
# Some rework will be needed to allow for fast_install
@@ -10020,8 +11088,9 @@ rm -f core conftest.err conftest.$ac_objext \
# Append ld.so.conf contents to the search path
if test -f /etc/ld.so.conf; then
- lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '`
+ lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra"
+
fi
# We used to test for /lib/ld.so.1 and disable shared libraries on
@@ -10052,7 +11121,7 @@ netbsd*)
;;
newsos6)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
shlibpath_var=LD_LIBRARY_PATH
shlibpath_overrides_runpath=yes
@@ -10121,7 +11190,7 @@ rdos*)
;;
solaris*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
need_lib_prefix=no
need_version=no
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
@@ -10146,7 +11215,7 @@ sunos4*)
;;
sysv4 | sysv4.3*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
soname_spec='${libname}${release}${shared_ext}$major'
shlibpath_var=LD_LIBRARY_PATH
@@ -10170,7 +11239,7 @@ sysv4 | sysv4.3*)
sysv4*MP*)
if test -d /usr/nec ;then
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}'
soname_spec='$libname${shared_ext}.$major'
shlibpath_var=LD_LIBRARY_PATH
@@ -10201,7 +11270,7 @@ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
tpf*)
# TPF is a cross-target only. Preferred cross-host = GNU/Linux.
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
need_lib_prefix=no
need_version=no
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
@@ -10211,7 +11280,7 @@ tpf*)
;;
uts4*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
soname_spec='${libname}${release}${shared_ext}$major'
shlibpath_var=LD_LIBRARY_PATH
@@ -10323,6 +11392,11 @@ fi
+
+
+
+
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5
$as_echo_n "checking how to hardcode library paths into programs... " >&6; }
hardcode_action=
@@ -10395,7 +11469,7 @@ else
# if libdl is installed we need to link against it
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5
$as_echo_n "checking for dlopen in -ldl... " >&6; }
-if test "${ac_cv_lib_dl_dlopen+set}" = set; then :
+if ${ac_cv_lib_dl_dlopen+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -10429,7 +11503,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5
$as_echo "$ac_cv_lib_dl_dlopen" >&6; }
-if test "x$ac_cv_lib_dl_dlopen" = x""yes; then :
+if test "x$ac_cv_lib_dl_dlopen" = xyes; then :
lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"
else
@@ -10443,12 +11517,12 @@ fi
*)
ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load"
-if test "x$ac_cv_func_shl_load" = x""yes; then :
+if test "x$ac_cv_func_shl_load" = xyes; then :
lt_cv_dlopen="shl_load"
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5
$as_echo_n "checking for shl_load in -ldld... " >&6; }
-if test "${ac_cv_lib_dld_shl_load+set}" = set; then :
+if ${ac_cv_lib_dld_shl_load+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -10482,16 +11556,16 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5
$as_echo "$ac_cv_lib_dld_shl_load" >&6; }
-if test "x$ac_cv_lib_dld_shl_load" = x""yes; then :
+if test "x$ac_cv_lib_dld_shl_load" = xyes; then :
lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"
else
ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen"
-if test "x$ac_cv_func_dlopen" = x""yes; then :
+if test "x$ac_cv_func_dlopen" = xyes; then :
lt_cv_dlopen="dlopen"
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5
$as_echo_n "checking for dlopen in -ldl... " >&6; }
-if test "${ac_cv_lib_dl_dlopen+set}" = set; then :
+if ${ac_cv_lib_dl_dlopen+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -10525,12 +11599,12 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5
$as_echo "$ac_cv_lib_dl_dlopen" >&6; }
-if test "x$ac_cv_lib_dl_dlopen" = x""yes; then :
+if test "x$ac_cv_lib_dl_dlopen" = xyes; then :
lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5
$as_echo_n "checking for dlopen in -lsvld... " >&6; }
-if test "${ac_cv_lib_svld_dlopen+set}" = set; then :
+if ${ac_cv_lib_svld_dlopen+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -10564,12 +11638,12 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5
$as_echo "$ac_cv_lib_svld_dlopen" >&6; }
-if test "x$ac_cv_lib_svld_dlopen" = x""yes; then :
+if test "x$ac_cv_lib_svld_dlopen" = xyes; then :
lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5
$as_echo_n "checking for dld_link in -ldld... " >&6; }
-if test "${ac_cv_lib_dld_dld_link+set}" = set; then :
+if ${ac_cv_lib_dld_dld_link+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -10603,7 +11677,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5
$as_echo "$ac_cv_lib_dld_dld_link" >&6; }
-if test "x$ac_cv_lib_dld_dld_link" = x""yes; then :
+if test "x$ac_cv_lib_dld_dld_link" = xyes; then :
lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"
fi
@@ -10644,7 +11718,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5
$as_echo_n "checking whether a program can dlopen itself... " >&6; }
-if test "${lt_cv_dlopen_self+set}" = set; then :
+if ${lt_cv_dlopen_self+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "$cross_compiling" = yes; then :
@@ -10653,7 +11727,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 10656 "configure"
+#line $LINENO "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -10694,7 +11768,13 @@ else
# endif
#endif
-void fnord() { int i=42;}
+/* When -fvisibility=hidden is used, assume the code has been annotated
+ correspondingly for the symbols needed. */
+#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
int main ()
{
void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
@@ -10703,7 +11783,11 @@ int main ()
if (self)
{
if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
- else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ else
+ {
+ if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ else puts (dlerror ());
+ }
/* dlclose (self); */
}
else
@@ -10740,7 +11824,7 @@ $as_echo "$lt_cv_dlopen_self" >&6; }
wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5
$as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; }
-if test "${lt_cv_dlopen_self_static+set}" = set; then :
+if ${lt_cv_dlopen_self_static+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "$cross_compiling" = yes; then :
@@ -10749,7 +11833,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 10752 "configure"
+#line $LINENO "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -10790,7 +11874,13 @@ else
# endif
#endif
-void fnord() { int i=42;}
+/* When -fvisibility=hidden is used, assume the code has been annotated
+ correspondingly for the symbols needed. */
+#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
int main ()
{
void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
@@ -10799,7 +11889,11 @@ int main ()
if (self)
{
if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
- else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ else
+ {
+ if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ else puts (dlerror ());
+ }
/* dlclose (self); */
}
else
@@ -10968,6 +12062,8 @@ CC="$lt_save_CC"
+
+
ac_config_commands="$ac_config_commands libtool"
@@ -10978,6 +12074,1021 @@ CC="$lt_save_CC"
+# Test for 64-bit build.
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of size_t" >&5
+$as_echo_n "checking size of size_t... " >&6; }
+if ${ac_cv_sizeof_size_t+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (size_t))" "ac_cv_sizeof_size_t" "$ac_includes_default"; then :
+
+else
+ if test "$ac_cv_type_size_t" = yes; then
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (size_t)
+See \`config.log' for more details" "$LINENO" 5; }
+ else
+ ac_cv_sizeof_size_t=0
+ fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_size_t" >&5
+$as_echo "$ac_cv_sizeof_size_t" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_SIZE_T $ac_cv_sizeof_size_t
+_ACEOF
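A quick illustration of what this cached size is for (the variable name below is only for the sketch): on a 64-bit host the probe appends "#define SIZEOF_SIZE_T 8" to confdefs.h, and the same $ac_cv_sizeof_size_t value is reused by the host case further down to pick the 32-bit or 64-bit libffi target:

  if test "$ac_cv_sizeof_size_t" = 4; then demo_target=X86; else demo_target=X86_64; fi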
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler vendor" >&5
+$as_echo_n "checking for C compiler vendor... " >&6; }
+if ${ax_cv_c_compiler_vendor+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ # note: don't check for gcc first since some other compilers define __GNUC__
+ vendors="intel: __ICC,__ECC,__INTEL_COMPILER
+ ibm: __xlc__,__xlC__,__IBMC__,__IBMCPP__
+ pathscale: __PATHCC__,__PATHSCALE__
+ clang: __clang__
+ gnu: __GNUC__
+ sun: __SUNPRO_C,__SUNPRO_CC
+ hp: __HP_cc,__HP_aCC
+ dec: __DECC,__DECCXX,__DECC_VER,__DECCXX_VER
+ borland: __BORLANDC__,__TURBOC__
+ comeau: __COMO__
+ cray: _CRAYC
+ kai: __KCC
+ lcc: __LCC__
+ sgi: __sgi,sgi
+ microsoft: _MSC_VER
+ metrowerks: __MWERKS__
+ watcom: __WATCOMC__
+ portland: __PGI
+ unknown: UNKNOWN"
+ for ventest in $vendors; do
+ case $ventest in
+ *:) vendor=$ventest; continue ;;
+ *) vencpp="defined("`echo $ventest | sed 's/,/) || defined(/g'`")" ;;
+ esac
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ #if !($vencpp)
+ thisisanerror;
+ #endif
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ break
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ done
+ ax_cv_c_compiler_vendor=`echo $vendor | cut -d: -f1`
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_c_compiler_vendor" >&5
+$as_echo "$ax_cv_c_compiler_vendor" >&6; }
+
+
+
+
+
+
+# Check whether --enable-portable-binary was given.
+if test "${enable_portable_binary+set}" = set; then :
+ enableval=$enable_portable_binary; acx_maxopt_portable=$enableval
+else
+ acx_maxopt_portable=no
+fi
+
+
+# Try to determine "good" native compiler flags if none specified via CFLAGS
+if test "$ac_test_CFLAGS" != "set"; then
+ CFLAGS=""
+ case $ax_cv_c_compiler_vendor in
+ dec) CFLAGS="-newc -w0 -O5 -ansi_alias -ansi_args -fp_reorder -tune host"
+ if test "x$acx_maxopt_portable" = xno; then
+ CFLAGS="$CFLAGS -arch host"
+ fi;;
+
+ sun) CFLAGS="-native -fast -xO5 -dalign"
+ if test "x$acx_maxopt_portable" = xyes; then
+ CFLAGS="$CFLAGS -xarch=generic"
+ fi;;
+
+ hp) CFLAGS="+Oall +Optrs_ansi +DSnative"
+ if test "x$acx_maxopt_portable" = xyes; then
+ CFLAGS="$CFLAGS +DAportable"
+ fi;;
+
+ ibm) if test "x$acx_maxopt_portable" = xno; then
+ xlc_opt="-qarch=auto -qtune=auto"
+ else
+ xlc_opt="-qtune=auto"
+ fi
+ as_CACHEVAR=`$as_echo "ax_cv_check_cflags__$xlc_opt" | $as_tr_sh`
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts $xlc_opt" >&5
+$as_echo_n "checking whether C compiler accepts $xlc_opt... " >&6; }
+if eval \${$as_CACHEVAR+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ ax_check_save_flags=$CFLAGS
+ CFLAGS="$CFLAGS $xlc_opt"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ eval "$as_CACHEVAR=yes"
+else
+ eval "$as_CACHEVAR=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ CFLAGS=$ax_check_save_flags
+fi
+eval ac_res=\$$as_CACHEVAR
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+if test x"`eval 'as_val=${'$as_CACHEVAR'};$as_echo "$as_val"'`" = xyes; then :
+ CFLAGS="-O3 -qansialias -w $xlc_opt"
+else
+ CFLAGS="-O3 -qansialias -w"
+ echo "******************************************************"
+ echo "* You seem to have the IBM C compiler. It is *"
+ echo "* recommended for best performance that you use: *"
+ echo "* *"
+ echo "* CFLAGS=-O3 -qarch=xxx -qtune=xxx -qansialias -w *"
+ echo "* ^^^ ^^^ *"
+ echo "* where xxx is pwr2, pwr3, 604, or whatever kind of *"
+ echo "* CPU you have. (Set the CFLAGS environment var. *"
+ echo "* and re-run configure.) For more info, man cc. *"
+ echo "******************************************************"
+fi
+
+ ;;
+
+ intel) CFLAGS="-O3 -ansi_alias"
+ if test "x$acx_maxopt_portable" = xno; then
+ icc_archflag=unknown
+ icc_flags=""
+ case $host_cpu in
+ i686*|x86_64*)
+ # icc accepts gcc assembly syntax, so these should work:
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for x86 cpuid 0 output" >&5
+$as_echo_n "checking for x86 cpuid 0 output... " >&6; }
+if ${ax_cv_gcc_x86_cpuid_0+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "$cross_compiling" = yes; then :
+ ax_cv_gcc_x86_cpuid_0=unknown
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdio.h>
+int
+main ()
+{
+
+ int op = 0, eax, ebx, ecx, edx;
+ FILE *f;
+ __asm__("cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a" (op));
+ f = fopen("conftest_cpuid", "w"); if (!f) return 1;
+ fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx);
+ fclose(f);
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+ ax_cv_gcc_x86_cpuid_0=`cat conftest_cpuid`; rm -f conftest_cpuid
+else
+ ax_cv_gcc_x86_cpuid_0=unknown; rm -f conftest_cpuid
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_gcc_x86_cpuid_0" >&5
+$as_echo "$ax_cv_gcc_x86_cpuid_0" >&6; }
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for x86 cpuid 1 output" >&5
+$as_echo_n "checking for x86 cpuid 1 output... " >&6; }
+if ${ax_cv_gcc_x86_cpuid_1+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "$cross_compiling" = yes; then :
+ ax_cv_gcc_x86_cpuid_1=unknown
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdio.h>
+int
+main ()
+{
+
+ int op = 1, eax, ebx, ecx, edx;
+ FILE *f;
+ __asm__("cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a" (op));
+ f = fopen("conftest_cpuid", "w"); if (!f) return 1;
+ fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx);
+ fclose(f);
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+ ax_cv_gcc_x86_cpuid_1=`cat conftest_cpuid`; rm -f conftest_cpuid
+else
+ ax_cv_gcc_x86_cpuid_1=unknown; rm -f conftest_cpuid
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_gcc_x86_cpuid_1" >&5
+$as_echo "$ax_cv_gcc_x86_cpuid_1" >&6; }
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+ case $ax_cv_gcc_x86_cpuid_0 in # see AX_GCC_ARCHFLAG
+ *:756e6547:*:*) # Intel
+ case $ax_cv_gcc_x86_cpuid_1 in
+ *6a?:*[234]:*:*|*6[789b]?:*:*:*) icc_flags="-xK";;
+ *f3[347]:*:*:*|*f41347:*:*:*) icc_flags="-xP -xN -xW -xK";;
+ *f??:*:*:*) icc_flags="-xN -xW -xK";;
+ esac ;;
+ esac ;;
+ esac
+ if test "x$icc_flags" != x; then
+ for flag in $icc_flags; do
+ as_CACHEVAR=`$as_echo "ax_cv_check_cflags__$flag" | $as_tr_sh`
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts $flag" >&5
+$as_echo_n "checking whether C compiler accepts $flag... " >&6; }
+if eval \${$as_CACHEVAR+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ ax_check_save_flags=$CFLAGS
+ CFLAGS="$CFLAGS $flag"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ eval "$as_CACHEVAR=yes"
+else
+ eval "$as_CACHEVAR=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ CFLAGS=$ax_check_save_flags
+fi
+eval ac_res=\$$as_CACHEVAR
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+if test x"`eval 'as_val=${'$as_CACHEVAR'};$as_echo "$as_val"'`" = xyes; then :
+ icc_archflag=$flag; break
+else
+ :
+fi
+
+ done
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for icc architecture flag" >&5
+$as_echo_n "checking for icc architecture flag... " >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $icc_archflag" >&5
+$as_echo "$icc_archflag" >&6; }
+ if test "x$icc_archflag" != xunknown; then
+ CFLAGS="$CFLAGS $icc_archflag"
+ fi
+ fi
+ ;;
+
+ gnu)
+ # default optimization flags for gcc on all systems
+ CFLAGS="-O3 -fomit-frame-pointer"
+
+ # -malign-double for x86 systems
+ # LIBFFI -- DON'T DO THIS - CHANGES ABI
+ # AX_CHECK_COMPILE_FLAG(-malign-double, CFLAGS="$CFLAGS -malign-double")
+
+ # -fstrict-aliasing for gcc-2.95+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts -fstrict-aliasing" >&5
+$as_echo_n "checking whether C compiler accepts -fstrict-aliasing... " >&6; }
+if ${ax_cv_check_cflags___fstrict_aliasing+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ ax_check_save_flags=$CFLAGS
+ CFLAGS="$CFLAGS -fstrict-aliasing"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ax_cv_check_cflags___fstrict_aliasing=yes
+else
+ ax_cv_check_cflags___fstrict_aliasing=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ CFLAGS=$ax_check_save_flags
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_check_cflags___fstrict_aliasing" >&5
+$as_echo "$ax_cv_check_cflags___fstrict_aliasing" >&6; }
+if test x"$ax_cv_check_cflags___fstrict_aliasing" = xyes; then :
+ CFLAGS="$CFLAGS -fstrict-aliasing"
+else
+ :
+fi
+
+
+ # note that we enable "unsafe" fp optimization with other compilers, too
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts -ffast-math" >&5
+$as_echo_n "checking whether C compiler accepts -ffast-math... " >&6; }
+if ${ax_cv_check_cflags___ffast_math+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ ax_check_save_flags=$CFLAGS
+ CFLAGS="$CFLAGS -ffast-math"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ax_cv_check_cflags___ffast_math=yes
+else
+ ax_cv_check_cflags___ffast_math=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ CFLAGS=$ax_check_save_flags
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_check_cflags___ffast_math" >&5
+$as_echo "$ax_cv_check_cflags___ffast_math" >&6; }
+if test x"$ax_cv_check_cflags___ffast_math" = xyes; then :
+ CFLAGS="$CFLAGS -ffast-math"
+else
+ :
+fi
+
+
+
+
+
+
+# Check whether --with-gcc-arch was given.
+if test "${with_gcc_arch+set}" = set; then :
+ withval=$with_gcc_arch; ax_gcc_arch=$withval
+else
+ ax_gcc_arch=yes
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for gcc architecture flag" >&5
+$as_echo_n "checking for gcc architecture flag... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
+$as_echo "" >&6; }
+if ${ax_cv_gcc_archflag+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ax_cv_gcc_archflag="unknown"
+
+if test "$GCC" = yes; then
+
+if test "x$ax_gcc_arch" = xyes; then
+ax_gcc_arch=""
+if test "$cross_compiling" = no; then
+case $host_cpu in
+ i[3456]86*|x86_64*) # use cpuid codes
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for x86 cpuid 0 output" >&5
+$as_echo_n "checking for x86 cpuid 0 output... " >&6; }
+if ${ax_cv_gcc_x86_cpuid_0+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "$cross_compiling" = yes; then :
+ ax_cv_gcc_x86_cpuid_0=unknown
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdio.h>
+int
+main ()
+{
+
+ int op = 0, eax, ebx, ecx, edx;
+ FILE *f;
+ __asm__("cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a" (op));
+ f = fopen("conftest_cpuid", "w"); if (!f) return 1;
+ fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx);
+ fclose(f);
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+ ax_cv_gcc_x86_cpuid_0=`cat conftest_cpuid`; rm -f conftest_cpuid
+else
+ ax_cv_gcc_x86_cpuid_0=unknown; rm -f conftest_cpuid
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_gcc_x86_cpuid_0" >&5
+$as_echo "$ax_cv_gcc_x86_cpuid_0" >&6; }
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for x86 cpuid 1 output" >&5
+$as_echo_n "checking for x86 cpuid 1 output... " >&6; }
+if ${ax_cv_gcc_x86_cpuid_1+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "$cross_compiling" = yes; then :
+ ax_cv_gcc_x86_cpuid_1=unknown
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdio.h>
+int
+main ()
+{
+
+ int op = 1, eax, ebx, ecx, edx;
+ FILE *f;
+ __asm__("cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a" (op));
+ f = fopen("conftest_cpuid", "w"); if (!f) return 1;
+ fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx);
+ fclose(f);
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+ ax_cv_gcc_x86_cpuid_1=`cat conftest_cpuid`; rm -f conftest_cpuid
+else
+ ax_cv_gcc_x86_cpuid_1=unknown; rm -f conftest_cpuid
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_gcc_x86_cpuid_1" >&5
+$as_echo "$ax_cv_gcc_x86_cpuid_1" >&6; }
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+ case $ax_cv_gcc_x86_cpuid_0 in
+ *:756e6547:*:*) # Intel
+ case $ax_cv_gcc_x86_cpuid_1 in
+ *5[48]?:*:*:*) ax_gcc_arch="pentium-mmx pentium" ;;
+ *5??:*:*:*) ax_gcc_arch=pentium ;;
+ *0?6[3456]?:*:*:*) ax_gcc_arch="pentium2 pentiumpro" ;;
+ *0?6a?:*[01]:*:*) ax_gcc_arch="pentium2 pentiumpro" ;;
+ *0?6a?:*[234]:*:*) ax_gcc_arch="pentium3 pentiumpro" ;;
+ *0?6[9de]?:*:*:*) ax_gcc_arch="pentium-m pentium3 pentiumpro" ;;
+ *0?6[78b]?:*:*:*) ax_gcc_arch="pentium3 pentiumpro" ;;
+ *0?6f?:*:*:*|*1?66?:*:*:*) ax_gcc_arch="core2 pentium-m pentium3 pentiumpro" ;;
+ *1?6[7d]?:*:*:*) ax_gcc_arch="penryn core2 pentium-m pentium3 pentiumpro" ;;
+ *1?6[aef]?:*:*:*|*2?6[5cef]?:*:*:*) ax_gcc_arch="corei7 core2 pentium-m pentium3 pentiumpro" ;;
+ *1?6c?:*:*:*|*[23]?66?:*:*:*) ax_gcc_arch="atom core2 pentium-m pentium3 pentiumpro" ;;
+ *2?6[ad]?:*:*:*) ax_gcc_arch="corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;;
+ *0?6??:*:*:*) ax_gcc_arch=pentiumpro ;;
+ *6??:*:*:*) ax_gcc_arch="core2 pentiumpro" ;;
+ ?000?f3[347]:*:*:*|?000?f41347:*:*:*|?000?f6?:*:*:*)
+ case $host_cpu in
+ x86_64*) ax_gcc_arch="nocona pentium4 pentiumpro" ;;
+ *) ax_gcc_arch="prescott pentium4 pentiumpro" ;;
+ esac ;;
+ ?000?f??:*:*:*) ax_gcc_arch="pentium4 pentiumpro";;
+ esac ;;
+ *:68747541:*:*) # AMD
+ case $ax_cv_gcc_x86_cpuid_1 in
+ *5[67]?:*:*:*) ax_gcc_arch=k6 ;;
+ *5[8d]?:*:*:*) ax_gcc_arch="k6-2 k6" ;;
+ *5[9]?:*:*:*) ax_gcc_arch="k6-3 k6" ;;
+ *60?:*:*:*) ax_gcc_arch=k7 ;;
+ *6[12]?:*:*:*) ax_gcc_arch="athlon k7" ;;
+ *6[34]?:*:*:*) ax_gcc_arch="athlon-tbird k7" ;;
+ *67?:*:*:*) ax_gcc_arch="athlon-4 athlon k7" ;;
+ *6[68a]?:*:*:*)
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for x86 cpuid 0x80000006 output" >&5
+$as_echo_n "checking for x86 cpuid 0x80000006 output... " >&6; }
+if ${ax_cv_gcc_x86_cpuid_0x80000006+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "$cross_compiling" = yes; then :
+ ax_cv_gcc_x86_cpuid_0x80000006=unknown
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdio.h>
+int
+main ()
+{
+
+ int op = 0x80000006, eax, ebx, ecx, edx;
+ FILE *f;
+ __asm__("cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a" (op));
+ f = fopen("conftest_cpuid", "w"); if (!f) return 1;
+ fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx);
+ fclose(f);
+ return 0;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+ ax_cv_gcc_x86_cpuid_0x80000006=`cat conftest_cpuid`; rm -f conftest_cpuid
+else
+ ax_cv_gcc_x86_cpuid_0x80000006=unknown; rm -f conftest_cpuid
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_gcc_x86_cpuid_0x80000006" >&5
+$as_echo "$ax_cv_gcc_x86_cpuid_0x80000006" >&6; }
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+ # L2 cache size
+ case $ax_cv_gcc_x86_cpuid_0x80000006 in
+ *:*:*[1-9a-f]??????:*) # (L2 = ecx >> 16) >= 256
+ ax_gcc_arch="athlon-xp athlon-4 athlon k7" ;;
+ *) ax_gcc_arch="athlon-4 athlon k7" ;;
+ esac ;;
+ ?00??f[4cef8b]?:*:*:*) ax_gcc_arch="athlon64 k8" ;;
+ ?00??f5?:*:*:*) ax_gcc_arch="opteron k8" ;;
+ ?00??f7?:*:*:*) ax_gcc_arch="athlon-fx opteron k8" ;;
+ ?00??f??:*:*:*) ax_gcc_arch="k8" ;;
+ ?05??f??:*:*:*) ax_gcc_arch="btver1 amdfam10 k8" ;;
+ ?06??f??:*:*:*) ax_gcc_arch="bdver1 amdfam10 k8" ;;
+ *f??:*:*:*) ax_gcc_arch="amdfam10 k8" ;;
+ esac ;;
+ *:746e6543:*:*) # IDT
+ case $ax_cv_gcc_x86_cpuid_1 in
+ *54?:*:*:*) ax_gcc_arch=winchip-c6 ;;
+ *58?:*:*:*) ax_gcc_arch=winchip2 ;;
+ *6[78]?:*:*:*) ax_gcc_arch=c3 ;;
+ *69?:*:*:*) ax_gcc_arch="c3-2 c3" ;;
+ esac ;;
+ esac
+ if test x"$ax_gcc_arch" = x; then # fallback
+ case $host_cpu in
+ i586*) ax_gcc_arch=pentium ;;
+ i686*) ax_gcc_arch=pentiumpro ;;
+ esac
+ fi
+ ;;
+
+ sparc*)
+ # Extract the first word of "prtdiag", so it can be a program name with args.
+set dummy prtdiag; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_PRTDIAG+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $PRTDIAG in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_PRTDIAG="$PRTDIAG" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_dummy="$PATH:/usr/platform/`uname -i`/sbin/:/usr/platform/`uname -m`/sbin/"
+for as_dir in $as_dummy
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_PRTDIAG="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ test -z "$ac_cv_path_PRTDIAG" && ac_cv_path_PRTDIAG="prtdiag"
+ ;;
+esac
+fi
+PRTDIAG=$ac_cv_path_PRTDIAG
+if test -n "$PRTDIAG"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PRTDIAG" >&5
+$as_echo "$PRTDIAG" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ cputype=`(((grep cpu /proc/cpuinfo | cut -d: -f2) ; ($PRTDIAG -v |grep -i sparc) ; grep -i cpu /var/run/dmesg.boot ) | head -n 1) 2> /dev/null`
+ cputype=`echo "$cputype" | tr -d ' -' |tr $as_cr_LETTERS $as_cr_letters`
+ case $cputype in
+ *ultrasparciv*) ax_gcc_arch="ultrasparc4 ultrasparc3 ultrasparc v9" ;;
+ *ultrasparciii*) ax_gcc_arch="ultrasparc3 ultrasparc v9" ;;
+ *ultrasparc*) ax_gcc_arch="ultrasparc v9" ;;
+ *supersparc*|*tms390z5[05]*) ax_gcc_arch="supersparc v8" ;;
+ *hypersparc*|*rt62[056]*) ax_gcc_arch="hypersparc v8" ;;
+ *cypress*) ax_gcc_arch=cypress ;;
+ esac ;;
+
+ alphaev5) ax_gcc_arch=ev5 ;;
+ alphaev56) ax_gcc_arch=ev56 ;;
+ alphapca56) ax_gcc_arch="pca56 ev56" ;;
+ alphapca57) ax_gcc_arch="pca57 pca56 ev56" ;;
+ alphaev6) ax_gcc_arch=ev6 ;;
+ alphaev67) ax_gcc_arch=ev67 ;;
+ alphaev68) ax_gcc_arch="ev68 ev67" ;;
+ alphaev69) ax_gcc_arch="ev69 ev68 ev67" ;;
+ alphaev7) ax_gcc_arch="ev7 ev69 ev68 ev67" ;;
+ alphaev79) ax_gcc_arch="ev79 ev7 ev69 ev68 ev67" ;;
+
+ powerpc*)
+ cputype=`((grep cpu /proc/cpuinfo | head -n 1 | cut -d: -f2 | cut -d, -f1 | sed 's/ //g') ; /usr/bin/machine ; /bin/machine; grep CPU /var/run/dmesg.boot | head -n 1 | cut -d" " -f2) 2> /dev/null`
+ cputype=`echo $cputype | sed -e 's/ppc//g;s/ *//g'`
+ case $cputype in
+ *750*) ax_gcc_arch="750 G3" ;;
+ *740[0-9]*) ax_gcc_arch="$cputype 7400 G4" ;;
+ *74[4-5][0-9]*) ax_gcc_arch="$cputype 7450 G4" ;;
+ *74[0-9][0-9]*) ax_gcc_arch="$cputype G4" ;;
+ *970*) ax_gcc_arch="970 G5 power4";;
+ *POWER4*|*power4*|*gq*) ax_gcc_arch="power4 970";;
+ *POWER5*|*power5*|*gr*|*gs*) ax_gcc_arch="power5 power4 970";;
+ 603ev|8240) ax_gcc_arch="$cputype 603e 603";;
+ *) ax_gcc_arch=$cputype ;;
+ esac
+ ax_gcc_arch="$ax_gcc_arch powerpc"
+ ;;
+esac
+fi # not cross-compiling
+fi # guess arch
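For reference on the cpuid patterns matched above: leaf 0 reports the CPU vendor string in EBX, EDX, ECX, so the second colon-separated field of the cached "eax:ebx:ecx:edx" value identifies the vendor; 0x756e6547 is "Genu" (GenuineIntel), 0x68747541 is "Auth" (AuthenticAMD) and 0x746e6543 is "Cent" (IDT/Centaur). A toy check with a made-up cached value:

  demo_cpuid_0='d:756e6547:6c65746e:49656e69'
  case $demo_cpuid_0 in
    *:756e6547:*:*) echo Intel ;;
    *:68747541:*:*) echo AMD ;;
    *:746e6543:*:*) echo IDT ;;
  esac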
+
+if test "x$ax_gcc_arch" != x -a "x$ax_gcc_arch" != xno; then
+for arch in $ax_gcc_arch; do
+ if test "x$acx_maxopt_portable" = xyes; then # if we require portable code
+ flags="-mtune=$arch"
+ # -mcpu=$arch and m$arch generate nonportable code on every arch except
+ # x86. And some other arches (e.g. Alpha) don't accept -mtune. Grrr.
+ case $host_cpu in i*86|x86_64*) flags="$flags -mcpu=$arch -m$arch";; esac
+ else
+ flags="-march=$arch -mcpu=$arch -m$arch"
+ fi
+ for flag in $flags; do
+ as_CACHEVAR=`$as_echo "ax_cv_check_cflags__$flag" | $as_tr_sh`
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts $flag" >&5
+$as_echo_n "checking whether C compiler accepts $flag... " >&6; }
+if eval \${$as_CACHEVAR+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ ax_check_save_flags=$CFLAGS
+ CFLAGS="$CFLAGS $flag"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ eval "$as_CACHEVAR=yes"
+else
+ eval "$as_CACHEVAR=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ CFLAGS=$ax_check_save_flags
+fi
+eval ac_res=\$$as_CACHEVAR
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+if test x"`eval 'as_val=${'$as_CACHEVAR'};$as_echo "$as_val"'`" = xyes; then :
+ ax_cv_gcc_archflag=$flag; break
+else
+ :
+fi
+
+ done
+ test "x$ax_cv_gcc_archflag" = xunknown || break
+done
+fi
+
+fi # $GCC=yes
+
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for gcc architecture flag" >&5
+$as_echo_n "checking for gcc architecture flag... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_gcc_archflag" >&5
+$as_echo "$ax_cv_gcc_archflag" >&6; }
+if test "x$ax_cv_gcc_archflag" = xunknown; then
+ :
+else
+ CFLAGS="$CFLAGS $ax_cv_gcc_archflag"
+fi
+
+ ;;
+ esac
+
+ if test -z "$CFLAGS"; then
+ echo ""
+ echo "********************************************************"
+ echo "* WARNING: Don't know the best CFLAGS for this system *"
+ echo "* Use ./configure CFLAGS=... to specify your own flags *"
+ echo "* (otherwise, a default of CFLAGS=-O3 will be used) *"
+ echo "********************************************************"
+ echo ""
+ CFLAGS="-O3"
+ fi
+
+ as_CACHEVAR=`$as_echo "ax_cv_check_cflags__$CFLAGS" | $as_tr_sh`
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C compiler accepts $CFLAGS" >&5
+$as_echo_n "checking whether C compiler accepts $CFLAGS... " >&6; }
+if eval \${$as_CACHEVAR+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ ax_check_save_flags=$CFLAGS
+ CFLAGS="$CFLAGS $CFLAGS"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ eval "$as_CACHEVAR=yes"
+else
+ eval "$as_CACHEVAR=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ CFLAGS=$ax_check_save_flags
+fi
+eval ac_res=\$$as_CACHEVAR
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+if test x"`eval 'as_val=${'$as_CACHEVAR'};$as_echo "$as_val"'`" = xyes; then :
+ :
+else
+
+ echo ""
+ echo "********************************************************"
+ echo "* WARNING: The guessed CFLAGS don't seem to work with *"
+ echo "* your compiler. *"
+ echo "* Use ./configure CFLAGS=... to specify your own flags *"
+ echo "********************************************************"
+ echo ""
+ CFLAGS=""
+
+fi
+
+
+fi
+
+# The AX_CFLAGS_WARN_ALL macro doesn't currently work for the sunpro
+# compiler.
+if test "$ax_cv_c_compiler_vendor" != "sun"; then
+ if ${CFLAGS+:} false; then :
+ case " $CFLAGS " in
+ *" "*)
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: : CFLAGS already contains "; } >&5
+ (: CFLAGS already contains ) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ ;;
+ *)
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: : CFLAGS=\"\$CFLAGS \""; } >&5
+ (: CFLAGS="$CFLAGS ") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ CFLAGS="$CFLAGS "
+ ;;
+ esac
+else
+ CFLAGS=""
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking CFLAGS for maximum warnings" >&5
+$as_echo_n "checking CFLAGS for maximum warnings... " >&6; }
+if ${ac_cv_cflags_warn_all+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_cv_cflags_warn_all="no, unknown"
+ac_save_CFLAGS="$CFLAGS"
+for ac_arg in "-warn all % -warn all" "-pedantic % -Wall" "-xstrconst % -v" "-std1 % -verbose -w0 -warnprotos" "-qlanglvl=ansi % -qsrcmsg -qinfo=all:noppt:noppc:noobs:nocnd" "-ansi -ansiE % -fullwarn" "+ESlit % +w1" "-Xc % -pvctl,fullmsg" "-h conform % -h msglevel 2" #
+do CFLAGS="$ac_save_CFLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'`
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_cflags_warn_all=`echo $ac_arg | sed -e 's,.*% *,,'` ; break
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+done
+CFLAGS="$ac_save_CFLAGS"
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cflags_warn_all" >&5
+$as_echo "$ac_cv_cflags_warn_all" >&6; }
+
+case ".$ac_cv_cflags_warn_all" in
+ .ok|.ok,*) ;;
+ .|.no|.no,*) ;;
+ *) if ${CFLAGS+:} false; then :
+ case " $CFLAGS " in
+ *" $ac_cv_cflags_warn_all "*)
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: : CFLAGS already contains \$ac_cv_cflags_warn_all"; } >&5
+ (: CFLAGS already contains $ac_cv_cflags_warn_all) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ ;;
+ *)
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: : CFLAGS=\"\$CFLAGS \$ac_cv_cflags_warn_all\""; } >&5
+ (: CFLAGS="$CFLAGS $ac_cv_cflags_warn_all") 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }
+ CFLAGS="$CFLAGS $ac_cv_cflags_warn_all"
+ ;;
+ esac
+else
+ CFLAGS="$ac_cv_cflags_warn_all"
+fi
+ ;;
+esac
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+fi
+
+if test "x$GCC" = "xyes"; then
+ CFLAGS="$CFLAGS -fexceptions"
+ touch local.exp
+else
+ cat > local.exp <<EOF
+set CC_FOR_TARGET "$CC"
+EOF
+fi
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5
$as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; }
@@ -11005,7 +13116,7 @@ fi
for ac_header in sys/mman.h
do :
ac_fn_c_check_header_mongrel "$LINENO" "sys/mman.h" "ac_cv_header_sys_mman_h" "$ac_includes_default"
-if test "x$ac_cv_header_sys_mman_h" = x""yes; then :
+if test "x$ac_cv_header_sys_mman_h" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_SYS_MMAN_H 1
_ACEOF
@@ -11017,7 +13128,7 @@ done
for ac_func in mmap
do :
ac_fn_c_check_func "$LINENO" "mmap" "ac_cv_func_mmap"
-if test "x$ac_cv_func_mmap" = x""yes; then :
+if test "x$ac_cv_func_mmap" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_MMAP 1
_ACEOF
@@ -11027,7 +13138,7 @@ done
ac_fn_c_check_header_mongrel "$LINENO" "sys/mman.h" "ac_cv_header_sys_mman_h" "$ac_includes_default"
-if test "x$ac_cv_header_sys_mman_h" = x""yes; then :
+if test "x$ac_cv_header_sys_mman_h" = xyes; then :
libffi_header_sys_mman_h=yes
else
libffi_header_sys_mman_h=no
@@ -11035,7 +13146,7 @@ fi
ac_fn_c_check_func "$LINENO" "mmap" "ac_cv_func_mmap"
-if test "x$ac_cv_func_mmap" = x""yes; then :
+if test "x$ac_cv_func_mmap" = xyes; then :
libffi_func_mmap=yes
else
libffi_func_mmap=no
@@ -11049,7 +13160,7 @@ if test "$libffi_header_sys_mman_h" != yes \
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether read-only mmap of a plain file works" >&5
$as_echo_n "checking whether read-only mmap of a plain file works... " >&6; }
-if test "${ac_cv_func_mmap_file+set}" = set; then :
+if ${ac_cv_func_mmap_file+:} false; then :
$as_echo_n "(cached) " >&6
else
# Add a system to this blacklist if
@@ -11068,7 +13179,7 @@ fi
$as_echo "$ac_cv_func_mmap_file" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether mmap from /dev/zero works" >&5
$as_echo_n "checking whether mmap from /dev/zero works... " >&6; }
-if test "${ac_cv_func_mmap_dev_zero+set}" = set; then :
+if ${ac_cv_func_mmap_dev_zero+:} false; then :
$as_echo_n "(cached) " >&6
else
# Add a system to this blacklist if it has mmap() but /dev/zero
@@ -11094,7 +13205,7 @@ $as_echo "$ac_cv_func_mmap_dev_zero" >&6; }
# Unlike /dev/zero, the MAP_ANON(YMOUS) defines can be probed for.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for MAP_ANON(YMOUS)" >&5
$as_echo_n "checking for MAP_ANON(YMOUS)... " >&6; }
-if test "${ac_cv_decl_map_anon+set}" = set; then :
+if ${ac_cv_decl_map_anon+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -11130,7 +13241,7 @@ $as_echo "$ac_cv_decl_map_anon" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether mmap with MAP_ANON(YMOUS) works" >&5
$as_echo_n "checking whether mmap with MAP_ANON(YMOUS) works... " >&6; }
-if test "${ac_cv_func_mmap_anon+set}" = set; then :
+if ${ac_cv_func_mmap_anon+:} false; then :
$as_echo_n "(cached) " >&6
else
# Add a system to this blacklist if it has mmap() and MAP_ANON or
@@ -11178,9 +13289,13 @@ fi
TARGETDIR="unknown"
case "$host" in
+ aarch64*-*-*)
+ TARGET=AARCH64; TARGETDIR=aarch64
+ ;;
+
alpha*-*-*)
TARGET=ALPHA; TARGETDIR=alpha;
- # Support 128-bit long double, changable via command-line switch.
+ # Support 128-bit long double, changeable via command-line switch.
HAVE_LONG_DOUBLE='defined(__LONG_DOUBLE_128__)'
;;
@@ -11194,12 +13309,20 @@ case "$host" in
amd64-*-freebsd*)
TARGET=X86_64; TARGETDIR=x86
+ ;;
+
+ amd64-*-freebsd*)
+ TARGET=X86_64; TARGETDIR=x86
;;
avr32*-*-*)
TARGET=AVR32; TARGETDIR=avr32
;;
+ bfin*)
+ TARGET=BFIN; TARGETDIR=bfin
+ ;;
+
cris-*-*)
TARGET=LIBFFI_CRIS; TARGETDIR=cris
;;
@@ -11208,7 +13331,7 @@ case "$host" in
TARGET=FRV; TARGETDIR=frv
;;
- hppa*-*-linux* | parisc*-*-linux*)
+ hppa*-*-linux* | parisc*-*-linux* | hppa*-*-openbsd*)
TARGET=PA_LINUX; TARGETDIR=pa
;;
hppa*64-*-hpux*)
@@ -11221,22 +13344,65 @@ case "$host" in
i?86-*-freebsd* | i?86-*-openbsd*)
TARGET=X86_FREEBSD; TARGETDIR=x86
;;
- i?86-win32* | i?86-*-cygwin* | i?86-*-mingw*)
+ i?86-win32* | i?86-*-cygwin* | i?86-*-mingw* | i?86-*-os2* | i?86-*-interix*)
TARGET=X86_WIN32; TARGETDIR=x86
- # All mingw/cygwin/win32 builds require this for sharedlib
- AM_LTLDFLAGS="-no-undefined"
+ # All mingw/cygwin/win32 builds require -no-undefined for sharedlib.
+ # We must also check with_cross_host to decide if this is a native
+ # or cross-build and select where to install dlls appropriately.
+ if test -n "$with_cross_host" &&
+ test x"$with_cross_host" != x"no"; then
+ AM_LTLDFLAGS='-no-undefined -bindir "$(toolexeclibdir)"';
+ else
+ AM_LTLDFLAGS='-no-undefined -bindir "$(bindir)"';
+ fi
;;
i?86-*-darwin*)
TARGET=X86_DARWIN; TARGETDIR=x86
;;
i?86-*-solaris2.1[0-9]*)
- TARGET=X86_64; TARGETDIR=x86
+ TARGETDIR=x86
+ if test $ac_cv_sizeof_size_t = 4; then
+ TARGET=X86;
+ else
+ TARGET=X86_64;
+ fi
;;
+
i*86-*-nto-qnx*)
TARGET=X86; TARGETDIR=x86
;;
- i?86-*-*)
- TARGET=X86; TARGETDIR=x86
+
+ x86_64-*-darwin*)
+ TARGET=X86_DARWIN; TARGETDIR=x86
+ ;;
+
+ x86_64-*-cygwin* | x86_64-*-mingw*)
+ TARGET=X86_WIN64; TARGETDIR=x86
+ # All mingw/cygwin/win32 builds require -no-undefined for sharedlib.
+ # We must also check with_cross_host to decide if this is a native
+ # or cross-build and select where to install dlls appropriately.
+ if test -n "$with_cross_host" &&
+ test x"$with_cross_host" != x"no"; then
+ AM_LTLDFLAGS='-no-undefined -bindir "$(toolexeclibdir)"';
+ else
+ AM_LTLDFLAGS='-no-undefined -bindir "$(bindir)"';
+ fi
+ ;;
+
+ i?86-*-* | x86_64-*-*)
+ TARGETDIR=x86
+ if test $ac_cv_sizeof_size_t = 4; then
+ case "$host" in
+ *-gnux32)
+ TARGET=X86_64
+ ;;
+ *)
+ TARGET=X86
+ ;;
+ esac
+ else
+ TARGET=X86_64;
+ fi
;;
ia64*-*-*)
@@ -11251,10 +13417,22 @@ case "$host" in
TARGET=M68K; TARGETDIR=m68k
;;
- mips-sgi-irix5.* | mips-sgi-irix6.*)
+ microblaze*-*-*)
+ TARGET=MICROBLAZE; TARGETDIR=microblaze
+ ;;
+
+ moxie-*-*)
+ TARGET=MOXIE; TARGETDIR=moxie
+ ;;
+
+ metag-*-*)
+ TARGET=METAG; TARGETDIR=metag
+ ;;
+
+ mips-sgi-irix5.* | mips-sgi-irix6.* | mips*-*-rtems*)
TARGET=MIPS_IRIX; TARGETDIR=mips
;;
- mips*-*-linux*)
+ mips*-*-linux* | mips*-*-openbsd*)
# Support 128-bit long double for NewABI.
HAVE_LONG_DOUBLE='defined(__mips64)'
TARGET=MIPS_IRIX; TARGETDIR=mips
@@ -11263,18 +13441,24 @@ case "$host" in
powerpc*-*-linux* | powerpc-*-sysv*)
TARGET=POWERPC; TARGETDIR=powerpc
;;
+ powerpc-*-amigaos*)
+ TARGET=POWERPC; TARGETDIR=powerpc
+ ;;
powerpc-*-beos*)
TARGET=POWERPC; TARGETDIR=powerpc
;;
- powerpc-*-darwin*)
+ powerpc-*-darwin* | powerpc64-*-darwin*)
TARGET=POWERPC_DARWIN; TARGETDIR=powerpc
;;
powerpc-*-aix* | rs6000-*-aix*)
TARGET=POWERPC_AIX; TARGETDIR=powerpc
;;
- powerpc-*-freebsd*)
+ powerpc-*-freebsd* | powerpc-*-openbsd*)
TARGET=POWERPC_FREEBSD; TARGETDIR=powerpc
;;
+ powerpc64-*-freebsd*)
+ TARGET=POWERPC; TARGETDIR=powerpc
+ ;;
powerpc*-*-rtems*)
TARGET=POWERPC; TARGETDIR=powerpc
;;
@@ -11294,24 +13478,21 @@ case "$host" in
TARGET=SPARC; TARGETDIR=sparc
;;
- x86_64-*-darwin*)
- TARGET=X86_DARWIN; TARGETDIR=x86
- ;;
+ tile*-*)
+ TARGET=TILE; TARGETDIR=tile
+ ;;
- x86_64-*-cygwin* | x86_64-*-mingw*)
- TARGET=X86_WIN64; TARGETDIR=x86
+ xtensa*-*)
+ TARGET=XTENSA; TARGETDIR=xtensa
;;
- x86_64-*-*)
- TARGET=X86_64; TARGETDIR=x86
- ;;
esac
if test $TARGETDIR = unknown; then
- as_fn_error "\"libffi has not been ported to $host.\"" "$LINENO" 5
+ as_fn_error $? "\"libffi has not been ported to $host.\"" "$LINENO" 5
fi
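A compact restatement of the generic x86 selection above (the host triples and helper name are examples only, not from this build):

  demo_select_x86_target () {
    # $1 = host triple, $2 = sizeof(size_t)
    if test "$2" = 4; then
      case $1 in
        *-gnux32) echo X86_64 ;;   # x32 ABI: 32-bit size_t on a 64-bit ISA
        *)        echo X86    ;;
      esac
    else
      echo X86_64
    fi
  }
  demo_select_x86_target i686-pc-linux-gnu 4          # X86
  demo_select_x86_target x86_64-pc-linux-gnux32 4     # X86_64
  demo_select_x86_target x86_64-unknown-linux-gnu 8   # X86_64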
if expr x$TARGET : 'xMIPS' > /dev/null; then
@@ -11322,6 +13503,14 @@ else
MIPS_FALSE=
fi
+ if test x$TARGET = xBFIN; then
+ BFIN_TRUE=
+ BFIN_FALSE='#'
+else
+ BFIN_TRUE='#'
+ BFIN_FALSE=
+fi
+
if test x$TARGET = xSPARC; then
SPARC_TRUE=
SPARC_FALSE='#'
@@ -11402,6 +13591,30 @@ else
M68K_FALSE=
fi
+ if test x$TARGET = xMICROBLAZE; then
+ MICROBLAZE_TRUE=
+ MICROBLAZE_FALSE='#'
+else
+ MICROBLAZE_TRUE='#'
+ MICROBLAZE_FALSE=
+fi
+
+ if test x$TARGET = xMETAG; then
+ METAG_TRUE=
+ METAG_FALSE='#'
+else
+ METAG_TRUE='#'
+ METAG_FALSE=
+fi
+
+ if test x$TARGET = xMOXIE; then
+ MOXIE_TRUE=
+ MOXIE_FALSE='#'
+else
+ MOXIE_TRUE='#'
+ MOXIE_FALSE=
+fi
+
if test x$TARGET = xPOWERPC; then
POWERPC_TRUE=
POWERPC_FALSE='#'
@@ -11434,6 +13647,14 @@ else
POWERPC_FREEBSD_FALSE=
fi
+ if test x$TARGET = xAARCH64; then
+ AARCH64_TRUE=
+ AARCH64_FALSE='#'
+else
+ AARCH64_TRUE='#'
+ AARCH64_FALSE=
+fi
+
if test x$TARGET = xARM; then
ARM_TRUE=
ARM_FALSE='#'
@@ -11522,10 +13743,26 @@ else
PA64_HPUX_FALSE=
fi
+ if test x$TARGET = xTILE; then
+ TILE_TRUE=
+ TILE_FALSE='#'
+else
+ TILE_TRUE='#'
+ TILE_FALSE=
+fi
+
+ if test x$TARGET = xXTENSA; then
+ XTENSA_TRUE=
+ XTENSA_FALSE='#'
+else
+ XTENSA_TRUE='#'
+ XTENSA_FALSE=
+fi
+
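
Each of the new per-target blocks above and below is the shell expansion of an Automake conditional: the _TRUE variable is empty and _FALSE is '#' when the condition holds, and the reverse otherwise, so Makefile.in lines prefixed with @FOO_TRUE@ or @FOO_FALSE@ are kept or commented out by config.status. A hedged illustration with a made-up MYARCH conditional and a hypothetical variable name:

# Hypothetical conditional, mirroring the BFIN/MICROBLAZE/TILE/XTENSA blocks above.
TARGET=MYARCH
if test x$TARGET = xMYARCH; then
  MYARCH_TRUE=
  MYARCH_FALSE='#'
else
  MYARCH_TRUE='#'
  MYARCH_FALSE=
fi
# In Makefile.in, config.status substitutes the markers, so only one of the
# two lines survives uncommented in the generated Makefile:
#   @MYARCH_TRUE@extra_SOURCES = src/myarch/ffi.c
#   @MYARCH_FALSE@extra_SOURCES =
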
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
$as_echo_n "checking for ANSI C header files... " >&6; }
-if test "${ac_cv_header_stdc+set}" = set; then :
+if ${ac_cv_header_stdc+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -11638,7 +13875,7 @@ fi
for ac_func in memcpy
do :
ac_fn_c_check_func "$LINENO" "memcpy" "ac_cv_func_memcpy"
-if test "x$ac_cv_func_memcpy" = x""yes; then :
+if test "x$ac_cv_func_memcpy" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_MEMCPY 1
_ACEOF
@@ -11646,11 +13883,22 @@ _ACEOF
fi
done
+ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default"
+if test "x$ac_cv_type_size_t" = xyes; then :
+
+else
+
+cat >>confdefs.h <<_ACEOF
+#define size_t unsigned int
+_ACEOF
+
+fi
+
# The Ultrix 4.2 mips builtin alloca declared by alloca.h only works
# for constant arguments. Useless!
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for working alloca.h" >&5
$as_echo_n "checking for working alloca.h... " >&6; }
-if test "${ac_cv_working_alloca_h+set}" = set; then :
+if ${ac_cv_working_alloca_h+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -11683,7 +13931,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for alloca" >&5
$as_echo_n "checking for alloca... " >&6; }
-if test "${ac_cv_func_alloca_works+set}" = set; then :
+if ${ac_cv_func_alloca_works+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -11702,7 +13950,7 @@ else
#pragma alloca
# else
# ifndef alloca /* predefined by HP cc +Olibcalls */
-char *alloca ();
+void *alloca (size_t);
# endif
# endif
# endif
@@ -11746,7 +13994,7 @@ $as_echo "#define C_ALLOCA 1" >>confdefs.h
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether \`alloca.c' needs Cray hooks" >&5
$as_echo_n "checking whether \`alloca.c' needs Cray hooks... " >&6; }
-if test "${ac_cv_os_cray+set}" = set; then :
+if ${ac_cv_os_cray+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -11773,8 +14021,7 @@ if test $ac_cv_os_cray = yes; then
for ac_func in _getb67 GETB67 getb67; do
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
- if test "x$as_val" = x""yes; then :
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
cat >>confdefs.h <<_ACEOF
#define CRAY_STACKSEG_END $ac_func
@@ -11788,7 +14035,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking stack direction for C alloca" >&5
$as_echo_n "checking stack direction for C alloca... " >&6; }
-if test "${ac_cv_c_stack_direction+set}" = set; then :
+if ${ac_cv_c_stack_direction+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "$cross_compiling" = yes; then :
@@ -11798,23 +14045,20 @@ else
/* end confdefs.h. */
$ac_includes_default
int
-find_stack_direction ()
+find_stack_direction (int *addr, int depth)
{
- static char *addr = 0;
- auto char dummy;
- if (addr == 0)
- {
- addr = &dummy;
- return find_stack_direction ();
- }
- else
- return (&dummy > addr) ? 1 : -1;
+ int dir, dummy = 0;
+ if (! addr)
+ addr = &dummy;
+ *addr = addr < &dummy ? 1 : addr == &dummy ? 0 : -1;
+ dir = depth ? find_stack_direction (addr, depth - 1) : 0;
+ return dir + dummy;
}
int
-main ()
+main (int argc, char **argv)
{
- return find_stack_direction () < 0;
+ return find_stack_direction (0, argc + !argv + 20) < 0;
}
_ACEOF
if ac_fn_c_try_run "$LINENO"; then :
@@ -11843,7 +14087,7 @@ fi
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of double" >&5
$as_echo_n "checking size of double... " >&6; }
-if test "${ac_cv_sizeof_double+set}" = set; then :
+if ${ac_cv_sizeof_double+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (double))" "ac_cv_sizeof_double" "$ac_includes_default"; then :
@@ -11852,9 +14096,8 @@ else
if test "$ac_cv_type_double" = yes; then
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-{ as_fn_set_status 77
-as_fn_error "cannot compute sizeof (double)
-See \`config.log' for more details." "$LINENO" 5; }; }
+as_fn_error 77 "cannot compute sizeof (double)
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_double=0
fi
@@ -11877,7 +14120,7 @@ _ACEOF
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long double" >&5
$as_echo_n "checking size of long double... " >&6; }
-if test "${ac_cv_sizeof_long_double+set}" = set; then :
+if ${ac_cv_sizeof_long_double+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long double))" "ac_cv_sizeof_long_double" "$ac_includes_default"; then :
@@ -11886,9 +14129,8 @@ else
if test "$ac_cv_type_long_double" = yes; then
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-{ as_fn_set_status 77
-as_fn_error "cannot compute sizeof (long double)
-See \`config.log' for more details." "$LINENO" 5; }; }
+as_fn_error 77 "cannot compute sizeof (long double)
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_long_double=0
fi
@@ -11922,7 +14164,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5
$as_echo_n "checking whether byte ordering is bigendian... " >&6; }
-if test "${ac_cv_c_bigendian+set}" = set; then :
+if ${ac_cv_c_bigendian+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_cv_c_bigendian=unknown
@@ -12140,18 +14382,18 @@ $as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h
;; #(
*)
- as_fn_error "unknown endianness
+ as_fn_error $? "unknown endianness
presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;;
esac
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler .cfi pseudo-op support" >&5
$as_echo_n "checking assembler .cfi pseudo-op support... " >&6; }
-if test "${libffi_cv_as_cfi_pseudo_op+set}" = set; then :
+if ${gcc_cv_as_cfi_pseudo_op+:} false; then :
$as_echo_n "(cached) " >&6
else
- libffi_cv_as_cfi_pseudo_op=unknown
+ gcc_cv_as_cfi_pseudo_op=unknown
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
asm (".cfi_startproc\n\t.cfi_endproc");
@@ -12164,25 +14406,26 @@ main ()
}
_ACEOF
if ac_fn_c_try_compile "$LINENO"; then :
- libffi_cv_as_cfi_pseudo_op=yes
+ gcc_cv_as_cfi_pseudo_op=yes
else
- libffi_cv_as_cfi_pseudo_op=no
+ gcc_cv_as_cfi_pseudo_op=no
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libffi_cv_as_cfi_pseudo_op" >&5
-$as_echo "$libffi_cv_as_cfi_pseudo_op" >&6; }
-if test "x$libffi_cv_as_cfi_pseudo_op" = xyes; then
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_cfi_pseudo_op" >&5
+$as_echo "$gcc_cv_as_cfi_pseudo_op" >&6; }
+ if test "x$gcc_cv_as_cfi_pseudo_op" = xyes; then
$as_echo "#define HAVE_AS_CFI_PSEUDO_OP 1" >>confdefs.h
-fi
+ fi
+
if test x$TARGET = xSPARC; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler and linker support unaligned pc related relocs" >&5
$as_echo_n "checking assembler and linker support unaligned pc related relocs... " >&6; }
-if test "${libffi_cv_as_sparc_ua_pcrel+set}" = set; then :
+if ${libffi_cv_as_sparc_ua_pcrel+:} false; then :
$as_echo_n "(cached) " >&6
else
@@ -12221,7 +14464,7 @@ $as_echo "#define HAVE_AS_SPARC_UA_PCREL 1" >>confdefs.h
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler .register pseudo-op support" >&5
$as_echo_n "checking assembler .register pseudo-op support... " >&6; }
-if test "${libffi_cv_as_register_pseudo_op+set}" = set; then :
+if ${libffi_cv_as_register_pseudo_op+:} false; then :
$as_echo_n "(cached) " >&6
else
@@ -12229,11 +14472,11 @@ else
# Check if we have .register
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
-asm (".register %g2, #scratch");
+
int
main ()
{
-
+asm (".register %g2, #scratch");
;
return 0;
}
@@ -12258,14 +14501,14 @@ fi
if test x$TARGET = xX86 || test x$TARGET = xX86_WIN32 || test x$TARGET = xX86_64; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler supports pc related relocs" >&5
$as_echo_n "checking assembler supports pc related relocs... " >&6; }
-if test "${libffi_cv_as_x86_pcrel+set}" = set; then :
+if ${libffi_cv_as_x86_pcrel+:} false; then :
$as_echo_n "(cached) " >&6
else
- libffi_cv_as_x86_pcrel=yes
+ libffi_cv_as_x86_pcrel=no
echo '.text; foo: nop; .data; .long foo-.; .text' > conftest.s
- if $CC $CFLAGS -c conftest.s 2>&1 | grep -i warning > /dev/null; then
- libffi_cv_as_x86_pcrel=no
+ if $CC $CFLAGS -c conftest.s > /dev/null 2>&1; then
+ libffi_cv_as_x86_pcrel=yes
fi
fi
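
The PC-relative reloc probe is also inverted by this hunk: it now starts from "no" and only flips to "yes" when the test input actually assembles, instead of assuming "yes" and scanning compiler output for warnings, so an assembler that rejects the construct outright no longer yields a false positive. Reduced to a standalone sketch (assuming $CC and $CFLAGS are set as configure would have them):

# Does the assembler accept a PC-relative data reloc?  (sketch)
libffi_cv_as_x86_pcrel=no
echo '.text; foo: nop; .data; .long foo-.; .text' > conftest.s
if $CC $CFLAGS -c conftest.s > /dev/null 2>&1; then
  libffi_cv_as_x86_pcrel=yes   # assembled cleanly
fi
rm -f conftest.s conftest.o
echo "pcrel support: $libffi_cv_as_x86_pcrel"
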
@@ -12276,77 +14519,255 @@ $as_echo "$libffi_cv_as_x86_pcrel" >&6; }
$as_echo "#define HAVE_AS_X86_PCREL 1" >>confdefs.h
fi
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler .ascii pseudo-op support" >&5
+$as_echo_n "checking assembler .ascii pseudo-op support... " >&6; }
+if ${libffi_cv_as_ascii_pseudo_op+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ libffi_cv_as_ascii_pseudo_op=unknown
+ # Check if we have .ascii
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+asm (".ascii \\"string\\"");
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ libffi_cv_as_ascii_pseudo_op=yes
+else
+ libffi_cv_as_ascii_pseudo_op=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libffi_cv_as_ascii_pseudo_op" >&5
+$as_echo "$libffi_cv_as_ascii_pseudo_op" >&6; }
+ if test "x$libffi_cv_as_ascii_pseudo_op" = xyes; then
+
+$as_echo "#define HAVE_AS_ASCII_PSEUDO_OP 1" >>confdefs.h
+
+ fi
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler .string pseudo-op support" >&5
+$as_echo_n "checking assembler .string pseudo-op support... " >&6; }
+if ${libffi_cv_as_string_pseudo_op+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ libffi_cv_as_string_pseudo_op=unknown
+ # Check if we have .string
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+asm (".string \\"string\\"");
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ libffi_cv_as_string_pseudo_op=yes
+else
+ libffi_cv_as_string_pseudo_op=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libffi_cv_as_string_pseudo_op" >&5
+$as_echo "$libffi_cv_as_string_pseudo_op" >&6; }
+ if test "x$libffi_cv_as_string_pseudo_op" = xyes; then
+
+$as_echo "#define HAVE_AS_STRING_PSEUDO_OP 1" >>confdefs.h
+
+ fi
fi
+# On PaX-enabled kernels that have MPROTECT enabled we can't use PROT_EXEC.
+# Check whether --enable-pax_emutramp was given.
+if test "${enable_pax_emutramp+set}" = set; then :
+ enableval=$enable_pax_emutramp; if test "$enable_pax_emutramp" = "yes"; then
+
+$as_echo "#define FFI_MMAP_EXEC_EMUTRAMP_PAX 1" >>confdefs.h
+
+ fi
+fi
+
+
+if test x$TARGET = xX86_WIN64; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _ prefix in compiled symbols" >&5
+$as_echo_n "checking for _ prefix in compiled symbols... " >&6; }
+if ${lt_cv_sys_symbol_underscore+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ lt_cv_sys_symbol_underscore=no
+ cat > conftest.$ac_ext <<_LT_EOF
+void nm_test_func(){}
+int main(){nm_test_func;return 0;}
+_LT_EOF
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+ (eval $ac_compile) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; }; then
+ # Now try to grab the symbols.
+ ac_nlist=conftest.nm
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $ac_nlist\""; } >&5
+ (eval $NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $ac_nlist) 2>&5
+ ac_status=$?
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+ test $ac_status = 0; } && test -s "$ac_nlist"; then
+ # See whether the symbols have a leading underscore.
+ if grep '^. _nm_test_func' "$ac_nlist" >/dev/null; then
+ lt_cv_sys_symbol_underscore=yes
+ else
+ if grep '^. nm_test_func ' "$ac_nlist" >/dev/null; then
+ :
+ else
+ echo "configure: cannot find nm_test_func in $ac_nlist" >&5
+ fi
+ fi
+ else
+ echo "configure: cannot run $lt_cv_sys_global_symbol_pipe" >&5
+ fi
+ else
+ echo "configure: failed program was:" >&5
+ cat conftest.c >&5
+ fi
+ rm -rf conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_symbol_underscore" >&5
+$as_echo "$lt_cv_sys_symbol_underscore" >&6; }
+ sys_symbol_underscore=$lt_cv_sys_symbol_underscore
+
+
+ if test "x$sys_symbol_underscore" = xyes; then
+
+$as_echo "#define SYMBOL_UNDERSCORE 1" >>confdefs.h
+
+ fi
+fi
+
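
The new X86_WIN64 branch above reuses libtool's underscore probe: compile a throwaway function, run the object through $NM and the global-symbol pipe, and see whether the symbol surfaces as _nm_test_func or plain nm_test_func. A stripped-down sketch of the same idea, assuming only cc and nm on PATH and skipping libtool's symbol pipe:

# Minimal leading-underscore probe, independent of libtool's machinery.
cat > conftest.c <<'EOF'
void nm_test_func(void) {}
int main(void) { nm_test_func(); return 0; }
EOF
sys_symbol_underscore=no
if cc -c conftest.c -o conftest.o 2>/dev/null; then
  if nm conftest.o | grep ' _nm_test_func' >/dev/null 2>&1; then
    sys_symbol_underscore=yes    # this toolchain prepends '_' to C symbols
  fi
fi
rm -f conftest.c conftest.o
echo "leading underscore: $sys_symbol_underscore"
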
+FFI_EXEC_TRAMPOLINE_TABLE=0
case "$target" in
- *-apple-darwin10* | *-*-freebsd* | *-*-openbsd* | *-pc-solaris*)
+ *arm*-apple-darwin*)
+ FFI_EXEC_TRAMPOLINE_TABLE=1
+
+$as_echo "#define FFI_EXEC_TRAMPOLINE_TABLE 1" >>confdefs.h
+
+ ;;
+ *-apple-darwin1* | *-*-freebsd* | *-*-kfreebsd* | *-*-openbsd* | *-pc-solaris*)
$as_echo "#define FFI_MMAP_EXEC_WRIT 1" >>confdefs.h
;;
esac
+ if test x$FFI_EXEC_TRAMPOLINE_TABLE = x1; then
+ FFI_EXEC_TRAMPOLINE_TABLE_TRUE=
+ FFI_EXEC_TRAMPOLINE_TABLE_FALSE='#'
+else
+ FFI_EXEC_TRAMPOLINE_TABLE_TRUE='#'
+ FFI_EXEC_TRAMPOLINE_TABLE_FALSE=
+fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether .eh_frame section should be read-only" >&5
-$as_echo_n "checking whether .eh_frame section should be read-only... " >&6; }
-if test "${libffi_cv_ro_eh_frame+set}" = set; then :
+
+
+if test x$TARGET = xX86_64; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler supports unwind section type" >&5
+$as_echo_n "checking assembler supports unwind section type... " >&6; }
+if ${libffi_cv_as_x86_64_unwind_section_type+:} false; then :
$as_echo_n "(cached) " >&6
else
- libffi_cv_ro_eh_frame=no
- echo 'extern void foo (void); void bar (void) { foo (); foo (); }' > conftest.c
- if $CC $CFLAGS -S -fpic -fexceptions -o conftest.s conftest.c > /dev/null 2>&1; then
- if grep '.section.*eh_frame.*"a"' conftest.s > /dev/null; then
- libffi_cv_ro_eh_frame=yes
- elif grep '.section.*eh_frame.*#alloc' conftest.c \
- | grep -v '#write' > /dev/null; then
- libffi_cv_ro_eh_frame=yes
- fi
+ libffi_cv_as_x86_64_unwind_section_type=yes
+ echo '.section .eh_frame,"a",@unwind' > conftest.s
+ if $CC $CFLAGS -c conftest.s 2>&1 | grep -i warning > /dev/null; then
+ libffi_cv_as_x86_64_unwind_section_type=no
fi
- rm -f conftest.*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libffi_cv_as_x86_64_unwind_section_type" >&5
+$as_echo "$libffi_cv_as_x86_64_unwind_section_type" >&6; }
+ if test "x$libffi_cv_as_x86_64_unwind_section_type" = xyes; then
+
+$as_echo "#define HAVE_AS_X86_64_UNWIND_SECTION_TYPE 1" >>confdefs.h
+
+ fi
+fi
+
+if test "x$GCC" = "xyes"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether .eh_frame section should be read-only" >&5
+$as_echo_n "checking whether .eh_frame section should be read-only... " >&6; }
+if ${libffi_cv_ro_eh_frame+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+
+ libffi_cv_ro_eh_frame=no
+ echo 'extern void foo (void); void bar (void) { foo (); foo (); }' > conftest.c
+ if $CC $CFLAGS -c -fpic -fexceptions -o conftest.o conftest.c > /dev/null 2>&1; then
+ objdump -h conftest.o > conftest.dump 2>&1
+ libffi_eh_frame_line=`grep -n eh_frame conftest.dump | cut -d: -f 1`
+ libffi_test_line=`expr $libffi_eh_frame_line + 1`p
+ sed -n $libffi_test_line conftest.dump > conftest.line
+ if grep READONLY conftest.line > /dev/null; then
+ libffi_cv_ro_eh_frame=yes
+ fi
+ fi
+ rm -f conftest.*
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libffi_cv_ro_eh_frame" >&5
$as_echo "$libffi_cv_ro_eh_frame" >&6; }
-if test "x$libffi_cv_ro_eh_frame" = xyes; then
+ if test "x$libffi_cv_ro_eh_frame" = xyes; then
$as_echo "#define HAVE_RO_EH_FRAME 1" >>confdefs.h
$as_echo "#define EH_FRAME_FLAGS \"a\"" >>confdefs.h
-else
+ else
$as_echo "#define EH_FRAME_FLAGS \"aw\"" >>confdefs.h
-fi
+ fi
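
The read-only .eh_frame check is rewritten here to stop grepping generated assembly for a '.section ... "a"' directive; it compiles to an object, dumps the section table with objdump -h, and looks for READONLY on the flags line following the .eh_frame entry, which also copes with assemblers whose section syntax differs. A rough standalone version, assuming gcc and binutils objdump are available:

# Is .eh_frame emitted as a read-only section?  (sketch)
echo 'extern void foo (void); void bar (void) { foo (); foo (); }' > conftest.c
ro_eh_frame=no
if gcc -c -fpic -fexceptions -o conftest.o conftest.c 2>/dev/null; then
  objdump -h conftest.o > conftest.dump 2>&1
  # objdump -h prints each section header on one line and its flags
  # (CONTENTS, ALLOC, READONLY, ...) on the next line.
  eh_line=`grep -n eh_frame conftest.dump | cut -d: -f1`
  flags_line=`expr $eh_line + 1`
  if sed -n "${flags_line}p" conftest.dump | grep READONLY >/dev/null; then
    ro_eh_frame=yes
  fi
fi
rm -f conftest.c conftest.o conftest.dump
echo ".eh_frame read-only: $ro_eh_frame"
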
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __attribute__((visibility(\"hidden\")))" >&5
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __attribute__((visibility(\"hidden\")))" >&5
$as_echo_n "checking for __attribute__((visibility(\"hidden\")))... " >&6; }
-if test "${libffi_cv_hidden_visibility_attribute+set}" = set; then :
+if ${libffi_cv_hidden_visibility_attribute+:} false; then :
$as_echo_n "(cached) " >&6
else
- echo 'int __attribute__ ((visibility ("hidden"))) foo (void) { return 1; }' > conftest.c
- libffi_cv_hidden_visibility_attribute=no
- if { ac_try='${CC-cc} -Werror -S conftest.c -o conftest.s 1>&5'
+ echo 'int __attribute__ ((visibility ("hidden"))) foo (void) { return 1 ; }' > conftest.c
+ libffi_cv_hidden_visibility_attribute=no
+ if { ac_try='${CC-cc} -Werror -S conftest.c -o conftest.s 1>&5'
{ { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
(eval $ac_try) 2>&5
ac_status=$?
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; }; then
- if grep '\.hidden.*foo' conftest.s >/dev/null; then
- libffi_cv_hidden_visibility_attribute=yes
- fi
- fi
- rm -f conftest.*
+ if grep '\.hidden.*foo' conftest.s >/dev/null; then
+ libffi_cv_hidden_visibility_attribute=yes
+ fi
+ fi
+ rm -f conftest.*
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libffi_cv_hidden_visibility_attribute" >&5
$as_echo "$libffi_cv_hidden_visibility_attribute" >&6; }
-if test $libffi_cv_hidden_visibility_attribute = yes; then
+ if test $libffi_cv_hidden_visibility_attribute = yes; then
$as_echo "#define HAVE_HIDDEN_VISIBILITY_ATTRIBUTE 1" >>confdefs.h
+ fi
fi
@@ -12365,6 +14786,14 @@ $as_echo "#define FFI_DEBUG 1" >>confdefs.h
fi
fi
+ if test "$enable_debug" = "yes"; then
+ FFI_DEBUG_TRUE=
+ FFI_DEBUG_FALSE='#'
+else
+ FFI_DEBUG_TRUE='#'
+ FFI_DEBUG_FALSE=
+fi
+
# Check whether --enable-structs was given.
if test "${enable_structs+set}" = set; then :
@@ -12375,6 +14804,14 @@ $as_echo "#define FFI_NO_STRUCTS 1" >>confdefs.h
fi
fi
+ if test "$enable_debug" = "yes"; then
+ FFI_DEBUG_TRUE=
+ FFI_DEBUG_FALSE='#'
+else
+ FFI_DEBUG_TRUE='#'
+ FFI_DEBUG_FALSE=
+fi
+
# Check whether --enable-raw-api was given.
if test "${enable_raw_api+set}" = set; then :
@@ -12396,28 +14833,28 @@ $as_echo "#define USING_PURIFY 1" >>confdefs.h
fi
-if test -n "$with_cross_host" &&
- test x"$with_cross_host" != x"no"; then
- toolexecdir='$(exec_prefix)/$(target_alias)'
- toolexeclibdir='$(toolexecdir)/lib'
+# These variables are only ever used when we cross-build to X86_WIN32.
+# And we only support this with GCC, so...
+if test "x$GCC" = "xyes"; then
+ if test -n "$with_cross_host" &&
+ test x"$with_cross_host" != x"no"; then
+ toolexecdir='$(exec_prefix)/$(target_alias)'
+ toolexeclibdir='$(toolexecdir)/lib'
+ else
+ toolexecdir='$(libdir)/gcc-lib/$(target_alias)'
+ toolexeclibdir='$(libdir)'
+ fi
+ multi_os_directory=`$CC -print-multi-os-directory`
+ case $multi_os_directory in
+ .) ;; # Avoid trailing /.
+ ../*) toolexeclibdir=$toolexeclibdir/$multi_os_directory ;;
+ esac
+
else
- toolexecdir='$(libdir)/gcc-lib/$(target_alias)'
toolexeclibdir='$(libdir)'
fi
-multi_os_directory=`$CC -print-multi-os-directory`
-case $multi_os_directory in
- .) ;; # Avoid trailing /.
- *) toolexeclibdir=$toolexeclibdir/$multi_os_directory ;;
-esac
-
-if test "${multilib}" = "yes"; then
- multilib_arg="--enable-multilib"
-else
- multilib_arg=
-fi
-
ac_config_commands="$ac_config_commands include"
ac_config_commands="$ac_config_commands src"
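
This hunk also narrows the multilib install-path logic to GCC builds and to multi-os directories that actually step outside the default libdir: a plain '.' is dropped as before, but only '../*' results are appended now, where the old code appended anything other than '.'. Roughly, with a hypothetical bi-arch compiler:

# A real run asks the compiler:  multi_os_directory=`$CC -print-multi-os-directory`
toolexeclibdir='$(libdir)'
multi_os_directory=../lib64        # hypothetical answer on a 64-bit multilib host
case $multi_os_directory in
  .)    ;;                                                # default: keep $(libdir)
  ../*) toolexeclibdir=$toolexeclibdir/$multi_os_directory ;;
esac
echo "toolexeclibdir=$toolexeclibdir"    # -> $(libdir)/../lib64
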
@@ -12499,10 +14936,21 @@ $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
:end' >>confcache
if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
if test -w "$cache_file"; then
- test "x$cache_file" != "x/dev/null" &&
+ if test "x$cache_file" != "x/dev/null"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
$as_echo "$as_me: updating cache $cache_file" >&6;}
- cat confcache >$cache_file
+ if test ! -f "$cache_file" || test -h "$cache_file"; then
+ cat confcache >"$cache_file"
+ else
+ case $cache_file in #(
+ */* | ?:*)
+ mv -f confcache "$cache_file"$$ &&
+ mv -f "$cache_file"$$ "$cache_file" ;; #(
+ *)
+ mv -f confcache "$cache_file" ;;
+ esac
+ fi
+ fi
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
@@ -12518,6 +14966,7 @@ DEFS=-DHAVE_CONFIG_H
ac_libobjs=
ac_ltlibobjs=
+U=
for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
# 1. Remove the extension, and $U if already installed.
ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
@@ -12532,6 +14981,14 @@ LIBOBJS=$ac_libobjs
LTLIBOBJS=$ac_ltlibobjs
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5
+$as_echo_n "checking that generated files are newer than configure... " >&6; }
+ if test -n "$am_sleep_pid"; then
+ # Hide warnings about reused PIDs.
+ wait $am_sleep_pid 2>/dev/null
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5
+$as_echo "done" >&6; }
if test -n "$EXEEXT"; then
am__EXEEXT_TRUE=
am__EXEEXT_FALSE='#'
@@ -12541,132 +14998,172 @@ else
fi
if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then
- as_fn_error "conditional \"AMDEP\" was never defined.
+ as_fn_error $? "conditional \"AMDEP\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then
- as_fn_error "conditional \"am__fastdepCC\" was never defined.
+ as_fn_error $? "conditional \"am__fastdepCC\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${am__fastdepCCAS_TRUE}" && test -z "${am__fastdepCCAS_FALSE}"; then
- as_fn_error "conditional \"am__fastdepCCAS\" was never defined.
+ as_fn_error $? "conditional \"am__fastdepCCAS\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then
- as_fn_error "conditional \"MAINTAINER_MODE\" was never defined.
+ as_fn_error $? "conditional \"MAINTAINER_MODE\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${TESTSUBDIR_TRUE}" && test -z "${TESTSUBDIR_FALSE}"; then
- as_fn_error "conditional \"TESTSUBDIR\" was never defined.
+ as_fn_error $? "conditional \"TESTSUBDIR\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${MIPS_TRUE}" && test -z "${MIPS_FALSE}"; then
- as_fn_error "conditional \"MIPS\" was never defined.
+ as_fn_error $? "conditional \"MIPS\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${BFIN_TRUE}" && test -z "${BFIN_FALSE}"; then
+ as_fn_error $? "conditional \"BFIN\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${SPARC_TRUE}" && test -z "${SPARC_FALSE}"; then
- as_fn_error "conditional \"SPARC\" was never defined.
+ as_fn_error $? "conditional \"SPARC\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${X86_TRUE}" && test -z "${X86_FALSE}"; then
- as_fn_error "conditional \"X86\" was never defined.
+ as_fn_error $? "conditional \"X86\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${X86_FREEBSD_TRUE}" && test -z "${X86_FREEBSD_FALSE}"; then
- as_fn_error "conditional \"X86_FREEBSD\" was never defined.
+ as_fn_error $? "conditional \"X86_FREEBSD\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${X86_WIN32_TRUE}" && test -z "${X86_WIN32_FALSE}"; then
- as_fn_error "conditional \"X86_WIN32\" was never defined.
+ as_fn_error $? "conditional \"X86_WIN32\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${X86_WIN64_TRUE}" && test -z "${X86_WIN64_FALSE}"; then
- as_fn_error "conditional \"X86_WIN64\" was never defined.
+ as_fn_error $? "conditional \"X86_WIN64\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${X86_DARWIN_TRUE}" && test -z "${X86_DARWIN_FALSE}"; then
- as_fn_error "conditional \"X86_DARWIN\" was never defined.
+ as_fn_error $? "conditional \"X86_DARWIN\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${ALPHA_TRUE}" && test -z "${ALPHA_FALSE}"; then
- as_fn_error "conditional \"ALPHA\" was never defined.
+ as_fn_error $? "conditional \"ALPHA\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${IA64_TRUE}" && test -z "${IA64_FALSE}"; then
- as_fn_error "conditional \"IA64\" was never defined.
+ as_fn_error $? "conditional \"IA64\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${M32R_TRUE}" && test -z "${M32R_FALSE}"; then
- as_fn_error "conditional \"M32R\" was never defined.
+ as_fn_error $? "conditional \"M32R\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${M68K_TRUE}" && test -z "${M68K_FALSE}"; then
- as_fn_error "conditional \"M68K\" was never defined.
+ as_fn_error $? "conditional \"M68K\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${MICROBLAZE_TRUE}" && test -z "${MICROBLAZE_FALSE}"; then
+ as_fn_error $? "conditional \"MICROBLAZE\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${METAG_TRUE}" && test -z "${METAG_FALSE}"; then
+ as_fn_error $? "conditional \"METAG\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${MOXIE_TRUE}" && test -z "${MOXIE_FALSE}"; then
+ as_fn_error $? "conditional \"MOXIE\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${POWERPC_TRUE}" && test -z "${POWERPC_FALSE}"; then
- as_fn_error "conditional \"POWERPC\" was never defined.
+ as_fn_error $? "conditional \"POWERPC\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${POWERPC_AIX_TRUE}" && test -z "${POWERPC_AIX_FALSE}"; then
- as_fn_error "conditional \"POWERPC_AIX\" was never defined.
+ as_fn_error $? "conditional \"POWERPC_AIX\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${POWERPC_DARWIN_TRUE}" && test -z "${POWERPC_DARWIN_FALSE}"; then
- as_fn_error "conditional \"POWERPC_DARWIN\" was never defined.
+ as_fn_error $? "conditional \"POWERPC_DARWIN\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${POWERPC_FREEBSD_TRUE}" && test -z "${POWERPC_FREEBSD_FALSE}"; then
- as_fn_error "conditional \"POWERPC_FREEBSD\" was never defined.
+ as_fn_error $? "conditional \"POWERPC_FREEBSD\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${AARCH64_TRUE}" && test -z "${AARCH64_FALSE}"; then
+ as_fn_error $? "conditional \"AARCH64\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${ARM_TRUE}" && test -z "${ARM_FALSE}"; then
- as_fn_error "conditional \"ARM\" was never defined.
+ as_fn_error $? "conditional \"ARM\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${AVR32_TRUE}" && test -z "${AVR32_FALSE}"; then
- as_fn_error "conditional \"AVR32\" was never defined.
+ as_fn_error $? "conditional \"AVR32\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${LIBFFI_CRIS_TRUE}" && test -z "${LIBFFI_CRIS_FALSE}"; then
- as_fn_error "conditional \"LIBFFI_CRIS\" was never defined.
+ as_fn_error $? "conditional \"LIBFFI_CRIS\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${FRV_TRUE}" && test -z "${FRV_FALSE}"; then
- as_fn_error "conditional \"FRV\" was never defined.
+ as_fn_error $? "conditional \"FRV\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${S390_TRUE}" && test -z "${S390_FALSE}"; then
- as_fn_error "conditional \"S390\" was never defined.
+ as_fn_error $? "conditional \"S390\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${X86_64_TRUE}" && test -z "${X86_64_FALSE}"; then
- as_fn_error "conditional \"X86_64\" was never defined.
+ as_fn_error $? "conditional \"X86_64\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${SH_TRUE}" && test -z "${SH_FALSE}"; then
- as_fn_error "conditional \"SH\" was never defined.
+ as_fn_error $? "conditional \"SH\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${SH64_TRUE}" && test -z "${SH64_FALSE}"; then
- as_fn_error "conditional \"SH64\" was never defined.
+ as_fn_error $? "conditional \"SH64\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${PA_LINUX_TRUE}" && test -z "${PA_LINUX_FALSE}"; then
- as_fn_error "conditional \"PA_LINUX\" was never defined.
+ as_fn_error $? "conditional \"PA_LINUX\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${PA_HPUX_TRUE}" && test -z "${PA_HPUX_FALSE}"; then
- as_fn_error "conditional \"PA_HPUX\" was never defined.
+ as_fn_error $? "conditional \"PA_HPUX\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${PA64_HPUX_TRUE}" && test -z "${PA64_HPUX_FALSE}"; then
- as_fn_error "conditional \"PA64_HPUX\" was never defined.
+ as_fn_error $? "conditional \"PA64_HPUX\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${TILE_TRUE}" && test -z "${TILE_FALSE}"; then
+ as_fn_error $? "conditional \"TILE\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${XTENSA_TRUE}" && test -z "${XTENSA_FALSE}"; then
+ as_fn_error $? "conditional \"XTENSA\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
+if test -z "${FFI_EXEC_TRAMPOLINE_TABLE_TRUE}" && test -z "${FFI_EXEC_TRAMPOLINE_TABLE_FALSE}"; then
+ as_fn_error $? "conditional \"FFI_EXEC_TRAMPOLINE_TABLE\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${FFI_DEBUG_TRUE}" && test -z "${FFI_DEBUG_FALSE}"; then
+ as_fn_error $? "conditional \"FFI_DEBUG\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${FFI_DEBUG_TRUE}" && test -z "${FFI_DEBUG_FALSE}"; then
+ as_fn_error $? "conditional \"FFI_DEBUG\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
-: ${CONFIG_STATUS=./config.status}
+: "${CONFIG_STATUS=./config.status}"
ac_write_fail=0
ac_clean_files_save=$ac_clean_files
ac_clean_files="$ac_clean_files $CONFIG_STATUS"
@@ -12767,6 +15264,7 @@ fi
IFS=" "" $as_nl"
# Find who we are. Look in the path if we contain no directory separator.
+as_myself=
case $0 in #((
*[\\/]* ) as_myself=$0 ;;
*) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
@@ -12812,19 +15310,19 @@ export LANGUAGE
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
-# as_fn_error ERROR [LINENO LOG_FD]
-# ---------------------------------
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
-# script with status $?, using 1 if that was 0.
+# script with STATUS, using 1 if that was 0.
as_fn_error ()
{
- as_status=$?; test $as_status -eq 0 && as_status=1
- if test "$3"; then
- as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
- $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3
+ as_status=$1; test $as_status -eq 0 && as_status=1
+ if test "$4"; then
+ as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+ $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
fi
- $as_echo "$as_me: error: $1" >&2
+ $as_echo "$as_me: error: $2" >&2
as_fn_exit $as_status
} # as_fn_error
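
as_fn_error now takes the exit status as an explicit first argument instead of recycling $?, which is why every call site in this diff grows a leading '$?' or a literal code such as 77. A self-contained sketch of the new convention (the helper is simplified; the real one also threads $as_lineno through):

as_fn_error ()
{
  as_status=$1; test $as_status -eq 0 && as_status=1
  test "$4" && echo "sketch: $0: error: $2 (line $3)" >&$4
  echo "sketch: $0: error: $2" >&2
  exit $as_status
}
# New-style call sites, run in subshells so the sketch keeps going:
( as_fn_error $? "conditional \"MIPS\" was never defined." "$LINENO" 2 )
( as_fn_error 77 "cannot compute sizeof (double)" "$LINENO" 2 )
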
@@ -12962,16 +15460,16 @@ if (echo >conf$$.file) 2>/dev/null; then
# ... but there are two gotchas:
# 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
# 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
- # In both cases, we have to default to `cp -p'.
+ # In both cases, we have to default to `cp -pR'.
ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
elif ln conf$$.file conf$$ 2>/dev/null; then
as_ln_s=ln
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
rmdir conf$$.dir 2>/dev/null
@@ -13020,7 +15518,7 @@ $as_echo X"$as_dir" |
test -d "$as_dir" && break
done
test -z "$as_dirs" || eval "mkdir $as_dirs"
- } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir"
+ } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
} # as_fn_mkdir_p
@@ -13031,28 +15529,16 @@ else
as_mkdir_p=false
fi
-if test -x / >/dev/null 2>&1; then
- as_test_x='test -x'
-else
- if ls -dL / >/dev/null 2>&1; then
- as_ls_L_option=L
- else
- as_ls_L_option=
- fi
- as_test_x='
- eval sh -c '\''
- if test -d "$1"; then
- test -d "$1/.";
- else
- case $1 in #(
- -*)set "./$1";;
- esac;
- case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
- ???[sx]*):;;*)false;;esac;fi
- '\'' sh
- '
-fi
-as_executable_p=$as_test_x
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
# Sed expression to map a string onto a valid CPP name.
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
@@ -13073,8 +15559,8 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by libffi $as_me 3.0.10rc0, which was
-generated by GNU Autoconf 2.65. Invocation command line was
+This file was extended by libffi $as_me 3.0.13, which was
+generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
CONFIG_HEADERS = $CONFIG_HEADERS
@@ -13137,17 +15623,17 @@ $config_links
Configuration commands:
$config_commands
-Report bugs to <http://gcc.gnu.org/bugs.html>."
+Report bugs to <http://github.com/atgreen/libffi/issues>."
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
-libffi config.status 3.0.10rc0
-configured by $0, generated by GNU Autoconf 2.65,
+libffi config.status 3.0.13
+configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
-Copyright (C) 2009 Free Software Foundation, Inc.
+Copyright (C) 2012 Free Software Foundation, Inc.
This config.status script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it."
@@ -13165,11 +15651,16 @@ ac_need_defaults=:
while test $# != 0
do
case $1 in
- --*=*)
+ --*=?*)
ac_option=`expr "X$1" : 'X\([^=]*\)='`
ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
ac_shift=:
;;
+ --*=)
+ ac_option=`expr "X$1" : 'X\([^=]*\)='`
+ ac_optarg=
+ ac_shift=:
+ ;;
*)
ac_option=$1
ac_optarg=$2
@@ -13191,6 +15682,7 @@ do
$ac_shift
case $ac_optarg in
*\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+ '') as_fn_error $? "missing file argument" ;;
esac
as_fn_append CONFIG_FILES " '$ac_optarg'"
ac_need_defaults=false;;
@@ -13203,7 +15695,7 @@ do
ac_need_defaults=false;;
--he | --h)
# Conflict between --help and --header
- as_fn_error "ambiguous option: \`$1'
+ as_fn_error $? "ambiguous option: \`$1'
Try \`$0 --help' for more information.";;
--help | --hel | -h )
$as_echo "$ac_cs_usage"; exit ;;
@@ -13212,7 +15704,7 @@ Try \`$0 --help' for more information.";;
ac_cs_silent=: ;;
# This is an error.
- -*) as_fn_error "unrecognized option: \`$1'
+ -*) as_fn_error $? "unrecognized option: \`$1'
Try \`$0 --help' for more information." ;;
*) as_fn_append ac_config_targets " $1"
@@ -13232,7 +15724,7 @@ fi
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
if \$ac_cs_recheck; then
- set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+ set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
shift
\$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
CONFIG_SHELL='$SHELL'
@@ -13256,6 +15748,14 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
#
# INIT-COMMANDS
#
+ax_enable_builddir_srcdir="$srcdir" # $srcdir
+ax_enable_builddir_host="$HOST" # $HOST / $host
+ax_enable_builddir_version="$VERSION" # $VERSION
+ax_enable_builddir_package="$PACKAGE" # $PACKAGE
+ax_enable_builddir_auxdir="$ax_enable_builddir_auxdir" # $AUX
+ax_enable_builddir_sed="$ax_enable_builddir_sed" # $SED
+ax_enable_builddir="$ax_enable_builddir" # $SUB
+
AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"
@@ -13266,131 +15766,154 @@ AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"
sed_quote_subst='$sed_quote_subst'
double_quote_subst='$double_quote_subst'
delay_variable_subst='$delay_variable_subst'
-macro_version='`$ECHO "X$macro_version" | $Xsed -e "$delay_single_quote_subst"`'
-macro_revision='`$ECHO "X$macro_revision" | $Xsed -e "$delay_single_quote_subst"`'
-enable_shared='`$ECHO "X$enable_shared" | $Xsed -e "$delay_single_quote_subst"`'
-enable_static='`$ECHO "X$enable_static" | $Xsed -e "$delay_single_quote_subst"`'
-pic_mode='`$ECHO "X$pic_mode" | $Xsed -e "$delay_single_quote_subst"`'
-enable_fast_install='`$ECHO "X$enable_fast_install" | $Xsed -e "$delay_single_quote_subst"`'
-host_alias='`$ECHO "X$host_alias" | $Xsed -e "$delay_single_quote_subst"`'
-host='`$ECHO "X$host" | $Xsed -e "$delay_single_quote_subst"`'
-host_os='`$ECHO "X$host_os" | $Xsed -e "$delay_single_quote_subst"`'
-build_alias='`$ECHO "X$build_alias" | $Xsed -e "$delay_single_quote_subst"`'
-build='`$ECHO "X$build" | $Xsed -e "$delay_single_quote_subst"`'
-build_os='`$ECHO "X$build_os" | $Xsed -e "$delay_single_quote_subst"`'
-SED='`$ECHO "X$SED" | $Xsed -e "$delay_single_quote_subst"`'
-Xsed='`$ECHO "X$Xsed" | $Xsed -e "$delay_single_quote_subst"`'
-GREP='`$ECHO "X$GREP" | $Xsed -e "$delay_single_quote_subst"`'
-EGREP='`$ECHO "X$EGREP" | $Xsed -e "$delay_single_quote_subst"`'
-FGREP='`$ECHO "X$FGREP" | $Xsed -e "$delay_single_quote_subst"`'
-LD='`$ECHO "X$LD" | $Xsed -e "$delay_single_quote_subst"`'
-NM='`$ECHO "X$NM" | $Xsed -e "$delay_single_quote_subst"`'
-LN_S='`$ECHO "X$LN_S" | $Xsed -e "$delay_single_quote_subst"`'
-max_cmd_len='`$ECHO "X$max_cmd_len" | $Xsed -e "$delay_single_quote_subst"`'
-ac_objext='`$ECHO "X$ac_objext" | $Xsed -e "$delay_single_quote_subst"`'
-exeext='`$ECHO "X$exeext" | $Xsed -e "$delay_single_quote_subst"`'
-lt_unset='`$ECHO "X$lt_unset" | $Xsed -e "$delay_single_quote_subst"`'
-lt_SP2NL='`$ECHO "X$lt_SP2NL" | $Xsed -e "$delay_single_quote_subst"`'
-lt_NL2SP='`$ECHO "X$lt_NL2SP" | $Xsed -e "$delay_single_quote_subst"`'
-reload_flag='`$ECHO "X$reload_flag" | $Xsed -e "$delay_single_quote_subst"`'
-reload_cmds='`$ECHO "X$reload_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-OBJDUMP='`$ECHO "X$OBJDUMP" | $Xsed -e "$delay_single_quote_subst"`'
-deplibs_check_method='`$ECHO "X$deplibs_check_method" | $Xsed -e "$delay_single_quote_subst"`'
-file_magic_cmd='`$ECHO "X$file_magic_cmd" | $Xsed -e "$delay_single_quote_subst"`'
-AR='`$ECHO "X$AR" | $Xsed -e "$delay_single_quote_subst"`'
-AR_FLAGS='`$ECHO "X$AR_FLAGS" | $Xsed -e "$delay_single_quote_subst"`'
-STRIP='`$ECHO "X$STRIP" | $Xsed -e "$delay_single_quote_subst"`'
-RANLIB='`$ECHO "X$RANLIB" | $Xsed -e "$delay_single_quote_subst"`'
-old_postinstall_cmds='`$ECHO "X$old_postinstall_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-old_postuninstall_cmds='`$ECHO "X$old_postuninstall_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-old_archive_cmds='`$ECHO "X$old_archive_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-CC='`$ECHO "X$CC" | $Xsed -e "$delay_single_quote_subst"`'
-CFLAGS='`$ECHO "X$CFLAGS" | $Xsed -e "$delay_single_quote_subst"`'
-compiler='`$ECHO "X$compiler" | $Xsed -e "$delay_single_quote_subst"`'
-GCC='`$ECHO "X$GCC" | $Xsed -e "$delay_single_quote_subst"`'
-lt_cv_sys_global_symbol_pipe='`$ECHO "X$lt_cv_sys_global_symbol_pipe" | $Xsed -e "$delay_single_quote_subst"`'
-lt_cv_sys_global_symbol_to_cdecl='`$ECHO "X$lt_cv_sys_global_symbol_to_cdecl" | $Xsed -e "$delay_single_quote_subst"`'
-lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "X$lt_cv_sys_global_symbol_to_c_name_address" | $Xsed -e "$delay_single_quote_subst"`'
-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "X$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $Xsed -e "$delay_single_quote_subst"`'
-objdir='`$ECHO "X$objdir" | $Xsed -e "$delay_single_quote_subst"`'
-SHELL='`$ECHO "X$SHELL" | $Xsed -e "$delay_single_quote_subst"`'
-ECHO='`$ECHO "X$ECHO" | $Xsed -e "$delay_single_quote_subst"`'
-MAGIC_CMD='`$ECHO "X$MAGIC_CMD" | $Xsed -e "$delay_single_quote_subst"`'
-lt_prog_compiler_no_builtin_flag='`$ECHO "X$lt_prog_compiler_no_builtin_flag" | $Xsed -e "$delay_single_quote_subst"`'
-lt_prog_compiler_wl='`$ECHO "X$lt_prog_compiler_wl" | $Xsed -e "$delay_single_quote_subst"`'
-lt_prog_compiler_pic='`$ECHO "X$lt_prog_compiler_pic" | $Xsed -e "$delay_single_quote_subst"`'
-lt_prog_compiler_static='`$ECHO "X$lt_prog_compiler_static" | $Xsed -e "$delay_single_quote_subst"`'
-lt_cv_prog_compiler_c_o='`$ECHO "X$lt_cv_prog_compiler_c_o" | $Xsed -e "$delay_single_quote_subst"`'
-need_locks='`$ECHO "X$need_locks" | $Xsed -e "$delay_single_quote_subst"`'
-DSYMUTIL='`$ECHO "X$DSYMUTIL" | $Xsed -e "$delay_single_quote_subst"`'
-NMEDIT='`$ECHO "X$NMEDIT" | $Xsed -e "$delay_single_quote_subst"`'
-LIPO='`$ECHO "X$LIPO" | $Xsed -e "$delay_single_quote_subst"`'
-OTOOL='`$ECHO "X$OTOOL" | $Xsed -e "$delay_single_quote_subst"`'
-OTOOL64='`$ECHO "X$OTOOL64" | $Xsed -e "$delay_single_quote_subst"`'
-libext='`$ECHO "X$libext" | $Xsed -e "$delay_single_quote_subst"`'
-shrext_cmds='`$ECHO "X$shrext_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-extract_expsyms_cmds='`$ECHO "X$extract_expsyms_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-archive_cmds_need_lc='`$ECHO "X$archive_cmds_need_lc" | $Xsed -e "$delay_single_quote_subst"`'
-enable_shared_with_static_runtimes='`$ECHO "X$enable_shared_with_static_runtimes" | $Xsed -e "$delay_single_quote_subst"`'
-export_dynamic_flag_spec='`$ECHO "X$export_dynamic_flag_spec" | $Xsed -e "$delay_single_quote_subst"`'
-whole_archive_flag_spec='`$ECHO "X$whole_archive_flag_spec" | $Xsed -e "$delay_single_quote_subst"`'
-compiler_needs_object='`$ECHO "X$compiler_needs_object" | $Xsed -e "$delay_single_quote_subst"`'
-old_archive_from_new_cmds='`$ECHO "X$old_archive_from_new_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-old_archive_from_expsyms_cmds='`$ECHO "X$old_archive_from_expsyms_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-archive_cmds='`$ECHO "X$archive_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-archive_expsym_cmds='`$ECHO "X$archive_expsym_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-module_cmds='`$ECHO "X$module_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-module_expsym_cmds='`$ECHO "X$module_expsym_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-with_gnu_ld='`$ECHO "X$with_gnu_ld" | $Xsed -e "$delay_single_quote_subst"`'
-allow_undefined_flag='`$ECHO "X$allow_undefined_flag" | $Xsed -e "$delay_single_quote_subst"`'
-no_undefined_flag='`$ECHO "X$no_undefined_flag" | $Xsed -e "$delay_single_quote_subst"`'
-hardcode_libdir_flag_spec='`$ECHO "X$hardcode_libdir_flag_spec" | $Xsed -e "$delay_single_quote_subst"`'
-hardcode_libdir_flag_spec_ld='`$ECHO "X$hardcode_libdir_flag_spec_ld" | $Xsed -e "$delay_single_quote_subst"`'
-hardcode_libdir_separator='`$ECHO "X$hardcode_libdir_separator" | $Xsed -e "$delay_single_quote_subst"`'
-hardcode_direct='`$ECHO "X$hardcode_direct" | $Xsed -e "$delay_single_quote_subst"`'
-hardcode_direct_absolute='`$ECHO "X$hardcode_direct_absolute" | $Xsed -e "$delay_single_quote_subst"`'
-hardcode_minus_L='`$ECHO "X$hardcode_minus_L" | $Xsed -e "$delay_single_quote_subst"`'
-hardcode_shlibpath_var='`$ECHO "X$hardcode_shlibpath_var" | $Xsed -e "$delay_single_quote_subst"`'
-hardcode_automatic='`$ECHO "X$hardcode_automatic" | $Xsed -e "$delay_single_quote_subst"`'
-inherit_rpath='`$ECHO "X$inherit_rpath" | $Xsed -e "$delay_single_quote_subst"`'
-link_all_deplibs='`$ECHO "X$link_all_deplibs" | $Xsed -e "$delay_single_quote_subst"`'
-fix_srcfile_path='`$ECHO "X$fix_srcfile_path" | $Xsed -e "$delay_single_quote_subst"`'
-always_export_symbols='`$ECHO "X$always_export_symbols" | $Xsed -e "$delay_single_quote_subst"`'
-export_symbols_cmds='`$ECHO "X$export_symbols_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-exclude_expsyms='`$ECHO "X$exclude_expsyms" | $Xsed -e "$delay_single_quote_subst"`'
-include_expsyms='`$ECHO "X$include_expsyms" | $Xsed -e "$delay_single_quote_subst"`'
-prelink_cmds='`$ECHO "X$prelink_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-file_list_spec='`$ECHO "X$file_list_spec" | $Xsed -e "$delay_single_quote_subst"`'
-variables_saved_for_relink='`$ECHO "X$variables_saved_for_relink" | $Xsed -e "$delay_single_quote_subst"`'
-need_lib_prefix='`$ECHO "X$need_lib_prefix" | $Xsed -e "$delay_single_quote_subst"`'
-need_version='`$ECHO "X$need_version" | $Xsed -e "$delay_single_quote_subst"`'
-version_type='`$ECHO "X$version_type" | $Xsed -e "$delay_single_quote_subst"`'
-runpath_var='`$ECHO "X$runpath_var" | $Xsed -e "$delay_single_quote_subst"`'
-shlibpath_var='`$ECHO "X$shlibpath_var" | $Xsed -e "$delay_single_quote_subst"`'
-shlibpath_overrides_runpath='`$ECHO "X$shlibpath_overrides_runpath" | $Xsed -e "$delay_single_quote_subst"`'
-libname_spec='`$ECHO "X$libname_spec" | $Xsed -e "$delay_single_quote_subst"`'
-library_names_spec='`$ECHO "X$library_names_spec" | $Xsed -e "$delay_single_quote_subst"`'
-soname_spec='`$ECHO "X$soname_spec" | $Xsed -e "$delay_single_quote_subst"`'
-postinstall_cmds='`$ECHO "X$postinstall_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-postuninstall_cmds='`$ECHO "X$postuninstall_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-finish_cmds='`$ECHO "X$finish_cmds" | $Xsed -e "$delay_single_quote_subst"`'
-finish_eval='`$ECHO "X$finish_eval" | $Xsed -e "$delay_single_quote_subst"`'
-hardcode_into_libs='`$ECHO "X$hardcode_into_libs" | $Xsed -e "$delay_single_quote_subst"`'
-sys_lib_search_path_spec='`$ECHO "X$sys_lib_search_path_spec" | $Xsed -e "$delay_single_quote_subst"`'
-sys_lib_dlsearch_path_spec='`$ECHO "X$sys_lib_dlsearch_path_spec" | $Xsed -e "$delay_single_quote_subst"`'
-hardcode_action='`$ECHO "X$hardcode_action" | $Xsed -e "$delay_single_quote_subst"`'
-enable_dlopen='`$ECHO "X$enable_dlopen" | $Xsed -e "$delay_single_quote_subst"`'
-enable_dlopen_self='`$ECHO "X$enable_dlopen_self" | $Xsed -e "$delay_single_quote_subst"`'
-enable_dlopen_self_static='`$ECHO "X$enable_dlopen_self_static" | $Xsed -e "$delay_single_quote_subst"`'
-old_striplib='`$ECHO "X$old_striplib" | $Xsed -e "$delay_single_quote_subst"`'
-striplib='`$ECHO "X$striplib" | $Xsed -e "$delay_single_quote_subst"`'
+macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`'
+macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`'
+enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`'
+enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`'
+pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`'
+enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`'
+SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`'
+ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`'
+PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`'
+host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`'
+host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`'
+host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`'
+build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`'
+build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`'
+build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`'
+SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`'
+Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`'
+GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`'
+EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`'
+FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`'
+LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`'
+NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`'
+LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`'
+max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`'
+ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`'
+exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`'
+lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`'
+lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`'
+lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`'
+lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`'
+lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`'
+reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`'
+reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`'
+OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`'
+deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`'
+file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`'
+file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`'
+want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`'
+DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`'
+sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`'
+AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`'
+AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`'
+archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`'
+STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`'
+RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`'
+old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`'
+old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`'
+old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`'
+lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`'
+CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`'
+CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`'
+compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`'
+GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`'
+nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`'
+lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`'
+objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`'
+MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`'
+lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`'
+need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`'
+MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`'
+DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`'
+NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`'
+LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`'
+OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`'
+OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`'
+libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`'
+shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`'
+extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`'
+archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`'
+enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`'
+export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`'
+whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`'
+compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`'
+old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`'
+old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`'
+archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`'
+archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`'
+module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`'
+module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`'
+with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`'
+allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`'
+no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`'
+hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`'
+hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`'
+hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`'
+hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`'
+hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`'
+inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`'
+link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`'
+always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`'
+export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`'
+exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`'
+include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`'
+prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`'
+postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`'
+file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`'
+variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`'
+need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`'
+need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`'
+version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`'
+runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`'
+shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`'
+shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`'
+libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`'
+library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`'
+soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`'
+install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`'
+postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`'
+postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`'
+finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`'
+finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`'
+hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`'
+sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`'
+sys_lib_dlsearch_path_spec='`$ECHO "$sys_lib_dlsearch_path_spec" | $SED "$delay_single_quote_subst"`'
+hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`'
+enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`'
+enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`'
+enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`'
+old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`'
+striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`'
LTCC='$LTCC'
LTCFLAGS='$LTCFLAGS'
compiler='$compiler_DEFAULT'
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+ eval 'cat <<_LTECHO_EOF
+\$1
+_LTECHO_EOF'
+}
+
# Quote evaled strings.
-for var in SED \
+for var in SHELL \
+ECHO \
+PATH_SEPARATOR \
+SED \
GREP \
EGREP \
FGREP \
@@ -13403,8 +15926,13 @@ reload_flag \
OBJDUMP \
deplibs_check_method \
file_magic_cmd \
+file_magic_glob \
+want_nocaseglob \
+DLLTOOL \
+sharedlib_from_linklib_cmd \
AR \
AR_FLAGS \
+archiver_list_spec \
STRIP \
RANLIB \
CC \
@@ -13414,14 +15942,14 @@ lt_cv_sys_global_symbol_pipe \
lt_cv_sys_global_symbol_to_cdecl \
lt_cv_sys_global_symbol_to_c_name_address \
lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \
-SHELL \
-ECHO \
+nm_file_list_spec \
lt_prog_compiler_no_builtin_flag \
-lt_prog_compiler_wl \
lt_prog_compiler_pic \
+lt_prog_compiler_wl \
lt_prog_compiler_static \
lt_cv_prog_compiler_c_o \
need_locks \
+MANIFEST_TOOL \
DSYMUTIL \
NMEDIT \
LIPO \
@@ -13435,9 +15963,7 @@ with_gnu_ld \
allow_undefined_flag \
no_undefined_flag \
hardcode_libdir_flag_spec \
-hardcode_libdir_flag_spec_ld \
hardcode_libdir_separator \
-fix_srcfile_path \
exclude_expsyms \
include_expsyms \
file_list_spec \
@@ -13445,12 +15971,13 @@ variables_saved_for_relink \
libname_spec \
library_names_spec \
soname_spec \
+install_override_mode \
finish_eval \
old_striplib \
striplib; do
- case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in
+ case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
*[\\\\\\\`\\"\\\$]*)
- eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$sed_quote_subst\\"\\\`\\\\\\""
+ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\""
;;
*)
eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
@@ -13472,14 +15999,15 @@ module_cmds \
module_expsym_cmds \
export_symbols_cmds \
prelink_cmds \
+postlink_cmds \
postinstall_cmds \
postuninstall_cmds \
finish_cmds \
sys_lib_search_path_spec \
sys_lib_dlsearch_path_spec; do
- case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in
+ case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
*[\\\\\\\`\\"\\\$]*)
- eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\""
+ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\""
;;
*)
eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
@@ -13487,12 +16015,6 @@ sys_lib_dlsearch_path_spec; do
esac
done
-# Fix-up fallback echo if it was mangled by the above quoting rules.
-case \$lt_ECHO in
-*'\\\$0 --fallback-echo"') lt_ECHO=\`\$ECHO "X\$lt_ECHO" | \$Xsed -e 's/\\\\\\\\\\\\\\\$0 --fallback-echo"\$/\$0 --fallback-echo"/'\`
- ;;
-esac
-
ac_aux_dir='$ac_aux_dir'
xsi_shell='$xsi_shell'
lt_shell_append='$lt_shell_append'
@@ -13523,6 +16045,7 @@ for ac_config_target in $ac_config_targets
do
case $ac_config_target in
"fficonfig.h") CONFIG_HEADERS="$CONFIG_HEADERS fficonfig.h" ;;
+ "buildir") CONFIG_COMMANDS="$CONFIG_COMMANDS buildir" ;;
"depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;;
"libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;;
"include") CONFIG_COMMANDS="$CONFIG_COMMANDS include" ;;
@@ -13537,7 +16060,7 @@ do
"include/ffi_common.h") CONFIG_LINKS="$CONFIG_LINKS include/ffi_common.h:include/ffi_common.h" ;;
"fficonfig.py") CONFIG_FILES="$CONFIG_FILES fficonfig.py" ;;
- *) as_fn_error "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
+ *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
esac
done
@@ -13561,9 +16084,10 @@ fi
# after its creation but before its name has been assigned to `$tmp'.
$debug ||
{
- tmp=
+ tmp= ac_tmp=
trap 'exit_status=$?
- { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
+ : "${ac_tmp:=$tmp}"
+ { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status
' 0
trap 'as_fn_exit 1' 1 2 13 15
}
@@ -13571,12 +16095,13 @@ $debug ||
{
tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
- test -n "$tmp" && test -d "$tmp"
+ test -d "$tmp"
} ||
{
tmp=./conf$$-$RANDOM
(umask 077 && mkdir "$tmp")
-} || as_fn_error "cannot create a temporary directory in ." "$LINENO" 5
+} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
+ac_tmp=$tmp
# Set up the scripts for CONFIG_FILES section.
# No need to generate them if there are no CONFIG_FILES.
@@ -13593,12 +16118,12 @@ if test "x$ac_cr" = x; then
fi
ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
- ac_cs_awk_cr='\r'
+ ac_cs_awk_cr='\\r'
else
ac_cs_awk_cr=$ac_cr
fi
-echo 'BEGIN {' >"$tmp/subs1.awk" &&
+echo 'BEGIN {' >"$ac_tmp/subs1.awk" &&
_ACEOF
@@ -13607,18 +16132,18 @@ _ACEOF
echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
echo "_ACEOF"
} >conf$$subs.sh ||
- as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
-ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'`
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'`
ac_delim='%!_!# '
for ac_last_try in false false false false false :; do
. ./conf$$subs.sh ||
- as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
if test $ac_delim_n = $ac_delim_num; then
break
elif $ac_last_try; then
- as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
+ as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
else
ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
fi
@@ -13626,7 +16151,7 @@ done
rm -f conf$$subs.sh
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-cat >>"\$tmp/subs1.awk" <<\\_ACAWK &&
+cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&
_ACEOF
sed -n '
h
@@ -13674,7 +16199,7 @@ t delim
rm -f conf$$subs.awk
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
_ACAWK
-cat >>"\$tmp/subs1.awk" <<_ACAWK &&
+cat >>"\$ac_tmp/subs1.awk" <<_ACAWK &&
for (key in S) S_is_set[key] = 1
FS = ""
@@ -13706,21 +16231,29 @@ if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
else
cat
-fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \
- || as_fn_error "could not setup config files machinery" "$LINENO" 5
+fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \
+ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5
_ACEOF
-# VPATH may cause trouble with some makes, so we remove $(srcdir),
-# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
+# VPATH may cause trouble with some makes, so we remove sole $(srcdir),
+# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and
# trailing colons and then remove the whole line if VPATH becomes empty
# (actually we leave an empty line to preserve line numbers).
if test "x$srcdir" = x.; then
- ac_vpsub='/^[ ]*VPATH[ ]*=/{
-s/:*\$(srcdir):*/:/
-s/:*\${srcdir}:*/:/
-s/:*@srcdir@:*/:/
-s/^\([^=]*=[ ]*\):*/\1/
+ ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{
+h
+s///
+s/^/:/
+s/[ ]*$/:/
+s/:\$(srcdir):/:/g
+s/:\${srcdir}:/:/g
+s/:@srcdir@:/:/g
+s/^:*//
s/:*$//
+x
+s/\(=[ ]*\).*/\1/
+G
+s/\n//
s/^[^=]*=[ ]*$//
}'
fi
@@ -13732,7 +16265,7 @@ fi # test -n "$CONFIG_FILES"
# No need to generate them if there are no CONFIG_HEADERS.
# This happens for instance with `./config.status Makefile'.
if test -n "$CONFIG_HEADERS"; then
-cat >"$tmp/defines.awk" <<\_ACAWK ||
+cat >"$ac_tmp/defines.awk" <<\_ACAWK ||
BEGIN {
_ACEOF
@@ -13744,11 +16277,11 @@ _ACEOF
# handling of long lines.
ac_delim='%!_!# '
for ac_last_try in false false :; do
- ac_t=`sed -n "/$ac_delim/p" confdefs.h`
- if test -z "$ac_t"; then
+ ac_tt=`sed -n "/$ac_delim/p" confdefs.h`
+ if test -z "$ac_tt"; then
break
elif $ac_last_try; then
- as_fn_error "could not make $CONFIG_HEADERS" "$LINENO" 5
+ as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5
else
ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
fi
@@ -13833,7 +16366,7 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
_ACAWK
_ACEOF
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
- as_fn_error "could not setup config headers machinery" "$LINENO" 5
+ as_fn_error $? "could not setup config headers machinery" "$LINENO" 5
fi # test -n "$CONFIG_HEADERS"
@@ -13846,7 +16379,7 @@ do
esac
case $ac_mode$ac_tag in
:[FHL]*:*);;
- :L* | :C*:*) as_fn_error "invalid tag \`$ac_tag'" "$LINENO" 5;;
+ :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;;
:[FH]-) ac_tag=-:-;;
:[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
esac
@@ -13865,7 +16398,7 @@ do
for ac_f
do
case $ac_f in
- -) ac_f="$tmp/stdin";;
+ -) ac_f="$ac_tmp/stdin";;
*) # Look for the file first in the build tree, then in the source tree
# (if the path is not absolute). The absolute path cannot be DOS-style,
# because $ac_f cannot contain `:'.
@@ -13874,7 +16407,7 @@ do
[\\/$]*) false;;
*) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
esac ||
- as_fn_error "cannot find input file: \`$ac_f'" "$LINENO" 5;;
+ as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;;
esac
case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
as_fn_append ac_file_inputs " '$ac_f'"
@@ -13900,8 +16433,8 @@ $as_echo "$as_me: creating $ac_file" >&6;}
esac
case $ac_tag in
- *:-:* | *:-) cat >"$tmp/stdin" \
- || as_fn_error "could not create $ac_file" "$LINENO" 5 ;;
+ *:-:* | *:-) cat >"$ac_tmp/stdin" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
esac
;;
esac
@@ -14037,23 +16570,24 @@ s&@INSTALL@&$ac_INSTALL&;t t
s&@MKDIR_P@&$ac_MKDIR_P&;t t
$ac_datarootdir_hack
"
-eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \
- || as_fn_error "could not create $ac_file" "$LINENO" 5
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \
+ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5
test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
- { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
- { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
+ { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } &&
+ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \
+ "$ac_tmp/out"`; test -z "$ac_out"; } &&
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
-which seems to be undefined. Please make sure it is defined." >&5
+which seems to be undefined. Please make sure it is defined" >&5
$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
-which seems to be undefined. Please make sure it is defined." >&2;}
+which seems to be undefined. Please make sure it is defined" >&2;}
- rm -f "$tmp/stdin"
+ rm -f "$ac_tmp/stdin"
case $ac_file in
- -) cat "$tmp/out" && rm -f "$tmp/out";;
- *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";;
+ -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";;
+ *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";;
esac \
- || as_fn_error "could not create $ac_file" "$LINENO" 5
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
;;
:H)
#
@@ -14062,21 +16596,21 @@ which seems to be undefined. Please make sure it is defined." >&2;}
if test x"$ac_file" != x-; then
{
$as_echo "/* $configure_input */" \
- && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs"
- } >"$tmp/config.h" \
- || as_fn_error "could not create $ac_file" "$LINENO" 5
- if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then
+ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs"
+ } >"$ac_tmp/config.h" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
$as_echo "$as_me: $ac_file is unchanged" >&6;}
else
rm -f "$ac_file"
- mv "$tmp/config.h" "$ac_file" \
- || as_fn_error "could not create $ac_file" "$LINENO" 5
+ mv "$ac_tmp/config.h" "$ac_file" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5
fi
else
$as_echo "/* $configure_input */" \
- && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \
- || as_fn_error "could not create -" "$LINENO" 5
+ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \
+ || as_fn_error $? "could not create -" "$LINENO" 5
fi
# Compute "$ac_file"'s index in $config_headers.
_am_arg="$ac_file"
@@ -14130,19 +16664,19 @@ $as_echo X"$_am_arg" |
$as_echo "$as_me: linking $ac_source to $ac_file" >&6;}
if test ! -r "$ac_source"; then
- as_fn_error "$ac_source: file not found" "$LINENO" 5
+ as_fn_error $? "$ac_source: file not found" "$LINENO" 5
fi
rm -f "$ac_file"
# Try a relative symlink, then a hard link, then a copy.
- case $srcdir in
+ case $ac_source in
[\\/$]* | ?:[\\/]* ) ac_rel_source=$ac_source ;;
*) ac_rel_source=$ac_top_build_prefix$ac_source ;;
esac
ln -s "$ac_rel_source" "$ac_file" 2>/dev/null ||
ln "$ac_source" "$ac_file" 2>/dev/null ||
cp -p "$ac_source" "$ac_file" ||
- as_fn_error "cannot link or copy $ac_source to $ac_file" "$LINENO" 5
+ as_fn_error $? "cannot link or copy $ac_source to $ac_file" "$LINENO" 5
fi
;;
:C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5
@@ -14152,6 +16686,150 @@ $as_echo "$as_me: executing $ac_file commands" >&6;}
case $ac_file$ac_mode in
+ "buildir":C) ac_top_srcdir="$ax_enable_builddir_srcdir"
+ if test ".$ax_enable_builddir" = ".." ; then
+ if test -f "$top_srcdir/Makefile" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: skipping top_srcdir/Makefile - left untouched" >&5
+$as_echo "$as_me: skipping top_srcdir/Makefile - left untouched" >&6;}
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: skipping top_srcdir/Makefile - not created" >&5
+$as_echo "$as_me: skipping top_srcdir/Makefile - not created" >&6;}
+ fi
+ else
+ if test -f "$ac_top_srcdir/Makefile" ; then
+ a=`grep "^VERSION " "$ac_top_srcdir/Makefile"` ; b=`grep "^VERSION " Makefile`
+ test "$a" != "$b" && rm "$ac_top_srcdir/Makefile"
+ fi
+ if test -f "$ac_top_srcdir/Makefile" ; then
+ echo "$ac_top_srcdir/Makefile : $ac_top_srcdir/Makefile.in" > $tmp/conftemp.mk
+ echo " @ echo 'REMOVED,,,' >\$@" >> $tmp/conftemp.mk
+ eval "${MAKE-make} -f $tmp/conftemp.mk 2>/dev/null >/dev/null"
+ if grep '^REMOVED,,,' "$ac_top_srcdir/Makefile" >/dev/null
+ then rm $ac_top_srcdir/Makefile ; fi
+ cp $tmp/conftemp.mk $ac_top_srcdir/makefiles.mk~ ## DEBUGGING
+ fi
+ if test ! -f "$ac_top_srcdir/Makefile" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: create top_srcdir/Makefile guessed from local Makefile" >&5
+$as_echo "$as_me: create top_srcdir/Makefile guessed from local Makefile" >&6;}
+ x='`' ; cat >$tmp/conftemp.sed <<_EOF
+/^\$/n
+x
+/^\$/bS
+x
+/\\\\\$/{H;d;}
+{H;s/.*//;x;}
+bM
+:S
+x
+/\\\\\$/{h;d;}
+{h;s/.*//;x;}
+:M
+s/\\(\\n\\) /\\1 /g
+/^ /d
+/^[ ]*[\\#]/d
+/^VPATH *=/d
+s/^srcdir *=.*/srcdir = ./
+s/^top_srcdir *=.*/top_srcdir = ./
+/[:=]/!d
+/^\\./d
+/ = /b
+/ .= /b
+/:/!b
+s/:.*/:/
+s/ / /g
+s/ \\([a-z][a-z-]*[a-zA-Z0-9]\\)\\([ :]\\)/ \\1 \\1-all\\2/g
+s/^\\([a-z][a-z-]*[a-zA-Z0-9]\\)\\([ :]\\)/\\1 \\1-all\\2/
+s/ / /g
+/^all all-all[ :]/i\\
+all-configured : all-all
+s/ [a-zA-Z0-9-]*-all [a-zA-Z0-9-]*-all-all//g
+/-all-all/d
+a\\
+ @ HOST="\$(HOST)\" \\\\\\
+ ; test ".\$\$HOST" = "." && HOST=$x sh $ax_enable_builddir_auxdir/config.guess $x \\\\\\
+ ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\
+ ; use=$x basename "\$\@" -all $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\
+ ; echo "MAKE \$\$HOST : \$\$n * \$\@"; if test "\$\$n" -eq "0" ; then : \\\\\\
+ ; BUILD=$x grep "^####.*|" Makefile |tail -1| sed -e 's/.*|//' $x ; fi \\\\\\
+ ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\
+ ; test "\$\$use" = "\$\@" && BUILD=$x echo "\$\$BUILD" | tail -1 $x \\\\\\
+ ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\
+ ; (cd "\$\$i" && test ! -f configure && \$(MAKE) \$\$use) || exit; done
+/dist-all *:/a\\
+ @ HOST="\$(HOST)\" \\\\\\
+ ; test ".\$\$HOST" = "." && HOST=$x sh $ax_enable_builddir_auxdir/config.guess $x \\\\\\
+ ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\
+ ; found=$x echo \$\$BUILD | wc -w $x \\\\\\
+ ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).tar.*" \\\\\\
+ ; if test "\$\$found" -eq "0" ; then : \\\\\\
+ ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\
+ ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\
+ ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).tar.* \\\\\\
+ ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done
+/dist-[a-zA-Z0-9]*-all *:/a\\
+ @ HOST="\$(HOST)\" \\\\\\
+ ; test ".\$\$HOST" = "." && HOST=$x sh ./config.guess $x \\\\\\
+ ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\
+ ; found=$x echo \$\$BUILD | wc -w $x \\\\\\
+ ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).*" \\\\\\
+ ; if test "\$\$found" -eq "0" ; then : \\\\\\
+ ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\
+ ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\
+ ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).* \\\\\\
+ ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done
+/distclean-all *:/a\\
+ @ HOST="\$(HOST)\" \\\\\\
+ ; test ".\$\$HOST" = "." && HOST=$x sh $ax_enable_builddir_auxdir/config.guess $x \\\\\\
+ ; BUILD=$x grep "^#### .*|" Makefile | sed -e 's/.*|//' $x \\\\\\
+ ; use=$x basename "\$\@" -all $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\
+ ; echo "MAKE \$\$HOST : \$\$n * \$\@ (all local builds)" \\\\\\
+ ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\
+ ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\
+ ; echo "# rm -r \$\$i"; done ; echo "# (sleep 3)" ; sleep 3 \\\\\\
+ ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\
+ ; echo "\$\$i" | grep "^/" > /dev/null && continue \\\\\\
+ ; echo "\$\$i" | grep "^../" > /dev/null && continue \\\\\\
+ ; echo "rm -r \$\$i"; (rm -r "\$\$i") ; done ; rm Makefile
+_EOF
+ cp "$tmp/conftemp.sed" "$ac_top_srcdir/makefile.sed~" ## DEBUGGING
+ $ax_enable_builddir_sed -f $tmp/conftemp.sed Makefile >$ac_top_srcdir/Makefile
+ if test -f "$ac_top_srcdir/Makefile.mk" ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: extend top_srcdir/Makefile with top_srcdir/Makefile.mk" >&5
+$as_echo "$as_me: extend top_srcdir/Makefile with top_srcdir/Makefile.mk" >&6;}
+ cat $ac_top_srcdir/Makefile.mk >>$ac_top_srcdir/Makefile
+ fi ; xxxx="####"
+ echo "$xxxx CONFIGURATIONS FOR TOPLEVEL MAKEFILE: " >>$ac_top_srcdir/Makefile
+ # sanity check
+ if grep '^; echo "MAKE ' $ac_top_srcdir/Makefile >/dev/null ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: buggy sed found - it deletes tab in \"a\" text parts" >&5
+$as_echo "$as_me: buggy sed found - it deletes tab in \"a\" text parts" >&6;}
+ $ax_enable_builddir_sed -e '/^@ HOST=/s/^/ /' -e '/^; /s/^/ /' $ac_top_srcdir/Makefile \
+ >$ac_top_srcdir/Makefile~
+ (test -s $ac_top_srcdir/Makefile~ && mv $ac_top_srcdir/Makefile~ $ac_top_srcdir/Makefile) 2>/dev/null
+ fi
+ else
+ xxxx="\\#\\#\\#\\#"
+ # echo "/^$xxxx *$ax_enable_builddir_host /d" >$tmp/conftemp.sed
+ echo "s!^$xxxx [^|]* | *$ax_enable_builddir *\$!$xxxx ...... $ax_enable_builddir!" >$tmp/conftemp.sed
+ $ax_enable_builddir_sed -f "$tmp/conftemp.sed" "$ac_top_srcdir/Makefile" >$tmp/mkfile.tmp
+ cp "$tmp/conftemp.sed" "$ac_top_srcdir/makefiles.sed~" ## DEBUGGING
+ cp "$tmp/mkfile.tmp" "$ac_top_srcdir/makefiles.out~" ## DEBUGGING
+ if cmp -s "$ac_top_srcdir/Makefile" "$tmp/mkfile.tmp" 2>/dev/null ; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: keeping top_srcdir/Makefile from earlier configure" >&5
+$as_echo "$as_me: keeping top_srcdir/Makefile from earlier configure" >&6;}
+ rm "$tmp/mkfile.tmp"
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: reusing top_srcdir/Makefile from earlier configure" >&5
+$as_echo "$as_me: reusing top_srcdir/Makefile from earlier configure" >&6;}
+ mv "$tmp/mkfile.tmp" "$ac_top_srcdir/Makefile"
+ fi
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: build in $ax_enable_builddir (HOST=$ax_enable_builddir_host)" >&5
+$as_echo "$as_me: build in $ax_enable_builddir (HOST=$ax_enable_builddir_host)" >&6;}
+ xxxx="####"
+ echo "$xxxx" "$ax_enable_builddir_host" "|$ax_enable_builddir" >>$ac_top_srcdir/Makefile
+ fi
+ ;;
"depfiles":C) test x"$AMDEP_TRUE" != x"" || {
# Autoconf 2.62 quotes --file arguments for eval, but not when files
# are listed without --file. Let's play safe and only enable the eval
@@ -14166,7 +16844,7 @@ $as_echo "$as_me: executing $ac_file commands" >&6;}
# Strip MF so we end up with the name of the file.
mf=`echo "$mf" | sed -e 's/:.*$//'`
# Check whether this is an Automake generated Makefile or not.
- # We used to match only the files named `Makefile.in', but
+ # We used to match only the files named 'Makefile.in', but
# some people rename them; so instead we look at the file content.
# Grep'ing the first line is not enough: some people post-process
# each Makefile.in and add a new line on top of each file to say so.
@@ -14200,21 +16878,19 @@ $as_echo X"$mf" |
continue
fi
# Extract the definition of DEPDIR, am__include, and am__quote
- # from the Makefile without running `make'.
+ # from the Makefile without running 'make'.
DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
test -z "$DEPDIR" && continue
am__include=`sed -n 's/^am__include = //p' < "$mf"`
test -z "am__include" && continue
am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
- # When using ansi2knr, U may be empty or an underscore; expand it
- U=`sed -n 's/^U = //p' < "$mf"`
# Find all dependency output files, they are included files with
# $(DEPDIR) in their names. We invoke sed twice because it is the
# simplest approach to changing $(DEPDIR) to its actual value in the
# expansion.
for file in `sed -n "
s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
- sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do
+ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do
# Make sure the directory exists.
test -f "$dirpart/$file" && continue
fdir=`$as_dirname -- "$file" ||
@@ -14268,7 +16944,8 @@ $as_echo X"$file" |
# NOTE: Changes made to this file will be lost: look at ltmain.sh.
#
# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
-# 2006, 2007, 2008 Free Software Foundation, Inc.
+# 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
# Written by Gordon Matzigkeit, 1996
#
# This file is part of GNU Libtool.
@@ -14316,6 +16993,15 @@ pic_mode=$pic_mode
# Whether or not to optimize for fast installation.
fast_install=$enable_fast_install
+# Shell to use when invoking shell scripts.
+SHELL=$lt_SHELL
+
+# An echo program that protects backslashes.
+ECHO=$lt_ECHO
+
+# The PATH separator for the build system.
+PATH_SEPARATOR=$lt_PATH_SEPARATOR
+
# The host system.
host_alias=$host_alias
host=$host
@@ -14365,9 +17051,11 @@ SP2NL=$lt_lt_SP2NL
# turn newlines into spaces.
NL2SP=$lt_lt_NL2SP
-# How to create reloadable object files.
-reload_flag=$lt_reload_flag
-reload_cmds=$lt_reload_cmds
+# convert \$build file names to \$host format.
+to_host_file_cmd=$lt_cv_to_host_file_cmd
+
+# convert \$build files to toolchain format.
+to_tool_file_cmd=$lt_cv_to_tool_file_cmd
# An object symbol dumper.
OBJDUMP=$lt_OBJDUMP
@@ -14375,13 +17063,30 @@ OBJDUMP=$lt_OBJDUMP
# Method to check whether dependent libraries are shared objects.
deplibs_check_method=$lt_deplibs_check_method
-# Command to use when deplibs_check_method == "file_magic".
+# Command to use when deplibs_check_method = "file_magic".
file_magic_cmd=$lt_file_magic_cmd
+# How to find potential files when deplibs_check_method = "file_magic".
+file_magic_glob=$lt_file_magic_glob
+
+# Find potential files using nocaseglob when deplibs_check_method = "file_magic".
+want_nocaseglob=$lt_want_nocaseglob
+
+# DLL creation program.
+DLLTOOL=$lt_DLLTOOL
+
+# Command to associate shared and link libraries.
+sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd
+
# The archiver.
AR=$lt_AR
+
+# Flags to create an archive.
AR_FLAGS=$lt_AR_FLAGS
+# How to feed a file listing to the archiver.
+archiver_list_spec=$lt_archiver_list_spec
+
# A symbol stripping program.
STRIP=$lt_STRIP
@@ -14390,6 +17095,9 @@ RANLIB=$lt_RANLIB
old_postinstall_cmds=$lt_old_postinstall_cmds
old_postuninstall_cmds=$lt_old_postuninstall_cmds
+# Whether to use a lock for old archive extraction.
+lock_old_archive_extraction=$lock_old_archive_extraction
+
# A C compiler.
LTCC=$lt_CC
@@ -14408,14 +17116,14 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address
# Transform the output of nm in a C name address pair when lib prefix is needed.
global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix
-# The name of the directory that contains temporary libtool files.
-objdir=$objdir
+# Specify filename containing input files for \$NM.
+nm_file_list_spec=$lt_nm_file_list_spec
-# Shell to use when invoking shell scripts.
-SHELL=$lt_SHELL
+# The root where to search for dependent libraries,and in which our libraries should be installed.
+lt_sysroot=$lt_sysroot
-# An echo program that does not interpret backslashes.
-ECHO=$lt_ECHO
+# The name of the directory that contains temporary libtool files.
+objdir=$objdir
# Used to examine libraries when file_magic_cmd begins with "file".
MAGIC_CMD=$MAGIC_CMD
@@ -14423,6 +17131,9 @@ MAGIC_CMD=$MAGIC_CMD
# Must we lock files when doing compilation?
need_locks=$lt_need_locks
+# Manifest tool.
+MANIFEST_TOOL=$lt_MANIFEST_TOOL
+
# Tool to manipulate archived DWARF debug symbol files on Mac OS X.
DSYMUTIL=$lt_DSYMUTIL
@@ -14479,6 +17190,9 @@ library_names_spec=$lt_library_names_spec
# The coded name of the library, if different from the real name.
soname_spec=$lt_soname_spec
+# Permission mode override for installation of shared libraries.
+install_override_mode=$lt_install_override_mode
+
# Command to use after installation of a shared archive.
postinstall_cmds=$lt_postinstall_cmds
@@ -14518,6 +17232,10 @@ striplib=$lt_striplib
# The linker used to build libraries.
LD=$lt_LD
+# How to create reloadable object files.
+reload_flag=$lt_reload_flag
+reload_cmds=$lt_reload_cmds
+
# Commands used to build an old-style archive.
old_archive_cmds=$lt_old_archive_cmds
@@ -14530,12 +17248,12 @@ with_gcc=$GCC
# Compiler flag to turn off builtin functions.
no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag
-# How to pass a linker flag through the compiler.
-wl=$lt_lt_prog_compiler_wl
-
# Additional compiler flags for building library objects.
pic_flag=$lt_lt_prog_compiler_pic
+# How to pass a linker flag through the compiler.
+wl=$lt_lt_prog_compiler_wl
+
# Compiler flag to prevent dynamic linking.
link_static_flag=$lt_lt_prog_compiler_static
@@ -14585,10 +17303,6 @@ no_undefined_flag=$lt_no_undefined_flag
# This must work even if \$libdir does not exist
hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec
-# If ld is used when linking, flag to hardcode \$libdir into a binary
-# during linking. This must work even if \$libdir does not exist.
-hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld
-
# Whether we need a single "-rpath" flag with a separated argument.
hardcode_libdir_separator=$lt_hardcode_libdir_separator
@@ -14622,9 +17336,6 @@ inherit_rpath=$inherit_rpath
# Whether libtool must link a program against all its dependency libraries.
link_all_deplibs=$link_all_deplibs
-# Fix the shell variable \$srcfile for the compiler.
-fix_srcfile_path=$lt_fix_srcfile_path
-
# Set to "yes" if exported symbols are required.
always_export_symbols=$always_export_symbols
@@ -14640,6 +17351,9 @@ include_expsyms=$lt_include_expsyms
# Commands necessary for linking programs (against libraries) with templates.
prelink_cmds=$lt_prelink_cmds
+# Commands necessary for finishing linking programs.
+postlink_cmds=$lt_postlink_cmds
+
# Specify filename containing input files.
file_list_spec=$lt_file_list_spec
@@ -14672,212 +17386,169 @@ ltmain="$ac_aux_dir/ltmain.sh"
# if finds mixed CR/LF and LF-only lines. Since sed operates in
# text mode, it properly converts lines to CR/LF. This bash problem
# is reportedly fixed, but why not run on old versions too?
- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \
- || (rm -f "$cfgfile"; exit 1)
-
- case $xsi_shell in
- yes)
- cat << \_LT_EOF >> "$cfgfile"
-
-# func_dirname file append nondir_replacement
-# Compute the dirname of FILE. If nonempty, add APPEND to the result,
-# otherwise set result to NONDIR_REPLACEMENT.
-func_dirname ()
-{
- case ${1} in
- */*) func_dirname_result="${1%/*}${2}" ;;
- * ) func_dirname_result="${3}" ;;
- esac
-}
-
-# func_basename file
-func_basename ()
-{
- func_basename_result="${1##*/}"
-}
-
-# func_dirname_and_basename file append nondir_replacement
-# perform func_basename and func_dirname in a single function
-# call:
-# dirname: Compute the dirname of FILE. If nonempty,
-# add APPEND to the result, otherwise set result
-# to NONDIR_REPLACEMENT.
-# value returned in "$func_dirname_result"
-# basename: Compute filename of FILE.
-# value retuned in "$func_basename_result"
-# Implementation must be kept synchronized with func_dirname
-# and func_basename. For efficiency, we do not delegate to
-# those functions but instead duplicate the functionality here.
-func_dirname_and_basename ()
-{
- case ${1} in
- */*) func_dirname_result="${1%/*}${2}" ;;
- * ) func_dirname_result="${3}" ;;
- esac
- func_basename_result="${1##*/}"
-}
-
-# func_stripname prefix suffix name
-# strip PREFIX and SUFFIX off of NAME.
-# PREFIX and SUFFIX must not contain globbing or regex special
-# characters, hashes, percent signs, but SUFFIX may contain a leading
-# dot (in which case that matches only a dot).
-func_stripname ()
-{
- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are
- # positional parameters, so assign one to ordinary parameter first.
- func_stripname_result=${3}
- func_stripname_result=${func_stripname_result#"${1}"}
- func_stripname_result=${func_stripname_result%"${2}"}
-}
-
-# func_opt_split
-func_opt_split ()
-{
- func_opt_split_opt=${1%%=*}
- func_opt_split_arg=${1#*=}
-}
-
-# func_lo2o object
-func_lo2o ()
-{
- case ${1} in
- *.lo) func_lo2o_result=${1%.lo}.${objext} ;;
- *) func_lo2o_result=${1} ;;
- esac
-}
-
-# func_xform libobj-or-source
-func_xform ()
-{
- func_xform_result=${1%.*}.lo
-}
-
-# func_arith arithmetic-term...
-func_arith ()
-{
- func_arith_result=$(( $* ))
-}
-
-# func_len string
-# STRING may not start with a hyphen.
-func_len ()
-{
- func_len_result=${#1}
-}
-
-_LT_EOF
- ;;
- *) # Bourne compatible functions.
- cat << \_LT_EOF >> "$cfgfile"
-
-# func_dirname file append nondir_replacement
-# Compute the dirname of FILE. If nonempty, add APPEND to the result,
-# otherwise set result to NONDIR_REPLACEMENT.
-func_dirname ()
-{
- # Extract subdirectory from the argument.
- func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"`
- if test "X$func_dirname_result" = "X${1}"; then
- func_dirname_result="${3}"
- else
- func_dirname_result="$func_dirname_result${2}"
- fi
-}
-
-# func_basename file
-func_basename ()
-{
- func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"`
-}
-
-
-# func_stripname prefix suffix name
-# strip PREFIX and SUFFIX off of NAME.
-# PREFIX and SUFFIX must not contain globbing or regex special
-# characters, hashes, percent signs, but SUFFIX may contain a leading
-# dot (in which case that matches only a dot).
-# func_strip_suffix prefix name
-func_stripname ()
-{
- case ${2} in
- .*) func_stripname_result=`$ECHO "X${3}" \
- | $Xsed -e "s%^${1}%%" -e "s%\\\\${2}\$%%"`;;
- *) func_stripname_result=`$ECHO "X${3}" \
- | $Xsed -e "s%^${1}%%" -e "s%${2}\$%%"`;;
- esac
-}
-
-# sed scripts:
-my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q'
-my_sed_long_arg='1s/^-[^=]*=//'
-
-# func_opt_split
-func_opt_split ()
-{
- func_opt_split_opt=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_opt"`
- func_opt_split_arg=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_arg"`
-}
-
-# func_lo2o object
-func_lo2o ()
-{
- func_lo2o_result=`$ECHO "X${1}" | $Xsed -e "$lo2o"`
-}
-
-# func_xform libobj-or-source
-func_xform ()
-{
- func_xform_result=`$ECHO "X${1}" | $Xsed -e 's/\.[^.]*$/.lo/'`
-}
-
-# func_arith arithmetic-term...
-func_arith ()
-{
- func_arith_result=`expr "$@"`
-}
-
-# func_len string
-# STRING may not start with a hyphen.
-func_len ()
-{
- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len`
-}
-
-_LT_EOF
-esac
-
-case $lt_shell_append in
- yes)
- cat << \_LT_EOF >> "$cfgfile"
-
-# func_append var value
-# Append VALUE to the end of shell variable VAR.
-func_append ()
-{
- eval "$1+=\$2"
-}
-_LT_EOF
- ;;
- *)
- cat << \_LT_EOF >> "$cfgfile"
-
-# func_append var value
-# Append VALUE to the end of shell variable VAR.
-func_append ()
-{
- eval "$1=\$$1\$2"
-}
-
-_LT_EOF
- ;;
- esac
-
-
- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \
- || (rm -f "$cfgfile"; exit 1)
-
- mv -f "$cfgfile" "$ofile" ||
+ sed '$q' "$ltmain" >> "$cfgfile" \
+ || (rm -f "$cfgfile"; exit 1)
+
+ if test x"$xsi_shell" = xyes; then
+ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\
+func_dirname ()\
+{\
+\ case ${1} in\
+\ */*) func_dirname_result="${1%/*}${2}" ;;\
+\ * ) func_dirname_result="${3}" ;;\
+\ esac\
+} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_basename ()$/,/^} # func_basename /c\
+func_basename ()\
+{\
+\ func_basename_result="${1##*/}"\
+} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\
+func_dirname_and_basename ()\
+{\
+\ case ${1} in\
+\ */*) func_dirname_result="${1%/*}${2}" ;;\
+\ * ) func_dirname_result="${3}" ;;\
+\ esac\
+\ func_basename_result="${1##*/}"\
+} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\
+func_stripname ()\
+{\
+\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\
+\ # positional parameters, so assign one to ordinary parameter first.\
+\ func_stripname_result=${3}\
+\ func_stripname_result=${func_stripname_result#"${1}"}\
+\ func_stripname_result=${func_stripname_result%"${2}"}\
+} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\
+func_split_long_opt ()\
+{\
+\ func_split_long_opt_name=${1%%=*}\
+\ func_split_long_opt_arg=${1#*=}\
+} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\
+func_split_short_opt ()\
+{\
+\ func_split_short_opt_arg=${1#??}\
+\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\
+} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\
+func_lo2o ()\
+{\
+\ case ${1} in\
+\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\
+\ *) func_lo2o_result=${1} ;;\
+\ esac\
+} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_xform ()$/,/^} # func_xform /c\
+func_xform ()\
+{\
+ func_xform_result=${1%.*}.lo\
+} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_arith ()$/,/^} # func_arith /c\
+func_arith ()\
+{\
+ func_arith_result=$(( $* ))\
+} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_len ()$/,/^} # func_len /c\
+func_len ()\
+{\
+ func_len_result=${#1}\
+} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+fi
+
+if test x"$lt_shell_append" = xyes; then
+ sed -e '/^func_append ()$/,/^} # func_append /c\
+func_append ()\
+{\
+ eval "${1}+=\\${2}"\
+} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\
+func_append_quoted ()\
+{\
+\ func_quote_for_eval "${2}"\
+\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\
+} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+ # Save a `func_append' function call where possible by direct use of '+='
+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+ test 0 -eq $? || _lt_function_replace_fail=:
+else
+ # Save a `func_append' function call even when '+=' is not available
+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+ test 0 -eq $? || _lt_function_replace_fail=:
+fi
+
+if test x"$_lt_function_replace_fail" = x":"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5
+$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;}
+fi
+
+
+ mv -f "$cfgfile" "$ofile" ||
(rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile")
chmod +x "$ofile"
@@ -14897,7 +17568,7 @@ _ACEOF
ac_clean_files=$ac_clean_files_save
test $ac_write_fail = 0 ||
- as_fn_error "write failure creating $CONFIG_STATUS" "$LINENO" 5
+ as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5
# configure is writing to config.log, and then calls config.status.
@@ -14918,7 +17589,7 @@ if test "$no_create" != yes; then
exec 5>>config.log
# Use ||, not &&, to avoid exiting from the if with $? = 1, which
# would make configure fail if this is the last instruction.
- $ac_cs_success || as_fn_exit $?
+ $ac_cs_success || as_fn_exit 1
fi
if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
diff --git a/Modules/_ctypes/libffi/configure.ac b/Modules/_ctypes/libffi/configure.ac
index 87c63e1..7fe5ff5 100644
--- a/Modules/_ctypes/libffi/configure.ac
+++ b/Modules/_ctypes/libffi/configure.ac
@@ -1,11 +1,11 @@
dnl Process this with autoconf to create configure
#
-# file from libffi - slightly patched for ctypes
+# file from libffi - slightly patched for Python's ctypes
#
-AC_PREREQ(2.63)
+AC_PREREQ(2.68)
-AC_INIT([libffi], [3.0.10rc0], [http://gcc.gnu.org/bugs.html])
+AC_INIT([libffi], [3.0.13], [http://github.com/atgreen/libffi/issues])
AC_CONFIG_HEADERS([fficonfig.h])
AC_CANONICAL_SYSTEM
@@ -13,18 +13,24 @@ target_alias=${target_alias-$host_alias}
. ${srcdir}/configure.host
+AX_ENABLE_BUILDDIR
+
AM_INIT_AUTOMAKE
# The same as in boehm-gc and libstdc++. Have to borrow it from there.
# We must force CC to /not/ be precious variables; otherwise
# the wrong, non-multilib-adjusted value will be used in multilibs.
# As a side effect, we have to subst CFLAGS ourselves.
+# Also save and restore CFLAGS, since AC_PROG_CC will come up with
+# defaults of its own if none are provided.
m4_rename([_AC_ARG_VAR_PRECIOUS],[real_PRECIOUS])
m4_define([_AC_ARG_VAR_PRECIOUS],[])
+save_CFLAGS=$CFLAGS
AC_PROG_CC
+CFLAGS=$save_CFLAGS
m4_undefine([_AC_ARG_VAR_PRECIOUS])
-m4_rename([real_PRECIOUS],[_AC_ARG_VAR_PRECIOUS])
+m4_rename_force([real_PRECIOUS],[_AC_ARG_VAR_PRECIOUS])
AC_SUBST(CFLAGS)
@@ -33,6 +39,26 @@ AM_PROG_CC_C_O
AC_PROG_LIBTOOL
AC_CONFIG_MACRO_DIR([m4])
+# Test for 64-bit build.
+AC_CHECK_SIZEOF([size_t])
+
+AX_COMPILER_VENDOR
+AX_CC_MAXOPT
+# The AX_CFLAGS_WARN_ALL macro doesn't currently work for sunpro
+# compiler.
+if test "$ax_cv_c_compiler_vendor" != "sun"; then
+ AX_CFLAGS_WARN_ALL
+fi
+
+if test "x$GCC" = "xyes"; then
+ CFLAGS="$CFLAGS -fexceptions"
+ touch local.exp
+else
+ cat > local.exp <<EOF
+set CC_FOR_TARGET "$CC"
+EOF
+fi
+
AM_MAINTAINER_MODE
AC_CHECK_HEADERS(sys/mman.h)
@@ -44,9 +70,13 @@ AM_CONDITIONAL(TESTSUBDIR, test -d $srcdir/testsuite)
TARGETDIR="unknown"
case "$host" in
+ aarch64*-*-*)
+ TARGET=AARCH64; TARGETDIR=aarch64
+ ;;
+
alpha*-*-*)
TARGET=ALPHA; TARGETDIR=alpha;
- # Support 128-bit long double, changable via command-line switch.
+ # Support 128-bit long double, changeable via command-line switch.
HAVE_LONG_DOUBLE='defined(__LONG_DOUBLE_128__)'
;;
@@ -60,12 +90,20 @@ case "$host" in
amd64-*-freebsd*)
TARGET=X86_64; TARGETDIR=x86
+ ;;
+
+ amd64-*-freebsd*)
+ TARGET=X86_64; TARGETDIR=x86
;;
avr32*-*-*)
TARGET=AVR32; TARGETDIR=avr32
;;
+ bfin*)
+ TARGET=BFIN; TARGETDIR=bfin
+ ;;
+
cris-*-*)
TARGET=LIBFFI_CRIS; TARGETDIR=cris
;;
@@ -74,7 +112,7 @@ case "$host" in
TARGET=FRV; TARGETDIR=frv
;;
- hppa*-*-linux* | parisc*-*-linux*)
+ hppa*-*-linux* | parisc*-*-linux* | hppa*-*-openbsd*)
TARGET=PA_LINUX; TARGETDIR=pa
;;
hppa*64-*-hpux*)
@@ -87,22 +125,65 @@ case "$host" in
i?86-*-freebsd* | i?86-*-openbsd*)
TARGET=X86_FREEBSD; TARGETDIR=x86
;;
- i?86-win32* | i?86-*-cygwin* | i?86-*-mingw*)
+ i?86-win32* | i?86-*-cygwin* | i?86-*-mingw* | i?86-*-os2* | i?86-*-interix*)
TARGET=X86_WIN32; TARGETDIR=x86
- # All mingw/cygwin/win32 builds require this for sharedlib
- AM_LTLDFLAGS="-no-undefined"
+ # All mingw/cygwin/win32 builds require -no-undefined for sharedlib.
+ # We must also check with_cross_host to decide if this is a native
+ # or cross-build and select where to install dlls appropriately.
+ if test -n "$with_cross_host" &&
+ test x"$with_cross_host" != x"no"; then
+ AM_LTLDFLAGS='-no-undefined -bindir "$(toolexeclibdir)"';
+ else
+ AM_LTLDFLAGS='-no-undefined -bindir "$(bindir)"';
+ fi
;;
i?86-*-darwin*)
TARGET=X86_DARWIN; TARGETDIR=x86
;;
i?86-*-solaris2.1[[0-9]]*)
- TARGET=X86_64; TARGETDIR=x86
+ TARGETDIR=x86
+ if test $ac_cv_sizeof_size_t = 4; then
+ TARGET=X86;
+ else
+ TARGET=X86_64;
+ fi
;;
+
i*86-*-nto-qnx*)
TARGET=X86; TARGETDIR=x86
;;
- i?86-*-*)
- TARGET=X86; TARGETDIR=x86
+
+ x86_64-*-darwin*)
+ TARGET=X86_DARWIN; TARGETDIR=x86
+ ;;
+
+ x86_64-*-cygwin* | x86_64-*-mingw*)
+ TARGET=X86_WIN64; TARGETDIR=x86
+ # All mingw/cygwin/win32 builds require -no-undefined for sharedlib.
+ # We must also check with_cross_host to decide if this is a native
+ # or cross-build and select where to install dlls appropriately.
+ if test -n "$with_cross_host" &&
+ test x"$with_cross_host" != x"no"; then
+ AM_LTLDFLAGS='-no-undefined -bindir "$(toolexeclibdir)"';
+ else
+ AM_LTLDFLAGS='-no-undefined -bindir "$(bindir)"';
+ fi
+ ;;
+
+ i?86-*-* | x86_64-*-*)
+ TARGETDIR=x86
+ if test $ac_cv_sizeof_size_t = 4; then
+ case "$host" in
+ *-gnux32)
+ TARGET=X86_64
+ ;;
+ *)
+ TARGET=X86
+ ;;
+ esac
+ else
+ TARGET=X86_64;
+ fi
;;
ia64*-*-*)
@@ -117,10 +198,22 @@ case "$host" in
TARGET=M68K; TARGETDIR=m68k
;;
- mips-sgi-irix5.* | mips-sgi-irix6.*)
+ microblaze*-*-*)
+ TARGET=MICROBLAZE; TARGETDIR=microblaze
+ ;;
+
+ moxie-*-*)
+ TARGET=MOXIE; TARGETDIR=moxie
+ ;;
+
+ metag-*-*)
+ TARGET=METAG; TARGETDIR=metag
+ ;;
+
+ mips-sgi-irix5.* | mips-sgi-irix6.* | mips*-*-rtems*)
TARGET=MIPS_IRIX; TARGETDIR=mips
;;
- mips*-*-linux*)
+ mips*-*-linux* | mips*-*-openbsd*)
# Support 128-bit long double for NewABI.
HAVE_LONG_DOUBLE='defined(__mips64)'
TARGET=MIPS_IRIX; TARGETDIR=mips
@@ -129,18 +222,24 @@ case "$host" in
powerpc*-*-linux* | powerpc-*-sysv*)
TARGET=POWERPC; TARGETDIR=powerpc
;;
+ powerpc-*-amigaos*)
+ TARGET=POWERPC; TARGETDIR=powerpc
+ ;;
powerpc-*-beos*)
TARGET=POWERPC; TARGETDIR=powerpc
;;
- powerpc-*-darwin*)
+ powerpc-*-darwin* | powerpc64-*-darwin*)
TARGET=POWERPC_DARWIN; TARGETDIR=powerpc
;;
powerpc-*-aix* | rs6000-*-aix*)
TARGET=POWERPC_AIX; TARGETDIR=powerpc
;;
- powerpc-*-freebsd*)
+ powerpc-*-freebsd* | powerpc-*-openbsd*)
TARGET=POWERPC_FREEBSD; TARGETDIR=powerpc
;;
+ powerpc64-*-freebsd*)
+ TARGET=POWERPC; TARGETDIR=powerpc
+ ;;
powerpc*-*-rtems*)
TARGET=POWERPC; TARGETDIR=powerpc
;;
@@ -160,17 +259,14 @@ case "$host" in
TARGET=SPARC; TARGETDIR=sparc
;;
- x86_64-*-darwin*)
- TARGET=X86_DARWIN; TARGETDIR=x86
- ;;
+ tile*-*)
+ TARGET=TILE; TARGETDIR=tile
+ ;;
- x86_64-*-cygwin* | x86_64-*-mingw*)
- TARGET=X86_WIN64; TARGETDIR=x86
+ xtensa*-*)
+ TARGET=XTENSA; TARGETDIR=xtensa
;;
- x86_64-*-*)
- TARGET=X86_64; TARGETDIR=x86
- ;;
esac
AC_SUBST(AM_RUNTESTFLAGS)
@@ -181,6 +277,7 @@ if test $TARGETDIR = unknown; then
fi
AM_CONDITIONAL(MIPS,[expr x$TARGET : 'xMIPS' > /dev/null])
+AM_CONDITIONAL(BFIN, test x$TARGET = xBFIN)
AM_CONDITIONAL(SPARC, test x$TARGET = xSPARC)
AM_CONDITIONAL(X86, test x$TARGET = xX86)
AM_CONDITIONAL(X86_FREEBSD, test x$TARGET = xX86_FREEBSD)
@@ -191,10 +288,14 @@ AM_CONDITIONAL(ALPHA, test x$TARGET = xALPHA)
AM_CONDITIONAL(IA64, test x$TARGET = xIA64)
AM_CONDITIONAL(M32R, test x$TARGET = xM32R)
AM_CONDITIONAL(M68K, test x$TARGET = xM68K)
+AM_CONDITIONAL(MICROBLAZE, test x$TARGET = xMICROBLAZE)
+AM_CONDITIONAL(METAG, test x$TARGET = xMETAG)
+AM_CONDITIONAL(MOXIE, test x$TARGET = xMOXIE)
AM_CONDITIONAL(POWERPC, test x$TARGET = xPOWERPC)
AM_CONDITIONAL(POWERPC_AIX, test x$TARGET = xPOWERPC_AIX)
AM_CONDITIONAL(POWERPC_DARWIN, test x$TARGET = xPOWERPC_DARWIN)
AM_CONDITIONAL(POWERPC_FREEBSD, test x$TARGET = xPOWERPC_FREEBSD)
+AM_CONDITIONAL(AARCH64, test x$TARGET = xAARCH64)
AM_CONDITIONAL(ARM, test x$TARGET = xARM)
AM_CONDITIONAL(AVR32, test x$TARGET = xAVR32)
AM_CONDITIONAL(LIBFFI_CRIS, test x$TARGET = xLIBFFI_CRIS)
@@ -206,6 +307,8 @@ AM_CONDITIONAL(SH64, test x$TARGET = xSH64)
AM_CONDITIONAL(PA_LINUX, test x$TARGET = xPA_LINUX)
AM_CONDITIONAL(PA_HPUX, test x$TARGET = xPA_HPUX)
AM_CONDITIONAL(PA64_HPUX, test x$TARGET = xPA64_HPUX)
+AM_CONDITIONAL(TILE, test x$TARGET = xTILE)
+AM_CONDITIONAL(XTENSA, test x$TARGET = xXTENSA)
AC_HEADER_STDC
AC_CHECK_FUNCS(memcpy)
@@ -228,17 +331,7 @@ AC_SUBST(HAVE_LONG_DOUBLE)
AC_C_BIGENDIAN
-AC_CACHE_CHECK([assembler .cfi pseudo-op support],
- libffi_cv_as_cfi_pseudo_op, [
- libffi_cv_as_cfi_pseudo_op=unknown
- AC_TRY_COMPILE([asm (".cfi_startproc\n\t.cfi_endproc");],,
- [libffi_cv_as_cfi_pseudo_op=yes],
- [libffi_cv_as_cfi_pseudo_op=no])
-])
-if test "x$libffi_cv_as_cfi_pseudo_op" = xyes; then
- AC_DEFINE(HAVE_AS_CFI_PSEUDO_OP, 1,
- [Define if your assembler supports .cfi_* directives.])
-fi
+GCC_AS_CFI_PSEUDO_OP
if test x$TARGET = xSPARC; then
AC_CACHE_CHECK([assembler and linker support unaligned pc related relocs],
@@ -261,7 +354,7 @@ if test x$TARGET = xSPARC; then
libffi_cv_as_register_pseudo_op, [
libffi_cv_as_register_pseudo_op=unknown
# Check if we have .register
- AC_TRY_COMPILE([asm (".register %g2, #scratch");],,
+ AC_TRY_COMPILE(,[asm (".register %g2, #scratch");],
[libffi_cv_as_register_pseudo_op=yes],
[libffi_cv_as_register_pseudo_op=no])
])
@@ -274,64 +367,132 @@ fi
if test x$TARGET = xX86 || test x$TARGET = xX86_WIN32 || test x$TARGET = xX86_64; then
AC_CACHE_CHECK([assembler supports pc related relocs],
libffi_cv_as_x86_pcrel, [
- libffi_cv_as_x86_pcrel=yes
+ libffi_cv_as_x86_pcrel=no
echo '.text; foo: nop; .data; .long foo-.; .text' > conftest.s
- if $CC $CFLAGS -c conftest.s 2>&1 | grep -i warning > /dev/null; then
- libffi_cv_as_x86_pcrel=no
+ if $CC $CFLAGS -c conftest.s > /dev/null 2>&1; then
+ libffi_cv_as_x86_pcrel=yes
fi
])
if test "x$libffi_cv_as_x86_pcrel" = xyes; then
AC_DEFINE(HAVE_AS_X86_PCREL, 1,
[Define if your assembler supports PC relative relocs.])
fi
+
+ AC_CACHE_CHECK([assembler .ascii pseudo-op support],
+ libffi_cv_as_ascii_pseudo_op, [
+ libffi_cv_as_ascii_pseudo_op=unknown
+ # Check if we have .ascii
+ AC_TRY_COMPILE(,[asm (".ascii \\"string\\"");],
+ [libffi_cv_as_ascii_pseudo_op=yes],
+ [libffi_cv_as_ascii_pseudo_op=no])
+ ])
+ if test "x$libffi_cv_as_ascii_pseudo_op" = xyes; then
+ AC_DEFINE(HAVE_AS_ASCII_PSEUDO_OP, 1,
+ [Define if your assembler supports .ascii.])
+ fi
+
+ AC_CACHE_CHECK([assembler .string pseudo-op support],
+ libffi_cv_as_string_pseudo_op, [
+ libffi_cv_as_string_pseudo_op=unknown
+ # Check if we have .string
+ AC_TRY_COMPILE(,[asm (".string \\"string\\"");],
+ [libffi_cv_as_string_pseudo_op=yes],
+ [libffi_cv_as_string_pseudo_op=no])
+ ])
+ if test "x$libffi_cv_as_string_pseudo_op" = xyes; then
+ AC_DEFINE(HAVE_AS_STRING_PSEUDO_OP, 1,
+ [Define if your assembler supports .string.])
+ fi
fi
+# On PaX enable kernels that have MPROTECT enable we can't use PROT_EXEC.
+AC_ARG_ENABLE(pax_emutramp,
+ [ --enable-pax_emutramp enable pax emulated trampolines, for we can't use PROT_EXEC],
+ if test "$enable_pax_emutramp" = "yes"; then
+ AC_DEFINE(FFI_MMAP_EXEC_EMUTRAMP_PAX, 1,
+ [Define this if you want to enable pax emulated trampolines])
+ fi)
+
+if test x$TARGET = xX86_WIN64; then
+ LT_SYS_SYMBOL_USCORE
+ if test "x$sys_symbol_underscore" = xyes; then
+ AC_DEFINE(SYMBOL_UNDERSCORE, 1, [Define if symbols are underscored.])
+ fi
+fi
+
+FFI_EXEC_TRAMPOLINE_TABLE=0
case "$target" in
- *-apple-darwin10* | *-*-freebsd* | *-*-openbsd* | *-pc-solaris*)
+ *arm*-apple-darwin*)
+ FFI_EXEC_TRAMPOLINE_TABLE=1
+ AC_DEFINE(FFI_EXEC_TRAMPOLINE_TABLE, 1,
+ [Cannot use PROT_EXEC on this target, so, we revert to
+ alternative means])
+ ;;
+ *-apple-darwin1* | *-*-freebsd* | *-*-kfreebsd* | *-*-openbsd* | *-pc-solaris*)
AC_DEFINE(FFI_MMAP_EXEC_WRIT, 1,
[Cannot use malloc on this target, so, we revert to
alternative means])
;;
esac
-
-AC_CACHE_CHECK([whether .eh_frame section should be read-only],
- libffi_cv_ro_eh_frame, [
- libffi_cv_ro_eh_frame=no
- echo 'extern void foo (void); void bar (void) { foo (); foo (); }' > conftest.c
- if $CC $CFLAGS -S -fpic -fexceptions -o conftest.s conftest.c > /dev/null 2>&1; then
- if grep '.section.*eh_frame.*"a"' conftest.s > /dev/null; then
- libffi_cv_ro_eh_frame=yes
- elif grep '.section.*eh_frame.*#alloc' conftest.c \
- | grep -v '#write' > /dev/null; then
- libffi_cv_ro_eh_frame=yes
- fi
+AM_CONDITIONAL(FFI_EXEC_TRAMPOLINE_TABLE, test x$FFI_EXEC_TRAMPOLINE_TABLE = x1)
+AC_SUBST(FFI_EXEC_TRAMPOLINE_TABLE)
+
+if test x$TARGET = xX86_64; then
+ AC_CACHE_CHECK([assembler supports unwind section type],
+ libffi_cv_as_x86_64_unwind_section_type, [
+ libffi_cv_as_x86_64_unwind_section_type=yes
+ echo '.section .eh_frame,"a",@unwind' > conftest.s
+ if $CC $CFLAGS -c conftest.s 2>&1 | grep -i warning > /dev/null; then
+ libffi_cv_as_x86_64_unwind_section_type=no
fi
- rm -f conftest.*
- ])
-if test "x$libffi_cv_ro_eh_frame" = xyes; then
- AC_DEFINE(HAVE_RO_EH_FRAME, 1,
- [Define if .eh_frame sections should be read-only.])
- AC_DEFINE(EH_FRAME_FLAGS, "a",
- [Define to the flags needed for the .section .eh_frame directive.])
-else
- AC_DEFINE(EH_FRAME_FLAGS, "aw",
- [Define to the flags needed for the .section .eh_frame directive.])
+ ])
+ if test "x$libffi_cv_as_x86_64_unwind_section_type" = xyes; then
+ AC_DEFINE(HAVE_AS_X86_64_UNWIND_SECTION_TYPE, 1,
+ [Define if your assembler supports unwind section type.])
+ fi
fi
-AC_CACHE_CHECK([for __attribute__((visibility("hidden")))],
- libffi_cv_hidden_visibility_attribute, [
- echo 'int __attribute__ ((visibility ("hidden"))) foo (void) { return 1; }' > conftest.c
- libffi_cv_hidden_visibility_attribute=no
- if AC_TRY_COMMAND(${CC-cc} -Werror -S conftest.c -o conftest.s 1>&AS_MESSAGE_LOG_FD); then
- if grep '\.hidden.*foo' conftest.s >/dev/null; then
- libffi_cv_hidden_visibility_attribute=yes
- fi
- fi
- rm -f conftest.*
- ])
-if test $libffi_cv_hidden_visibility_attribute = yes; then
- AC_DEFINE(HAVE_HIDDEN_VISIBILITY_ATTRIBUTE, 1,
- [Define if __attribute__((visibility("hidden"))) is supported.])
+if test "x$GCC" = "xyes"; then
+ AC_CACHE_CHECK([whether .eh_frame section should be read-only],
+ libffi_cv_ro_eh_frame, [
+ libffi_cv_ro_eh_frame=no
+ echo 'extern void foo (void); void bar (void) { foo (); foo (); }' > conftest.c
+ if $CC $CFLAGS -c -fpic -fexceptions -o conftest.o conftest.c > /dev/null 2>&1; then
+ objdump -h conftest.o > conftest.dump 2>&1
+ libffi_eh_frame_line=`grep -n eh_frame conftest.dump | cut -d: -f 1`
+ libffi_test_line=`expr $libffi_eh_frame_line + 1`p
+ sed -n $libffi_test_line conftest.dump > conftest.line
+ if grep READONLY conftest.line > /dev/null; then
+ libffi_cv_ro_eh_frame=yes
+ fi
+ fi
+ rm -f conftest.*
+ ])
+ if test "x$libffi_cv_ro_eh_frame" = xyes; then
+ AC_DEFINE(HAVE_RO_EH_FRAME, 1,
+ [Define if .eh_frame sections should be read-only.])
+ AC_DEFINE(EH_FRAME_FLAGS, "a",
+ [Define to the flags needed for the .section .eh_frame directive. ])
+ else
+ AC_DEFINE(EH_FRAME_FLAGS, "aw",
+ [Define to the flags needed for the .section .eh_frame directive. ])
+ fi
+
+ AC_CACHE_CHECK([for __attribute__((visibility("hidden")))],
+ libffi_cv_hidden_visibility_attribute, [
+ echo 'int __attribute__ ((visibility ("hidden"))) foo (void) { return 1 ; }' > conftest.c
+ libffi_cv_hidden_visibility_attribute=no
+ if AC_TRY_COMMAND(${CC-cc} -Werror -S conftest.c -o conftest.s 1>&AS_MESSAGE_LOG_FD); then
+ if grep '\.hidden.*foo' conftest.s >/dev/null; then
+ libffi_cv_hidden_visibility_attribute=yes
+ fi
+ fi
+ rm -f conftest.*
+ ])
+ if test $libffi_cv_hidden_visibility_attribute = yes; then
+ AC_DEFINE(HAVE_HIDDEN_VISIBILITY_ATTRIBUTE, 1,
+ [Define if __attribute__((visibility("hidden"))) is supported.])
+ fi
fi
AH_BOTTOM([
@@ -360,12 +521,14 @@ AC_ARG_ENABLE(debug,
if test "$enable_debug" = "yes"; then
AC_DEFINE(FFI_DEBUG, 1, [Define this if you want extra debugging.])
fi)
+AM_CONDITIONAL(FFI_DEBUG, test "$enable_debug" = "yes")
AC_ARG_ENABLE(structs,
[ --disable-structs omit code for struct support],
if test "$enable_structs" = "no"; then
    AC_DEFINE(FFI_NO_STRUCTS, 1, [Define this if you do not want support for aggregate types.])
fi)
+AM_CONDITIONAL(FFI_DEBUG, test "$enable_debug" = "yes")
AC_ARG_ENABLE(raw-api,
[ --disable-raw-api make the raw api unavailable],
@@ -379,28 +542,28 @@ AC_ARG_ENABLE(purify-safety,
AC_DEFINE(USING_PURIFY, 1, [Define this if you are using Purify and want to suppress spurious messages.])
fi)
-if test -n "$with_cross_host" &&
- test x"$with_cross_host" != x"no"; then
- toolexecdir='$(exec_prefix)/$(target_alias)'
- toolexeclibdir='$(toolexecdir)/lib'
+# These variables are only ever used when we cross-build to X86_WIN32.
+# And we only support this with GCC, so...
+if test "x$GCC" = "xyes"; then
+ if test -n "$with_cross_host" &&
+ test x"$with_cross_host" != x"no"; then
+ toolexecdir='$(exec_prefix)/$(target_alias)'
+ toolexeclibdir='$(toolexecdir)/lib'
+ else
+ toolexecdir='$(libdir)/gcc-lib/$(target_alias)'
+ toolexeclibdir='$(libdir)'
+ fi
+ multi_os_directory=`$CC -print-multi-os-directory`
+ case $multi_os_directory in
+ .) ;; # Avoid trailing /.
+ ../*) toolexeclibdir=$toolexeclibdir/$multi_os_directory ;;
+ esac
+ AC_SUBST(toolexecdir)
else
- toolexecdir='$(libdir)/gcc-lib/$(target_alias)'
toolexeclibdir='$(libdir)'
fi
-multi_os_directory=`$CC -print-multi-os-directory`
-case $multi_os_directory in
- .) ;; # Avoid trailing /.
- *) toolexeclibdir=$toolexeclibdir/$multi_os_directory ;;
-esac
-AC_SUBST(toolexecdir)
AC_SUBST(toolexeclibdir)
-if test "${multilib}" = "yes"; then
- multilib_arg="--enable-multilib"
-else
- multilib_arg=
-fi
-
AC_CONFIG_COMMANDS(include, [test -d include || mkdir include])
AC_CONFIG_COMMANDS(src, [
test -d src || mkdir src
diff --git a/Modules/_ctypes/libffi/depcomp b/Modules/_ctypes/libffi/depcomp
index ca5ea4e..df8eea7 100755
--- a/Modules/_ctypes/libffi/depcomp
+++ b/Modules/_ctypes/libffi/depcomp
@@ -1,10 +1,10 @@
#! /bin/sh
# depcomp - compile a program generating dependencies as side-effects
-scriptversion=2006-10-15.18
+scriptversion=2009-04-28.21; # UTC
-# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006 Free Software
-# Foundation, Inc.
+# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006, 2007, 2009 Free
+# Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -17,9 +17,7 @@ scriptversion=2006-10-15.18
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
-# 02110-1301, USA.
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
@@ -87,6 +85,15 @@ if test "$depmode" = dashXmstdout; then
depmode=dashmstdout
fi
+cygpath_u="cygpath -u -f -"
+if test "$depmode" = msvcmsys; then
+ # This is just like msvisualcpp but w/o cygpath translation.
+ # Just convert the backslash-escaped backslashes to single forward
+ # slashes to satisfy depend.m4
+ cygpath_u="sed s,\\\\\\\\,/,g"
+ depmode=msvisualcpp
+fi
+
case "$depmode" in
gcc3)
## gcc 3 implements dependency tracking that does exactly what
@@ -192,14 +199,14 @@ sgi)
' < "$tmpdepfile" \
| sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \
tr '
-' ' ' >> $depfile
- echo >> $depfile
+' ' ' >> "$depfile"
+ echo >> "$depfile"
# The second pass generates a dummy entry for each header file.
tr ' ' '
' < "$tmpdepfile" \
| sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \
- >> $depfile
+ >> "$depfile"
else
# The sourcefile does not contain any dependencies, so just
# store a dummy comment line, to avoid errors with the Makefile
@@ -215,34 +222,39 @@ aix)
# current directory. Also, the AIX compiler puts `$object:' at the
# start of each line; $object doesn't have directory information.
# Version 6 uses the directory in both cases.
- stripped=`echo "$object" | sed 's/\(.*\)\..*$/\1/'`
- tmpdepfile="$stripped.u"
+ dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
+ test "x$dir" = "x$object" && dir=
+ base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
if test "$libtool" = yes; then
+ tmpdepfile1=$dir$base.u
+ tmpdepfile2=$base.u
+ tmpdepfile3=$dir.libs/$base.u
"$@" -Wc,-M
else
+ tmpdepfile1=$dir$base.u
+ tmpdepfile2=$dir$base.u
+ tmpdepfile3=$dir$base.u
"$@" -M
fi
stat=$?
- if test -f "$tmpdepfile"; then :
- else
- stripped=`echo "$stripped" | sed 's,^.*/,,'`
- tmpdepfile="$stripped.u"
- fi
-
if test $stat -eq 0; then :
else
- rm -f "$tmpdepfile"
+ rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
exit $stat
fi
+ for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+ do
+ test -f "$tmpdepfile" && break
+ done
if test -f "$tmpdepfile"; then
- outname="$stripped.o"
# Each line is of the form `foo.o: dependent.h'.
# Do two passes, one to just change these to
# `$object: dependent.h' and one to simply `dependent.h:'.
- sed -e "s,^$outname:,$object :," < "$tmpdepfile" > "$depfile"
- sed -e "s,^$outname: \(.*\)$,\1:," < "$tmpdepfile" >> "$depfile"
+ sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile"
+ # That's a tab and a space in the [].
+ sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile"
else
# The sourcefile does not contain any dependencies, so just
# store a dummy comment line, to avoid errors with the Makefile
@@ -323,7 +335,12 @@ hp2)
if test -f "$tmpdepfile"; then
sed -e "s,^.*\.[a-z]*:,$object:," "$tmpdepfile" > "$depfile"
# Add `dependent.h:' lines.
- sed -ne '2,${; s/^ *//; s/ \\*$//; s/$/:/; p;}' "$tmpdepfile" >> "$depfile"
+ sed -ne '2,${
+ s/^ *//
+ s/ \\*$//
+ s/$/:/
+ p
+ }' "$tmpdepfile" >> "$depfile"
else
echo "#dummy" > "$depfile"
fi
@@ -399,7 +416,7 @@ dashmstdout)
# Remove the call to Libtool.
if test "$libtool" = yes; then
- while test $1 != '--mode=compile'; do
+ while test "X$1" != 'X--mode=compile'; do
shift
done
shift
@@ -450,32 +467,39 @@ makedepend)
"$@" || exit $?
# Remove any Libtool call
if test "$libtool" = yes; then
- while test $1 != '--mode=compile'; do
+ while test "X$1" != 'X--mode=compile'; do
shift
done
shift
fi
# X makedepend
shift
- cleared=no
- for arg in "$@"; do
+ cleared=no eat=no
+ for arg
+ do
case $cleared in
no)
set ""; shift
cleared=yes ;;
esac
+ if test $eat = yes; then
+ eat=no
+ continue
+ fi
case "$arg" in
-D*|-I*)
set fnord "$@" "$arg"; shift ;;
# Strip any option that makedepend may not understand. Remove
# the object too, otherwise makedepend will parse it as a source file.
+ -arch)
+ eat=yes ;;
-*|$object)
;;
*)
set fnord "$@" "$arg"; shift ;;
esac
done
- obj_suffix="`echo $object | sed 's/^.*\././'`"
+ obj_suffix=`echo "$object" | sed 's/^.*\././'`
touch "$tmpdepfile"
${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@"
rm -f "$depfile"
@@ -495,7 +519,7 @@ cpp)
# Remove the call to Libtool.
if test "$libtool" = yes; then
- while test $1 != '--mode=compile'; do
+ while test "X$1" != 'X--mode=compile'; do
shift
done
shift
@@ -533,13 +557,27 @@ cpp)
msvisualcpp)
# Important note: in order to support this mode, a compiler *must*
- # always write the preprocessed file to stdout, regardless of -o,
- # because we must use -o when running libtool.
+ # always write the preprocessed file to stdout.
"$@" || exit $?
+
+ # Remove the call to Libtool.
+ if test "$libtool" = yes; then
+ while test "X$1" != 'X--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+
IFS=" "
for arg
do
case "$arg" in
+ -o)
+ shift
+ ;;
+ $object)
+ shift
+ ;;
"-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI")
set fnord "$@"
shift
@@ -552,16 +590,23 @@ msvisualcpp)
;;
esac
done
- "$@" -E |
- sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::echo "`cygpath -u \\"\1\\"`":p' | sort | uniq > "$tmpdepfile"
+ "$@" -E 2>/dev/null |
+ sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile"
rm -f "$depfile"
echo "$object : \\" > "$depfile"
- . "$tmpdepfile" | sed 's% %\\ %g' | sed -n '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile"
+ sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile"
echo " " >> "$depfile"
- . "$tmpdepfile" | sed 's% %\\ %g' | sed -n '/^\(.*\)$/ s::\1\::p' >> "$depfile"
+ sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile"
rm -f "$tmpdepfile"
;;
+msvcmsys)
+ # This case exists only to let depend.m4 do its work. It works by
+ # looking at the text of this script. This case will never be run,
+ # since it is checked for above.
+ exit 1
+ ;;
+
none)
exec "$@"
;;
@@ -580,5 +625,6 @@ exit 0
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
-# time-stamp-end: "$"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
# End:
diff --git a/Modules/_ctypes/libffi/doc/libffi.info b/Modules/_ctypes/libffi/doc/libffi.info
index 896a5ec..6d5acf8 100644
--- a/Modules/_ctypes/libffi/doc/libffi.info
+++ b/Modules/_ctypes/libffi/doc/libffi.info
@@ -4,7 +4,7 @@ from ../libffi/doc/libffi.texi.
This manual is for Libffi, a portable foreign-function interface
library.
- Copyright (C) 2008, 2010 Red Hat, Inc.
+ Copyright (C) 2008, 2010, 2011 Red Hat, Inc.
Permission is granted to copy, distribute and/or modify this
document under the terms of the GNU General Public License as
@@ -27,7 +27,7 @@ libffi
This manual is for Libffi, a portable foreign-function interface
library.
- Copyright (C) 2008, 2010 Red Hat, Inc.
+ Copyright (C) 2008, 2010, 2011 Red Hat, Inc.
Permission is granted to copy, distribute and/or modify this
document under the terms of the GNU General Public License as
@@ -115,8 +115,6 @@ To prepare a call interface object, use the function `ffi_prep_cif'.
want. *note Multiple ABIs:: for more information.
NARGS is the number of arguments that this function accepts.
- `libffi' does not yet handle varargs functions; see *note Missing
- Features:: for more information.
RTYPE is a pointer to an `ffi_type' structure that describes the
return type of the function. *Note Types::.
@@ -129,6 +127,30 @@ To prepare a call interface object, use the function `ffi_prep_cif'.
properly; `FFI_BAD_TYPEDEF' if one of the `ffi_type' objects is
incorrect; or `FFI_BAD_ABI' if the ABI parameter is invalid.
+ If the function being called is variadic (varargs) then
+`ffi_prep_cif_var' must be used instead of `ffi_prep_cif'.
+
+ -- Function: ffi_status ffi_prep_cif_var (ffi_cif *CIF, ffi_abi ABI,
+ unsigned int NFIXEDARGS, unsigned int NTOTALARGS,
+ ffi_type *RTYPE, ffi_type **ARGTYPES)
+ This initializes CIF according to the given parameters for a call
+ to a variadic function. In general its operation is the same as
+ for `ffi_prep_cif' except that:
+
+ NFIXEDARGS is the number of fixed arguments, prior to any variadic
+ arguments. It must be greater than zero.
+
+ NTOTALARGS is the total number of arguments, including variadic and
+ fixed arguments.
+
+ Note that different cif's must be prepped for calls to the same
+ function when different numbers of arguments are passed.
+
+ Also note that a call to `ffi_prep_cif_var' with
+ NFIXEDARGS=NTOTALARGS is NOT equivalent to a call to
+ `ffi_prep_cif'.
+
+
To call a function using an initialized `ffi_cif', use the
`ffi_call' function:
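[Editor's note: the hunk above documents the new ffi_prep_cif_var entry point. A minimal sketch of the intended usage, calling the C library's variadic printf; the example is illustrative only and is not part of this patch.]

    #include <ffi.h>
    #include <stdio.h>

    int main(void)
    {
        ffi_cif cif;
        ffi_type *argtypes[2] = { &ffi_type_pointer, &ffi_type_sint };
        char *fmt = "%d\n";
        int value = 42;
        void *argvalues[2] = { &fmt, &value };
        ffi_arg rc;

        /* One fixed argument (the format string), two arguments in total. */
        if (ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2,
                             &ffi_type_sint, argtypes) == FFI_OK)
            ffi_call(&cif, FFI_FN(printf), &rc, argvalues);
        return 0;
    }

As the documentation notes, a separate cif must be prepared for each distinct argument count when calling the same variadic function.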
@@ -147,7 +169,9 @@ To prepare a call interface object, use the function `ffi_prep_cif'.
AVALUES is a vector of `void *' pointers that point to the memory
locations holding the argument values for a call. If CIF declares
that the function has no arguments (i.e., NARGS was 0), then
- AVALUES is ignored.
+ AVALUES is ignored. Note that argument values may be modified by
+ the callee (for instance, structs passed by value); the burden of
+ copying pass-by-value arguments is placed on the caller.

File: libffi.info, Node: Simple Example, Next: Types, Prev: The Basics, Up: Using libffi
@@ -294,7 +318,7 @@ is perfectly happy passing structures back and forth. You must first
describe the structure to `libffi' by creating a new `ffi_type' object
for it.
- -- ffi_type:
+ -- Data type: ffi_type
The `ffi_type' has the following members:
`size_t size'
This is set by `libffi'; you should initialize it to zero.
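[Editor's note: a short sketch of the structure-description idiom the hunk above refers to; the 'point' type is hypothetical and not taken from the patch.]

    #include <ffi.h>

    /* struct point { double x, y; } described for libffi:
       size and alignment are left at 0 so libffi fills them in,
       and elements is a NULL-terminated list of member types. */
    static ffi_type *point_elements[] =
        { &ffi_type_double, &ffi_type_double, NULL };
    static ffi_type point_type =
        { 0, 0, FFI_TYPE_STRUCT, point_elements };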
@@ -509,9 +533,7 @@ File: libffi.info, Node: Missing Features, Next: Index, Prev: Using libffi,
`libffi' is missing a few features. We welcome patches to add support
for these.
- * There is no support for calling varargs functions. This may work
- on some platforms, depending on how the ABI is defined, but it is
- not reliable.
+ * Variadic closures.
* There is no support for bit fields in structures.
@@ -519,6 +541,9 @@ for these.
* The "raw" API is undocumented.
+ Note that variadic support is very new and tested on a relatively
+small number of platforms.
+

File: libffi.info, Node: Index, Prev: Missing Features, Up: Top
@@ -528,7 +553,6 @@ Index
* Menu:
-* : Structures. (line 12)
* ABI: Introduction. (line 13)
* Application Binary Interface: Introduction. (line 13)
* calling convention: Introduction. (line 13)
@@ -536,11 +560,12 @@ Index
* closure API: The Closure API. (line 13)
* closures: The Closure API. (line 13)
* FFI: Introduction. (line 31)
-* ffi_call: The Basics. (line 41)
-* ffi_closure_alloca: The Closure API. (line 19)
+* ffi_call: The Basics. (line 63)
+* ffi_closure_alloc: The Closure API. (line 19)
* ffi_closure_free: The Closure API. (line 26)
* FFI_CLOSURES: The Closure API. (line 13)
* ffi_prep_cif: The Basics. (line 16)
+* ffi_prep_cif_var: The Basics. (line 39)
* ffi_prep_closure_loc: The Closure API. (line 34)
* ffi_status <1>: The Closure API. (line 37)
* ffi_status: The Basics. (line 18)
@@ -568,24 +593,24 @@ Index
* ffi_type_void: Primitive Types. (line 10)
* Foreign Function Interface: Introduction. (line 31)
* void <1>: The Closure API. (line 20)
-* void: The Basics. (line 43)
+* void: The Basics. (line 65)

Tag Table:
-Node: Top706
-Node: Introduction1448
-Node: Using libffi3084
-Node: The Basics3570
-Node: Simple Example6177
-Node: Types7204
-Node: Primitive Types7487
-Node: Structures9307
-Node: Type Example10167
-Node: Multiple ABIs11390
-Node: The Closure API11761
-Node: Closure Example14705
-Node: Missing Features16264
-Node: Index16757
+Node: Top712
+Node: Introduction1460
+Node: Using libffi3096
+Node: The Basics3582
+Node: Simple Example7224
+Node: Types8251
+Node: Primitive Types8534
+Node: Structures10354
+Node: Type Example11224
+Node: Multiple ABIs12447
+Node: The Closure API12818
+Node: Closure Example15762
+Node: Missing Features17321
+Node: Index17774

End Tag Table
diff --git a/Modules/_ctypes/libffi/doc/libffi.texi b/Modules/_ctypes/libffi/doc/libffi.texi
index 9fa5b17..5c0552b 100644
--- a/Modules/_ctypes/libffi/doc/libffi.texi
+++ b/Modules/_ctypes/libffi/doc/libffi.texi
@@ -19,7 +19,7 @@
This manual is for Libffi, a portable foreign-function interface
library.
-Copyright @copyright{} 2008, 2010 Red Hat, Inc.
+Copyright @copyright{} 2008, 2010, 2011 Red Hat, Inc.
@quotation
Permission is granted to copy, distribute and/or modify this document
@@ -133,8 +133,6 @@ This initializes @var{cif} according to the given parameters.
you want. @ref{Multiple ABIs} for more information.
@var{nargs} is the number of arguments that this function accepts.
-@samp{libffi} does not yet handle varargs functions; see @ref{Missing
-Features} for more information.
@var{rtype} is a pointer to an @code{ffi_type} structure that
describes the return type of the function. @xref{Types}.
@@ -150,6 +148,30 @@ objects is incorrect; or @code{FFI_BAD_ABI} if the @var{abi} parameter
is invalid.
@end defun
+If the function being called is variadic (varargs) then
+@code{ffi_prep_cif_var} must be used instead of @code{ffi_prep_cif}.
+
+@findex ffi_prep_cif_var
+@defun ffi_status ffi_prep_cif_var (ffi_cif *@var{cif}, ffi_abi @var{abi}, unsigned int @var{nfixedargs}, unsigned int @var{ntotalargs}, ffi_type *@var{rtype}, ffi_type **@var{argtypes})
+This initializes @var{cif} according to the given parameters for
+a call to a variadic function. In general its operation is the
+same as for @code{ffi_prep_cif} except that:
+
+@var{nfixedargs} is the number of fixed arguments, prior to any
+variadic arguments. It must be greater than zero.
+
+@var{ntotalargs} is the total number of arguments, including variadic
+and fixed arguments.
+
+Note that different cif's must be prepped for calls to the same
+function when different numbers of arguments are passed.
+
+Also note that a call to @code{ffi_prep_cif_var} with
+@var{nfixedargs}=@var{ntotalargs} is NOT equivalent to a call to
+@code{ffi_prep_cif}.
+
+@end defun
+
To call a function using an initialized @code{ffi_cif}, use the
@code{ffi_call} function:
@@ -171,7 +193,9 @@ discarded.
@var{avalues} is a vector of @code{void *} pointers that point to the
memory locations holding the argument values for a call. If @var{cif}
declares that the function has no arguments (i.e., @var{nargs} was 0),
-then @var{avalues} is ignored.
+then @var{avalues} is ignored. Note that argument values may be
+modified by the callee (for instance, structs passed by value); the
+burden of copying pass-by-value arguments is placed on the caller.
@end defun
@@ -336,7 +360,7 @@ You must first describe the structure to @samp{libffi} by creating a
new @code{ffi_type} object for it.
@tindex ffi_type
-@deftp ffi_type
+@deftp {Data type} ffi_type
The @code{ffi_type} has the following members:
@table @code
@item size_t size
@@ -438,7 +462,7 @@ require special allocation on platforms that have a non-executable
heap. Memory management for closures is handled by a pair of
functions:
-@findex ffi_closure_alloca
+@findex ffi_closure_alloc
@defun void *ffi_closure_alloc (size_t @var{size}, void **@var{code})
Allocate a chunk of memory holding @var{size} bytes. This returns a
pointer to the writable address, and sets *@var{code} to the
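[Editor's note: for reference, a condensed sketch of the closure pair named above (ffi_closure_alloc / ffi_prep_closure_loc), mirroring the manual's closure example; it is not part of this change.]

    #include <ffi.h>
    #include <stdio.h>

    /* Handler invoked through the generated code pointer. */
    static void puts_binding(ffi_cif *cif, void *ret, void **args,
                             void *user_data)
    {
        *(ffi_arg *)ret = puts(*(char **)args[0]);
    }

    int main(void)
    {
        ffi_cif cif;
        ffi_type *argtypes[1] = { &ffi_type_pointer };
        void *code;
        ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
        int (*fn)(char *);

        if (closure != NULL
            && ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
                            &ffi_type_sint, argtypes) == FFI_OK
            && ffi_prep_closure_loc(closure, &cif, puts_binding,
                                    NULL, code) == FFI_OK) {
            fn = (int (*)(char *))code;  /* call through the executable address */
            fn("Hello from a libffi closure");
        }
        ffi_closure_free(closure);
        return 0;
    }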
@@ -570,9 +594,7 @@ support for these.
@itemize @bullet
@item
-There is no support for calling varargs functions. This may work on
-some platforms, depending on how the ABI is defined, but it is not
-reliable.
+Variadic closures.
@item
There is no support for bit fields in structures.
@@ -589,6 +611,8 @@ The ``raw'' API is undocumented.
@c anything else?
@end itemize
+Note that variadic support is very new and tested on a relatively
+small number of platforms.
@node Index
@unnumbered Index
diff --git a/Modules/_ctypes/libffi/doc/stamp-vti b/Modules/_ctypes/libffi/doc/stamp-vti
index 81d0b79..54255ba 100644
--- a/Modules/_ctypes/libffi/doc/stamp-vti
+++ b/Modules/_ctypes/libffi/doc/stamp-vti
@@ -1,4 +1,4 @@
-@set UPDATED 14 February 2008
-@set UPDATED-MONTH February 2008
-@set EDITION 3.0.8
-@set VERSION 3.0.8
+@set UPDATED 16 March 2013
+@set UPDATED-MONTH March 2013
+@set EDITION 3.0.13
+@set VERSION 3.0.13
diff --git a/Modules/_ctypes/libffi/doc/version.texi b/Modules/_ctypes/libffi/doc/version.texi
index 81d0b79..54255ba 100644
--- a/Modules/_ctypes/libffi/doc/version.texi
+++ b/Modules/_ctypes/libffi/doc/version.texi
@@ -1,4 +1,4 @@
-@set UPDATED 14 February 2008
-@set UPDATED-MONTH February 2008
-@set EDITION 3.0.8
-@set VERSION 3.0.8
+@set UPDATED 16 March 2013
+@set UPDATED-MONTH March 2013
+@set EDITION 3.0.13
+@set VERSION 3.0.13
diff --git a/Modules/_ctypes/libffi/fficonfig.h.in b/Modules/_ctypes/libffi/fficonfig.h.in
index e03bbf9..c77585d 100644
--- a/Modules/_ctypes/libffi/fficonfig.h.in
+++ b/Modules/_ctypes/libffi/fficonfig.h.in
@@ -17,6 +17,12 @@
/* Define this if you want extra debugging. */
#undef FFI_DEBUG
+/* Cannot use PROT_EXEC on this target, so, we revert to alternative means */
+#undef FFI_EXEC_TRAMPOLINE_TABLE
+
+/* Define this if you want to enable pax emulated trampolines */
+#undef FFI_MMAP_EXEC_EMUTRAMP_PAX
+
/* Cannot use malloc on this target, so, we revert to alternative means */
#undef FFI_MMAP_EXEC_WRIT
@@ -33,6 +39,9 @@
*/
#undef HAVE_ALLOCA_H
+/* Define if your assembler supports .ascii. */
+#undef HAVE_AS_ASCII_PSEUDO_OP
+
/* Define if your assembler supports .cfi_* directives. */
#undef HAVE_AS_CFI_PSEUDO_OP
@@ -43,6 +52,12 @@
*/
#undef HAVE_AS_SPARC_UA_PCREL
+/* Define if your assembler supports .string. */
+#undef HAVE_AS_STRING_PSEUDO_OP
+
+/* Define if your assembler supports unwind section type. */
+#undef HAVE_AS_X86_64_UNWIND_SECTION_TYPE
+
/* Define if your assembler supports PC relative relocs. */
#undef HAVE_AS_X86_PCREL
@@ -148,6 +163,9 @@
/* Define to 1 if you have the ANSI C header files. */
#undef STDC_HEADERS
+/* Define if symbols are underscored. */
+#undef SYMBOL_UNDERSCORE
+
/* Define this if you are using Purify and want to suppress spurious messages.
*/
#undef USING_PURIFY
@@ -167,6 +185,9 @@
# endif
#endif
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+#undef size_t
+
#ifdef HAVE_HIDDEN_VISIBILITY_ATTRIBUTE
#ifdef LIBFFI_ASM
diff --git a/Modules/_ctypes/libffi/fficonfig.py.in b/Modules/_ctypes/libffi/fficonfig.py.in
index 27c971f..c910814 100644
--- a/Modules/_ctypes/libffi/fficonfig.py.in
+++ b/Modules/_ctypes/libffi/fficonfig.py.in
@@ -1,7 +1,6 @@
ffi_sources = """
src/prep_cif.c
src/closures.c
-src/dlmalloc.c
""".split()
ffi_platforms = {
diff --git a/Modules/_ctypes/libffi/generate-ios-source-and-headers.py b/Modules/_ctypes/libffi/generate-ios-source-and-headers.py
new file mode 100755
index 0000000..c2bca73
--- /dev/null
+++ b/Modules/_ctypes/libffi/generate-ios-source-and-headers.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+
+import subprocess
+import re
+import os
+import errno
+import collections
+import sys
+
+class Platform(object):
+ pass
+
+sdk_re = re.compile(r'.*-sdk ([a-zA-Z0-9.]*)')
+
+def sdkinfo(sdkname):
+ ret = {}
+ for line in subprocess.Popen(['xcodebuild', '-sdk', sdkname, '-version'], stdout=subprocess.PIPE).stdout:
+ kv = line.strip().split(': ', 1)
+ if len(kv) == 2:
+ k,v = kv
+ ret[k] = v
+ return ret
+
+sim_sdk_info = sdkinfo('iphonesimulator')
+device_sdk_info = sdkinfo('iphoneos')
+
+def latest_sdks():
+ latest_sim = None
+ latest_device = None
+ for line in subprocess.Popen(['xcodebuild', '-showsdks'], stdout=subprocess.PIPE).stdout:
+ match = sdk_re.match(line)
+ if match:
+ if 'Simulator' in line:
+ latest_sim = match.group(1)
+ elif 'iOS' in line:
+ latest_device = match.group(1)
+
+ return latest_sim, latest_device
+
+sim_sdk, device_sdk = latest_sdks()
+
+class simulator_platform(Platform):
+ sdk='iphonesimulator'
+ arch = 'i386'
+ name = 'simulator'
+ triple = 'i386-apple-darwin10'
+ sdkroot = sim_sdk_info['Path']
+
+ prefix = "#if !defined(__arm__) && defined(__i386__)\n\n"
+ suffix = "\n\n#endif"
+
+class device_platform(Platform):
+ sdk='iphoneos'
+ name = 'ios'
+ arch = 'armv7'
+ triple = 'arm-apple-darwin10'
+ sdkroot = device_sdk_info['Path']
+
+ prefix = "#ifdef __arm__\n\n"
+ suffix = "\n\n#endif"
+
+
+def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''):
+ if not os.path.exists(dst_dir):
+ os.makedirs(dst_dir)
+
+ out_filename = filename
+
+ if file_suffix:
+ split_name = os.path.splitext(filename)
+ out_filename = "%s_%s%s" % (split_name[0], file_suffix, split_name[1])
+
+ with open(os.path.join(src_dir, filename)) as in_file:
+ with open(os.path.join(dst_dir, out_filename), 'w') as out_file:
+ if prefix:
+ out_file.write(prefix)
+
+ out_file.write(in_file.read())
+
+ if suffix:
+ out_file.write(suffix)
+
+headers_seen = collections.defaultdict(set)
+
+def move_source_tree(src_dir, dest_dir, dest_include_dir, arch=None, prefix=None, suffix=None):
+ for root, dirs, files in os.walk(src_dir, followlinks=True):
+ relroot = os.path.relpath(root,src_dir)
+
+ def move_dir(arch, prefix='', suffix='', files=[]):
+ for file in files:
+ file_suffix = None
+ if file.endswith('.h'):
+ if dest_include_dir:
+ file_suffix = arch
+ if arch:
+ headers_seen[file].add(arch)
+ move_file(root, dest_include_dir, file, arch, prefix=prefix, suffix=suffix)
+
+ elif dest_dir:
+ outroot = os.path.join(dest_dir, relroot)
+ move_file(root, outroot, file, prefix=prefix, suffix=suffix)
+
+ if relroot == '.':
+ move_dir(arch=arch,
+ files=files,
+ prefix=prefix,
+ suffix=suffix)
+ elif relroot == 'arm':
+ move_dir(arch='arm',
+ prefix="#ifdef __arm__\n\n",
+ suffix="\n\n#endif",
+ files=files)
+ elif relroot == 'x86':
+ move_dir(arch='i386',
+ prefix="#if !defined(__arm__) && defined(__i386__)\n\n",
+ suffix="\n\n#endif",
+ files=files)
+
+def build_target(platform):
+ def xcrun_cmd(cmd):
+ return subprocess.check_output(['xcrun', '-sdk', platform.sdkroot, '-find', cmd]).strip()
+
+ build_dir = 'build_' + platform.name
+ if not os.path.exists(build_dir):
+ os.makedirs(build_dir)
+ env = dict(CC=xcrun_cmd('clang'),
+ LD=xcrun_cmd('ld'),
+ CFLAGS='-arch %s -isysroot %s -miphoneos-version-min=4.0' % (platform.arch, platform.sdkroot))
+ working_dir=os.getcwd()
+ try:
+ os.chdir(build_dir)
+ subprocess.check_call(['../configure', '-host', platform.triple], env=env)
+ move_source_tree('.', None, '../ios/include',
+ arch=platform.arch,
+ prefix=platform.prefix,
+ suffix=platform.suffix)
+ move_source_tree('./include', None, '../ios/include',
+ arch=platform.arch,
+ prefix=platform.prefix,
+ suffix=platform.suffix)
+ finally:
+ os.chdir(working_dir)
+
+ for header_name, archs in headers_seen.iteritems():
+ basename, suffix = os.path.splitext(header_name)
+
+def main():
+ move_source_tree('src', 'ios/src', 'ios/include')
+ move_source_tree('include', None, 'ios/include')
+ build_target(simulator_platform)
+ build_target(device_platform)
+
+ for header_name, archs in headers_seen.iteritems():
+ basename, suffix = os.path.splitext(header_name)
+ with open(os.path.join('ios/include', header_name), 'w') as header:
+ for arch in archs:
+ header.write('#include <%s_%s%s>\n' % (basename, arch, suffix))
+
+if __name__ == '__main__':
+ main()
diff --git a/Modules/_ctypes/libffi/generate-osx-source-and-headers.py b/Modules/_ctypes/libffi/generate-osx-source-and-headers.py
new file mode 100755
index 0000000..64313c1
--- /dev/null
+++ b/Modules/_ctypes/libffi/generate-osx-source-and-headers.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python
+import subprocess
+import re
+import os
+import errno
+import collections
+import sys
+
+class Platform(object):
+ pass
+
+sdk_re = re.compile(r'.*-sdk ([a-zA-Z0-9.]*)')
+
+def sdkinfo(sdkname):
+ ret = {}
+ for line in subprocess.Popen(['xcodebuild', '-sdk', sdkname, '-version'], stdout=subprocess.PIPE).stdout:
+ kv = line.strip().split(': ', 1)
+ if len(kv) == 2:
+ k,v = kv
+ ret[k] = v
+ return ret
+
+desktop_sdk_info = sdkinfo('macosx')
+
+def latest_sdks():
+ latest_desktop = None
+ for line in subprocess.Popen(['xcodebuild', '-showsdks'], stdout=subprocess.PIPE).stdout:
+ match = sdk_re.match(line)
+ if match:
+ if 'OS X' in line:
+ latest_desktop = match.group(1)
+
+ return latest_desktop
+
+desktop_sdk = latest_sdks()
+
+class desktop_platform_32(Platform):
+ sdk='macosx'
+ arch = 'i386'
+ name = 'mac32'
+ triple = 'i386-apple-darwin10'
+ sdkroot = desktop_sdk_info['Path']
+
+ prefix = "#if defined(__i386__) && !defined(__x86_64__)\n\n"
+ suffix = "\n\n#endif"
+
+class desktop_platform_64(Platform):
+ sdk='macosx'
+ arch = 'x86_64'
+ name = 'mac'
+ triple = 'x86_64-apple-darwin10'
+ sdkroot = desktop_sdk_info['Path']
+
+ prefix = "#if !defined(__i386__) && defined(__x86_64__)\n\n"
+ suffix = "\n\n#endif"
+
+def move_file(src_dir, dst_dir, filename, file_suffix=None, prefix='', suffix=''):
+ if not os.path.exists(dst_dir):
+ os.makedirs(dst_dir)
+
+ out_filename = filename
+
+ if file_suffix:
+ split_name = os.path.splitext(filename)
+ out_filename = "%s_%s%s" % (split_name[0], file_suffix, split_name[1])
+
+ with open(os.path.join(src_dir, filename)) as in_file:
+ with open(os.path.join(dst_dir, out_filename), 'w') as out_file:
+ if prefix:
+ out_file.write(prefix)
+
+ out_file.write(in_file.read())
+
+ if suffix:
+ out_file.write(suffix)
+
+headers_seen = collections.defaultdict(set)
+
+def move_source_tree(src_dir, dest_dir, dest_include_dir, arch=None, prefix=None, suffix=None):
+ for root, dirs, files in os.walk(src_dir, followlinks=True):
+ relroot = os.path.relpath(root,src_dir)
+
+ def move_dir(arch, prefix='', suffix='', files=[]):
+ for file in files:
+ file_suffix = None
+ if file.endswith('.h'):
+ if dest_include_dir:
+ file_suffix = arch
+ if arch:
+ headers_seen[file].add(arch)
+ move_file(root, dest_include_dir, file, arch, prefix=prefix, suffix=suffix)
+
+ elif dest_dir:
+ outroot = os.path.join(dest_dir, relroot)
+ move_file(root, outroot, file, prefix=prefix, suffix=suffix)
+
+ if relroot == '.':
+ move_dir(arch=arch,
+ files=files,
+ prefix=prefix,
+ suffix=suffix)
+ elif relroot == 'x86':
+ move_dir(arch='i386',
+ prefix="#if defined(__i386__) && !defined(__x86_64__)\n\n",
+ suffix="\n\n#endif",
+ files=files)
+ move_dir(arch='x86_64',
+ prefix="#if !defined(__i386__) && defined(__x86_64__)\n\n",
+ suffix="\n\n#endif",
+ files=files)
+
+def build_target(platform):
+ def xcrun_cmd(cmd):
+ return subprocess.check_output(['xcrun', '-sdk', platform.sdkroot, '-find', cmd]).strip()
+
+ build_dir = 'build_' + platform.name
+ if not os.path.exists(build_dir):
+ os.makedirs(build_dir)
+ env = dict(CC=xcrun_cmd('clang'),
+ LD=xcrun_cmd('ld'),
+ CFLAGS='-arch %s -isysroot %s -mmacosx-version-min=10.6' % (platform.arch, platform.sdkroot))
+ working_dir=os.getcwd()
+ try:
+ os.chdir(build_dir)
+ subprocess.check_call(['../configure', '-host', platform.triple], env=env)
+ move_source_tree('.', None, '../osx/include',
+ arch=platform.arch,
+ prefix=platform.prefix,
+ suffix=platform.suffix)
+ move_source_tree('./include', None, '../osx/include',
+ arch=platform.arch,
+ prefix=platform.prefix,
+ suffix=platform.suffix)
+ finally:
+ os.chdir(working_dir)
+
+ for header_name, archs in headers_seen.iteritems():
+ basename, suffix = os.path.splitext(header_name)
+
+def main():
+ move_source_tree('src', 'osx/src', 'osx/include')
+ move_source_tree('include', None, 'osx/include')
+ build_target(desktop_platform_32)
+ build_target(desktop_platform_64)
+
+ for header_name, archs in headers_seen.iteritems():
+ basename, suffix = os.path.splitext(header_name)
+ with open(os.path.join('osx/include', header_name), 'w') as header:
+ for arch in archs:
+ header.write('#include <%s_%s%s>\n' % (basename, arch, suffix))
+
+if __name__ == '__main__':
+ main()
diff --git a/Modules/_ctypes/libffi/include/Makefile.in b/Modules/_ctypes/libffi/include/Makefile.in
index 136f36c..2c36e36 100644
--- a/Modules/_ctypes/libffi/include/Makefile.in
+++ b/Modules/_ctypes/libffi/include/Makefile.in
@@ -1,9 +1,8 @@
-# Makefile.in generated by automake 1.11 from Makefile.am.
+# Makefile.in generated by automake 1.12.2 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
-# Inc.
+# Copyright (C) 1994-2012 Free Software Foundation, Inc.
+
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -16,6 +15,23 @@
@SET_MAKE@
VPATH = @srcdir@
+am__make_dryrun = \
+ { \
+ am__dry=no; \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \
+ | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
+ *) \
+ for am__flg in $$MAKEFLAGS; do \
+ case $$am__flg in \
+ *=*|--*) ;; \
+ *n*) am__dry=yes; break;; \
+ esac; \
+ done;; \
+ esac; \
+ test $$am__dry = yes; \
+ }
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -39,7 +55,19 @@ subdir = include
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
$(srcdir)/ffi.h.in
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
+am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \
+ $(top_srcdir)/m4/ax_append_flag.m4 \
+ $(top_srcdir)/m4/ax_cc_maxopt.m4 \
+ $(top_srcdir)/m4/ax_cflags_warn_all.m4 \
+ $(top_srcdir)/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/m4/ax_compiler_vendor.m4 \
+ $(top_srcdir)/m4/ax_configure_args.m4 \
+ $(top_srcdir)/m4/ax_enable_builddir.m4 \
+ $(top_srcdir)/m4/ax_gcc_archflag.m4 \
+ $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
@@ -49,6 +77,11 @@ CONFIG_CLEAN_FILES = ffi.h ffitarget.h
CONFIG_CLEAN_VPATH_FILES =
SOURCES =
DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
@@ -70,6 +103,12 @@ am__nobase_list = $(am__nobase_strip_setup); \
am__base_list = \
sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
am__installdirs = "$(DESTDIR)$(includesdir)"
HEADERS = $(nodist_includes_HEADERS)
ETAGS = etags
@@ -78,6 +117,7 @@ DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
+AM_LTLDFLAGS = @AM_LTLDFLAGS@
AM_RUNTESTFLAGS = @AM_RUNTESTFLAGS@
AR = @AR@
AUTOCONF = @AUTOCONF@
@@ -95,6 +135,7 @@ CPPFLAGS = @CPPFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
@@ -102,6 +143,7 @@ ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
+FFI_EXEC_TRAMPOLINE_TABLE = @FFI_EXEC_TRAMPOLINE_TABLE@
FGREP = @FGREP@
GREP = @GREP@
HAVE_LONG_DOUBLE = @HAVE_LONG_DOUBLE@
@@ -120,6 +162,7 @@ LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
MAINT = @MAINT@
MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
MKDIR_P = @MKDIR_P@
NM = @NM@
NMEDIT = @NMEDIT@
@@ -132,8 +175,10 @@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
+PRTDIAG = @PRTDIAG@
RANLIB = @RANLIB@
SED = @SED@
SET_MAKE = @SET_MAKE@
@@ -146,6 +191,7 @@ abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
@@ -153,6 +199,7 @@ am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
+ax_enable_builddir_sed = @ax_enable_builddir_sed@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
@@ -178,7 +225,6 @@ libdir = @libdir@
libexecdir = @libexecdir@
localedir = @localedir@
localstatedir = @localstatedir@
-lt_ECHO = @lt_ECHO@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
@@ -189,6 +235,7 @@ psdir = @psdir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
+sys_symbol_underscore = @sys_symbol_underscore@
sysconfdir = @sysconfdir@
target = @target@
target_alias = @target_alias@
@@ -248,8 +295,11 @@ clean-libtool:
-rm -rf .libs _libs
install-nodist_includesHEADERS: $(nodist_includes_HEADERS)
@$(NORMAL_INSTALL)
- test -z "$(includesdir)" || $(MKDIR_P) "$(DESTDIR)$(includesdir)"
@list='$(nodist_includes_HEADERS)'; test -n "$(includesdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(includesdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(includesdir)" || exit 1; \
+ fi; \
for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; \
@@ -263,9 +313,7 @@ uninstall-nodist_includesHEADERS:
@$(NORMAL_UNINSTALL)
@list='$(nodist_includes_HEADERS)'; test -n "$(includesdir)" || list=; \
files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
- test -n "$$files" || exit 0; \
- echo " ( cd '$(DESTDIR)$(includesdir)' && rm -f" $$files ")"; \
- cd "$(DESTDIR)$(includesdir)" && rm -f $$files
+ dir='$(DESTDIR)$(includesdir)'; $(am__uninstall_files_from_dir)
ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
@@ -316,6 +364,20 @@ GTAGS:
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: $(HEADERS) $(SOURCES) $(LISP)
+ list='$(SOURCES) $(HEADERS) $(LISP)'; \
+ case "$(srcdir)" in \
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+ *) sdir=$(subdir)/$(srcdir) ;; \
+ esac; \
+ for i in $$list; do \
+ if test -f "$$i"; then \
+ echo "$(subdir)/$$i"; \
+ else \
+ echo "$$sdir/$$i"; \
+ fi; \
+ done >> $(top_builddir)/cscope.files
+
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
@@ -366,10 +428,15 @@ install-am: all-am
installcheck: installcheck-am
install-strip:
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- `test -z '$(STRIP)' || \
- echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
mostlyclean-generic:
clean-generic:
@@ -451,7 +518,7 @@ uninstall-am: uninstall-nodist_includesHEADERS
.MAKE: install-am install-strip
.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
- clean-libtool ctags distclean distclean-generic \
+ clean-libtool cscopelist ctags distclean distclean-generic \
distclean-libtool distclean-tags distdir dvi dvi-am html \
html-am info info-am install install-am install-data \
install-data-am install-dvi install-dvi-am install-exec \
diff --git a/Modules/_ctypes/libffi/include/ffi.h.in b/Modules/_ctypes/libffi/include/ffi.h.in
index df7d2cd..a51583b 100644
--- a/Modules/_ctypes/libffi/include/ffi.h.in
+++ b/Modules/_ctypes/libffi/include/ffi.h.in
@@ -1,16 +1,17 @@
/* -----------------------------------------------------------------*-C-*-
- libffi @VERSION@ - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc.
+ libffi @VERSION@ - Copyright (c) 2011 Anthony Green
+ - Copyright (c) 1996-2003, 2007, 2008 Red Hat, Inc.
- Permission is hereby granted, free of charge, to any person obtaining
- a copy of this software and associated documentation files (the
- ``Software''), to deal in the Software without restriction, including
- without limitation the rights to use, copy, modify, merge, publish,
- distribute, sublicense, and/or sell copies of the Software, and to
- permit persons to whom the Software is furnished to do so, subject to
- the following conditions:
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the ``Software''), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
- The above copyright notice and this permission notice shall be included
- in all copies or substantial portions of the Software.
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
@@ -57,7 +58,9 @@ extern "C" {
#endif
/* Specify which architecture libffi is configured for. */
+#ifndef @TARGET@
#define @TARGET@
+#endif
/* ---- System configuration information --------------------------------- */
@@ -75,15 +78,31 @@ extern "C" {
/* LONG_LONG_MAX is not always defined (not if STRICT_ANSI, for example).
But we can find it either under the correct ANSI name, or under GNU
C's internal name. */
+
+#define FFI_64_BIT_MAX 9223372036854775807
+
#ifdef LONG_LONG_MAX
# define FFI_LONG_LONG_MAX LONG_LONG_MAX
#else
# ifdef LLONG_MAX
# define FFI_LONG_LONG_MAX LLONG_MAX
+# ifdef _AIX52 /* or newer has C99 LLONG_MAX */
+# undef FFI_64_BIT_MAX
+# define FFI_64_BIT_MAX 9223372036854775807LL
+# endif /* _AIX52 or newer */
# else
# ifdef __GNUC__
# define FFI_LONG_LONG_MAX __LONG_LONG_MAX__
# endif
+# ifdef _AIX /* AIX 5.1 and earlier have LONGLONG_MAX */
+# ifndef __PPC64__
+# if defined (__IBMC__) || defined (__IBMCPP__)
+# define FFI_LONG_LONG_MAX LONGLONG_MAX
+# endif
+# endif /* __PPC64__ */
+# undef FFI_64_BIT_MAX
+# define FFI_64_BIT_MAX 9223372036854775807LL
+# endif
# endif
#endif
@@ -130,39 +149,53 @@ typedef struct _ffi_type
#endif
#if LONG_MAX == 2147483647
-# if FFI_LONG_LONG_MAX != 9223372036854775807
+# if FFI_LONG_LONG_MAX != FFI_64_BIT_MAX
#error "no 64-bit data type supported"
# endif
-#elif LONG_MAX != 9223372036854775807
+#elif LONG_MAX != FFI_64_BIT_MAX
#error "long size not supported"
#endif
#if LONG_MAX == 2147483647
# define ffi_type_ulong ffi_type_uint32
# define ffi_type_slong ffi_type_sint32
-#elif LONG_MAX == 9223372036854775807
+#elif LONG_MAX == FFI_64_BIT_MAX
# define ffi_type_ulong ffi_type_uint64
# define ffi_type_slong ffi_type_sint64
#else
#error "long size not supported"
#endif
+/* Need minimal decorations for DLLs to works on Windows. */
+/* GCC has autoimport and autoexport. Rely on Libtool to */
+/* help MSVC export from a DLL, but always declare data */
+/* to be imported for MSVC clients. This costs an extra */
+/* indirection for MSVC clients using the static version */
+/* of the library, but don't worry about that. Besides, */
+/* as a workaround, they can define FFI_BUILDING if they */
+/* *know* they are going to link with the static library. */
+#if defined _MSC_VER && !defined FFI_BUILDING
+#define FFI_EXTERN extern __declspec(dllimport)
+#else
+#define FFI_EXTERN extern
+#endif
+
/* These are defined in types.c */
-extern ffi_type ffi_type_void;
-extern ffi_type ffi_type_uint8;
-extern ffi_type ffi_type_sint8;
-extern ffi_type ffi_type_uint16;
-extern ffi_type ffi_type_sint16;
-extern ffi_type ffi_type_uint32;
-extern ffi_type ffi_type_sint32;
-extern ffi_type ffi_type_uint64;
-extern ffi_type ffi_type_sint64;
-extern ffi_type ffi_type_float;
-extern ffi_type ffi_type_double;
-extern ffi_type ffi_type_pointer;
+FFI_EXTERN ffi_type ffi_type_void;
+FFI_EXTERN ffi_type ffi_type_uint8;
+FFI_EXTERN ffi_type ffi_type_sint8;
+FFI_EXTERN ffi_type ffi_type_uint16;
+FFI_EXTERN ffi_type ffi_type_sint16;
+FFI_EXTERN ffi_type ffi_type_uint32;
+FFI_EXTERN ffi_type ffi_type_sint32;
+FFI_EXTERN ffi_type ffi_type_uint64;
+FFI_EXTERN ffi_type ffi_type_sint64;
+FFI_EXTERN ffi_type ffi_type_float;
+FFI_EXTERN ffi_type ffi_type_double;
+FFI_EXTERN ffi_type ffi_type_pointer;
#if @HAVE_LONG_DOUBLE@
-extern ffi_type ffi_type_longdouble;
+FFI_EXTERN ffi_type ffi_type_longdouble;
#else
#define ffi_type_longdouble ffi_type_double
#endif
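[Editor's note: the FFI_EXTERN comment above implies the following usage for MSVC clients that link the static library; a sketch assuming nothing beyond what the comment states.]

    /* Client translation unit linking the static libffi on Windows:
       defining FFI_BUILDING before the include suppresses the
       __declspec(dllimport) decoration on the ffi_type_* objects. */
    #define FFI_BUILDING
    #include <ffi.h>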
@@ -188,12 +221,21 @@ typedef struct {
#endif
} ffi_cif;
+/* Used internally, but overridden by some architectures */
+ffi_status ffi_prep_cif_core(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int isvariadic,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs,
+ ffi_type *rtype,
+ ffi_type **atypes);
+
/* ---- Definitions for the raw API -------------------------------------- */
#ifndef FFI_SIZEOF_ARG
# if LONG_MAX == 2147483647
# define FFI_SIZEOF_ARG 4
-# elif LONG_MAX == 9223372036854775807
+# elif LONG_MAX == FFI_64_BIT_MAX
# define FFI_SIZEOF_ARG 8
# endif
#endif
@@ -255,7 +297,12 @@ size_t ffi_java_raw_size (ffi_cif *cif);
__declspec(align(8))
#endif
typedef struct {
+#if @FFI_EXEC_TRAMPOLINE_TABLE@
+ void *trampoline_table;
+ void *trampoline_table_entry;
+#else
char tramp[FFI_TRAMPOLINE_SIZE];
+#endif
ffi_cif *cif;
void (*fun)(ffi_cif*,void*,void**,void*);
void *user_data;
@@ -263,6 +310,9 @@ typedef struct {
} ffi_closure __attribute__((aligned (8)));
#else
} ffi_closure;
+# ifdef __sgi
+# pragma pack 0
+# endif
#endif
void *ffi_closure_alloc (size_t size, void **code);
@@ -281,9 +331,16 @@ ffi_prep_closure_loc (ffi_closure*,
void *user_data,
void*codeloc);
+#ifdef __sgi
+# pragma pack 8
+#endif
typedef struct {
+#if @FFI_EXEC_TRAMPOLINE_TABLE@
+ void *trampoline_table;
+ void *trampoline_table_entry;
+#else
char tramp[FFI_TRAMPOLINE_SIZE];
-
+#endif
ffi_cif *cif;
#if !FFI_NATIVE_RAW_API
@@ -303,7 +360,12 @@ typedef struct {
} ffi_raw_closure;
typedef struct {
+#if @FFI_EXEC_TRAMPOLINE_TABLE@
+ void *trampoline_table;
+ void *trampoline_table_entry;
+#else
char tramp[FFI_TRAMPOLINE_SIZE];
+#endif
ffi_cif *cif;
@@ -359,6 +421,13 @@ ffi_status ffi_prep_cif(ffi_cif *cif,
ffi_type *rtype,
ffi_type **atypes);
+ffi_status ffi_prep_cif_var(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs,
+ ffi_type *rtype,
+ ffi_type **atypes);
+
void ffi_call(ffi_cif *cif,
void (*fn)(void),
void *rvalue,
diff --git a/Modules/_ctypes/libffi/include/ffi_common.h b/Modules/_ctypes/libffi/include/ffi_common.h
index 42cace9..650ca69 100644
--- a/Modules/_ctypes/libffi/include/ffi_common.h
+++ b/Modules/_ctypes/libffi/include/ffi_common.h
@@ -1,7 +1,8 @@
/* -----------------------------------------------------------------------
- ffi_common.h - Copyright (c) 1996 Red Hat, Inc.
- Copyright (C) 2007 Free Software Foundation, Inc
-
+ ffi_common.h - Copyright (C) 2011, 2012 Anthony Green
+ Copyright (C) 2007 Free Software Foundation, Inc
+ Copyright (c) 1996 Red Hat, Inc.
+
Common internal definitions and macros. Only necessary for building
libffi.
----------------------------------------------------------------------- */
@@ -74,6 +75,8 @@ void ffi_type_test(ffi_type *a, char *file, int line);
/* Perform machine dependent cif processing */
ffi_status ffi_prep_cif_machdep(ffi_cif *cif);
+ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif,
+ unsigned int nfixedargs, unsigned int ntotalargs);
/* Extended cif, used in callback from assembly routine */
typedef struct
@@ -84,7 +87,7 @@ typedef struct
} extended_cif;
/* Terse sized type definitions. */
-#if defined(_MSC_VER) || defined(__sgi)
+#if defined(_MSC_VER) || defined(__sgi) || defined(__SUNPRO_C)
typedef unsigned char UINT8;
typedef signed char SINT8;
typedef unsigned short UINT16;
@@ -112,11 +115,14 @@ typedef signed int SINT64 __attribute__((__mode__(__DI__)));
typedef float FLOAT32;
+#ifndef __GNUC__
+#define __builtin_expect(x, expected_value) (x)
+#endif
+#define LIKELY(x) __builtin_expect(!!(x),1)
+#define UNLIKELY(x) __builtin_expect((x)!=0,0)
#ifdef __cplusplus
}
#endif
#endif
-
-
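[Editor's note: the LIKELY/UNLIKELY additions to ffi_common.h follow the usual __builtin_expect pattern; below is a self-contained sketch of the same idea, with an illustrative safe_div function that is not part of libffi.]

    /* Fallback mirrors the hunk above: on non-GCC compilers the
       branch hints compile away to the bare expression. */
    #ifndef __GNUC__
    #define __builtin_expect(x, expected_value) (x)
    #endif
    #define LIKELY(x)   __builtin_expect(!!(x), 1)
    #define UNLIKELY(x) __builtin_expect((x) != 0, 0)

    int safe_div(int a, int b)
    {
        if (UNLIKELY(b == 0))   /* error path hinted as cold */
            return 0;
        return a / b;
    }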
diff --git a/Modules/_ctypes/libffi/install-sh b/Modules/_ctypes/libffi/install-sh
index 6ebe46d..6781b98 100755
--- a/Modules/_ctypes/libffi/install-sh
+++ b/Modules/_ctypes/libffi/install-sh
@@ -1,7 +1,7 @@
#!/bin/sh
# install - install a program, script, or datafile
-scriptversion=2004-12-17.09
+scriptversion=2009-04-28.21; # UTC
# This originates from X11R5 (mit/util/scripts/install.sh), which was
# later released in X11R6 (xc/config/util/install.sh) with the
@@ -39,38 +39,68 @@ scriptversion=2004-12-17.09
# when there is no Makefile.
#
# This script is compatible with the BSD install script, but was written
-# from scratch. It can only install one file at a time, a restriction
-# shared with many OS's install programs.
+# from scratch.
+
+nl='
+'
+IFS=" "" $nl"
# set DOITPROG to echo to test this script
# Don't use :- since 4.3BSD and earlier shells don't like it.
-doit="${DOITPROG-}"
+doit=${DOITPROG-}
+if test -z "$doit"; then
+ doit_exec=exec
+else
+ doit_exec=$doit
+fi
+
+# Put in absolute file names if you don't have them in your path;
+# or use environment vars.
+
+chgrpprog=${CHGRPPROG-chgrp}
+chmodprog=${CHMODPROG-chmod}
+chownprog=${CHOWNPROG-chown}
+cmpprog=${CMPPROG-cmp}
+cpprog=${CPPROG-cp}
+mkdirprog=${MKDIRPROG-mkdir}
+mvprog=${MVPROG-mv}
+rmprog=${RMPROG-rm}
+stripprog=${STRIPPROG-strip}
+
+posix_glob='?'
+initialize_posix_glob='
+ test "$posix_glob" != "?" || {
+ if (set -f) 2>/dev/null; then
+ posix_glob=
+ else
+ posix_glob=:
+ fi
+ }
+'
-# put in absolute paths if you don't have them in your path; or use env. vars.
+posix_mkdir=
-mvprog="${MVPROG-mv}"
-cpprog="${CPPROG-cp}"
-chmodprog="${CHMODPROG-chmod}"
-chownprog="${CHOWNPROG-chown}"
-chgrpprog="${CHGRPPROG-chgrp}"
-stripprog="${STRIPPROG-strip}"
-rmprog="${RMPROG-rm}"
-mkdirprog="${MKDIRPROG-mkdir}"
+# Desired mode of installed file.
+mode=0755
-chmodcmd="$chmodprog 0755"
-chowncmd=
chgrpcmd=
-stripcmd=
+chmodcmd=$chmodprog
+chowncmd=
+mvcmd=$mvprog
rmcmd="$rmprog -f"
-mvcmd="$mvprog"
+stripcmd=
+
src=
dst=
dir_arg=
-dstarg=
+dst_arg=
+
+copy_on_change=false
no_target_directory=
-usage="Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE
+usage="\
+Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE
or: $0 [OPTION]... SRCFILES... DIRECTORY
or: $0 [OPTION]... -t DIRECTORY SRCFILES...
or: $0 [OPTION]... -d DIRECTORIES...
@@ -80,81 +110,86 @@ In the 2nd and 3rd, copy all SRCFILES to DIRECTORY.
In the 4th, create DIRECTORIES.
Options:
--c (ignored)
--d create directories instead of installing files.
--g GROUP $chgrpprog installed files to GROUP.
--m MODE $chmodprog installed files to MODE.
--o USER $chownprog installed files to USER.
--s $stripprog installed files.
--t DIRECTORY install into DIRECTORY.
--T report an error if DSTFILE is a directory.
---help display this help and exit.
---version display version info and exit.
+ --help display this help and exit.
+ --version display version info and exit.
+
+ -c (ignored)
+ -C install only if different (preserve the last data modification time)
+ -d create directories instead of installing files.
+ -g GROUP $chgrpprog installed files to GROUP.
+ -m MODE $chmodprog installed files to MODE.
+ -o USER $chownprog installed files to USER.
+ -s $stripprog installed files.
+ -t DIRECTORY install into DIRECTORY.
+ -T report an error if DSTFILE is a directory.
Environment variables override the default commands:
- CHGRPPROG CHMODPROG CHOWNPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG
+ CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG
+ RMPROG STRIPPROG
"
-while test -n "$1"; do
+while test $# -ne 0; do
case $1 in
- -c) shift
- continue;;
+ -c) ;;
- -d) dir_arg=true
- shift
- continue;;
+ -C) copy_on_change=true;;
+
+ -d) dir_arg=true;;
-g) chgrpcmd="$chgrpprog $2"
- shift
- shift
- continue;;
+ shift;;
- --help) echo "$usage"; exit 0;;
+ --help) echo "$usage"; exit $?;;
- -m) chmodcmd="$chmodprog $2"
- shift
- shift
- continue;;
+ -m) mode=$2
+ case $mode in
+ *' '* | *' '* | *'
+'* | *'*'* | *'?'* | *'['*)
+ echo "$0: invalid mode: $mode" >&2
+ exit 1;;
+ esac
+ shift;;
-o) chowncmd="$chownprog $2"
- shift
- shift
- continue;;
-
- -s) stripcmd=$stripprog
- shift
- continue;;
-
- -t) dstarg=$2
- shift
- shift
- continue;;
-
- -T) no_target_directory=true
- shift
- continue;;
-
- --version) echo "$0 $scriptversion"; exit 0;;
-
- *) # When -d is used, all remaining arguments are directories to create.
- # When -t is used, the destination is already specified.
- test -n "$dir_arg$dstarg" && break
- # Otherwise, the last argument is the destination. Remove it from $@.
- for arg
- do
- if test -n "$dstarg"; then
- # $@ is not empty: it contains at least $arg.
- set fnord "$@" "$dstarg"
- shift # fnord
- fi
- shift # arg
- dstarg=$arg
- done
+ shift;;
+
+ -s) stripcmd=$stripprog;;
+
+ -t) dst_arg=$2
+ shift;;
+
+ -T) no_target_directory=true;;
+
+ --version) echo "$0 $scriptversion"; exit $?;;
+
+ --) shift
break;;
+
+ -*) echo "$0: invalid option: $1" >&2
+ exit 1;;
+
+ *) break;;
esac
+ shift
done
-if test -z "$1"; then
+if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then
+ # When -d is used, all remaining arguments are directories to create.
+ # When -t is used, the destination is already specified.
+ # Otherwise, the last argument is the destination. Remove it from $@.
+ for arg
+ do
+ if test -n "$dst_arg"; then
+ # $@ is not empty: it contains at least $arg.
+ set fnord "$@" "$dst_arg"
+ shift # fnord
+ fi
+ shift # arg
+ dst_arg=$arg
+ done
+fi
+
+if test $# -eq 0; then
if test -z "$dir_arg"; then
echo "$0: no input file specified." >&2
exit 1
@@ -164,24 +199,47 @@ if test -z "$1"; then
exit 0
fi
+if test -z "$dir_arg"; then
+ trap '(exit $?); exit' 1 2 13 15
+
+ # Set umask so as not to create temps with too-generous modes.
+ # However, 'strip' requires both read and write access to temps.
+ case $mode in
+ # Optimize common cases.
+ *644) cp_umask=133;;
+ *755) cp_umask=22;;
+
+ *[0-7])
+ if test -z "$stripcmd"; then
+ u_plus_rw=
+ else
+ u_plus_rw='% 200'
+ fi
+ cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;;
+ *)
+ if test -z "$stripcmd"; then
+ u_plus_rw=
+ else
+ u_plus_rw=,u+rw
+ fi
+ cp_umask=$mode$u_plus_rw;;
+ esac
+fi
+
for src
do
# Protect names starting with `-'.
case $src in
- -*) src=./$src ;;
+ -*) src=./$src;;
esac
if test -n "$dir_arg"; then
dst=$src
- src=
-
- if test -d "$dst"; then
- mkdircmd=:
- chmodcmd=
- else
- mkdircmd=$mkdirprog
- fi
+ dstdir=$dst
+ test -d "$dstdir"
+ dstdir_status=$?
else
+
# Waiting for this to be detected by the "$cpprog $src $dsttmp" command
# might cause directories to be created, which would be especially bad
# if $src (and thus $dsttmp) contains '*'.
@@ -190,71 +248,199 @@ do
exit 1
fi
- if test -z "$dstarg"; then
+ if test -z "$dst_arg"; then
echo "$0: no destination specified." >&2
exit 1
fi
- dst=$dstarg
+ dst=$dst_arg
# Protect names starting with `-'.
case $dst in
- -*) dst=./$dst ;;
+ -*) dst=./$dst;;
esac
# If destination is a directory, append the input filename; won't work
# if double slashes aren't ignored.
if test -d "$dst"; then
if test -n "$no_target_directory"; then
- echo "$0: $dstarg: Is a directory" >&2
+ echo "$0: $dst_arg: Is a directory" >&2
exit 1
fi
- dst=$dst/`basename "$src"`
+ dstdir=$dst
+ dst=$dstdir/`basename "$src"`
+ dstdir_status=0
+ else
+ # Prefer dirname, but fall back on a substitute if dirname fails.
+ dstdir=`
+ (dirname "$dst") 2>/dev/null ||
+ expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$dst" : 'X\(//\)[^/]' \| \
+ X"$dst" : 'X\(//\)$' \| \
+ X"$dst" : 'X\(/\)' \| . 2>/dev/null ||
+ echo X"$dst" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'
+ `
+
+ test -d "$dstdir"
+ dstdir_status=$?
fi
fi
- # This sed command emulates the dirname command.
- dstdir=`echo "$dst" | sed -e 's,/*$,,;s,[^/]*$,,;s,/*$,,;s,^$,.,'`
+ obsolete_mkdir_used=false
+
+ if test $dstdir_status != 0; then
+ case $posix_mkdir in
+ '')
+ # Create intermediate dirs using mode 755 as modified by the umask.
+ # This is like FreeBSD 'install' as of 1997-10-28.
+ umask=`umask`
+ case $stripcmd.$umask in
+ # Optimize common cases.
+ *[2367][2367]) mkdir_umask=$umask;;
+ .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;;
+
+ *[0-7])
+ mkdir_umask=`expr $umask + 22 \
+ - $umask % 100 % 40 + $umask % 20 \
+ - $umask % 10 % 4 + $umask % 2
+ `;;
+ *) mkdir_umask=$umask,go-w;;
+ esac
+
+ # With -d, create the new directory with the user-specified mode.
+ # Otherwise, rely on $mkdir_umask.
+ if test -n "$dir_arg"; then
+ mkdir_mode=-m$mode
+ else
+ mkdir_mode=
+ fi
+
+ posix_mkdir=false
+ case $umask in
+ *[123567][0-7][0-7])
+ # POSIX mkdir -p sets u+wx bits regardless of umask, which
+ # is incompatible with FreeBSD 'install' when (umask & 300) != 0.
+ ;;
+ *)
+ tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$
+ trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0
+
+ if (umask $mkdir_umask &&
+ exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1
+ then
+ if test -z "$dir_arg" || {
+ # Check for POSIX incompatibilities with -m.
+ # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or
+ # other-writeable bit of parent directory when it shouldn't.
+ # FreeBSD 6.1 mkdir -m -p sets mode of existing directory.
+ ls_ld_tmpdir=`ls -ld "$tmpdir"`
+ case $ls_ld_tmpdir in
+ d????-?r-*) different_mode=700;;
+ d????-?--*) different_mode=755;;
+ *) false;;
+ esac &&
+ $mkdirprog -m$different_mode -p -- "$tmpdir" && {
+ ls_ld_tmpdir_1=`ls -ld "$tmpdir"`
+ test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1"
+ }
+ }
+ then posix_mkdir=:
+ fi
+ rmdir "$tmpdir/d" "$tmpdir"
+ else
+ # Remove any dirs left behind by ancient mkdir implementations.
+ rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null
+ fi
+ trap '' 0;;
+ esac;;
+ esac
- # Make sure that the destination directory exists.
+ if
+ $posix_mkdir && (
+ umask $mkdir_umask &&
+ $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir"
+ )
+ then :
+ else
- # Skip lots of stat calls in the usual case.
- if test ! -d "$dstdir"; then
- defaultIFS='
- '
- IFS="${IFS-$defaultIFS}"
+ # The umask is ridiculous, or mkdir does not conform to POSIX,
+ # or it failed possibly due to a race condition. Create the
+ # directory the slow way, step by step, checking for races as we go.
- oIFS=$IFS
- # Some sh's can't handle IFS=/ for some reason.
- IFS='%'
- set x `echo "$dstdir" | sed -e 's@/@%@g' -e 's@^%@/@'`
- shift
- IFS=$oIFS
+ case $dstdir in
+ /*) prefix='/';;
+ -*) prefix='./';;
+ *) prefix='';;
+ esac
- pathcomp=
+ eval "$initialize_posix_glob"
- while test $# -ne 0 ; do
- pathcomp=$pathcomp$1
+ oIFS=$IFS
+ IFS=/
+ $posix_glob set -f
+ set fnord $dstdir
shift
- if test ! -d "$pathcomp"; then
- $mkdirprog "$pathcomp"
- # mkdir can fail with a `File exist' error in case several
- # install-sh are creating the directory concurrently. This
- # is OK.
- test -d "$pathcomp" || exit
+ $posix_glob set +f
+ IFS=$oIFS
+
+ prefixes=
+
+ for d
+ do
+ test -z "$d" && continue
+
+ prefix=$prefix$d
+ if test -d "$prefix"; then
+ prefixes=
+ else
+ if $posix_mkdir; then
+ (umask=$mkdir_umask &&
+ $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break
+ # Don't fail if two instances are running concurrently.
+ test -d "$prefix" || exit 1
+ else
+ case $prefix in
+ *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;;
+ *) qprefix=$prefix;;
+ esac
+ prefixes="$prefixes '$qprefix'"
+ fi
+ fi
+ prefix=$prefix/
+ done
+
+ if test -n "$prefixes"; then
+ # Don't fail if two instances are running concurrently.
+ (umask $mkdir_umask &&
+ eval "\$doit_exec \$mkdirprog $prefixes") ||
+ test -d "$dstdir" || exit 1
+ obsolete_mkdir_used=true
fi
- pathcomp=$pathcomp/
- done
+ fi
fi
if test -n "$dir_arg"; then
- $doit $mkdircmd "$dst" \
- && { test -z "$chowncmd" || $doit $chowncmd "$dst"; } \
- && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } \
- && { test -z "$stripcmd" || $doit $stripcmd "$dst"; } \
- && { test -z "$chmodcmd" || $doit $chmodcmd "$dst"; }
-
+ { test -z "$chowncmd" || $doit $chowncmd "$dst"; } &&
+ { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } &&
+ { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false ||
+ test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1
else
- dstfile=`basename "$dst"`
# Make a couple of temp file names in the proper directory.
dsttmp=$dstdir/_inst.$$_
@@ -262,10 +448,9 @@ do
# Trap to clean up those temp files at exit.
trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0
- trap '(exit $?); exit' 1 2 13 15
# Copy the file name to the temp name.
- $doit $cpprog "$src" "$dsttmp" &&
+ (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") &&
# and set any options; do chmod last to preserve setuid bits.
#
@@ -273,51 +458,63 @@ do
# ignore errors from any of these, just make sure not to ignore
# errors from the above "$doit $cpprog $src $dsttmp" command.
#
- { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } \
- && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } \
- && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } \
- && { test -z "$chmodcmd" || $doit $chmodcmd "$dsttmp"; } &&
-
- # Now rename the file to the real destination.
- { $doit $mvcmd -f "$dsttmp" "$dstdir/$dstfile" 2>/dev/null \
- || {
- # The rename failed, perhaps because mv can't rename something else
- # to itself, or perhaps because mv is so ancient that it does not
- # support -f.
-
- # Now remove or move aside any old file at destination location.
- # We try this two ways since rm can't unlink itself on some
- # systems and the destination file might be busy for other
- # reasons. In this case, the final cleanup might fail but the new
- # file should still install successfully.
- {
- if test -f "$dstdir/$dstfile"; then
- $doit $rmcmd -f "$dstdir/$dstfile" 2>/dev/null \
- || $doit $mvcmd -f "$dstdir/$dstfile" "$rmtmp" 2>/dev/null \
- || {
- echo "$0: cannot unlink or rename $dstdir/$dstfile" >&2
- (exit 1); exit 1
- }
- else
- :
- fi
- } &&
-
- # Now rename the file to the real destination.
- $doit $mvcmd "$dsttmp" "$dstdir/$dstfile"
- }
- }
- fi || { (exit 1); exit 1; }
+ { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } &&
+ { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } &&
+ { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } &&
+ { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } &&
+
+ # If -C, don't bother to copy if it wouldn't change the file.
+ if $copy_on_change &&
+ old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` &&
+ new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` &&
+
+ eval "$initialize_posix_glob" &&
+ $posix_glob set -f &&
+ set X $old && old=:$2:$4:$5:$6 &&
+ set X $new && new=:$2:$4:$5:$6 &&
+ $posix_glob set +f &&
+
+ test "$old" = "$new" &&
+ $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1
+ then
+ rm -f "$dsttmp"
+ else
+ # Rename the file to the real destination.
+ $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null ||
+
+ # The rename failed, perhaps because mv can't rename something else
+ # to itself, or perhaps because mv is so ancient that it does not
+ # support -f.
+ {
+ # Now remove or move aside any old file at destination location.
+ # We try this two ways since rm can't unlink itself on some
+ # systems and the destination file might be busy for other
+ # reasons. In this case, the final cleanup might fail but the new
+ # file should still install successfully.
+ {
+ test ! -f "$dst" ||
+ $doit $rmcmd -f "$dst" 2>/dev/null ||
+ { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null &&
+ { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; }
+ } ||
+ { echo "$0: cannot unlink or rename $dst" >&2
+ (exit 1); exit 1
+ }
+ } &&
+
+ # Now rename the file to the real destination.
+ $doit $mvcmd "$dsttmp" "$dst"
+ }
+ fi || exit 1
+
+ trap '' 0
+ fi
done
-# The final little trick to "correctly" pass the exit status to the exit trap.
-{
- (exit 0); exit 0
-}
-
# Local variables:
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
-# time-stamp-end: "$"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
# End:
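
A note on the cp_umask hunk in the install script change above: it derives, from a numeric --mode, the umask used while copying into the temp file. A minimal sketch of that arithmetic, not part of the patch, assuming plain POSIX sh, a numeric $mode such as 755, and no strip command (so $u_plus_rw stays empty):

    mode=755
    u_plus_rw=
    # expr takes '(' and ')' as separate words; % binds tighter than -,
    # so this computes 777 - (755 % 1000) = 22.
    cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`
    echo "$cp_umask"   # 22: copying under "umask 22" keeps the temp file no
                       # more permissive than the final "chmod 755" allows.
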
diff --git a/Modules/_ctypes/libffi/libffi.xcodeproj/project.pbxproj b/Modules/_ctypes/libffi/libffi.xcodeproj/project.pbxproj
new file mode 100644
index 0000000..14c39a2
--- /dev/null
+++ b/Modules/_ctypes/libffi/libffi.xcodeproj/project.pbxproj
@@ -0,0 +1,579 @@
+// !$*UTF8*$!
+{
+ archiveVersion = 1;
+ classes = {
+ };
+ objectVersion = 46;
+ objects = {
+
+/* Begin PBXBuildFile section */
+ 6C43CBDC1534F76F00162364 /* ffi.c in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CBBD1534F76F00162364 /* ffi.c */; };
+ 6C43CBDD1534F76F00162364 /* sysv.S in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CBBF1534F76F00162364 /* sysv.S */; };
+ 6C43CBDE1534F76F00162364 /* trampoline.S in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CBC01534F76F00162364 /* trampoline.S */; };
+ 6C43CBE61534F76F00162364 /* darwin.S in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CBC91534F76F00162364 /* darwin.S */; };
+ 6C43CBE81534F76F00162364 /* ffi.c in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CBCB1534F76F00162364 /* ffi.c */; };
+ 6C43CC1F1534F77800162364 /* darwin.S in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CC051534F77800162364 /* darwin.S */; };
+ 6C43CC201534F77800162364 /* darwin64.S in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CC061534F77800162364 /* darwin64.S */; };
+ 6C43CC211534F77800162364 /* ffi.c in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CC071534F77800162364 /* ffi.c */; };
+ 6C43CC221534F77800162364 /* ffi64.c in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CC081534F77800162364 /* ffi64.c */; };
+ 6C43CC2F1534F7BE00162364 /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CC281534F7BE00162364 /* closures.c */; };
+ 6C43CC301534F7BE00162364 /* closures.c in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CC281534F7BE00162364 /* closures.c */; };
+ 6C43CC351534F7BE00162364 /* java_raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CC2B1534F7BE00162364 /* java_raw_api.c */; };
+ 6C43CC361534F7BE00162364 /* java_raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CC2B1534F7BE00162364 /* java_raw_api.c */; };
+ 6C43CC371534F7BE00162364 /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CC2C1534F7BE00162364 /* prep_cif.c */; };
+ 6C43CC381534F7BE00162364 /* prep_cif.c in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CC2C1534F7BE00162364 /* prep_cif.c */; };
+ 6C43CC391534F7BE00162364 /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CC2D1534F7BE00162364 /* raw_api.c */; };
+ 6C43CC3A1534F7BE00162364 /* raw_api.c in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CC2D1534F7BE00162364 /* raw_api.c */; };
+ 6C43CC3B1534F7BE00162364 /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CC2E1534F7BE00162364 /* types.c */; };
+ 6C43CC3C1534F7BE00162364 /* types.c in Sources */ = {isa = PBXBuildFile; fileRef = 6C43CC2E1534F7BE00162364 /* types.c */; };
+ 6C43CC971535032600162364 /* ffi.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CC8D1535032600162364 /* ffi.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 6C43CC981535032600162364 /* ffi_common.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CC8E1535032600162364 /* ffi_common.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 6C43CC991535032600162364 /* ffi_i386.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CC8F1535032600162364 /* ffi_i386.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 6C43CC9A1535032600162364 /* ffi_x86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CC901535032600162364 /* ffi_x86_64.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 6C43CC9B1535032600162364 /* fficonfig.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CC911535032600162364 /* fficonfig.h */; };
+ 6C43CC9C1535032600162364 /* fficonfig_i386.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CC921535032600162364 /* fficonfig_i386.h */; };
+ 6C43CC9D1535032600162364 /* fficonfig_x86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CC931535032600162364 /* fficonfig_x86_64.h */; };
+ 6C43CC9E1535032600162364 /* ffitarget.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CC941535032600162364 /* ffitarget.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 6C43CC9F1535032600162364 /* ffitarget_i386.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CC951535032600162364 /* ffitarget_i386.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 6C43CCA01535032600162364 /* ffitarget_x86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CC961535032600162364 /* ffitarget_x86_64.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 6C43CCAD1535039600162364 /* ffi.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CCA21535039600162364 /* ffi.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 6C43CCAE1535039600162364 /* ffi_armv7.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CCA31535039600162364 /* ffi_armv7.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 6C43CCAF1535039600162364 /* ffi_common.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CCA41535039600162364 /* ffi_common.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 6C43CCB01535039600162364 /* ffi_i386.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CCA51535039600162364 /* ffi_i386.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 6C43CCB11535039600162364 /* fficonfig.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CCA61535039600162364 /* fficonfig.h */; };
+ 6C43CCB21535039600162364 /* fficonfig_armv7.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CCA71535039600162364 /* fficonfig_armv7.h */; };
+ 6C43CCB31535039600162364 /* fficonfig_i386.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CCA81535039600162364 /* fficonfig_i386.h */; };
+ 6C43CCB41535039600162364 /* ffitarget.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CCA91535039600162364 /* ffitarget.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 6C43CCB51535039600162364 /* ffitarget_arm.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CCAA1535039600162364 /* ffitarget_arm.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 6C43CCB61535039600162364 /* ffitarget_armv7.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CCAB1535039600162364 /* ffitarget_armv7.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 6C43CCB71535039600162364 /* ffitarget_i386.h in Headers */ = {isa = PBXBuildFile; fileRef = 6C43CCAC1535039600162364 /* ffitarget_i386.h */; settings = {ATTRIBUTES = (Public, ); }; };
+/* End PBXBuildFile section */
+
+/* Begin PBXFileReference section */
+ 6C43CB3D1534E9D100162364 /* libffi.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libffi.a; sourceTree = BUILT_PRODUCTS_DIR; };
+ 6C43CBBD1534F76F00162364 /* ffi.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffi.c; sourceTree = "<group>"; };
+ 6C43CBBF1534F76F00162364 /* sysv.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = sysv.S; sourceTree = "<group>"; };
+ 6C43CBC01534F76F00162364 /* trampoline.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = trampoline.S; sourceTree = "<group>"; };
+ 6C43CBC91534F76F00162364 /* darwin.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = darwin.S; sourceTree = "<group>"; };
+ 6C43CBCB1534F76F00162364 /* ffi.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffi.c; sourceTree = "<group>"; };
+ 6C43CC051534F77800162364 /* darwin.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = darwin.S; sourceTree = "<group>"; };
+ 6C43CC061534F77800162364 /* darwin64.S */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = darwin64.S; sourceTree = "<group>"; };
+ 6C43CC071534F77800162364 /* ffi.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffi.c; sourceTree = "<group>"; };
+ 6C43CC081534F77800162364 /* ffi64.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = ffi64.c; sourceTree = "<group>"; };
+ 6C43CC281534F7BE00162364 /* closures.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = closures.c; path = src/closures.c; sourceTree = SOURCE_ROOT; };
+ 6C43CC2B1534F7BE00162364 /* java_raw_api.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = java_raw_api.c; path = src/java_raw_api.c; sourceTree = SOURCE_ROOT; };
+ 6C43CC2C1534F7BE00162364 /* prep_cif.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = prep_cif.c; path = src/prep_cif.c; sourceTree = SOURCE_ROOT; };
+ 6C43CC2D1534F7BE00162364 /* raw_api.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = raw_api.c; path = src/raw_api.c; sourceTree = SOURCE_ROOT; };
+ 6C43CC2E1534F7BE00162364 /* types.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = types.c; path = src/types.c; sourceTree = SOURCE_ROOT; };
+ 6C43CC8D1535032600162364 /* ffi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi.h; sourceTree = "<group>"; };
+ 6C43CC8E1535032600162364 /* ffi_common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_common.h; sourceTree = "<group>"; };
+ 6C43CC8F1535032600162364 /* ffi_i386.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_i386.h; sourceTree = "<group>"; };
+ 6C43CC901535032600162364 /* ffi_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_x86_64.h; sourceTree = "<group>"; };
+ 6C43CC911535032600162364 /* fficonfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig.h; sourceTree = "<group>"; };
+ 6C43CC921535032600162364 /* fficonfig_i386.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_i386.h; sourceTree = "<group>"; };
+ 6C43CC931535032600162364 /* fficonfig_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_x86_64.h; sourceTree = "<group>"; };
+ 6C43CC941535032600162364 /* ffitarget.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget.h; sourceTree = "<group>"; };
+ 6C43CC951535032600162364 /* ffitarget_i386.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_i386.h; sourceTree = "<group>"; };
+ 6C43CC961535032600162364 /* ffitarget_x86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_x86_64.h; sourceTree = "<group>"; };
+ 6C43CCA21535039600162364 /* ffi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi.h; sourceTree = "<group>"; };
+ 6C43CCA31535039600162364 /* ffi_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_armv7.h; sourceTree = "<group>"; };
+ 6C43CCA41535039600162364 /* ffi_common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_common.h; sourceTree = "<group>"; };
+ 6C43CCA51535039600162364 /* ffi_i386.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffi_i386.h; sourceTree = "<group>"; };
+ 6C43CCA61535039600162364 /* fficonfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig.h; sourceTree = "<group>"; };
+ 6C43CCA71535039600162364 /* fficonfig_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_armv7.h; sourceTree = "<group>"; };
+ 6C43CCA81535039600162364 /* fficonfig_i386.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fficonfig_i386.h; sourceTree = "<group>"; };
+ 6C43CCA91535039600162364 /* ffitarget.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget.h; sourceTree = "<group>"; };
+ 6C43CCAA1535039600162364 /* ffitarget_arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_arm.h; sourceTree = "<group>"; };
+ 6C43CCAB1535039600162364 /* ffitarget_armv7.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_armv7.h; sourceTree = "<group>"; };
+ 6C43CCAC1535039600162364 /* ffitarget_i386.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ffitarget_i386.h; sourceTree = "<group>"; };
+ F6F980BA147386130008F121 /* libffi.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libffi.a; sourceTree = BUILT_PRODUCTS_DIR; };
+/* End PBXFileReference section */
+
+/* Begin PBXFrameworksBuildPhase section */
+ 6C43CB3A1534E9D100162364 /* Frameworks */ = {
+ isa = PBXFrameworksBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+ F6F980B7147386130008F121 /* Frameworks */ = {
+ isa = PBXFrameworksBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+/* End PBXFrameworksBuildPhase section */
+
+/* Begin PBXGroup section */
+ 6C43CBAF1534F76F00162364 /* iOS */ = {
+ isa = PBXGroup;
+ children = (
+ 6C43CCA11535039600162364 /* include */,
+ 6C43CBBB1534F76F00162364 /* src */,
+ );
+ name = iOS;
+ path = ios;
+ sourceTree = "<group>";
+ };
+ 6C43CBBB1534F76F00162364 /* src */ = {
+ isa = PBXGroup;
+ children = (
+ 6C43CBC81534F76F00162364 /* x86 */,
+ 6C43CBBC1534F76F00162364 /* arm */,
+ );
+ path = src;
+ sourceTree = "<group>";
+ };
+ 6C43CBBC1534F76F00162364 /* arm */ = {
+ isa = PBXGroup;
+ children = (
+ 6C43CBBD1534F76F00162364 /* ffi.c */,
+ 6C43CBBF1534F76F00162364 /* sysv.S */,
+ 6C43CBC01534F76F00162364 /* trampoline.S */,
+ );
+ path = arm;
+ sourceTree = "<group>";
+ };
+ 6C43CBC81534F76F00162364 /* x86 */ = {
+ isa = PBXGroup;
+ children = (
+ 6C43CBC91534F76F00162364 /* darwin.S */,
+ 6C43CBCB1534F76F00162364 /* ffi.c */,
+ );
+ path = x86;
+ sourceTree = "<group>";
+ };
+ 6C43CBF01534F77800162364 /* OS X */ = {
+ isa = PBXGroup;
+ children = (
+ 6C43CC8C1535032600162364 /* include */,
+ 6C43CBFC1534F77800162364 /* src */,
+ );
+ name = "OS X";
+ path = osx;
+ sourceTree = "<group>";
+ };
+ 6C43CBFC1534F77800162364 /* src */ = {
+ isa = PBXGroup;
+ children = (
+ 6C43CC041534F77800162364 /* x86 */,
+ );
+ path = src;
+ sourceTree = "<group>";
+ };
+ 6C43CC041534F77800162364 /* x86 */ = {
+ isa = PBXGroup;
+ children = (
+ 6C43CC051534F77800162364 /* darwin.S */,
+ 6C43CC061534F77800162364 /* darwin64.S */,
+ 6C43CC071534F77800162364 /* ffi.c */,
+ 6C43CC081534F77800162364 /* ffi64.c */,
+ );
+ path = x86;
+ sourceTree = "<group>";
+ };
+ 6C43CC3D1534F7C400162364 /* src */ = {
+ isa = PBXGroup;
+ children = (
+ 6C43CC281534F7BE00162364 /* closures.c */,
+ 6C43CC2B1534F7BE00162364 /* java_raw_api.c */,
+ 6C43CC2C1534F7BE00162364 /* prep_cif.c */,
+ 6C43CC2D1534F7BE00162364 /* raw_api.c */,
+ 6C43CC2E1534F7BE00162364 /* types.c */,
+ );
+ name = src;
+ path = ios;
+ sourceTree = "<group>";
+ };
+ 6C43CC8C1535032600162364 /* include */ = {
+ isa = PBXGroup;
+ children = (
+ 6C43CC8D1535032600162364 /* ffi.h */,
+ 6C43CC8E1535032600162364 /* ffi_common.h */,
+ 6C43CC8F1535032600162364 /* ffi_i386.h */,
+ 6C43CC901535032600162364 /* ffi_x86_64.h */,
+ 6C43CC911535032600162364 /* fficonfig.h */,
+ 6C43CC921535032600162364 /* fficonfig_i386.h */,
+ 6C43CC931535032600162364 /* fficonfig_x86_64.h */,
+ 6C43CC941535032600162364 /* ffitarget.h */,
+ 6C43CC951535032600162364 /* ffitarget_i386.h */,
+ 6C43CC961535032600162364 /* ffitarget_x86_64.h */,
+ );
+ path = include;
+ sourceTree = "<group>";
+ };
+ 6C43CCA11535039600162364 /* include */ = {
+ isa = PBXGroup;
+ children = (
+ 6C43CCA21535039600162364 /* ffi.h */,
+ 6C43CCA31535039600162364 /* ffi_armv7.h */,
+ 6C43CCA41535039600162364 /* ffi_common.h */,
+ 6C43CCA51535039600162364 /* ffi_i386.h */,
+ 6C43CCA61535039600162364 /* fficonfig.h */,
+ 6C43CCA71535039600162364 /* fficonfig_armv7.h */,
+ 6C43CCA81535039600162364 /* fficonfig_i386.h */,
+ 6C43CCA91535039600162364 /* ffitarget.h */,
+ 6C43CCAA1535039600162364 /* ffitarget_arm.h */,
+ 6C43CCAB1535039600162364 /* ffitarget_armv7.h */,
+ 6C43CCAC1535039600162364 /* ffitarget_i386.h */,
+ );
+ path = include;
+ sourceTree = "<group>";
+ };
+ F6B0839514721EE50031D8A1 = {
+ isa = PBXGroup;
+ children = (
+ 6C43CC3D1534F7C400162364 /* src */,
+ 6C43CBAF1534F76F00162364 /* iOS */,
+ 6C43CBF01534F77800162364 /* OS X */,
+ F6F980C6147386260008F121 /* Products */,
+ );
+ sourceTree = "<group>";
+ };
+ F6F980C6147386260008F121 /* Products */ = {
+ isa = PBXGroup;
+ children = (
+ F6F980BA147386130008F121 /* libffi.a */,
+ 6C43CB3D1534E9D100162364 /* libffi.a */,
+ );
+ name = Products;
+ path = ../..;
+ sourceTree = BUILT_PRODUCTS_DIR;
+ };
+/* End PBXGroup section */
+
+/* Begin PBXHeadersBuildPhase section */
+ 6C43CB3B1534E9D100162364 /* Headers */ = {
+ isa = PBXHeadersBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ 6C43CC971535032600162364 /* ffi.h in Headers */,
+ 6C43CC981535032600162364 /* ffi_common.h in Headers */,
+ 6C43CC991535032600162364 /* ffi_i386.h in Headers */,
+ 6C43CC9A1535032600162364 /* ffi_x86_64.h in Headers */,
+ 6C43CC9E1535032600162364 /* ffitarget.h in Headers */,
+ 6C43CC9F1535032600162364 /* ffitarget_i386.h in Headers */,
+ 6C43CCA01535032600162364 /* ffitarget_x86_64.h in Headers */,
+ 6C43CC9B1535032600162364 /* fficonfig.h in Headers */,
+ 6C43CC9C1535032600162364 /* fficonfig_i386.h in Headers */,
+ 6C43CC9D1535032600162364 /* fficonfig_x86_64.h in Headers */,
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+ F6F980B8147386130008F121 /* Headers */ = {
+ isa = PBXHeadersBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ 6C43CCAD1535039600162364 /* ffi.h in Headers */,
+ 6C43CCAE1535039600162364 /* ffi_armv7.h in Headers */,
+ 6C43CCAF1535039600162364 /* ffi_common.h in Headers */,
+ 6C43CCB01535039600162364 /* ffi_i386.h in Headers */,
+ 6C43CCB41535039600162364 /* ffitarget.h in Headers */,
+ 6C43CCB51535039600162364 /* ffitarget_arm.h in Headers */,
+ 6C43CCB61535039600162364 /* ffitarget_armv7.h in Headers */,
+ 6C43CCB71535039600162364 /* ffitarget_i386.h in Headers */,
+ 6C43CCB11535039600162364 /* fficonfig.h in Headers */,
+ 6C43CCB21535039600162364 /* fficonfig_armv7.h in Headers */,
+ 6C43CCB31535039600162364 /* fficonfig_i386.h in Headers */,
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+/* End PBXHeadersBuildPhase section */
+
+/* Begin PBXNativeTarget section */
+ 6C43CB3C1534E9D100162364 /* libffi OS X */ = {
+ isa = PBXNativeTarget;
+ buildConfigurationList = 6C43CB4A1534E9D100162364 /* Build configuration list for PBXNativeTarget "libffi OS X" */;
+ buildPhases = (
+ 6C43CC401534FF3B00162364 /* Generate Source and Headers */,
+ 6C43CB391534E9D100162364 /* Sources */,
+ 6C43CB3A1534E9D100162364 /* Frameworks */,
+ 6C43CB3B1534E9D100162364 /* Headers */,
+ );
+ buildRules = (
+ );
+ dependencies = (
+ );
+ name = "libffi OS X";
+ productName = "ffi OS X";
+ productReference = 6C43CB3D1534E9D100162364 /* libffi.a */;
+ productType = "com.apple.product-type.library.static";
+ };
+ F6F980B9147386130008F121 /* libffi iOS */ = {
+ isa = PBXNativeTarget;
+ buildConfigurationList = F6F980C4147386130008F121 /* Build configuration list for PBXNativeTarget "libffi iOS" */;
+ buildPhases = (
+ 6C43CC3E1534F8E200162364 /* Generate Trampoline */,
+ 6C43CC3F1534FF1B00162364 /* Generate Source and Headers */,
+ F6F980B6147386130008F121 /* Sources */,
+ F6F980B7147386130008F121 /* Frameworks */,
+ F6F980B8147386130008F121 /* Headers */,
+ );
+ buildRules = (
+ );
+ dependencies = (
+ );
+ name = "libffi iOS";
+ productName = ffi;
+ productReference = F6F980BA147386130008F121 /* libffi.a */;
+ productType = "com.apple.product-type.library.static";
+ };
+/* End PBXNativeTarget section */
+
+/* Begin PBXProject section */
+ F6B0839714721EE50031D8A1 /* Project object */ = {
+ isa = PBXProject;
+ attributes = {
+ LastUpgradeCheck = 0430;
+ };
+ buildConfigurationList = F6B0839A14721EE50031D8A1 /* Build configuration list for PBXProject "libffi" */;
+ compatibilityVersion = "Xcode 3.2";
+ developmentRegion = English;
+ hasScannedForEncodings = 0;
+ knownRegions = (
+ en,
+ );
+ mainGroup = F6B0839514721EE50031D8A1;
+ productRefGroup = F6B0839514721EE50031D8A1;
+ projectDirPath = "";
+ projectRoot = "";
+ targets = (
+ F6F980B9147386130008F121 /* libffi iOS */,
+ 6C43CB3C1534E9D100162364 /* libffi OS X */,
+ );
+ };
+/* End PBXProject section */
+
+/* Begin PBXShellScriptBuildPhase section */
+ 6C43CC3E1534F8E200162364 /* Generate Trampoline */ = {
+ isa = PBXShellScriptBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ );
+ inputPaths = (
+ );
+ name = "Generate Trampoline";
+ outputPaths = (
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ shellPath = /usr/bin/python;
+ shellScript = "import subprocess\nimport re\nimport os\nimport errno\nimport sys\n\ndef main():\n with open('src/arm/trampoline.S', 'w') as tramp_out:\n p = subprocess.Popen(['bash', 'src/arm/gentramp.sh'], stdout=tramp_out)\n p.wait()\n\nif __name__ == '__main__':\n main()";
+ };
+ 6C43CC3F1534FF1B00162364 /* Generate Source and Headers */ = {
+ isa = PBXShellScriptBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ );
+ inputPaths = (
+ );
+ name = "Generate Source and Headers";
+ outputPaths = (
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ shellPath = /bin/sh;
+ shellScript = "/usr/bin/python generate-ios-source-and-headers.py";
+ };
+ 6C43CC401534FF3B00162364 /* Generate Source and Headers */ = {
+ isa = PBXShellScriptBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ );
+ inputPaths = (
+ );
+ name = "Generate Source and Headers";
+ outputPaths = (
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ shellPath = /bin/sh;
+ shellScript = "/usr/bin/python generate-osx-source-and-headers.py";
+ };
+/* End PBXShellScriptBuildPhase section */
+
+/* Begin PBXSourcesBuildPhase section */
+ 6C43CB391534E9D100162364 /* Sources */ = {
+ isa = PBXSourcesBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ 6C43CC1F1534F77800162364 /* darwin.S in Sources */,
+ 6C43CC201534F77800162364 /* darwin64.S in Sources */,
+ 6C43CC211534F77800162364 /* ffi.c in Sources */,
+ 6C43CC221534F77800162364 /* ffi64.c in Sources */,
+ 6C43CC301534F7BE00162364 /* closures.c in Sources */,
+ 6C43CC361534F7BE00162364 /* java_raw_api.c in Sources */,
+ 6C43CC381534F7BE00162364 /* prep_cif.c in Sources */,
+ 6C43CC3A1534F7BE00162364 /* raw_api.c in Sources */,
+ 6C43CC3C1534F7BE00162364 /* types.c in Sources */,
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+ F6F980B6147386130008F121 /* Sources */ = {
+ isa = PBXSourcesBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ 6C43CBDC1534F76F00162364 /* ffi.c in Sources */,
+ 6C43CBDD1534F76F00162364 /* sysv.S in Sources */,
+ 6C43CBDE1534F76F00162364 /* trampoline.S in Sources */,
+ 6C43CBE61534F76F00162364 /* darwin.S in Sources */,
+ 6C43CBE81534F76F00162364 /* ffi.c in Sources */,
+ 6C43CC2F1534F7BE00162364 /* closures.c in Sources */,
+ 6C43CC351534F7BE00162364 /* java_raw_api.c in Sources */,
+ 6C43CC371534F7BE00162364 /* prep_cif.c in Sources */,
+ 6C43CC391534F7BE00162364 /* raw_api.c in Sources */,
+ 6C43CC3B1534F7BE00162364 /* types.c in Sources */,
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
+/* End PBXSourcesBuildPhase section */
+
+/* Begin XCBuildConfiguration section */
+ 6C43CB4B1534E9D100162364 /* Debug */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";
+ DSTROOT = /tmp/ffi.dst;
+ FRAMEWORK_SEARCH_PATHS = (
+ "$(inherited)",
+ "\"$(SYSTEM_APPS_DIR)/Xcode.app/Contents/Developer/Library/Frameworks\"",
+ );
+ GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+ GCC_VERSION = com.apple.compilers.llvm.clang.1_0;
+ GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+ GCC_WARN_UNINITIALIZED_AUTOS = YES;
+ MACOSX_DEPLOYMENT_TARGET = 10.6;
+ ONLY_ACTIVE_ARCH = YES;
+ PRODUCT_NAME = ffi;
+ SDKROOT = macosx;
+ };
+ name = Debug;
+ };
+ 6C43CB4C1534E9D100162364 /* Release */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";
+ DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
+ DSTROOT = /tmp/ffi.dst;
+ FRAMEWORK_SEARCH_PATHS = (
+ "$(inherited)",
+ "\"$(SYSTEM_APPS_DIR)/Xcode.app/Contents/Developer/Library/Frameworks\"",
+ );
+ GCC_ENABLE_OBJC_EXCEPTIONS = YES;
+ GCC_VERSION = com.apple.compilers.llvm.clang.1_0;
+ GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
+ GCC_WARN_UNINITIALIZED_AUTOS = YES;
+ MACOSX_DEPLOYMENT_TARGET = 10.6;
+ PRODUCT_NAME = ffi;
+ SDKROOT = macosx;
+ };
+ name = Release;
+ };
+ F6B083AB14721EE50031D8A1 /* Debug */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ ALWAYS_SEARCH_USER_PATHS = NO;
+ ARCHS = "$(ARCHS_STANDARD_32_BIT)";
+ COPY_PHASE_STRIP = NO;
+ GCC_C_LANGUAGE_STANDARD = gnu99;
+ GCC_DYNAMIC_NO_PIC = NO;
+ GCC_OPTIMIZATION_LEVEL = 0;
+ GCC_PREPROCESSOR_DEFINITIONS = (
+ "DEBUG=1",
+ "$(inherited)",
+ );
+ GCC_SYMBOLS_PRIVATE_EXTERN = NO;
+ GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
+ GCC_WARN_ABOUT_RETURN_TYPE = YES;
+ GCC_WARN_UNUSED_VALUE = NO;
+ GCC_WARN_UNUSED_VARIABLE = YES;
+ HEADER_SEARCH_PATHS = ios/include;
+ SDKROOT = iphoneos;
+ };
+ name = Debug;
+ };
+ F6B083AC14721EE50031D8A1 /* Release */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ ALWAYS_SEARCH_USER_PATHS = NO;
+ ARCHS = "$(ARCHS_STANDARD_32_BIT)";
+ COPY_PHASE_STRIP = YES;
+ GCC_C_LANGUAGE_STANDARD = gnu99;
+ GCC_PREPROCESSOR_DEFINITIONS = "";
+ GCC_WARN_ABOUT_MISSING_PROTOTYPES = NO;
+ GCC_WARN_ABOUT_RETURN_TYPE = YES;
+ GCC_WARN_UNUSED_VALUE = NO;
+ GCC_WARN_UNUSED_VARIABLE = YES;
+ HEADER_SEARCH_PATHS = ios/include;
+ SDKROOT = iphoneos;
+ VALIDATE_PRODUCT = YES;
+ };
+ name = Release;
+ };
+ F6F980C2147386130008F121 /* Debug */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ ARCHS = (
+ armv6,
+ armv7,
+ );
+ DSTROOT = /tmp/ffi.dst;
+ GCC_PRECOMPILE_PREFIX_HEADER = YES;
+ GCC_THUMB_SUPPORT = NO;
+ IPHONEOS_DEPLOYMENT_TARGET = 4.0;
+ OTHER_LDFLAGS = "-ObjC";
+ PRODUCT_NAME = ffi;
+ SKIP_INSTALL = YES;
+ };
+ name = Debug;
+ };
+ F6F980C3147386130008F121 /* Release */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ ARCHS = (
+ armv6,
+ armv7,
+ );
+ DSTROOT = /tmp/ffi.dst;
+ GCC_PRECOMPILE_PREFIX_HEADER = YES;
+ GCC_THUMB_SUPPORT = NO;
+ IPHONEOS_DEPLOYMENT_TARGET = 4.0;
+ OTHER_LDFLAGS = "-ObjC";
+ PRODUCT_NAME = ffi;
+ SKIP_INSTALL = YES;
+ };
+ name = Release;
+ };
+/* End XCBuildConfiguration section */
+
+/* Begin XCConfigurationList section */
+ 6C43CB4A1534E9D100162364 /* Build configuration list for PBXNativeTarget "libffi OS X" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ 6C43CB4B1534E9D100162364 /* Debug */,
+ 6C43CB4C1534E9D100162364 /* Release */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Release;
+ };
+ F6B0839A14721EE50031D8A1 /* Build configuration list for PBXProject "libffi" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ F6B083AB14721EE50031D8A1 /* Debug */,
+ F6B083AC14721EE50031D8A1 /* Release */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Release;
+ };
+ F6F980C4147386130008F121 /* Build configuration list for PBXNativeTarget "libffi iOS" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ F6F980C2147386130008F121 /* Debug */,
+ F6F980C3147386130008F121 /* Release */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Release;
+ };
+/* End XCConfigurationList section */
+ };
+ rootObject = F6B0839714721EE50031D8A1 /* Project object */;
+}
diff --git a/Modules/_ctypes/libffi/libtool-ldflags b/Modules/_ctypes/libffi/libtool-ldflags
new file mode 100755
index 0000000..e32e37b
--- /dev/null
+++ b/Modules/_ctypes/libffi/libtool-ldflags
@@ -0,0 +1,106 @@
+#! /bin/sh
+
+# Script to translate LDFLAGS into a form suitable for use with libtool.
+
+# Copyright (C) 2005 Free Software Foundation, Inc.
+#
+# This file is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+# MA 02110-1301, USA.
+
+# Contributed by CodeSourcery, LLC.
+
+# This script is designed to be used from a Makefile that uses libtool
+# to build libraries as follows:
+#
+# LTLDFLAGS = $(shell libtool-ldflags $(LDFLAGS))
+#
+# Then, use $(LTLDFLAGS) in place of $(LDFLAGS) in your link line.
+
+# The output of the script. This string is built up as we process the
+# arguments.
+result=
+prev_arg=
+
+for arg
+do
+ case $arg in
+ -f*|--*)
+ # Libtool does not ascribe any special meaning to options
+ # that begin with -f or with a double-dash. So, it will
+ # think these options are linker options, and prefix them
+ # with "-Wl,". Then, the compiler driver will ignore the
+ # options. So, we prefix these options with -Xcompiler to
+ # make clear to libtool that they are in fact compiler
+ # options.
+ case $prev_arg in
+ -Xpreprocessor|-Xcompiler|-Xlinker)
+ # This option is already prefixed; don't prefix it again.
+ ;;
+ *)
+ result="$result -Xcompiler"
+ ;;
+ esac
+ ;;
+ *)
+ # We do not want to add -Xcompiler to other options because
+ # that would prevent libtool itself from recognizing them.
+ ;;
+ esac
+ prev_arg=$arg
+
+ # If $(LDFLAGS) is (say):
+ # a "b'c d" e
+ # then the user expects that:
+ # $(LD) $(LDFLAGS)
+ # will pass three arguments to $(LD):
+ # 1) a
+ # 2) b'c d
+ # 3) e
+ # We must ensure, therefore, that the arguments are appropriately
+ # quoted so that using:
+ # libtool --mode=link ... $(LTLDFLAGS)
+ # will result in the same number of arguments being passed to
+ # libtool. In other words, when this script was invoked, the shell
+ # removed one level of quoting, present in $(LDFLAGS); we have to put
+ # it back.
+
+ # Quote any embedded single quotes.
+ case $arg in
+ *"'"*)
+ # The following command creates the script:
+ # 1s,^X,,;s|'|'"'"'|g
+ # which removes a leading X, and then quotes and embedded single
+ # quotes.
+ sed_script="1s,^X,,;s|'|'\"'\"'|g"
+ # Add a leading "X" so that if $arg starts with a dash,
+ # the echo command will not try to interpret the argument
+ # as a command-line option.
+ arg="X$arg"
+ # Generate the quoted string.
+ quoted_arg=`echo "$arg" | sed -e "$sed_script"`
+ ;;
+ *)
+ quoted_arg=$arg
+ ;;
+ esac
+ # Surround the entire argument with single quotes.
+ quoted_arg="'"$quoted_arg"'"
+
+ # Add it to the string.
+ result="$result $quoted_arg"
+done
+
+# Output the string we have built up.
+echo "$result"
diff --git a/Modules/_ctypes/libffi/libtool-version b/Modules/_ctypes/libffi/libtool-version
index b8b80e0..e784fc4 100644
--- a/Modules/_ctypes/libffi/libtool-version
+++ b/Modules/_ctypes/libffi/libtool-version
@@ -26,4 +26,4 @@
# release, then set age to 0.
#
# CURRENT:REVISION:AGE
-5:10:0
+6:1:0
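
The CURRENT:REVISION:AGE triple above is what libtool's -version-info flag consumes at link time. An illustrative link line, not taken from this build and with placeholder file names; the resulting shared-object name assumes a GNU/Linux ELF target, where libtool uses current-age, age and revision:

    # With -version-info 6:1:0, libtool would install libffi.so.6.0.1
    # (current-age = 6, age = 0, revision = 1) alongside the libffi.so.6 SONAME link.
    libtool --mode=link $CC -o libffi.la src/*.lo \
            -rpath /usr/local/lib -version-info 6:1:0
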
diff --git a/Modules/_ctypes/libffi/ltmain.sh b/Modules/_ctypes/libffi/ltmain.sh
index 6939dcc..63ae69d 100755..100644
--- a/Modules/_ctypes/libffi/ltmain.sh
+++ b/Modules/_ctypes/libffi/ltmain.sh
@@ -1,9 +1,9 @@
-# Generated from ltmain.m4sh.
-# ltmain.sh (GNU libtool) 2.2.6
+# libtool (GNU libtool) 2.4.2
# Written by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
-# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, 2007 2008 Free Software Foundation, Inc.
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006,
+# 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
# This is free software; see the source for copying conditions. There is NO
# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
@@ -32,50 +32,57 @@
#
# Provide generalized library-building support services.
#
-# --config show all configuration variables
-# --debug enable verbose shell tracing
-# -n, --dry-run display commands without modifying any files
-# --features display basic configuration information and exit
-# --mode=MODE use operation mode MODE
-# --preserve-dup-deps don't remove duplicate dependency libraries
-# --quiet, --silent don't print informational messages
-# --tag=TAG use configuration variables from tag TAG
-# -v, --verbose print informational messages (default)
-# --version print version information
-# -h, --help print short or long help message
+# --config show all configuration variables
+# --debug enable verbose shell tracing
+# -n, --dry-run display commands without modifying any files
+# --features display basic configuration information and exit
+# --mode=MODE use operation mode MODE
+# --preserve-dup-deps don't remove duplicate dependency libraries
+# --quiet, --silent don't print informational messages
+# --no-quiet, --no-silent
+# print informational messages (default)
+# --no-warn don't display warning messages
+# --tag=TAG use configuration variables from tag TAG
+# -v, --verbose print more informational messages than default
+# --no-verbose don't print the extra informational messages
+# --version print version information
+# -h, --help, --help-all print short, long, or detailed help message
#
# MODE must be one of the following:
#
-# clean remove files from the build directory
-# compile compile a source file into a libtool object
-# execute automatically set library path, then run a program
-# finish complete the installation of libtool libraries
-# install install libraries or executables
-# link create a library or an executable
-# uninstall remove libraries from an installed directory
+# clean remove files from the build directory
+# compile compile a source file into a libtool object
+# execute automatically set library path, then run a program
+# finish complete the installation of libtool libraries
+# install install libraries or executables
+# link create a library or an executable
+# uninstall remove libraries from an installed directory
#
-# MODE-ARGS vary depending on the MODE.
+# MODE-ARGS vary depending on the MODE. When passed as first option,
+# `--mode=MODE' may be abbreviated as `MODE' or a unique abbreviation of that.
# Try `$progname --help --mode=MODE' for a more detailed description of MODE.
#
# When reporting a bug, please describe a test case to reproduce it and
# include the following information:
#
-# host-triplet: $host
-# shell: $SHELL
-# compiler: $LTCC
-# compiler flags: $LTCFLAGS
-# linker: $LD (gnu? $with_gnu_ld)
-# $progname: (GNU libtool) 2.2.6
-# automake: $automake_version
-# autoconf: $autoconf_version
+# host-triplet: $host
+# shell: $SHELL
+# compiler: $LTCC
+# compiler flags: $LTCFLAGS
+# linker: $LD (gnu? $with_gnu_ld)
+# $progname: (GNU libtool) 2.4.2
+# automake: $automake_version
+# autoconf: $autoconf_version
#
# Report bugs to <bug-libtool@gnu.org>.
+# GNU libtool home page: <http://www.gnu.org/software/libtool/>.
+# General help using GNU software: <http://www.gnu.org/gethelp/>.
-PROGRAM=ltmain.sh
+PROGRAM=libtool
PACKAGE=libtool
-VERSION=2.2.6
+VERSION=2.4.2
TIMESTAMP=""
-package_revision=1.3012
+package_revision=1.3337
# Be Bourne compatible
if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
@@ -91,10 +98,15 @@ fi
BIN_SH=xpg4; export BIN_SH # for Tru64
DUALCASE=1; export DUALCASE # for MKS sh
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+ eval 'cat <<_LTECHO_EOF
+$1
+_LTECHO_EOF'
+}
+
# NLS nuisances: We save the old values to restore during execute mode.
-# Only set LANG and LC_ALL to C if already set.
-# These must not be set unconditionally because not all systems understand
-# e.g. LANG=C (notably SCO).
lt_user_locale=
lt_safe_locale=
for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
@@ -107,24 +119,28 @@ do
lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\"
fi"
done
+LC_ALL=C
+LANGUAGE=C
+export LANGUAGE LC_ALL
$lt_unset CDPATH
+# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh
+# is ksh but when the shell is invoked as "sh" and the current value of
+# the _XPG environment variable is not equal to 1 (one), the special
+# positional parameter $0, within a function call, is the name of the
+# function.
+progpath="$0"
: ${CP="cp -f"}
-: ${ECHO="echo"}
-: ${EGREP="/bin/grep -E"}
-: ${FGREP="/bin/grep -F"}
-: ${GREP="/bin/grep"}
-: ${LN_S="ln -s"}
+test "${ECHO+set}" = set || ECHO=${as_echo-'printf %s\n'}
: ${MAKE="make"}
: ${MKDIR="mkdir"}
: ${MV="mv -f"}
: ${RM="rm -f"}
-: ${SED="/bin/sed"}
: ${SHELL="${CONFIG_SHELL-/bin/sh}"}
: ${Xsed="$SED -e 1s/^X//"}
@@ -144,6 +160,27 @@ IFS=" $lt_nl"
dirname="s,/[^/]*$,,"
basename="s,^.*/,,"
+# func_dirname file append nondir_replacement
+# Compute the dirname of FILE. If nonempty, add APPEND to the result,
+# otherwise set result to NONDIR_REPLACEMENT.
+func_dirname ()
+{
+ func_dirname_result=`$ECHO "${1}" | $SED "$dirname"`
+ if test "X$func_dirname_result" = "X${1}"; then
+ func_dirname_result="${3}"
+ else
+ func_dirname_result="$func_dirname_result${2}"
+ fi
+} # func_dirname may be replaced by extended shell implementation
+
+
+# func_basename file
+func_basename ()
+{
+ func_basename_result=`$ECHO "${1}" | $SED "$basename"`
+} # func_basename may be replaced by extended shell implementation
+
+
# func_dirname_and_basename file append nondir_replacement
# perform func_basename and func_dirname in a single function
# call:
@@ -158,33 +195,183 @@ basename="s,^.*/,,"
# those functions but instead duplicate the functionality here.
func_dirname_and_basename ()
{
- # Extract subdirectory from the argument.
- func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"`
- if test "X$func_dirname_result" = "X${1}"; then
- func_dirname_result="${3}"
- else
- func_dirname_result="$func_dirname_result${2}"
- fi
- func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"`
+ # Extract subdirectory from the argument.
+ func_dirname_result=`$ECHO "${1}" | $SED -e "$dirname"`
+ if test "X$func_dirname_result" = "X${1}"; then
+ func_dirname_result="${3}"
+ else
+ func_dirname_result="$func_dirname_result${2}"
+ fi
+ func_basename_result=`$ECHO "${1}" | $SED -e "$basename"`
+} # func_dirname_and_basename may be replaced by extended shell implementation
+
+
+# func_stripname prefix suffix name
+# strip PREFIX and SUFFIX off of NAME.
+# PREFIX and SUFFIX must not contain globbing or regex special
+# characters, hashes, percent signs, but SUFFIX may contain a leading
+# dot (in which case that matches only a dot).
+# func_strip_suffix prefix name
+func_stripname ()
+{
+ case ${2} in
+ .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;;
+ *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;;
+ esac
+} # func_stripname may be replaced by extended shell implementation
+
+
+# These SED scripts presuppose an absolute path with a trailing slash.
+pathcar='s,^/\([^/]*\).*$,\1,'
+pathcdr='s,^/[^/]*,,'
+removedotparts=':dotsl
+ s@/\./@/@g
+ t dotsl
+ s,/\.$,/,'
+collapseslashes='s@/\{1,\}@/@g'
+finalslash='s,/*$,/,'
+
+# func_normal_abspath PATH
+# Remove doubled-up and trailing slashes, "." path components,
+# and cancel out any ".." path components in PATH after making
+# it an absolute path.
+# value returned in "$func_normal_abspath_result"
+func_normal_abspath ()
+{
+ # Start from root dir and reassemble the path.
+ func_normal_abspath_result=
+ func_normal_abspath_tpath=$1
+ func_normal_abspath_altnamespace=
+ case $func_normal_abspath_tpath in
+ "")
+ # Empty path, that just means $cwd.
+ func_stripname '' '/' "`pwd`"
+ func_normal_abspath_result=$func_stripname_result
+ return
+ ;;
+ # The next three entries are used to spot a run of precisely
+ # two leading slashes without using negated character classes;
+ # we take advantage of case's first-match behaviour.
+ ///*)
+ # Unusual form of absolute path, do nothing.
+ ;;
+ //*)
+ # Not necessarily an ordinary path; POSIX reserves leading '//'
+ # and for example Cygwin uses it to access remote file shares
+ # over CIFS/SMB, so we conserve a leading double slash if found.
+ func_normal_abspath_altnamespace=/
+ ;;
+ /*)
+ # Absolute path, do nothing.
+ ;;
+ *)
+ # Relative path, prepend $cwd.
+ func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath
+ ;;
+ esac
+ # Cancel out all the simple stuff to save iterations. We also want
+ # the path to end with a slash for ease of parsing, so make sure
+ # there is one (and only one) here.
+ func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \
+ -e "$removedotparts" -e "$collapseslashes" -e "$finalslash"`
+ while :; do
+ # Processed it all yet?
+ if test "$func_normal_abspath_tpath" = / ; then
+ # If we ascended to the root using ".." the result may be empty now.
+ if test -z "$func_normal_abspath_result" ; then
+ func_normal_abspath_result=/
+ fi
+ break
+ fi
+ func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \
+ -e "$pathcar"`
+ func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \
+ -e "$pathcdr"`
+ # Figure out what to do with it
+ case $func_normal_abspath_tcomponent in
+ "")
+ # Trailing empty path component, ignore it.
+ ;;
+ ..)
+ # Parent dir; strip last assembled component from result.
+ func_dirname "$func_normal_abspath_result"
+ func_normal_abspath_result=$func_dirname_result
+ ;;
+ *)
+ # Actual path component, append it.
+ func_normal_abspath_result=$func_normal_abspath_result/$func_normal_abspath_tcomponent
+ ;;
+ esac
+ done
+ # Restore leading double-slash if one was found on entry.
+ func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result
}
-# Generated shell functions inserted here.
+# func_relative_path SRCDIR DSTDIR
+# generates a relative path from SRCDIR to DSTDIR, with a trailing
+# slash if non-empty, suitable for immediately appending a filename
+# without needing to append a separator.
+# value returned in "$func_relative_path_result"
+func_relative_path ()
+{
+ func_relative_path_result=
+ func_normal_abspath "$1"
+ func_relative_path_tlibdir=$func_normal_abspath_result
+ func_normal_abspath "$2"
+ func_relative_path_tbindir=$func_normal_abspath_result
+
+ # Ascend the tree starting from libdir
+ while :; do
+ # check if we have found a prefix of bindir
+ case $func_relative_path_tbindir in
+ $func_relative_path_tlibdir)
+ # found an exact match
+ func_relative_path_tcancelled=
+ break
+ ;;
+ $func_relative_path_tlibdir*)
+ # found a matching prefix
+ func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir"
+ func_relative_path_tcancelled=$func_stripname_result
+ if test -z "$func_relative_path_result"; then
+ func_relative_path_result=.
+ fi
+ break
+ ;;
+ *)
+ func_dirname $func_relative_path_tlibdir
+ func_relative_path_tlibdir=${func_dirname_result}
+ if test "x$func_relative_path_tlibdir" = x ; then
+ # Have to descend all the way to the root!
+ func_relative_path_result=../$func_relative_path_result
+ func_relative_path_tcancelled=$func_relative_path_tbindir
+ break
+ fi
+ func_relative_path_result=../$func_relative_path_result
+ ;;
+ esac
+ done
-# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh
-# is ksh but when the shell is invoked as "sh" and the current value of
-# the _XPG environment variable is not equal to 1 (one), the special
-# positional parameter $0, within a function call, is the name of the
-# function.
-progpath="$0"
+ # Now calculate path; take care to avoid doubling-up slashes.
+ func_stripname '' '/' "$func_relative_path_result"
+ func_relative_path_result=$func_stripname_result
+ func_stripname '/' '/' "$func_relative_path_tcancelled"
+ if test "x$func_stripname_result" != x ; then
+ func_relative_path_result=${func_relative_path_result}/${func_stripname_result}
+ fi
+
+ # Normalisation. If bindir is libdir, return empty string,
+ # else relative path ending with a slash; either way, target
+ # file name can be directly appended.
+ if test ! -z "$func_relative_path_result"; then
+ func_stripname './' '' "$func_relative_path_result/"
+ func_relative_path_result=$func_stripname_result
+ fi
+}
# The name of this program:
-# In the unlikely event $progname began with a '-', it would play havoc with
-# func_echo (imagine progname=-n), so we prepend ./ in that case:
func_dirname_and_basename "$progpath"
progname=$func_basename_result
-case $progname in
- -*) progname=./$progname ;;
-esac
# Make sure we have an absolute path for reexecution:
case $progpath in
@@ -196,7 +383,7 @@ case $progpath in
;;
*)
save_IFS="$IFS"
- IFS=:
+ IFS=${PATH_SEPARATOR-:}
for progdir in $PATH; do
IFS="$save_IFS"
test -x "$progdir/$progname" && break
@@ -215,6 +402,15 @@ sed_quote_subst='s/\([`"$\\]\)/\\\1/g'
# Same as above, but do not quote variable references.
double_quote_subst='s/\(["`\\]\)/\\\1/g'
+# Sed substitution that turns a string into a regex matching for the
+# string literally.
+sed_make_literal_regex='s,[].[^$\\*\/],\\&,g'
+
+# Sed substitution that converts a w32 file name or path
+# which contains forward slashes, into one that contains
+# (escaped) backslashes. A very naive implementation.
+lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g'
+
# Re-`\' parameter expansions in output of double_quote_subst that were
# `\'-ed in input to the same. If an odd number of `\' preceded a '$'
# in input to double_quote_subst, that '$' was protected from expansion.
@@ -243,7 +439,7 @@ opt_warning=:
# name if it has been set yet.
func_echo ()
{
- $ECHO "$progname${mode+: }$mode: $*"
+ $ECHO "$progname: ${opt_mode+$opt_mode: }$*"
}
# func_verbose arg...
@@ -258,18 +454,25 @@ func_verbose ()
:
}
+# func_echo_all arg...
+# Invoke $ECHO with all args, space-separated.
+func_echo_all ()
+{
+ $ECHO "$*"
+}
+
# func_error arg...
# Echo program name prefixed message to standard error.
func_error ()
{
- $ECHO "$progname${mode+: }$mode: "${1+"$@"} 1>&2
+ $ECHO "$progname: ${opt_mode+$opt_mode: }"${1+"$@"} 1>&2
}
# func_warning arg...
# Echo program name prefixed warning message to standard error.
func_warning ()
{
- $opt_warning && $ECHO "$progname${mode+: }$mode: warning: "${1+"$@"} 1>&2
+ $opt_warning && $ECHO "$progname: ${opt_mode+$opt_mode: }warning: "${1+"$@"} 1>&2
# bash bug again:
:
@@ -326,9 +529,9 @@ func_mkdir_p ()
case $my_directory_path in */*) ;; *) break ;; esac
# ...otherwise throw away the child directory and loop
- my_directory_path=`$ECHO "X$my_directory_path" | $Xsed -e "$dirname"`
+ my_directory_path=`$ECHO "$my_directory_path" | $SED -e "$dirname"`
done
- my_dir_list=`$ECHO "X$my_dir_list" | $Xsed -e 's,:*$,,'`
+ my_dir_list=`$ECHO "$my_dir_list" | $SED 's,:*$,,'`
save_mkdir_p_IFS="$IFS"; IFS=':'
for my_dir in $my_dir_list; do
@@ -378,7 +581,7 @@ func_mktempdir ()
func_fatal_error "cannot create temporary directory \`$my_tmpdir'"
fi
- $ECHO "X$my_tmpdir" | $Xsed
+ $ECHO "$my_tmpdir"
}
@@ -392,7 +595,7 @@ func_quote_for_eval ()
{
case $1 in
*[\\\`\"\$]*)
- func_quote_for_eval_unquoted_result=`$ECHO "X$1" | $Xsed -e "$sed_quote_subst"` ;;
+ func_quote_for_eval_unquoted_result=`$ECHO "$1" | $SED "$sed_quote_subst"` ;;
*)
func_quote_for_eval_unquoted_result="$1" ;;
esac
@@ -419,7 +622,7 @@ func_quote_for_expand ()
{
case $1 in
*[\\\`\"]*)
- my_arg=`$ECHO "X$1" | $Xsed \
+ my_arg=`$ECHO "$1" | $SED \
-e "$double_quote_subst" -e "$sed_double_backslash"` ;;
*)
my_arg="$1" ;;
@@ -488,15 +691,39 @@ func_show_eval_locale ()
fi
}
-
-
+# func_tr_sh
+# Turn $1 into a string suitable for a shell variable name.
+# Result is stored in $func_tr_sh_result. All characters
+# not in the set a-zA-Z0-9_ are replaced with '_'. Further,
+# if $1 begins with a digit, a '_' is prepended as well.
+func_tr_sh ()
+{
+ case $1 in
+ [0-9]* | *[!a-zA-Z0-9_]*)
+ func_tr_sh_result=`$ECHO "$1" | $SED 's/^\([0-9]\)/_\1/; s/[^a-zA-Z0-9_]/_/g'`
+ ;;
+ * )
+ func_tr_sh_result=$1
+ ;;
+ esac
+}
# func_version
# Echo version message to standard output and exit.
func_version ()
{
- $SED -n '/^# '$PROGRAM' (GNU /,/# warranty; / {
+ $opt_debug
+
+ $SED -n '/(C)/!b go
+ :more
+ /\./!{
+ N
+ s/\n# / /
+ b more
+ }
+ :go
+ /^# '$PROGRAM' (GNU /,/# warranty; / {
s/^# //
s/^# *$//
s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/
@@ -509,22 +736,28 @@ func_version ()
# Echo short help message to standard output and exit.
func_usage ()
{
- $SED -n '/^# Usage:/,/# -h/ {
+ $opt_debug
+
+ $SED -n '/^# Usage:/,/^# *.*--help/ {
s/^# //
s/^# *$//
s/\$progname/'$progname'/
p
}' < "$progpath"
- $ECHO
+ echo
$ECHO "run \`$progname --help | more' for full usage"
exit $?
}
-# func_help
-# Echo long help message to standard output and exit.
+# func_help [NOEXIT]
+# Echo long help message to standard output and exit,
+# unless 'noexit' is passed as argument.
func_help ()
{
+ $opt_debug
+
$SED -n '/^# Usage:/,/# Report bugs to/ {
+ :print
s/^# //
s/^# *$//
s*\$progname*'$progname'*
@@ -534,11 +767,18 @@ func_help ()
s*\$LTCFLAGS*'"$LTCFLAGS"'*
s*\$LD*'"$LD"'*
s/\$with_gnu_ld/'"$with_gnu_ld"'/
- s/\$automake_version/'"`(automake --version) 2>/dev/null |$SED 1q`"'/
- s/\$autoconf_version/'"`(autoconf --version) 2>/dev/null |$SED 1q`"'/
+ s/\$automake_version/'"`(${AUTOMAKE-automake} --version) 2>/dev/null |$SED 1q`"'/
+ s/\$autoconf_version/'"`(${AUTOCONF-autoconf} --version) 2>/dev/null |$SED 1q`"'/
p
- }' < "$progpath"
- exit $?
+ d
+ }
+ /^# .* home page:/b print
+ /^# General help using/b print
+ ' < "$progpath"
+ ret=$?
+ if test -z "$1"; then
+ exit $ret
+ fi
}
# func_missing_arg argname
@@ -546,63 +786,106 @@ func_help ()
# exit_cmd.
func_missing_arg ()
{
- func_error "missing argument for $1"
+ $opt_debug
+
+ func_error "missing argument for $1."
exit_cmd=exit
}
-exit_cmd=:
+# func_split_short_opt shortopt
+# Set func_split_short_opt_name and func_split_short_opt_arg shell
+# variables after splitting SHORTOPT after the 2nd character.
+func_split_short_opt ()
+{
+ my_sed_short_opt='1s/^\(..\).*$/\1/;q'
+ my_sed_short_rest='1s/^..\(.*\)$/\1/;q'
+ func_split_short_opt_name=`$ECHO "$1" | $SED "$my_sed_short_opt"`
+ func_split_short_opt_arg=`$ECHO "$1" | $SED "$my_sed_short_rest"`
+} # func_split_short_opt may be replaced by extended shell implementation
+
+
+# func_split_long_opt longopt
+# Set func_split_long_opt_name and func_split_long_opt_arg shell
+# variables after splitting LONGOPT at the `=' sign.
+func_split_long_opt ()
+{
+ my_sed_long_opt='1s/^\(--[^=]*\)=.*/\1/;q'
+ my_sed_long_arg='1s/^--[^=]*=//'
+
+ func_split_long_opt_name=`$ECHO "$1" | $SED "$my_sed_long_opt"`
+ func_split_long_opt_arg=`$ECHO "$1" | $SED "$my_sed_long_arg"`
+} # func_split_long_opt may be replaced by extended shell implementation
+
+exit_cmd=:
-# Check that we have a working $ECHO.
-if test "X$1" = X--no-reexec; then
- # Discard the --no-reexec flag, and continue.
- shift
-elif test "X$1" = X--fallback-echo; then
- # Avoid inline document here, it may be left over
- :
-elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t'; then
- # Yippee, $ECHO works!
- :
-else
- # Restart under the correct shell, and then maybe $ECHO will work.
- exec $SHELL "$progpath" --no-reexec ${1+"$@"}
-fi
-if test "X$1" = X--fallback-echo; then
- # used as fallback echo
- shift
- cat <<EOF
-$*
-EOF
- exit $EXIT_SUCCESS
-fi
magic="%%%MAGIC variable%%%"
magic_exe="%%%MAGIC EXE variable%%%"
# Global variables.
-# $mode is unset
nonopt=
-execute_dlfiles=
preserve_args=
lo2o="s/\\.lo\$/.${objext}/"
o2lo="s/\\.${objext}\$/.lo/"
extracted_archives=
extracted_serial=0
-opt_dry_run=false
-opt_duplicate_deps=false
-opt_silent=false
-opt_debug=:
-
# If this variable is set in any of the actions, the command in it
# will be execed at the end. This prevents here-documents from being
# left over by shells.
exec_cmd=
+# func_append var value
+# Append VALUE to the end of shell variable VAR.
+func_append ()
+{
+ eval "${1}=\$${1}\${2}"
+} # func_append may be replaced by extended shell implementation
+
+# func_append_quoted var value
+# Quote VALUE and append to the end of shell variable VAR, separated
+# by a space.
+func_append_quoted ()
+{
+ func_quote_for_eval "${2}"
+ eval "${1}=\$${1}\\ \$func_quote_for_eval_result"
+} # func_append_quoted may be replaced by extended shell implementation
+
+
+# func_arith arithmetic-term...
+func_arith ()
+{
+ func_arith_result=`expr "${@}"`
+} # func_arith may be replaced by extended shell implementation
+
+
+# func_len string
+# STRING may not start with a hyphen.
+func_len ()
+{
+ func_len_result=`expr "${1}" : ".*" 2>/dev/null || echo $max_cmd_len`
+} # func_len may be replaced by extended shell implementation
+
+
+# func_lo2o object
+func_lo2o ()
+{
+ func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"`
+} # func_lo2o may be replaced by extended shell implementation
+
+
+# func_xform libobj-or-source
+func_xform ()
+{
+ func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'`
+} # func_xform may be replaced by extended shell implementation
+
+
# func_fatal_configuration arg...
# Echo program name prefixed message to standard error, followed by
# a configuration failure hint, and exit.
@@ -636,16 +919,16 @@ func_config ()
# Display the features supported by this script.
func_features ()
{
- $ECHO "host: $host"
+ echo "host: $host"
if test "$build_libtool_libs" = yes; then
- $ECHO "enable shared libraries"
+ echo "enable shared libraries"
else
- $ECHO "disable shared libraries"
+ echo "disable shared libraries"
fi
if test "$build_old_libs" = yes; then
- $ECHO "enable static libraries"
+ echo "enable static libraries"
else
- $ECHO "disable static libraries"
+ echo "disable static libraries"
fi
exit $?
@@ -692,117 +975,209 @@ func_enable_tag ()
esac
}
-# Parse options once, thoroughly. This comes as soon as possible in
-# the script to make things like `libtool --version' happen quickly.
+# func_check_version_match
+# Ensure that we are using m4 macros, and libtool script from the same
+# release of libtool.
+func_check_version_match ()
{
+ if test "$package_revision" != "$macro_revision"; then
+ if test "$VERSION" != "$macro_version"; then
+ if test -z "$macro_version"; then
+ cat >&2 <<_LT_EOF
+$progname: Version mismatch error. This is $PACKAGE $VERSION, but the
+$progname: definition of this LT_INIT comes from an older release.
+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
+$progname: and run autoconf again.
+_LT_EOF
+ else
+ cat >&2 <<_LT_EOF
+$progname: Version mismatch error. This is $PACKAGE $VERSION, but the
+$progname: definition of this LT_INIT comes from $PACKAGE $macro_version.
+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
+$progname: and run autoconf again.
+_LT_EOF
+ fi
+ else
+ cat >&2 <<_LT_EOF
+$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision,
+$progname: but the definition of this LT_INIT comes from revision $macro_revision.
+$progname: You should recreate aclocal.m4 with macros from revision $package_revision
+$progname: of $PACKAGE $VERSION and run autoconf again.
+_LT_EOF
+ fi
+
+ exit $EXIT_MISMATCH
+ fi
+}
+
+
+# Shorthand for --mode=foo, only valid as the first argument
+case $1 in
+clean|clea|cle|cl)
+ shift; set dummy --mode clean ${1+"$@"}; shift
+ ;;
+compile|compil|compi|comp|com|co|c)
+ shift; set dummy --mode compile ${1+"$@"}; shift
+ ;;
+execute|execut|execu|exec|exe|ex|e)
+ shift; set dummy --mode execute ${1+"$@"}; shift
+ ;;
+finish|finis|fini|fin|fi|f)
+ shift; set dummy --mode finish ${1+"$@"}; shift
+ ;;
+install|instal|insta|inst|ins|in|i)
+ shift; set dummy --mode install ${1+"$@"}; shift
+ ;;
+link|lin|li|l)
+ shift; set dummy --mode link ${1+"$@"}; shift
+ ;;
+uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u)
+ shift; set dummy --mode uninstall ${1+"$@"}; shift
+ ;;
+esac
+
- # Shorthand for --mode=foo, only valid as the first argument
- case $1 in
- clean|clea|cle|cl)
- shift; set dummy --mode clean ${1+"$@"}; shift
- ;;
- compile|compil|compi|comp|com|co|c)
- shift; set dummy --mode compile ${1+"$@"}; shift
- ;;
- execute|execut|execu|exec|exe|ex|e)
- shift; set dummy --mode execute ${1+"$@"}; shift
- ;;
- finish|finis|fini|fin|fi|f)
- shift; set dummy --mode finish ${1+"$@"}; shift
- ;;
- install|instal|insta|inst|ins|in|i)
- shift; set dummy --mode install ${1+"$@"}; shift
- ;;
- link|lin|li|l)
- shift; set dummy --mode link ${1+"$@"}; shift
- ;;
- uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u)
- shift; set dummy --mode uninstall ${1+"$@"}; shift
- ;;
- esac
- # Parse non-mode specific arguments:
- while test "$#" -gt 0; do
+# Option defaults:
+opt_debug=:
+opt_dry_run=false
+opt_config=false
+opt_preserve_dup_deps=false
+opt_features=false
+opt_finish=false
+opt_help=false
+opt_help_all=false
+opt_silent=:
+opt_warning=:
+opt_verbose=:
+opt_silent=false
+opt_verbose=false
+
+
+# Parse options once, thoroughly. This comes as soon as possible in the
+# script to make things like `--version' happen as quickly as we can.
+{
+ # this just eases exit handling
+ while test $# -gt 0; do
opt="$1"
shift
-
case $opt in
- --config) func_config ;;
-
- --debug) preserve_args="$preserve_args $opt"
+ --debug|-x) opt_debug='set -x'
func_echo "enabling shell trace mode"
- opt_debug='set -x'
$opt_debug
;;
-
- -dlopen) test "$#" -eq 0 && func_missing_arg "$opt" && break
- execute_dlfiles="$execute_dlfiles $1"
- shift
+ --dry-run|--dryrun|-n)
+ opt_dry_run=:
;;
-
- --dry-run | -n) opt_dry_run=: ;;
- --features) func_features ;;
- --finish) mode="finish" ;;
-
- --mode) test "$#" -eq 0 && func_missing_arg "$opt" && break
- case $1 in
- # Valid mode arguments:
- clean) ;;
- compile) ;;
- execute) ;;
- finish) ;;
- install) ;;
- link) ;;
- relink) ;;
- uninstall) ;;
-
- # Catch anything else as an error
- *) func_error "invalid argument for $opt"
- exit_cmd=exit
- break
- ;;
- esac
-
- mode="$1"
+ --config)
+ opt_config=:
+func_config
+ ;;
+ --dlopen|-dlopen)
+ optarg="$1"
+ opt_dlopen="${opt_dlopen+$opt_dlopen
+}$optarg"
shift
;;
-
--preserve-dup-deps)
- opt_duplicate_deps=: ;;
-
- --quiet|--silent) preserve_args="$preserve_args $opt"
- opt_silent=:
+ opt_preserve_dup_deps=:
;;
-
- --verbose| -v) preserve_args="$preserve_args $opt"
+ --features)
+ opt_features=:
+func_features
+ ;;
+ --finish)
+ opt_finish=:
+set dummy --mode finish ${1+"$@"}; shift
+ ;;
+ --help)
+ opt_help=:
+ ;;
+ --help-all)
+ opt_help_all=:
+opt_help=': help-all'
+ ;;
+ --mode)
+ test $# = 0 && func_missing_arg $opt && break
+ optarg="$1"
+ opt_mode="$optarg"
+case $optarg in
+ # Valid mode arguments:
+ clean|compile|execute|finish|install|link|relink|uninstall) ;;
+
+ # Catch anything else as an error
+ *) func_error "invalid argument for $opt"
+ exit_cmd=exit
+ break
+ ;;
+esac
+ shift
+ ;;
+ --no-silent|--no-quiet)
opt_silent=false
+func_append preserve_args " $opt"
;;
-
- --tag) test "$#" -eq 0 && func_missing_arg "$opt" && break
- preserve_args="$preserve_args $opt $1"
- func_enable_tag "$1" # tagname is set here
+ --no-warning|--no-warn)
+ opt_warning=false
+func_append preserve_args " $opt"
+ ;;
+ --no-verbose)
+ opt_verbose=false
+func_append preserve_args " $opt"
+ ;;
+ --silent|--quiet)
+ opt_silent=:
+func_append preserve_args " $opt"
+ opt_verbose=false
+ ;;
+ --verbose|-v)
+ opt_verbose=:
+func_append preserve_args " $opt"
+opt_silent=false
+ ;;
+ --tag)
+ test $# = 0 && func_missing_arg $opt && break
+ optarg="$1"
+ opt_tag="$optarg"
+func_append preserve_args " $opt $optarg"
+func_enable_tag "$optarg"
shift
;;
+ -\?|-h) func_usage ;;
+ --help) func_help ;;
+ --version) func_version ;;
+
# Separate optargs to long options:
- -dlopen=*|--mode=*|--tag=*)
- func_opt_split "$opt"
- set dummy "$func_opt_split_opt" "$func_opt_split_arg" ${1+"$@"}
+ --*=*)
+ func_split_long_opt "$opt"
+ set dummy "$func_split_long_opt_name" "$func_split_long_opt_arg" ${1+"$@"}
shift
;;
- -\?|-h) func_usage ;;
- --help) opt_help=: ;;
- --version) func_version ;;
-
- -*) func_fatal_help "unrecognized option \`$opt'" ;;
-
- *) nonopt="$opt"
- break
+ # Separate non-argument short options:
+ -\?*|-h*|-n*|-v*)
+ func_split_short_opt "$opt"
+ set dummy "$func_split_short_opt_name" "-$func_split_short_opt_arg" ${1+"$@"}
+ shift
;;
+
+ --) break ;;
+ -*) func_fatal_help "unrecognized option \`$opt'" ;;
+ *) set dummy "$opt" ${1+"$@"}; shift; break ;;
esac
done
+ # Validate options:
+
+ # save first non-option argument
+ if test "$#" -gt 0; then
+ nonopt="$opt"
+ shift
+ fi
+
+ # preserve --debug
+ test "$opt_debug" = : || func_append preserve_args " --debug"
case $host in
*cygwin* | *mingw* | *pw32* | *cegcc*)
@@ -810,82 +1185,44 @@ func_enable_tag ()
opt_duplicate_compiler_generated_deps=:
;;
*)
- opt_duplicate_compiler_generated_deps=$opt_duplicate_deps
+ opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps
;;
esac
- # Having warned about all mis-specified options, bail out if
- # anything was wrong.
- $exit_cmd $EXIT_FAILURE
-}
+ $opt_help || {
+ # Sanity checks first:
+ func_check_version_match
-# func_check_version_match
-# Ensure that we are using m4 macros, and libtool script from the same
-# release of libtool.
-func_check_version_match ()
-{
- if test "$package_revision" != "$macro_revision"; then
- if test "$VERSION" != "$macro_version"; then
- if test -z "$macro_version"; then
- cat >&2 <<_LT_EOF
-$progname: Version mismatch error. This is $PACKAGE $VERSION, but the
-$progname: definition of this LT_INIT comes from an older release.
-$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
-$progname: and run autoconf again.
-_LT_EOF
- else
- cat >&2 <<_LT_EOF
-$progname: Version mismatch error. This is $PACKAGE $VERSION, but the
-$progname: definition of this LT_INIT comes from $PACKAGE $macro_version.
-$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
-$progname: and run autoconf again.
-_LT_EOF
- fi
- else
- cat >&2 <<_LT_EOF
-$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision,
-$progname: but the definition of this LT_INIT comes from revision $macro_revision.
-$progname: You should recreate aclocal.m4 with macros from revision $package_revision
-$progname: of $PACKAGE $VERSION and run autoconf again.
-_LT_EOF
+ if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
+ func_fatal_configuration "not configured to build any kind of library"
fi
- exit $EXIT_MISMATCH
- fi
-}
-
-
-## ----------- ##
-## Main. ##
-## ----------- ##
-
-$opt_help || {
- # Sanity checks first:
- func_check_version_match
+ # Darwin sucks
+ eval std_shrext=\"$shrext_cmds\"
- if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
- func_fatal_configuration "not configured to build any kind of library"
- fi
+ # Only execute mode is allowed to have -dlopen flags.
+ if test -n "$opt_dlopen" && test "$opt_mode" != execute; then
+ func_error "unrecognized option \`-dlopen'"
+ $ECHO "$help" 1>&2
+ exit $EXIT_FAILURE
+ fi
- test -z "$mode" && func_fatal_error "error: you must specify a MODE."
+ # Change the help message to a mode-specific one.
+ generic_help="$help"
+ help="Try \`$progname --help --mode=$opt_mode' for more information."
+ }
- # Darwin sucks
- eval std_shrext=\"$shrext_cmds\"
+ # Bail if the options were screwed
+ $exit_cmd $EXIT_FAILURE
+}
- # Only execute mode is allowed to have -dlopen flags.
- if test -n "$execute_dlfiles" && test "$mode" != execute; then
- func_error "unrecognized option \`-dlopen'"
- $ECHO "$help" 1>&2
- exit $EXIT_FAILURE
- fi
- # Change the help message to a mode-specific one.
- generic_help="$help"
- help="Try \`$progname --help --mode=$mode' for more information."
-}
+## ----------- ##
+## Main. ##
+## ----------- ##
# func_lalib_p file
# True iff FILE is a libtool `.la' library or `.lo' object file.
@@ -950,12 +1287,9 @@ func_ltwrapper_executable_p ()
# temporary ltwrapper_script.
func_ltwrapper_scriptname ()
{
- func_ltwrapper_scriptname_result=""
- if func_ltwrapper_executable_p "$1"; then
- func_dirname_and_basename "$1" "" "."
- func_stripname '' '.exe' "$func_basename_result"
- func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper"
- fi
+ func_dirname_and_basename "$1" "" "."
+ func_stripname '' '.exe' "$func_basename_result"
+ func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper"
}
# func_ltwrapper_p file
@@ -1001,6 +1335,37 @@ func_source ()
}
+# func_resolve_sysroot PATH
+# Replace a leading = in PATH with a sysroot. Store the result into
+# func_resolve_sysroot_result
+func_resolve_sysroot ()
+{
+ func_resolve_sysroot_result=$1
+ case $func_resolve_sysroot_result in
+ =*)
+ func_stripname '=' '' "$func_resolve_sysroot_result"
+ func_resolve_sysroot_result=$lt_sysroot$func_stripname_result
+ ;;
+ esac
+}
+
+# func_replace_sysroot PATH
+# If PATH begins with the sysroot, replace it with = and
+# store the result into func_replace_sysroot_result.
+func_replace_sysroot ()
+{
+ case "$lt_sysroot:$1" in
+ ?*:"$lt_sysroot"*)
+ func_stripname "$lt_sysroot" '' "$1"
+ func_replace_sysroot_result="=$func_stripname_result"
+ ;;
+ *)
+ # Including no sysroot.
+ func_replace_sysroot_result=$1
+ ;;
+ esac
+}
+
# func_infer_tag arg
# Infer tagged configuration to use if any are available and
# if one wasn't chosen via the "--tag" command line option.
@@ -1013,13 +1378,15 @@ func_infer_tag ()
if test -n "$available_tags" && test -z "$tagname"; then
CC_quoted=
for arg in $CC; do
- func_quote_for_eval "$arg"
- CC_quoted="$CC_quoted $func_quote_for_eval_result"
+ func_append_quoted CC_quoted "$arg"
done
+ CC_expanded=`func_echo_all $CC`
+ CC_quoted_expanded=`func_echo_all $CC_quoted`
case $@ in
# Blanks in the command may have been stripped by the calling shell,
# but not from the CC environment variable when configure was run.
- " $CC "* | "$CC "* | " `$ECHO $CC` "* | "`$ECHO $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$ECHO $CC_quoted` "* | "`$ECHO $CC_quoted` "*) ;;
+ " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \
+ " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;;
# Blanks at the start of $base_compile will cause this to fail
# if we don't check for them as well.
*)
@@ -1030,11 +1397,13 @@ func_infer_tag ()
CC_quoted=
for arg in $CC; do
# Double-quote args containing other shell metacharacters.
- func_quote_for_eval "$arg"
- CC_quoted="$CC_quoted $func_quote_for_eval_result"
+ func_append_quoted CC_quoted "$arg"
done
+ CC_expanded=`func_echo_all $CC`
+ CC_quoted_expanded=`func_echo_all $CC_quoted`
case "$@ " in
- " $CC "* | "$CC "* | " `$ECHO $CC` "* | "`$ECHO $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$ECHO $CC_quoted` "* | "`$ECHO $CC_quoted` "*)
+ " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \
+ " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*)
# The compiler in the base compile command matches
# the one in the tagged configuration.
# Assume this is the tagged configuration we want.
@@ -1097,6 +1466,486 @@ EOF
}
}
+
+##################################################
+# FILE NAME AND PATH CONVERSION HELPER FUNCTIONS #
+##################################################
+
+# func_convert_core_file_wine_to_w32 ARG
+# Helper function used by file name conversion functions when $build is *nix,
+# and $host is mingw, cygwin, or some other w32 environment. Relies on a
+# correctly configured wine environment available, with the winepath program
+# in $build's $PATH.
+#
+# ARG is the $build file name to be converted to w32 format.
+# Result is available in $func_convert_core_file_wine_to_w32_result, and will
+# be empty on error (or when ARG is empty)
+func_convert_core_file_wine_to_w32 ()
+{
+ $opt_debug
+ func_convert_core_file_wine_to_w32_result="$1"
+ if test -n "$1"; then
+ # Unfortunately, winepath does not exit with a non-zero error code, so we
+ # are forced to check the contents of stdout. On the other hand, if the
+ # command is not found, the shell will set an exit code of 127 and print
+ # *an error message* to stdout. So we must check for both error code of
+ # zero AND non-empty stdout, which explains the odd construction:
+ func_convert_core_file_wine_to_w32_tmp=`winepath -w "$1" 2>/dev/null`
+ if test "$?" -eq 0 && test -n "${func_convert_core_file_wine_to_w32_tmp}"; then
+ func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" |
+ $SED -e "$lt_sed_naive_backslashify"`
+ else
+ func_convert_core_file_wine_to_w32_result=
+ fi
+ fi
+}
+# end: func_convert_core_file_wine_to_w32
+
+
+# func_convert_core_path_wine_to_w32 ARG
+# Helper function used by path conversion functions when $build is *nix, and
+# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly
+# configured wine environment available, with the winepath program in $build's
+# $PATH. Assumes ARG has no leading or trailing path separator characters.
+#
+# ARG is path to be converted from $build format to win32.
+# Result is available in $func_convert_core_path_wine_to_w32_result.
+# Unconvertible file (directory) names in ARG are skipped; if no directory names
+# are convertible, then the result may be empty.
+func_convert_core_path_wine_to_w32 ()
+{
+ $opt_debug
+ # unfortunately, winepath doesn't convert paths, only file names
+ func_convert_core_path_wine_to_w32_result=""
+ if test -n "$1"; then
+ oldIFS=$IFS
+ IFS=:
+ for func_convert_core_path_wine_to_w32_f in $1; do
+ IFS=$oldIFS
+ func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f"
+ if test -n "$func_convert_core_file_wine_to_w32_result" ; then
+ if test -z "$func_convert_core_path_wine_to_w32_result"; then
+ func_convert_core_path_wine_to_w32_result="$func_convert_core_file_wine_to_w32_result"
+ else
+ func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result"
+ fi
+ fi
+ done
+ IFS=$oldIFS
+ fi
+}
+# end: func_convert_core_path_wine_to_w32
+
+
+# func_cygpath ARGS...
+# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when
+# (1) $build is *nix and Cygwin is hosted via a wine environment; or (2)
+# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or
+# (2), returns the Cygwin file name or path in func_cygpath_result (input
+# file name or path is assumed to be in w32 format, as previously converted
+# from $build's *nix or MSYS format). In case (3), returns the w32 file name
+# or path in func_cygpath_result (input file name or path is assumed to be in
+# Cygwin format). Returns an empty string on error.
+#
+# ARGS are passed to cygpath, with the last one being the file name or path to
+# be converted.
+#
+# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH
+# environment variable; do not put it in $PATH.
+func_cygpath ()
+{
+ $opt_debug
+ if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then
+ func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null`
+ if test "$?" -ne 0; then
+ # on failure, ensure result is empty
+ func_cygpath_result=
+ fi
+ else
+ func_cygpath_result=
+ func_error "LT_CYGPATH is empty or specifies non-existent file: \`$LT_CYGPATH'"
+ fi
+}
+#end: func_cygpath
+
+
+# func_convert_core_msys_to_w32 ARG
+# Convert file name or path ARG from MSYS format to w32 format. Return
+# result in func_convert_core_msys_to_w32_result.
+func_convert_core_msys_to_w32 ()
+{
+ $opt_debug
+ # awkward: cmd appends spaces to result
+ func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null |
+ $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"`
+}
+#end: func_convert_core_msys_to_w32
+
+
+# func_convert_file_check ARG1 ARG2
+# Verify that ARG1 (a file name in $build format) was converted to $host
+# format in ARG2. Otherwise, emit an error message, but continue (resetting
+# func_to_host_file_result to ARG1).
+func_convert_file_check ()
+{
+ $opt_debug
+ if test -z "$2" && test -n "$1" ; then
+ func_error "Could not determine host file name corresponding to"
+ func_error " \`$1'"
+ func_error "Continuing, but uninstalled executables may not work."
+ # Fallback:
+ func_to_host_file_result="$1"
+ fi
+}
+# end func_convert_file_check
+
+
+# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH
+# Verify that FROM_PATH (a path in $build format) was converted to $host
+# format in TO_PATH. Otherwise, emit an error message, but continue, resetting
+# func_to_host_file_result to a simplistic fallback value (see below).
+func_convert_path_check ()
+{
+ $opt_debug
+ if test -z "$4" && test -n "$3"; then
+ func_error "Could not determine the host path corresponding to"
+ func_error " \`$3'"
+ func_error "Continuing, but uninstalled executables may not work."
+ # Fallback. This is a deliberately simplistic "conversion" and
+ # should not be "improved". See libtool.info.
+ if test "x$1" != "x$2"; then
+ lt_replace_pathsep_chars="s|$1|$2|g"
+ func_to_host_path_result=`echo "$3" |
+ $SED -e "$lt_replace_pathsep_chars"`
+ else
+ func_to_host_path_result="$3"
+ fi
+ fi
+}
+# end func_convert_path_check
+
+
+# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG
+# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT
+# and appending REPL if ORIG matches BACKPAT.
+func_convert_path_front_back_pathsep ()
+{
+ $opt_debug
+ case $4 in
+ $1 ) func_to_host_path_result="$3$func_to_host_path_result"
+ ;;
+ esac
+ case $4 in
+ $2 ) func_append func_to_host_path_result "$3"
+ ;;
+ esac
+}
+# end func_convert_path_front_back_pathsep
+
+
+##################################################
+# $build to $host FILE NAME CONVERSION FUNCTIONS #
+##################################################
+# invoked via `$to_host_file_cmd ARG'
+#
+# In each case, ARG is the path to be converted from $build to $host format.
+# Result will be available in $func_to_host_file_result.
+
+
+# func_to_host_file ARG
+# Converts the file name ARG from $build format to $host format. Return result
+# in func_to_host_file_result.
+func_to_host_file ()
+{
+ $opt_debug
+ $to_host_file_cmd "$1"
+}
+# end func_to_host_file
+
+
+# func_to_tool_file ARG LAZY
+# converts the file name ARG from $build format to toolchain format. Return
+# result in func_to_tool_file_result. If the conversion in use is listed
+# in (the comma separated) LAZY, no conversion takes place.
+func_to_tool_file ()
+{
+ $opt_debug
+ case ,$2, in
+ *,"$to_tool_file_cmd",*)
+ func_to_tool_file_result=$1
+ ;;
+ *)
+ $to_tool_file_cmd "$1"
+ func_to_tool_file_result=$func_to_host_file_result
+ ;;
+ esac
+}
+# end func_to_tool_file
+
+
+# func_convert_file_noop ARG
+# Copy ARG to func_to_host_file_result.
+func_convert_file_noop ()
+{
+ func_to_host_file_result="$1"
+}
+# end func_convert_file_noop
+
+
+# func_convert_file_msys_to_w32 ARG
+# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic
+# conversion to w32 is not available inside the cwrapper. Returns result in
+# func_to_host_file_result.
+func_convert_file_msys_to_w32 ()
+{
+ $opt_debug
+ func_to_host_file_result="$1"
+ if test -n "$1"; then
+ func_convert_core_msys_to_w32 "$1"
+ func_to_host_file_result="$func_convert_core_msys_to_w32_result"
+ fi
+ func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_msys_to_w32
+
+
+# func_convert_file_cygwin_to_w32 ARG
+# Convert file name ARG from Cygwin to w32 format. Returns result in
+# func_to_host_file_result.
+func_convert_file_cygwin_to_w32 ()
+{
+ $opt_debug
+ func_to_host_file_result="$1"
+ if test -n "$1"; then
+ # because $build is cygwin, we call "the" cygpath in $PATH; no need to use
+ # LT_CYGPATH in this case.
+ func_to_host_file_result=`cygpath -m "$1"`
+ fi
+ func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_cygwin_to_w32
+
+
+# func_convert_file_nix_to_w32 ARG
+# Convert file name ARG from *nix to w32 format. Requires a wine environment
+# and a working winepath. Returns result in func_to_host_file_result.
+func_convert_file_nix_to_w32 ()
+{
+ $opt_debug
+ func_to_host_file_result="$1"
+ if test -n "$1"; then
+ func_convert_core_file_wine_to_w32 "$1"
+ func_to_host_file_result="$func_convert_core_file_wine_to_w32_result"
+ fi
+ func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_nix_to_w32
+
+
+# func_convert_file_msys_to_cygwin ARG
+# Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set.
+# Returns result in func_to_host_file_result.
+func_convert_file_msys_to_cygwin ()
+{
+ $opt_debug
+ func_to_host_file_result="$1"
+ if test -n "$1"; then
+ func_convert_core_msys_to_w32 "$1"
+ func_cygpath -u "$func_convert_core_msys_to_w32_result"
+ func_to_host_file_result="$func_cygpath_result"
+ fi
+ func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_msys_to_cygwin
+
+
+# func_convert_file_nix_to_cygwin ARG
+# Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed
+# in a wine environment, working winepath, and LT_CYGPATH set. Returns result
+# in func_to_host_file_result.
+func_convert_file_nix_to_cygwin ()
+{
+ $opt_debug
+ func_to_host_file_result="$1"
+ if test -n "$1"; then
+ # convert from *nix to w32, then use cygpath to convert from w32 to cygwin.
+ func_convert_core_file_wine_to_w32 "$1"
+ func_cygpath -u "$func_convert_core_file_wine_to_w32_result"
+ func_to_host_file_result="$func_cygpath_result"
+ fi
+ func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_nix_to_cygwin
+
+
+#############################################
+# $build to $host PATH CONVERSION FUNCTIONS #
+#############################################
+# invoked via `$to_host_path_cmd ARG'
+#
+# In each case, ARG is the path to be converted from $build to $host format.
+# The result will be available in $func_to_host_path_result.
+#
+# Path separators are also converted from $build format to $host format. If
+# ARG begins or ends with a path separator character, it is preserved (but
+# converted to $host format) on output.
+#
+# All path conversion functions are named using the following convention:
+# file name conversion function : func_convert_file_X_to_Y ()
+# path conversion function : func_convert_path_X_to_Y ()
+# where, for any given $build/$host combination the 'X_to_Y' value is the
+# same. If conversion functions are added for new $build/$host combinations,
+# the two new functions must follow this pattern, or func_init_to_host_path_cmd
+# will break.
+
+
+# func_init_to_host_path_cmd
+# Ensures that function "pointer" variable $to_host_path_cmd is set to the
+# appropriate value, based on the value of $to_host_file_cmd.
+to_host_path_cmd=
+func_init_to_host_path_cmd ()
+{
+ $opt_debug
+ if test -z "$to_host_path_cmd"; then
+ func_stripname 'func_convert_file_' '' "$to_host_file_cmd"
+ to_host_path_cmd="func_convert_path_${func_stripname_result}"
+ fi
+}
+
+
+# func_to_host_path ARG
+# Converts the path ARG from $build format to $host format. Return result
+# in func_to_host_path_result.
+func_to_host_path ()
+{
+ $opt_debug
+ func_init_to_host_path_cmd
+ $to_host_path_cmd "$1"
+}
+# end func_to_host_path
+
+
+# func_convert_path_noop ARG
+# Copy ARG to func_to_host_path_result.
+func_convert_path_noop ()
+{
+ func_to_host_path_result="$1"
+}
+# end func_convert_path_noop
+
+
+# func_convert_path_msys_to_w32 ARG
+# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic
+# conversion to w32 is not available inside the cwrapper. Returns result in
+# func_to_host_path_result.
+func_convert_path_msys_to_w32 ()
+{
+ $opt_debug
+ func_to_host_path_result="$1"
+ if test -n "$1"; then
+ # Remove leading and trailing path separator characters from ARG. MSYS
+ # behavior is inconsistent here; cygpath turns them into '.;' and ';.';
+ # and winepath ignores them completely.
+ func_stripname : : "$1"
+ func_to_host_path_tmp1=$func_stripname_result
+ func_convert_core_msys_to_w32 "$func_to_host_path_tmp1"
+ func_to_host_path_result="$func_convert_core_msys_to_w32_result"
+ func_convert_path_check : ";" \
+ "$func_to_host_path_tmp1" "$func_to_host_path_result"
+ func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+ fi
+}
+# end func_convert_path_msys_to_w32
+
+
+# func_convert_path_cygwin_to_w32 ARG
+# Convert path ARG from Cygwin to w32 format. Returns result in
+# func_to_host_path_result.
+func_convert_path_cygwin_to_w32 ()
+{
+ $opt_debug
+ func_to_host_path_result="$1"
+ if test -n "$1"; then
+ # See func_convert_path_msys_to_w32:
+ func_stripname : : "$1"
+ func_to_host_path_tmp1=$func_stripname_result
+ func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"`
+ func_convert_path_check : ";" \
+ "$func_to_host_path_tmp1" "$func_to_host_path_result"
+ func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+ fi
+}
+# end func_convert_path_cygwin_to_w32
+
+
+# func_convert_path_nix_to_w32 ARG
+# Convert path ARG from *nix to w32 format. Requires a wine environment and
+# a working winepath. Returns result in func_to_host_path_result.
+func_convert_path_nix_to_w32 ()
+{
+ $opt_debug
+ func_to_host_path_result="$1"
+ if test -n "$1"; then
+ # See func_convert_path_msys_to_w32:
+ func_stripname : : "$1"
+ func_to_host_path_tmp1=$func_stripname_result
+ func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
+ func_to_host_path_result="$func_convert_core_path_wine_to_w32_result"
+ func_convert_path_check : ";" \
+ "$func_to_host_path_tmp1" "$func_to_host_path_result"
+ func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+ fi
+}
+# end func_convert_path_nix_to_w32
+
+
+# func_convert_path_msys_to_cygwin ARG
+# Convert path ARG from MSYS to Cygwin format. Requires LT_CYGPATH set.
+# Returns result in func_to_host_path_result.
+func_convert_path_msys_to_cygwin ()
+{
+ $opt_debug
+ func_to_host_path_result="$1"
+ if test -n "$1"; then
+ # See func_convert_path_msys_to_w32:
+ func_stripname : : "$1"
+ func_to_host_path_tmp1=$func_stripname_result
+ func_convert_core_msys_to_w32 "$func_to_host_path_tmp1"
+ func_cygpath -u -p "$func_convert_core_msys_to_w32_result"
+ func_to_host_path_result="$func_cygpath_result"
+ func_convert_path_check : : \
+ "$func_to_host_path_tmp1" "$func_to_host_path_result"
+ func_convert_path_front_back_pathsep ":*" "*:" : "$1"
+ fi
+}
+# end func_convert_path_msys_to_cygwin
+
+
+# func_convert_path_nix_to_cygwin ARG
+# Convert path ARG from *nix to Cygwin format. Requires Cygwin installed in a
+# wine environment, working winepath, and LT_CYGPATH set. Returns result in
+# func_to_host_path_result.
+func_convert_path_nix_to_cygwin ()
+{
+ $opt_debug
+ func_to_host_path_result="$1"
+ if test -n "$1"; then
+ # Remove leading and trailing path separator characters from
+ # ARG. msys behavior is inconsistent here, cygpath turns them
+ # into '.;' and ';.', and winepath ignores them completely.
+ func_stripname : : "$1"
+ func_to_host_path_tmp1=$func_stripname_result
+ func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
+ func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result"
+ func_to_host_path_result="$func_cygpath_result"
+ func_convert_path_check : : \
+ "$func_to_host_path_tmp1" "$func_to_host_path_result"
+ func_convert_path_front_back_pathsep ":*" "*:" : "$1"
+ fi
+}
+# end func_convert_path_nix_to_cygwin
+
+
# func_mode_compile arg...
func_mode_compile ()
{
@@ -1137,12 +1986,12 @@ func_mode_compile ()
;;
-pie | -fpie | -fPIE)
- pie_flag="$pie_flag $arg"
+ func_append pie_flag " $arg"
continue
;;
-shared | -static | -prefer-pic | -prefer-non-pic)
- later="$later $arg"
+ func_append later " $arg"
continue
;;
@@ -1163,15 +2012,14 @@ func_mode_compile ()
save_ifs="$IFS"; IFS=','
for arg in $args; do
IFS="$save_ifs"
- func_quote_for_eval "$arg"
- lastarg="$lastarg $func_quote_for_eval_result"
+ func_append_quoted lastarg "$arg"
done
IFS="$save_ifs"
func_stripname ' ' '' "$lastarg"
lastarg=$func_stripname_result
# Add the arguments to base_compile.
- base_compile="$base_compile $lastarg"
+ func_append base_compile " $lastarg"
continue
;;
@@ -1187,8 +2035,7 @@ func_mode_compile ()
esac # case $arg_mode
# Aesthetically quote the previous argument.
- func_quote_for_eval "$lastarg"
- base_compile="$base_compile $func_quote_for_eval_result"
+ func_append_quoted base_compile "$lastarg"
done # for arg
case $arg_mode in
@@ -1213,7 +2060,7 @@ func_mode_compile ()
*.[cCFSifmso] | \
*.ada | *.adb | *.ads | *.asm | \
*.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \
- *.[fF][09]? | *.for | *.java | *.obj | *.sx)
+ *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup)
func_xform "$libobj"
libobj=$func_xform_result
;;
@@ -1288,7 +2135,7 @@ func_mode_compile ()
# Calculate the filename of the output object if compiler does
# not support -o with -c
if test "$compiler_c_o" = no; then
- output_obj=`$ECHO "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext}
+ output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.${objext}
lockfile="$output_obj.lock"
else
output_obj=
@@ -1319,17 +2166,16 @@ compiler."
$opt_dry_run || $RM $removelist
exit $EXIT_FAILURE
fi
- removelist="$removelist $output_obj"
+ func_append removelist " $output_obj"
$ECHO "$srcfile" > "$lockfile"
fi
$opt_dry_run || $RM $removelist
- removelist="$removelist $lockfile"
+ func_append removelist " $lockfile"
trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15
- if test -n "$fix_srcfile_path"; then
- eval srcfile=\"$fix_srcfile_path\"
- fi
+ func_to_tool_file "$srcfile" func_convert_file_msys_to_w32
+ srcfile=$func_to_tool_file_result
func_quote_for_eval "$srcfile"
qsrcfile=$func_quote_for_eval_result
@@ -1349,7 +2195,7 @@ compiler."
if test -z "$output_obj"; then
# Place PIC objects in $objdir
- command="$command -o $lobj"
+ func_append command " -o $lobj"
fi
func_show_eval_locale "$command" \
@@ -1396,11 +2242,11 @@ compiler."
command="$base_compile $qsrcfile $pic_flag"
fi
if test "$compiler_c_o" = yes; then
- command="$command -o $obj"
+ func_append command " -o $obj"
fi
# Suppress compiler output if we already did a PIC compilation.
- command="$command$suppress_output"
+ func_append command "$suppress_output"
func_show_eval_locale "$command" \
'$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE'
@@ -1445,13 +2291,13 @@ compiler."
}
$opt_help || {
-test "$mode" = compile && func_mode_compile ${1+"$@"}
+ test "$opt_mode" = compile && func_mode_compile ${1+"$@"}
}
func_mode_help ()
{
# We need to display help for each of the modes.
- case $mode in
+ case $opt_mode in
"")
# Generic help is extracted from the usage comments
# at the start of this file.
@@ -1482,10 +2328,11 @@ This mode accepts the following additional options:
-o OUTPUT-FILE set the output file name to OUTPUT-FILE
-no-suppress do not suppress compiler output for multiple passes
- -prefer-pic try to building PIC objects only
- -prefer-non-pic try to building non-PIC objects only
+ -prefer-pic try to build PIC objects only
+ -prefer-non-pic try to build non-PIC objects only
-shared do not build a \`.o' file suitable for static linking
-static only build a \`.o' file suitable for static linking
+ -Wc,FLAG pass FLAG directly to the compiler
COMPILE-COMMAND is a command to be used in creating a \`standard' object file
from the given SOURCEFILE.
@@ -1538,7 +2385,7 @@ either the \`install' or \`cp' program.
The following components of INSTALL-COMMAND are treated specially:
- -inst-prefix PREFIX-DIR Use PREFIX-DIR as a staging area for installation
+ -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation
The rest of the components are interpreted as arguments to that command (only
BSD-compatible install options are recognized)."
@@ -1558,6 +2405,8 @@ The following components of LINK-COMMAND are treated specially:
-all-static do not do any dynamic linking at all
-avoid-version do not add a version suffix if possible
+ -bindir BINDIR specify path to binaries directory (for systems where
+ libraries must be found in the PATH setting at runtime)
-dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime
-dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols
-export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
@@ -1586,6 +2435,11 @@ The following components of LINK-COMMAND are treated specially:
-version-info CURRENT[:REVISION[:AGE]]
specify library version info [each variable defaults to 0]
-weak LIBNAME declare that the target provides the LIBNAME interface
+ -Wc,FLAG
+ -Xcompiler FLAG pass linker-specific FLAG directly to the compiler
+ -Wl,FLAG
+ -Xlinker FLAG pass linker-specific FLAG directly to the linker
+ -XCClinker FLAG pass link-specific FLAG to the compiler driver (CC)
All other options (arguments beginning with \`-') are ignored.
@@ -1619,18 +2473,44 @@ Otherwise, only FILE itself is deleted using RM."
;;
*)
- func_fatal_help "invalid operation mode \`$mode'"
+ func_fatal_help "invalid operation mode \`$opt_mode'"
;;
esac
- $ECHO
+ echo
$ECHO "Try \`$progname --help' for more information about other modes."
-
- exit $?
}
- # Now that we've collected a possible --mode arg, show help if necessary
- $opt_help && func_mode_help
+# Now that we've collected a possible --mode arg, show help if necessary
+if $opt_help; then
+ if test "$opt_help" = :; then
+ func_mode_help
+ else
+ {
+ func_help noexit
+ for opt_mode in compile link execute install finish uninstall clean; do
+ func_mode_help
+ done
+ } | sed -n '1p; 2,$s/^Usage:/ or: /p'
+ {
+ func_help noexit
+ for opt_mode in compile link execute install finish uninstall clean; do
+ echo
+ func_mode_help
+ done
+ } |
+ sed '1d
+ /^When reporting/,/^Report/{
+ H
+ d
+ }
+ $x
+ /information about other modes/d
+ /more detailed .*MODE/d
+ s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/'
+ fi
+ exit $?
+fi
# func_mode_execute arg...
@@ -1643,13 +2523,16 @@ func_mode_execute ()
func_fatal_help "you must specify a COMMAND"
# Handle -dlopen flags immediately.
- for file in $execute_dlfiles; do
+ for file in $opt_dlopen; do
test -f "$file" \
|| func_fatal_help "\`$file' is not a file"
dir=
case $file in
*.la)
+ func_resolve_sysroot "$file"
+ file=$func_resolve_sysroot_result
+
# Check to see that this really is a libtool archive.
func_lalib_unsafe_p "$file" \
|| func_fatal_help "\`$lib' is not a valid libtool archive"
@@ -1671,7 +2554,7 @@ func_mode_execute ()
dir="$func_dirname_result"
if test -f "$dir/$objdir/$dlname"; then
- dir="$dir/$objdir"
+ func_append dir "/$objdir"
else
if test ! -f "$dir/$dlname"; then
func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'"
@@ -1712,7 +2595,7 @@ func_mode_execute ()
for file
do
case $file in
- -*) ;;
+ -* | *.la | *.lo ) ;;
*)
# Do a test to see if this is really a libtool program.
if func_ltwrapper_script_p "$file"; then
@@ -1728,8 +2611,7 @@ func_mode_execute ()
;;
esac
# Quote arguments (to preserve shell metacharacters).
- func_quote_for_eval "$file"
- args="$args $func_quote_for_eval_result"
+ func_append_quoted args "$file"
done
if test "X$opt_dry_run" = Xfalse; then
@@ -1754,29 +2636,66 @@ func_mode_execute ()
# Display what would be done.
if test -n "$shlibpath_var"; then
eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\""
- $ECHO "export $shlibpath_var"
+ echo "export $shlibpath_var"
fi
$ECHO "$cmd$args"
exit $EXIT_SUCCESS
fi
}
-test "$mode" = execute && func_mode_execute ${1+"$@"}
+test "$opt_mode" = execute && func_mode_execute ${1+"$@"}
# func_mode_finish arg...
func_mode_finish ()
{
$opt_debug
- libdirs="$nonopt"
+ libs=
+ libdirs=
admincmds=
- if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
- for dir
- do
- libdirs="$libdirs $dir"
- done
+ for opt in "$nonopt" ${1+"$@"}
+ do
+ if test -d "$opt"; then
+ func_append libdirs " $opt"
+ elif test -f "$opt"; then
+ if func_lalib_unsafe_p "$opt"; then
+ func_append libs " $opt"
+ else
+ func_warning "\`$opt' is not a valid libtool archive"
+ fi
+
+ else
+ func_fatal_error "invalid argument \`$opt'"
+ fi
+ done
+
+ if test -n "$libs"; then
+ if test -n "$lt_sysroot"; then
+ sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"`
+ sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;"
+ else
+ sysroot_cmd=
+ fi
+
+ # Remove sysroot references
+ if $opt_dry_run; then
+ for lib in $libs; do
+ echo "removing references to $lt_sysroot and \`=' prefixes from $lib"
+ done
+ else
+ tmpdir=`func_mktempdir`
+ for lib in $libs; do
+ sed -e "${sysroot_cmd} s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \
+ > $tmpdir/tmp-la
+ mv -f $tmpdir/tmp-la $lib
+ done
+ ${RM}r "$tmpdir"
+ fi
+ fi
+
+ if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
for libdir in $libdirs; do
if test -n "$finish_cmds"; then
# Do each command in the finish commands.
@@ -1786,7 +2705,7 @@ func_mode_finish ()
if test -n "$finish_eval"; then
# Do the single finish_eval.
eval cmds=\"$finish_eval\"
- $opt_dry_run || eval "$cmds" || admincmds="$admincmds
+ $opt_dry_run || eval "$cmds" || func_append admincmds "
$cmds"
fi
done
@@ -1795,53 +2714,55 @@ func_mode_finish ()
# Exit here if they wanted silent mode.
$opt_silent && exit $EXIT_SUCCESS
- $ECHO "X----------------------------------------------------------------------" | $Xsed
- $ECHO "Libraries have been installed in:"
- for libdir in $libdirs; do
- $ECHO " $libdir"
- done
- $ECHO
- $ECHO "If you ever happen to want to link against installed libraries"
- $ECHO "in a given directory, LIBDIR, you must either use libtool, and"
- $ECHO "specify the full pathname of the library, or use the \`-LLIBDIR'"
- $ECHO "flag during linking and do at least one of the following:"
- if test -n "$shlibpath_var"; then
- $ECHO " - add LIBDIR to the \`$shlibpath_var' environment variable"
- $ECHO " during execution"
- fi
- if test -n "$runpath_var"; then
- $ECHO " - add LIBDIR to the \`$runpath_var' environment variable"
- $ECHO " during linking"
- fi
- if test -n "$hardcode_libdir_flag_spec"; then
- libdir=LIBDIR
- eval flag=\"$hardcode_libdir_flag_spec\"
+ if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
+ echo "----------------------------------------------------------------------"
+ echo "Libraries have been installed in:"
+ for libdir in $libdirs; do
+ $ECHO " $libdir"
+ done
+ echo
+ echo "If you ever happen to want to link against installed libraries"
+ echo "in a given directory, LIBDIR, you must either use libtool, and"
+ echo "specify the full pathname of the library, or use the \`-LLIBDIR'"
+ echo "flag during linking and do at least one of the following:"
+ if test -n "$shlibpath_var"; then
+ echo " - add LIBDIR to the \`$shlibpath_var' environment variable"
+ echo " during execution"
+ fi
+ if test -n "$runpath_var"; then
+ echo " - add LIBDIR to the \`$runpath_var' environment variable"
+ echo " during linking"
+ fi
+ if test -n "$hardcode_libdir_flag_spec"; then
+ libdir=LIBDIR
+ eval flag=\"$hardcode_libdir_flag_spec\"
- $ECHO " - use the \`$flag' linker flag"
- fi
- if test -n "$admincmds"; then
- $ECHO " - have your system administrator run these commands:$admincmds"
- fi
- if test -f /etc/ld.so.conf; then
- $ECHO " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'"
- fi
- $ECHO
+ $ECHO " - use the \`$flag' linker flag"
+ fi
+ if test -n "$admincmds"; then
+ $ECHO " - have your system administrator run these commands:$admincmds"
+ fi
+ if test -f /etc/ld.so.conf; then
+ echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'"
+ fi
+ echo
- $ECHO "See any operating system documentation about shared libraries for"
- case $host in
- solaris2.[6789]|solaris2.1[0-9])
- $ECHO "more information, such as the ld(1), crle(1) and ld.so(8) manual"
- $ECHO "pages."
- ;;
- *)
- $ECHO "more information, such as the ld(1) and ld.so(8) manual pages."
- ;;
- esac
- $ECHO "X----------------------------------------------------------------------" | $Xsed
+ echo "See any operating system documentation about shared libraries for"
+ case $host in
+ solaris2.[6789]|solaris2.1[0-9])
+ echo "more information, such as the ld(1), crle(1) and ld.so(8) manual"
+ echo "pages."
+ ;;
+ *)
+ echo "more information, such as the ld(1) and ld.so(8) manual pages."
+ ;;
+ esac
+ echo "----------------------------------------------------------------------"
+ fi
exit $EXIT_SUCCESS
}
-test "$mode" = finish && func_mode_finish ${1+"$@"}
+test "$opt_mode" = finish && func_mode_finish ${1+"$@"}
# func_mode_install arg...
@@ -1852,7 +2773,7 @@ func_mode_install ()
# install_prog (especially on Windows NT).
if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh ||
# Allow the use of GNU shtool's install command.
- $ECHO "X$nonopt" | $GREP shtool >/dev/null; then
+ case $nonopt in *shtool*) :;; *) false;; esac; then
# Aesthetically quote it.
func_quote_for_eval "$nonopt"
install_prog="$func_quote_for_eval_result "
@@ -1866,7 +2787,12 @@ func_mode_install ()
# The real first argument should be the name of the installation program.
# Aesthetically quote it.
func_quote_for_eval "$arg"
- install_prog="$install_prog$func_quote_for_eval_result"
+ func_append install_prog "$func_quote_for_eval_result"
+ install_shared_prog=$install_prog
+ case " $install_prog " in
+ *[\\\ /]cp\ *) install_cp=: ;;
+ *) install_cp=false ;;
+ esac
# We need to accept at least all the BSD install flags.
dest=
@@ -1876,10 +2802,12 @@ func_mode_install ()
install_type=
isdir=no
stripme=
+ no_mode=:
for arg
do
+ arg2=
if test -n "$dest"; then
- files="$files $dest"
+ func_append files " $dest"
dest=$arg
continue
fi
@@ -1887,10 +2815,9 @@ func_mode_install ()
case $arg in
-d) isdir=yes ;;
-f)
- case " $install_prog " in
- *[\\\ /]cp\ *) ;;
- *) prev=$arg ;;
- esac
+ if $install_cp; then :; else
+ prev=$arg
+ fi
;;
-g | -m | -o)
prev=$arg
@@ -1904,6 +2831,10 @@ func_mode_install ()
*)
# If the previous option needed an argument, then skip it.
if test -n "$prev"; then
+ if test "x$prev" = x-m && test -n "$install_override_mode"; then
+ arg2=$install_override_mode
+ no_mode=false
+ fi
prev=
else
dest=$arg
@@ -1914,7 +2845,11 @@ func_mode_install ()
# Aesthetically quote the argument.
func_quote_for_eval "$arg"
- install_prog="$install_prog $func_quote_for_eval_result"
+ func_append install_prog " $func_quote_for_eval_result"
+ if test -n "$arg2"; then
+ func_quote_for_eval "$arg2"
+ fi
+ func_append install_shared_prog " $func_quote_for_eval_result"
done
test -z "$install_prog" && \
@@ -1923,6 +2858,13 @@ func_mode_install ()
test -n "$prev" && \
func_fatal_help "the \`$prev' option requires an argument"
+ if test -n "$install_override_mode" && $no_mode; then
+ if $install_cp; then :; else
+ func_quote_for_eval "$install_override_mode"
+ func_append install_shared_prog " -m $func_quote_for_eval_result"
+ fi
+ fi
+
if test -z "$files"; then
if test -z "$dest"; then
func_fatal_help "no file or destination specified"
@@ -1977,10 +2919,13 @@ func_mode_install ()
case $file in
*.$libext)
# Do the static libraries later.
- staticlibs="$staticlibs $file"
+ func_append staticlibs " $file"
;;
*.la)
+ func_resolve_sysroot "$file"
+ file=$func_resolve_sysroot_result
+
# Check to see that this really is a libtool archive.
func_lalib_unsafe_p "$file" \
|| func_fatal_help "\`$file' is not a valid libtool archive"
@@ -1994,23 +2939,23 @@ func_mode_install ()
if test "X$destdir" = "X$libdir"; then
case "$current_libdirs " in
*" $libdir "*) ;;
- *) current_libdirs="$current_libdirs $libdir" ;;
+ *) func_append current_libdirs " $libdir" ;;
esac
else
# Note the libdir as a future libdir.
case "$future_libdirs " in
*" $libdir "*) ;;
- *) future_libdirs="$future_libdirs $libdir" ;;
+ *) func_append future_libdirs " $libdir" ;;
esac
fi
func_dirname "$file" "/" ""
dir="$func_dirname_result"
- dir="$dir$objdir"
+ func_append dir "$objdir"
if test -n "$relink_command"; then
# Determine the prefix the user has applied to our future dir.
- inst_prefix_dir=`$ECHO "X$destdir" | $Xsed -e "s%$libdir\$%%"`
+ inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"`
# Don't allow the user to place us outside of our expected
# location b/c this prevents finding dependent libraries that
@@ -2023,9 +2968,9 @@ func_mode_install ()
if test -n "$inst_prefix_dir"; then
# Stick the inst_prefix_dir data into the link command.
- relink_command=`$ECHO "X$relink_command" | $Xsed -e "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"`
+ relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"`
else
- relink_command=`$ECHO "X$relink_command" | $Xsed -e "s%@inst_prefix_dir@%%"`
+ relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"`
fi
func_warning "relinking \`$file'"
@@ -2043,7 +2988,7 @@ func_mode_install ()
test -n "$relink_command" && srcname="$realname"T
# Install the shared library and build the symlinks.
- func_show_eval "$install_prog $dir/$srcname $destdir/$realname" \
+ func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \
'exit $?'
tstripme="$stripme"
case $host_os in
@@ -2083,7 +3028,7 @@ func_mode_install ()
func_show_eval "$install_prog $instname $destdir/$name" 'exit $?'
# Maybe install the static library, too.
- test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library"
+ test -n "$old_library" && func_append staticlibs " $dir/$old_library"
;;
*.lo)
@@ -2183,7 +3128,7 @@ func_mode_install ()
if test -f "$lib"; then
func_source "$lib"
fi
- libfile="$libdir/"`$ECHO "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test
+ libfile="$libdir/"`$ECHO "$lib" | $SED 's%^.*/%%g'` ### testsuite: skip nested quoting test
if test -n "$libdir" && test ! -f "$libfile"; then
func_warning "\`$lib' has not been installed in \`$libdir'"
finalize=no
@@ -2202,7 +3147,7 @@ func_mode_install ()
file="$func_basename_result"
outputname="$tmpdir/$file"
# Replace the output file specification.
- relink_command=`$ECHO "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'`
+ relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'`
$opt_silent || {
func_quote_for_expand "$relink_command"
@@ -2221,7 +3166,7 @@ func_mode_install ()
}
else
# Install the binary that we compiled earlier.
- file=`$ECHO "X$file$stripped_ext" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"`
+ file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"`
fi
fi
@@ -2257,11 +3202,13 @@ func_mode_install ()
# Set up the ranlib parameters.
oldlib="$destdir/$name"
+ func_to_tool_file "$oldlib" func_convert_file_msys_to_w32
+ tool_oldlib=$func_to_tool_file_result
func_show_eval "$install_prog \$file \$oldlib" 'exit $?'
if test -n "$stripme" && test -n "$old_striplib"; then
- func_show_eval "$old_striplib $oldlib" 'exit $?'
+ func_show_eval "$old_striplib $tool_oldlib" 'exit $?'
fi
# Do each command in the postinstall commands.
@@ -2280,7 +3227,7 @@ func_mode_install ()
fi
}
-test "$mode" = install && func_mode_install ${1+"$@"}
+test "$opt_mode" = install && func_mode_install ${1+"$@"}
# func_generate_dlsyms outputname originator pic_p
@@ -2323,6 +3270,22 @@ func_generate_dlsyms ()
extern \"C\" {
#endif
+#if defined(__GNUC__) && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4))
+#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
+#endif
+
+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */
+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE)
+/* DATA imports from DLLs on WIN32 can't be const, because runtime
+ relocations are performed -- see ld's documentation on pseudo-relocs. */
+# define LT_DLSYM_CONST
+#elif defined(__osf__)
+/* This system does not cope well with relocations in const data. */
+# define LT_DLSYM_CONST
+#else
+# define LT_DLSYM_CONST const
+#endif
+
/* External symbol declarations for the compiler. */\
"
@@ -2332,10 +3295,11 @@ extern \"C\" {
$opt_dry_run || echo ': @PROGRAM@ ' > "$nlist"
# Add our own program objects to the symbol list.
- progfiles=`$ECHO "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP`
for progfile in $progfiles; do
- func_verbose "extracting global C symbols from \`$progfile'"
- $opt_dry_run || eval "$NM $progfile | $global_symbol_pipe >> '$nlist'"
+ func_to_tool_file "$progfile" func_convert_file_msys_to_w32
+ func_verbose "extracting global C symbols from \`$func_to_tool_file_result'"
+ $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'"
done
if test -n "$exclude_expsyms"; then
@@ -2371,7 +3335,7 @@ extern \"C\" {
eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T'
eval '$MV "$nlist"T "$nlist"'
case $host in
- *cygwin | *mingw* | *cegcc* )
+ *cygwin* | *mingw* | *cegcc* )
eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
eval 'cat "$nlist" >> "$output_objdir/$outputname.def"'
;;
@@ -2384,10 +3348,52 @@ extern \"C\" {
func_verbose "extracting global C symbols from \`$dlprefile'"
func_basename "$dlprefile"
name="$func_basename_result"
- $opt_dry_run || {
- eval '$ECHO ": $name " >> "$nlist"'
- eval "$NM $dlprefile 2>/dev/null | $global_symbol_pipe >> '$nlist'"
- }
+ case $host in
+ *cygwin* | *mingw* | *cegcc* )
+ # if an import library, we need to obtain dlname
+ if func_win32_import_lib_p "$dlprefile"; then
+ func_tr_sh "$dlprefile"
+ eval "curr_lafile=\$libfile_$func_tr_sh_result"
+ dlprefile_dlbasename=""
+ if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then
+ # Use subshell, to avoid clobbering current variable values
+ dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"`
+ if test -n "$dlprefile_dlname" ; then
+ func_basename "$dlprefile_dlname"
+ dlprefile_dlbasename="$func_basename_result"
+ else
+ # no lafile. user explicitly requested -dlpreopen <import library>.
+ $sharedlib_from_linklib_cmd "$dlprefile"
+ dlprefile_dlbasename=$sharedlib_from_linklib_result
+ fi
+ fi
+ $opt_dry_run || {
+ if test -n "$dlprefile_dlbasename" ; then
+ eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"'
+ else
+ func_warning "Could not compute DLL name from $name"
+ eval '$ECHO ": $name " >> "$nlist"'
+ fi
+ func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+ eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe |
+ $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'"
+ }
+ else # not an import lib
+ $opt_dry_run || {
+ eval '$ECHO ": $name " >> "$nlist"'
+ func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+ eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'"
+ }
+ fi
+ ;;
+ *)
+ $opt_dry_run || {
+ eval '$ECHO ": $name " >> "$nlist"'
+ func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+ eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'"
+ }
+ ;;
+ esac
done
$opt_dry_run || {
@@ -2415,36 +3421,19 @@ extern \"C\" {
if test -f "$nlist"S; then
eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"'
else
- $ECHO '/* NONE */' >> "$output_objdir/$my_dlsyms"
+ echo '/* NONE */' >> "$output_objdir/$my_dlsyms"
fi
- $ECHO >> "$output_objdir/$my_dlsyms" "\
+ echo >> "$output_objdir/$my_dlsyms" "\
/* The mapping between symbol names and symbols. */
typedef struct {
const char *name;
void *address;
} lt_dlsymlist;
-"
- case $host in
- *cygwin* | *mingw* | *cegcc* )
- $ECHO >> "$output_objdir/$my_dlsyms" "\
-/* DATA imports from DLLs on WIN32 con't be const, because
- runtime relocations are performed -- see ld's documentation
- on pseudo-relocs. */"
- lt_dlsym_const= ;;
- *osf5*)
- echo >> "$output_objdir/$my_dlsyms" "\
-/* This system does not cope well with relocations in const data */"
- lt_dlsym_const= ;;
- *)
- lt_dlsym_const=const ;;
- esac
-
- $ECHO >> "$output_objdir/$my_dlsyms" "\
-extern $lt_dlsym_const lt_dlsymlist
+extern LT_DLSYM_CONST lt_dlsymlist
lt_${my_prefix}_LTX_preloaded_symbols[];
-$lt_dlsym_const lt_dlsymlist
+LT_DLSYM_CONST lt_dlsymlist
lt_${my_prefix}_LTX_preloaded_symbols[] =
{\
{ \"$my_originator\", (void *) 0 },"
@@ -2457,7 +3446,7 @@ lt_${my_prefix}_LTX_preloaded_symbols[] =
eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms"
;;
esac
- $ECHO >> "$output_objdir/$my_dlsyms" "\
+ echo >> "$output_objdir/$my_dlsyms" "\
{0, (void *) 0}
};
@@ -2484,7 +3473,7 @@ static const void *lt_preloaded_setup() {
# linked before any other PIC object. But we must not use
# pic_flag when linking with -static. The problem exists in
# FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
- *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
+ *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;;
*-*-hpux*)
pic_flag_for_symtable=" $pic_flag" ;;
@@ -2500,7 +3489,7 @@ static const void *lt_preloaded_setup() {
for arg in $LTCFLAGS; do
case $arg in
-pie | -fpie | -fPIE) ;;
- *) symtab_cflags="$symtab_cflags $arg" ;;
+ *) func_append symtab_cflags " $arg" ;;
esac
done
@@ -2515,16 +3504,16 @@ static const void *lt_preloaded_setup() {
case $host in
*cygwin* | *mingw* | *cegcc* )
if test -f "$output_objdir/$my_outputname.def"; then
- compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
- finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
+ compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
+ finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
else
- compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"`
- finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"`
+ compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+ finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"`
fi
;;
*)
- compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"`
- finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"`
+ compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+ finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"`
;;
esac
;;
@@ -2538,8 +3527,8 @@ static const void *lt_preloaded_setup() {
# really was required.
# Nullify the symbol file.
- compile_command=`$ECHO "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"`
- finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"`
+ compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"`
+ finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"`
fi
}
@@ -2549,6 +3538,7 @@ static const void *lt_preloaded_setup() {
# Need a lot of goo to handle *both* DLLs and import libs
# Has to be a shell function in order to 'eat' the argument
# that is supplied when $file_magic_command is called.
+# Despite the name, this also deals with 64-bit binaries.
func_win32_libid ()
{
$opt_debug
@@ -2559,9 +3549,11 @@ func_win32_libid ()
win32_libid_type="x86 archive import"
;;
*ar\ archive*) # could be an import, or static
+ # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD.
if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null |
- $EGREP 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then
- win32_nmres=`eval $NM -f posix -A $1 |
+ $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then
+ func_to_tool_file "$1" func_convert_file_msys_to_w32
+ win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" |
$SED -n -e '
1,100{
/ I /{
@@ -2590,6 +3582,131 @@ func_win32_libid ()
$ECHO "$win32_libid_type"
}
+# func_cygming_dll_for_implib ARG
+#
+# Platform-specific function to extract the
+# name of the DLL associated with the specified
+# import library ARG.
+# Invoked by eval'ing the libtool variable
+# $sharedlib_from_linklib_cmd
+# Result is available in the variable
+# $sharedlib_from_linklib_result
+func_cygming_dll_for_implib ()
+{
+ $opt_debug
+ sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"`
+}
+
+# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs
+#
+# This is the core of a fallback implementation of a
+# platform-specific function to extract the name of the
+# DLL associated with the specified import library LIBNAME.
+#
+# SECTION_NAME is either .idata$6 or .idata$7, depending
+# on the platform and compiler that created the implib.
+#
+# Echoes the name of the DLL associated with the
+# specified import library.
+func_cygming_dll_for_implib_fallback_core ()
+{
+ $opt_debug
+ match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"`
+ $OBJDUMP -s --section "$1" "$2" 2>/dev/null |
+ $SED '/^Contents of section '"$match_literal"':/{
+ # Place marker at beginning of archive member dllname section
+ s/.*/====MARK====/
+ p
+ d
+ }
+ # These lines can sometimes be longer than 43 characters, but
+ # are always uninteresting
+ /:[ ]*file format pe[i]\{,1\}-/d
+ /^In archive [^:]*:/d
+ # Ensure marker is printed
+ /^====MARK====/p
+ # Remove all lines with less than 43 characters
+ /^.\{43\}/!d
+ # From remaining lines, remove first 43 characters
+ s/^.\{43\}//' |
+ $SED -n '
+ # Join marker and all lines until next marker into a single line
+ /^====MARK====/ b para
+ H
+ $ b para
+ b
+ :para
+ x
+ s/\n//g
+ # Remove the marker
+ s/^====MARK====//
+ # Remove trailing dots and whitespace
+ s/[\. \t]*$//
+ # Print
+ /./p' |
+ # we now have a list, one entry per line, of the stringified
+ # contents of the appropriate section of all members of the
+ # archive which possess that section. Heuristic: eliminate
+ # all those which have a first or second character that is
+ # a '.' (that is, objdump's representation of an unprintable
+ # character.) This should work for all archives with less than
+ # 0x302f exports -- but will fail for DLLs whose name actually
+ # begins with a literal '.' or a single character followed by
+ # a '.'.
+ #
+ # Of those that remain, print the first one.
+ $SED -e '/^\./d;/^.\./d;q'
+}
+
+# func_cygming_gnu_implib_p ARG
+# This predicate returns with zero status (TRUE) if
+# ARG is a GNU/binutils-style import library. Returns
+# with nonzero status (FALSE) otherwise.
+func_cygming_gnu_implib_p ()
+{
+ $opt_debug
+ func_to_tool_file "$1" func_convert_file_msys_to_w32
+ func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'`
+ test -n "$func_cygming_gnu_implib_tmp"
+}
+
+# func_cygming_ms_implib_p ARG
+# This predicate returns with zero status (TRUE) if
+# ARG is an MS-style import library. Returns
+# with nonzero status (FALSE) otherwise.
+func_cygming_ms_implib_p ()
+{
+ $opt_debug
+ func_to_tool_file "$1" func_convert_file_msys_to_w32
+ func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'`
+ test -n "$func_cygming_ms_implib_tmp"
+}
+
+# func_cygming_dll_for_implib_fallback ARG
+# Platform-specific function to extract the
+# name of the DLL associated with the specified
+# import library ARG.
+#
+# This fallback implementation is for use when $DLLTOOL
+# does not support the --identify-strict option.
+# Invoked by eval'ing the libtool variable
+# $sharedlib_from_linklib_cmd
+# Result is available in the variable
+# $sharedlib_from_linklib_result
+func_cygming_dll_for_implib_fallback ()
+{
+ $opt_debug
+ if func_cygming_gnu_implib_p "$1" ; then
+ # binutils import library
+ sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"`
+ elif func_cygming_ms_implib_p "$1" ; then
+ # ms-generated import library
+ sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"`
+ else
+ # unknown
+ sharedlib_from_linklib_result=""
+ fi
+}
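(Editor's note, illustrative only.) The two implementations above are alternative back ends for the same libtool hook: func_cygming_dll_for_implib is used when $DLLTOOL supports --identify-strict, and func_cygming_dll_for_implib_fallback otherwise. A minimal sketch of the calling convention, with a hypothetical import library name:

    sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback
    $sharedlib_from_linklib_cmd "libfoo.dll.a"      # hypothetical implib
    echo "$sharedlib_from_linklib_result"           # e.g. cygfoo-1.dll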
# func_extract_an_archive dir oldlib
@@ -2598,7 +3715,18 @@ func_extract_an_archive ()
$opt_debug
f_ex_an_ar_dir="$1"; shift
f_ex_an_ar_oldlib="$1"
- func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" 'exit $?'
+ if test "$lock_old_archive_extraction" = yes; then
+ lockfile=$f_ex_an_ar_oldlib.lock
+ until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do
+ func_echo "Waiting for $lockfile to be removed"
+ sleep 2
+ done
+ fi
+ func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \
+ 'stat=$?; rm -f "$lockfile"; exit $stat'
+ if test "$lock_old_archive_extraction" = yes; then
+ $opt_dry_run || rm -f "$lockfile"
+ fi
if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then
:
else
@@ -2669,7 +3797,7 @@ func_extract_archives ()
darwin_file=
darwin_files=
for darwin_file in $darwin_filelist; do
- darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP`
+ darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP`
$LIPO -create -output "$darwin_file" $darwin_files
done # $darwin_filelist
$RM -rf unfat-$$
@@ -2684,25 +3812,30 @@ func_extract_archives ()
func_extract_an_archive "$my_xdir" "$my_xabs"
;;
esac
- my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP`
+ my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP`
done
func_extract_archives_result="$my_oldobjs"
}
-
-# func_emit_wrapper_part1 [arg=no]
+# func_emit_wrapper [arg=no]
#
-# Emit the first part of a libtool wrapper script on stdout.
-# For more information, see the description associated with
-# func_emit_wrapper(), below.
-func_emit_wrapper_part1 ()
+# Emit a libtool wrapper script on stdout.
+# Don't directly open a file because we may want to
+# incorporate the script contents within a cygwin/mingw
+# wrapper executable. Must ONLY be called from within
+# func_mode_link because it depends on a number of variables
+# set therein.
+#
+# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR
+# variable will take. If 'yes', then the emitted script
+# will assume that the directory in which it is stored is
+# the $objdir directory. This is a cygwin/mingw-specific
+# behavior.
+func_emit_wrapper ()
{
- func_emit_wrapper_part1_arg1=no
- if test -n "$1" ; then
- func_emit_wrapper_part1_arg1=$1
- fi
+ func_emit_wrapper_arg1=${1-no}
$ECHO "\
#! $SHELL
@@ -2718,7 +3851,6 @@ func_emit_wrapper_part1 ()
# Sed substitution that helps us do robust quoting. It backslashifies
# metacharacters that are still active within double-quoted strings.
-Xsed='${SED} -e 1s/^X//'
sed_quote_subst='$sed_quote_subst'
# Be Bourne compatible
@@ -2749,31 +3881,135 @@ if test \"\$libtool_install_magic\" = \"$magic\"; then
else
# When we are sourced in execute mode, \$file and \$ECHO are already set.
if test \"\$libtool_execute_magic\" != \"$magic\"; then
- ECHO=\"$qecho\"
- file=\"\$0\"
- # Make sure echo works.
- if test \"X\$1\" = X--no-reexec; then
- # Discard the --no-reexec flag, and continue.
- shift
- elif test \"X\`{ \$ECHO '\t'; } 2>/dev/null\`\" = 'X\t'; then
- # Yippee, \$ECHO works!
- :
- else
- # Restart under the correct shell, and then maybe \$ECHO will work.
- exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"}
- fi
- fi\
+ file=\"\$0\""
+
+ qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"`
+ $ECHO "\
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+ eval 'cat <<_LTECHO_EOF
+\$1
+_LTECHO_EOF'
+}
+ ECHO=\"$qECHO\"
+ fi
+
+# Very basic option parsing. These options are (a) specific to
+# the libtool wrapper, (b) are identical between the wrapper
+# /script/ and the wrapper /executable/ which is used only on
+# windows platforms, and (c) all begin with the string "--lt-"
+# (application programs are unlikely to have options which match
+# this pattern).
+#
+# There are only two supported options: --lt-debug and
+# --lt-dump-script. There is, deliberately, no --lt-help.
+#
+# The first argument to this parsing function should be the
+# script's $0 value, followed by "$@".
+lt_option_debug=
+func_parse_lt_options ()
+{
+ lt_script_arg0=\$0
+ shift
+ for lt_opt
+ do
+ case \"\$lt_opt\" in
+ --lt-debug) lt_option_debug=1 ;;
+ --lt-dump-script)
+ lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\`
+ test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=.
+ lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\`
+ cat \"\$lt_dump_D/\$lt_dump_F\"
+ exit 0
+ ;;
+ --lt-*)
+ \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2
+ exit 1
+ ;;
+ esac
+ done
+
+ # Print the debug banner immediately:
+ if test -n \"\$lt_option_debug\"; then
+ echo \"${outputname}:${output}:\${LINENO}: libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\" 1>&2
+ fi
+}
+
+# Used when --lt-debug. Prints its arguments to stdout
+# (redirection is the responsibility of the caller)
+func_lt_dump_args ()
+{
+ lt_dump_args_N=1;
+ for lt_arg
+ do
+ \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[\$lt_dump_args_N]: \$lt_arg\"
+ lt_dump_args_N=\`expr \$lt_dump_args_N + 1\`
+ done
+}
+
+# Core function for launching the target application
+func_exec_program_core ()
+{
"
- $ECHO "\
+ case $host in
+ # Backslashes separate directories on plain windows
+ *-*-mingw | *-*-os2* | *-cegcc*)
+ $ECHO "\
+ if test -n \"\$lt_option_debug\"; then
+ \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir\\\\\$program\" 1>&2
+ func_lt_dump_args \${1+\"\$@\"} 1>&2
+ fi
+ exec \"\$progdir\\\\\$program\" \${1+\"\$@\"}
+"
+ ;;
+
+ *)
+ $ECHO "\
+ if test -n \"\$lt_option_debug\"; then
+ \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir/\$program\" 1>&2
+ func_lt_dump_args \${1+\"\$@\"} 1>&2
+ fi
+ exec \"\$progdir/\$program\" \${1+\"\$@\"}
+"
+ ;;
+ esac
+ $ECHO "\
+ \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2
+ exit 1
+}
+
+# A function to encapsulate launching the target application
+# Strips options in the --lt-* namespace from \$@ and
+# launches target application with the remaining arguments.
+func_exec_program ()
+{
+ case \" \$* \" in
+ *\\ --lt-*)
+ for lt_wr_arg
+ do
+ case \$lt_wr_arg in
+ --lt-*) ;;
+ *) set x \"\$@\" \"\$lt_wr_arg\"; shift;;
+ esac
+ shift
+ done ;;
+ esac
+ func_exec_program_core \${1+\"\$@\"}
+}
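(Editor's note, illustrative only.) Once a program has been linked through libtool, the emitted wrapper script recognizes exactly the two --lt- options defined above; anything else in that namespace is rejected. A hypothetical session with a wrapped program named hello:

    ./hello --lt-dump-script           # print the wrapper script itself, then exit 0
    ./hello --lt-debug arg1 arg2       # print wrapper diagnostics to stderr, then run hello arg1 arg2
    ./hello --lt-bogus                 # error: Unrecognized --lt- option: '--lt-bogus'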
+
+ # Parse options
+ func_parse_lt_options \"\$0\" \${1+\"\$@\"}
# Find the directory that this script lives in.
- thisdir=\`\$ECHO \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\`
+ thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\`
test \"x\$thisdir\" = \"x\$file\" && thisdir=.
# Follow symbolic links until we get to the real thisdir.
- file=\`ls -ld \"\$file\" | ${SED} -n 's/.*-> //p'\`
+ file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\`
while test -n \"\$file\"; do
- destdir=\`\$ECHO \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\`
+ destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\`
# If there was a directory component, then change thisdir.
if test \"x\$destdir\" != \"x\$file\"; then
@@ -2783,30 +4019,13 @@ else
esac
fi
- file=\`\$ECHO \"X\$file\" | \$Xsed -e 's%^.*/%%'\`
- file=\`ls -ld \"\$thisdir/\$file\" | ${SED} -n 's/.*-> //p'\`
+ file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\`
+ file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\`
done
-"
-}
-# end: func_emit_wrapper_part1
-
-# func_emit_wrapper_part2 [arg=no]
-#
-# Emit the second part of a libtool wrapper script on stdout.
-# For more information, see the description associated with
-# func_emit_wrapper(), below.
-func_emit_wrapper_part2 ()
-{
- func_emit_wrapper_part2_arg1=no
- if test -n "$1" ; then
- func_emit_wrapper_part2_arg1=$1
- fi
-
- $ECHO "\
# Usually 'no', except on cygwin/mingw when embedded into
# the cwrapper.
- WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_part2_arg1
+ WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1
if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then
# special case for '.'
if test \"\$thisdir\" = \".\"; then
@@ -2814,7 +4033,7 @@ func_emit_wrapper_part2 ()
fi
# remove .libs from thisdir
case \"\$thisdir\" in
- *[\\\\/]$objdir ) thisdir=\`\$ECHO \"X\$thisdir\" | \$Xsed -e 's%[\\\\/][^\\\\/]*$%%'\` ;;
+ *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;;
$objdir ) thisdir=. ;;
esac
fi
@@ -2869,6 +4088,18 @@ func_emit_wrapper_part2 ()
if test -f \"\$progdir/\$program\"; then"
+ # Fix the DLL searchpath if we need to. Do this before prepending
+ # to shlibpath, because on Windows, both are PATH and uninstalled
+ # libraries must come first.
+ if test -n "$dllsearchpath"; then
+ $ECHO "\
+ # Add the dll search path components to the executable PATH
+ PATH=$dllsearchpath:\$PATH
+"
+ fi
+
# Export our shlibpath_var if we have one.
if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
$ECHO "\
@@ -2877,254 +4108,29 @@ func_emit_wrapper_part2 ()
# Some systems cannot cope with colon-terminated $shlibpath_var
# The second colon is a workaround for a bug in BeOS R4 sed
- $shlibpath_var=\`\$ECHO \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\`
+ $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\`
export $shlibpath_var
"
fi
- # fixup the dll searchpath if we need to.
- if test -n "$dllsearchpath"; then
- $ECHO "\
- # Add the dll search path components to the executable PATH
- PATH=$dllsearchpath:\$PATH
-"
- fi
-
$ECHO "\
if test \"\$libtool_execute_magic\" != \"$magic\"; then
# Run the actual program with our arguments.
-"
- case $host in
- # Backslashes separate directories on plain windows
- *-*-mingw | *-*-os2* | *-cegcc*)
- $ECHO "\
- exec \"\$progdir\\\\\$program\" \${1+\"\$@\"}
-"
- ;;
-
- *)
- $ECHO "\
- exec \"\$progdir/\$program\" \${1+\"\$@\"}
-"
- ;;
- esac
- $ECHO "\
- \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2
- exit 1
+ func_exec_program \${1+\"\$@\"}
fi
else
# The program doesn't exist.
\$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2
\$ECHO \"This script is just a wrapper for \$program.\" 1>&2
- $ECHO \"See the $PACKAGE documentation for more information.\" 1>&2
+ \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2
exit 1
fi
fi\
"
}
-# end: func_emit_wrapper_part2
-
-
-# func_emit_wrapper [arg=no]
-#
-# Emit a libtool wrapper script on stdout.
-# Don't directly open a file because we may want to
-# incorporate the script contents within a cygwin/mingw
-# wrapper executable. Must ONLY be called from within
-# func_mode_link because it depends on a number of variables
-# set therein.
-#
-# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR
-# variable will take. If 'yes', then the emitted script
-# will assume that the directory in which it is stored is
-# the $objdir directory. This is a cygwin/mingw-specific
-# behavior.
-func_emit_wrapper ()
-{
- func_emit_wrapper_arg1=no
- if test -n "$1" ; then
- func_emit_wrapper_arg1=$1
- fi
-
- # split this up so that func_emit_cwrapperexe_src
- # can call each part independently.
- func_emit_wrapper_part1 "${func_emit_wrapper_arg1}"
- func_emit_wrapper_part2 "${func_emit_wrapper_arg1}"
-}
-# func_to_host_path arg
-#
-# Convert paths to host format when used with build tools.
-# Intended for use with "native" mingw (where libtool itself
-# is running under the msys shell), or in the following cross-
-# build environments:
-# $build $host
-# mingw (msys) mingw [e.g. native]
-# cygwin mingw
-# *nix + wine mingw
-# where wine is equipped with the `winepath' executable.
-# In the native mingw case, the (msys) shell automatically
-# converts paths for any non-msys applications it launches,
-# but that facility isn't available from inside the cwrapper.
-# Similar accommodations are necessary for $host mingw and
-# $build cygwin. Calling this function does no harm for other
-# $host/$build combinations not listed above.
-#
-# ARG is the path (on $build) that should be converted to
-# the proper representation for $host. The result is stored
-# in $func_to_host_path_result.
-func_to_host_path ()
-{
- func_to_host_path_result="$1"
- if test -n "$1" ; then
- case $host in
- *mingw* )
- lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g'
- case $build in
- *mingw* ) # actually, msys
- # awkward: cmd appends spaces to result
- lt_sed_strip_trailing_spaces="s/[ ]*\$//"
- func_to_host_path_tmp1=`( cmd //c echo "$1" |\
- $SED -e "$lt_sed_strip_trailing_spaces" ) 2>/dev/null || echo ""`
- func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\
- $SED -e "$lt_sed_naive_backslashify"`
- ;;
- *cygwin* )
- func_to_host_path_tmp1=`cygpath -w "$1"`
- func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\
- $SED -e "$lt_sed_naive_backslashify"`
- ;;
- * )
- # Unfortunately, winepath does not exit with a non-zero
- # error code, so we are forced to check the contents of
- # stdout. On the other hand, if the command is not
- # found, the shell will set an exit code of 127 and print
- # *an error message* to stdout. So we must check for both
- # error code of zero AND non-empty stdout, which explains
- # the odd construction:
- func_to_host_path_tmp1=`winepath -w "$1" 2>/dev/null`
- if test "$?" -eq 0 && test -n "${func_to_host_path_tmp1}"; then
- func_to_host_path_result=`echo "$func_to_host_path_tmp1" |\
- $SED -e "$lt_sed_naive_backslashify"`
- else
- # Allow warning below.
- func_to_host_path_result=""
- fi
- ;;
- esac
- if test -z "$func_to_host_path_result" ; then
- func_error "Could not determine host path corresponding to"
- func_error " '$1'"
- func_error "Continuing, but uninstalled executables may not work."
- # Fallback:
- func_to_host_path_result="$1"
- fi
- ;;
- esac
- fi
-}
-# end: func_to_host_path
-
-# func_to_host_pathlist arg
-#
-# Convert pathlists to host format when used with build tools.
-# See func_to_host_path(), above. This function supports the
-# following $build/$host combinations (but does no harm for
-# combinations not listed here):
-# $build $host
-# mingw (msys) mingw [e.g. native]
-# cygwin mingw
-# *nix + wine mingw
-#
-# Path separators are also converted from $build format to
-# $host format. If ARG begins or ends with a path separator
-# character, it is preserved (but converted to $host format)
-# on output.
-#
-# ARG is a pathlist (on $build) that should be converted to
-# the proper representation on $host. The result is stored
-# in $func_to_host_pathlist_result.
-func_to_host_pathlist ()
-{
- func_to_host_pathlist_result="$1"
- if test -n "$1" ; then
- case $host in
- *mingw* )
- lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g'
- # Remove leading and trailing path separator characters from
- # ARG. msys behavior is inconsistent here, cygpath turns them
- # into '.;' and ';.', and winepath ignores them completely.
- func_to_host_pathlist_tmp2="$1"
- # Once set for this call, this variable should not be
- # reassigned. It is used in tha fallback case.
- func_to_host_pathlist_tmp1=`echo "$func_to_host_pathlist_tmp2" |\
- $SED -e 's|^:*||' -e 's|:*$||'`
- case $build in
- *mingw* ) # Actually, msys.
- # Awkward: cmd appends spaces to result.
- lt_sed_strip_trailing_spaces="s/[ ]*\$//"
- func_to_host_pathlist_tmp2=`( cmd //c echo "$func_to_host_pathlist_tmp1" |\
- $SED -e "$lt_sed_strip_trailing_spaces" ) 2>/dev/null || echo ""`
- func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp2" |\
- $SED -e "$lt_sed_naive_backslashify"`
- ;;
- *cygwin* )
- func_to_host_pathlist_tmp2=`cygpath -w -p "$func_to_host_pathlist_tmp1"`
- func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp2" |\
- $SED -e "$lt_sed_naive_backslashify"`
- ;;
- * )
- # unfortunately, winepath doesn't convert pathlists
- func_to_host_pathlist_result=""
- func_to_host_pathlist_oldIFS=$IFS
- IFS=:
- for func_to_host_pathlist_f in $func_to_host_pathlist_tmp1 ; do
- IFS=$func_to_host_pathlist_oldIFS
- if test -n "$func_to_host_pathlist_f" ; then
- func_to_host_path "$func_to_host_pathlist_f"
- if test -n "$func_to_host_path_result" ; then
- if test -z "$func_to_host_pathlist_result" ; then
- func_to_host_pathlist_result="$func_to_host_path_result"
- else
- func_to_host_pathlist_result="$func_to_host_pathlist_result;$func_to_host_path_result"
- fi
- fi
- fi
- IFS=:
- done
- IFS=$func_to_host_pathlist_oldIFS
- ;;
- esac
- if test -z "$func_to_host_pathlist_result" ; then
- func_error "Could not determine the host path(s) corresponding to"
- func_error " '$1'"
- func_error "Continuing, but uninstalled executables may not work."
- # Fallback. This may break if $1 contains DOS-style drive
- # specifications. The fix is not to complicate the expression
- # below, but for the user to provide a working wine installation
- # with winepath so that path translation in the cross-to-mingw
- # case works properly.
- lt_replace_pathsep_nix_to_dos="s|:|;|g"
- func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp1" |\
- $SED -e "$lt_replace_pathsep_nix_to_dos"`
- fi
- # Now, add the leading and trailing path separators back
- case "$1" in
- :* ) func_to_host_pathlist_result=";$func_to_host_pathlist_result"
- ;;
- esac
- case "$1" in
- *: ) func_to_host_pathlist_result="$func_to_host_pathlist_result;"
- ;;
- esac
- ;;
- esac
- fi
-}
-# end: func_to_host_pathlist
-
# func_emit_cwrapperexe_src
# emit the source code for a wrapper executable on stdout
# Must ONLY be called from within func_mode_link because
@@ -3141,31 +4147,23 @@ func_emit_cwrapperexe_src ()
This wrapper executable should never be moved out of the build directory.
If it is, it will not operate correctly.
-
- Currently, it simply execs the wrapper *script* "$SHELL $output",
- but could eventually absorb all of the scripts functionality and
- exec $objdir/$outputname directly.
*/
EOF
cat <<"EOF"
+#ifdef _MSC_VER
+# define _CRT_SECURE_NO_DEPRECATE 1
+#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef _MSC_VER
# include <direct.h>
# include <process.h>
# include <io.h>
-# define setmode _setmode
#else
# include <unistd.h>
# include <stdint.h>
# ifdef __CYGWIN__
# include <io.h>
-# define HAVE_SETENV
-# ifdef __STRICT_ANSI__
-char *realpath (const char *, char *);
-int putenv (char *);
-int setenv (const char *, const char *, int);
-# endif
# endif
#endif
#include <malloc.h>
@@ -3177,6 +4175,44 @@ int setenv (const char *, const char *, int);
#include <fcntl.h>
#include <sys/stat.h>
+/* declarations of non-ANSI functions */
+#if defined(__MINGW32__)
+# ifdef __STRICT_ANSI__
+int _putenv (const char *);
+# endif
+#elif defined(__CYGWIN__)
+# ifdef __STRICT_ANSI__
+char *realpath (const char *, char *);
+int putenv (char *);
+int setenv (const char *, const char *, int);
+# endif
+/* #elif defined (other platforms) ... */
+#endif
+
+/* portability defines, excluding path handling macros */
+#if defined(_MSC_VER)
+# define setmode _setmode
+# define stat _stat
+# define chmod _chmod
+# define getcwd _getcwd
+# define putenv _putenv
+# define S_IXUSR _S_IEXEC
+# ifndef _INTPTR_T_DEFINED
+# define _INTPTR_T_DEFINED
+# define intptr_t int
+# endif
+#elif defined(__MINGW32__)
+# define setmode _setmode
+# define stat _stat
+# define chmod _chmod
+# define getcwd _getcwd
+# define putenv _putenv
+#elif defined(__CYGWIN__)
+# define HAVE_SETENV
+# define FOPEN_WB "wb"
+/* #elif defined (other platforms) ... */
+#endif
+
#if defined(PATH_MAX)
# define LT_PATHMAX PATH_MAX
#elif defined(MAXPATHLEN)
@@ -3192,14 +4228,7 @@ int setenv (const char *, const char *, int);
# define S_IXGRP 0
#endif
-#ifdef _MSC_VER
-# define S_IXUSR _S_IEXEC
-# define stat _stat
-# ifndef _INTPTR_T_DEFINED
-# define intptr_t int
-# endif
-#endif
-
+/* path handling portability macros */
#ifndef DIR_SEPARATOR
# define DIR_SEPARATOR '/'
# define PATH_SEPARATOR ':'
@@ -3230,10 +4259,6 @@ int setenv (const char *, const char *, int);
# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2)
#endif /* PATH_SEPARATOR_2 */
-#ifdef __CYGWIN__
-# define FOPEN_WB "wb"
-#endif
-
#ifndef FOPEN_WB
# define FOPEN_WB "w"
#endif
@@ -3246,22 +4271,13 @@ int setenv (const char *, const char *, int);
if (stale) { free ((void *) stale); stale = 0; } \
} while (0)
-#undef LTWRAPPER_DEBUGPRINTF
-#if defined DEBUGWRAPPER
-# define LTWRAPPER_DEBUGPRINTF(args) ltwrapper_debugprintf args
-static void
-ltwrapper_debugprintf (const char *fmt, ...)
-{
- va_list args;
- va_start (args, fmt);
- (void) vfprintf (stderr, fmt, args);
- va_end (args);
-}
+#if defined(LT_DEBUGWRAPPER)
+static int lt_debug = 1;
#else
-# define LTWRAPPER_DEBUGPRINTF(args)
+static int lt_debug = 0;
#endif
-const char *program_name = NULL;
+const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */
void *xmalloc (size_t num);
char *xstrdup (const char *string);
@@ -3271,41 +4287,27 @@ char *chase_symlinks (const char *pathspec);
int make_executable (const char *path);
int check_executable (const char *path);
char *strendzap (char *str, const char *pat);
-void lt_fatal (const char *message, ...);
+void lt_debugprintf (const char *file, int line, const char *fmt, ...);
+void lt_fatal (const char *file, int line, const char *message, ...);
+static const char *nonnull (const char *s);
+static const char *nonempty (const char *s);
void lt_setenv (const char *name, const char *value);
char *lt_extend_str (const char *orig_value, const char *add, int to_end);
-void lt_opt_process_env_set (const char *arg);
-void lt_opt_process_env_prepend (const char *arg);
-void lt_opt_process_env_append (const char *arg);
-int lt_split_name_value (const char *arg, char** name, char** value);
void lt_update_exe_path (const char *name, const char *value);
void lt_update_lib_path (const char *name, const char *value);
-
-static const char *script_text_part1 =
+char **prepare_spawn (char **argv);
+void lt_dump_script (FILE *f);
EOF
- func_emit_wrapper_part1 yes |
- $SED -e 's/\([\\"]\)/\\\1/g' \
- -e 's/^/ "/' -e 's/$/\\n"/'
- echo ";"
cat <<EOF
-
-static const char *script_text_part2 =
-EOF
- func_emit_wrapper_part2 yes |
- $SED -e 's/\([\\"]\)/\\\1/g' \
- -e 's/^/ "/' -e 's/$/\\n"/'
- echo ";"
-
- cat <<EOF
-const char * MAGIC_EXE = "$magic_exe";
+volatile const char * MAGIC_EXE = "$magic_exe";
const char * LIB_PATH_VARNAME = "$shlibpath_var";
EOF
if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
- func_to_host_pathlist "$temp_rpath"
+ func_to_host_path "$temp_rpath"
cat <<EOF
-const char * LIB_PATH_VALUE = "$func_to_host_pathlist_result";
+const char * LIB_PATH_VALUE = "$func_to_host_path_result";
EOF
else
cat <<"EOF"
@@ -3314,10 +4316,10 @@ EOF
fi
if test -n "$dllsearchpath"; then
- func_to_host_pathlist "$dllsearchpath:"
+ func_to_host_path "$dllsearchpath:"
cat <<EOF
const char * EXE_PATH_VARNAME = "PATH";
-const char * EXE_PATH_VALUE = "$func_to_host_pathlist_result";
+const char * EXE_PATH_VALUE = "$func_to_host_path_result";
EOF
else
cat <<"EOF"
@@ -3340,24 +4342,10 @@ EOF
cat <<"EOF"
#define LTWRAPPER_OPTION_PREFIX "--lt-"
-#define LTWRAPPER_OPTION_PREFIX_LENGTH 5
-static const size_t opt_prefix_len = LTWRAPPER_OPTION_PREFIX_LENGTH;
static const char *ltwrapper_option_prefix = LTWRAPPER_OPTION_PREFIX;
-
static const char *dumpscript_opt = LTWRAPPER_OPTION_PREFIX "dump-script";
-
-static const size_t env_set_opt_len = LTWRAPPER_OPTION_PREFIX_LENGTH + 7;
-static const char *env_set_opt = LTWRAPPER_OPTION_PREFIX "env-set";
- /* argument is putenv-style "foo=bar", value of foo is set to bar */
-
-static const size_t env_prepend_opt_len = LTWRAPPER_OPTION_PREFIX_LENGTH + 11;
-static const char *env_prepend_opt = LTWRAPPER_OPTION_PREFIX "env-prepend";
- /* argument is putenv-style "foo=bar", new value of foo is bar${foo} */
-
-static const size_t env_append_opt_len = LTWRAPPER_OPTION_PREFIX_LENGTH + 10;
-static const char *env_append_opt = LTWRAPPER_OPTION_PREFIX "env-append";
- /* argument is putenv-style "foo=bar", new value of foo is ${foo}bar */
+static const char *debug_opt = LTWRAPPER_OPTION_PREFIX "debug";
int
main (int argc, char *argv[])
@@ -3374,10 +4362,13 @@ main (int argc, char *argv[])
int i;
program_name = (char *) xstrdup (base_name (argv[0]));
- LTWRAPPER_DEBUGPRINTF (("(main) argv[0] : %s\n", argv[0]));
- LTWRAPPER_DEBUGPRINTF (("(main) program_name : %s\n", program_name));
+ newargz = XMALLOC (char *, argc + 1);
- /* very simple arg parsing; don't want to rely on getopt */
+ /* very simple arg parsing; don't want to rely on getopt
+ * also, copy all non cwrapper options to newargz, except
+ * argz[0], which is handled differently
+ */
+ newargc=0;
for (i = 1; i < argc; i++)
{
if (strcmp (argv[i], dumpscript_opt) == 0)
@@ -3391,25 +4382,57 @@ EOF
esac
cat <<"EOF"
- printf ("%s", script_text_part1);
- printf ("%s", script_text_part2);
+ lt_dump_script (stdout);
return 0;
}
+ if (strcmp (argv[i], debug_opt) == 0)
+ {
+ lt_debug = 1;
+ continue;
+ }
+ if (strcmp (argv[i], ltwrapper_option_prefix) == 0)
+ {
+ /* however, if there is an option in the LTWRAPPER_OPTION_PREFIX
+ namespace, but it is not one of the ones we know about and
+ have already dealt with, above (including dump-script), then
+ report an error. Otherwise, targets might begin to believe
+ they are allowed to use options in the LTWRAPPER_OPTION_PREFIX
+ namespace. The first time any user complains about this, we'll
+ need to make LTWRAPPER_OPTION_PREFIX a configure-time option
+ or a configure.ac-settable value.
+ */
+ lt_fatal (__FILE__, __LINE__,
+ "unrecognized %s option: '%s'",
+ ltwrapper_option_prefix, argv[i]);
+ }
+ /* otherwise ... */
+ newargz[++newargc] = xstrdup (argv[i]);
}
+ newargz[++newargc] = NULL;
+
+EOF
+ cat <<EOF
+ /* The GNU banner must be the first non-error debug message */
+ lt_debugprintf (__FILE__, __LINE__, "libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\n");
+EOF
+ cat <<"EOF"
+ lt_debugprintf (__FILE__, __LINE__, "(main) argv[0]: %s\n", argv[0]);
+ lt_debugprintf (__FILE__, __LINE__, "(main) program_name: %s\n", program_name);
- newargz = XMALLOC (char *, argc + 1);
tmp_pathspec = find_executable (argv[0]);
if (tmp_pathspec == NULL)
- lt_fatal ("Couldn't find %s", argv[0]);
- LTWRAPPER_DEBUGPRINTF (("(main) found exe (before symlink chase) at : %s\n",
- tmp_pathspec));
+ lt_fatal (__FILE__, __LINE__, "couldn't find %s", argv[0]);
+ lt_debugprintf (__FILE__, __LINE__,
+ "(main) found exe (before symlink chase) at: %s\n",
+ tmp_pathspec);
actual_cwrapper_path = chase_symlinks (tmp_pathspec);
- LTWRAPPER_DEBUGPRINTF (("(main) found exe (after symlink chase) at : %s\n",
- actual_cwrapper_path));
+ lt_debugprintf (__FILE__, __LINE__,
+ "(main) found exe (after symlink chase) at: %s\n",
+ actual_cwrapper_path);
XFREE (tmp_pathspec);
- actual_cwrapper_name = xstrdup( base_name (actual_cwrapper_path));
+ actual_cwrapper_name = xstrdup (base_name (actual_cwrapper_path));
strendzap (actual_cwrapper_path, actual_cwrapper_name);
/* wrapper name transforms */
@@ -3427,8 +4450,9 @@ EOF
target_name = tmp_pathspec;
tmp_pathspec = 0;
- LTWRAPPER_DEBUGPRINTF (("(main) libtool target name: %s\n",
- target_name));
+ lt_debugprintf (__FILE__, __LINE__,
+ "(main) libtool target name: %s\n",
+ target_name);
EOF
cat <<EOF
@@ -3478,80 +4502,19 @@ EOF
lt_setenv ("BIN_SH", "xpg4"); /* for Tru64 */
lt_setenv ("DUALCASE", "1"); /* for MSK sh */
- lt_update_lib_path (LIB_PATH_VARNAME, LIB_PATH_VALUE);
+ /* Update the DLL searchpath. EXE_PATH_VALUE ($dllsearchpath) must
+ be prepended before (that is, appear after) LIB_PATH_VALUE ($temp_rpath)
+ because on Windows, both *_VARNAMEs are PATH but uninstalled
+ libraries must come first. */
lt_update_exe_path (EXE_PATH_VARNAME, EXE_PATH_VALUE);
+ lt_update_lib_path (LIB_PATH_VARNAME, LIB_PATH_VALUE);
- newargc=0;
- for (i = 1; i < argc; i++)
- {
- if (strncmp (argv[i], env_set_opt, env_set_opt_len) == 0)
- {
- if (argv[i][env_set_opt_len] == '=')
- {
- const char *p = argv[i] + env_set_opt_len + 1;
- lt_opt_process_env_set (p);
- }
- else if (argv[i][env_set_opt_len] == '\0' && i + 1 < argc)
- {
- lt_opt_process_env_set (argv[++i]); /* don't copy */
- }
- else
- lt_fatal ("%s missing required argument", env_set_opt);
- continue;
- }
- if (strncmp (argv[i], env_prepend_opt, env_prepend_opt_len) == 0)
- {
- if (argv[i][env_prepend_opt_len] == '=')
- {
- const char *p = argv[i] + env_prepend_opt_len + 1;
- lt_opt_process_env_prepend (p);
- }
- else if (argv[i][env_prepend_opt_len] == '\0' && i + 1 < argc)
- {
- lt_opt_process_env_prepend (argv[++i]); /* don't copy */
- }
- else
- lt_fatal ("%s missing required argument", env_prepend_opt);
- continue;
- }
- if (strncmp (argv[i], env_append_opt, env_append_opt_len) == 0)
- {
- if (argv[i][env_append_opt_len] == '=')
- {
- const char *p = argv[i] + env_append_opt_len + 1;
- lt_opt_process_env_append (p);
- }
- else if (argv[i][env_append_opt_len] == '\0' && i + 1 < argc)
- {
- lt_opt_process_env_append (argv[++i]); /* don't copy */
- }
- else
- lt_fatal ("%s missing required argument", env_append_opt);
- continue;
- }
- if (strncmp (argv[i], ltwrapper_option_prefix, opt_prefix_len) == 0)
- {
- /* however, if there is an option in the LTWRAPPER_OPTION_PREFIX
- namespace, but it is not one of the ones we know about and
- have already dealt with, above (inluding dump-script), then
- report an error. Otherwise, targets might begin to believe
- they are allowed to use options in the LTWRAPPER_OPTION_PREFIX
- namespace. The first time any user complains about this, we'll
- need to make LTWRAPPER_OPTION_PREFIX a configure-time option
- or a configure.ac-settable value.
- */
- lt_fatal ("Unrecognized option in %s namespace: '%s'",
- ltwrapper_option_prefix, argv[i]);
- }
- /* otherwise ... */
- newargz[++newargc] = xstrdup (argv[i]);
- }
- newargz[++newargc] = NULL;
-
- LTWRAPPER_DEBUGPRINTF (("(main) lt_argv_zero : %s\n", (lt_argv_zero ? lt_argv_zero : "<NULL>")));
+ lt_debugprintf (__FILE__, __LINE__, "(main) lt_argv_zero: %s\n",
+ nonnull (lt_argv_zero));
for (i = 0; i < newargc; i++)
{
- LTWRAPPER_DEBUGPRINTF (("(main) newargz[%d] : %s\n", i, (newargz[i] ? newargz[i] : "<NULL>")));
+ lt_debugprintf (__FILE__, __LINE__, "(main) newargz[%d]: %s\n",
+ i, nonnull (newargz[i]));
}
EOF
@@ -3560,11 +4523,14 @@ EOF
mingw*)
cat <<"EOF"
/* execv doesn't actually work on mingw as expected on unix */
+ newargz = prepare_spawn (newargz);
rval = _spawnv (_P_WAIT, lt_argv_zero, (const char * const *) newargz);
if (rval == -1)
{
/* failed to start process */
- LTWRAPPER_DEBUGPRINTF (("(main) failed to launch target \"%s\": errno = %d\n", lt_argv_zero, errno));
+ lt_debugprintf (__FILE__, __LINE__,
+ "(main) failed to launch target \"%s\": %s\n",
+ lt_argv_zero, nonnull (strerror (errno)));
return 127;
}
return rval;
@@ -3586,7 +4552,7 @@ xmalloc (size_t num)
{
void *p = (void *) malloc (num);
if (!p)
- lt_fatal ("Memory exhausted");
+ lt_fatal (__FILE__, __LINE__, "memory exhausted");
return p;
}
@@ -3620,8 +4586,8 @@ check_executable (const char *path)
{
struct stat st;
- LTWRAPPER_DEBUGPRINTF (("(check_executable) : %s\n",
- path ? (*path ? path : "EMPTY!") : "NULL!"));
+ lt_debugprintf (__FILE__, __LINE__, "(check_executable): %s\n",
+ nonempty (path));
if ((!path) || (!*path))
return 0;
@@ -3638,8 +4604,8 @@ make_executable (const char *path)
int rval = 0;
struct stat st;
- LTWRAPPER_DEBUGPRINTF (("(make_executable) : %s\n",
- path ? (*path ? path : "EMPTY!") : "NULL!"));
+ lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n",
+ nonempty (path));
if ((!path) || (!*path))
return 0;
@@ -3665,8 +4631,8 @@ find_executable (const char *wrapper)
int tmp_len;
char *concat_name;
- LTWRAPPER_DEBUGPRINTF (("(find_executable) : %s\n",
- wrapper ? (*wrapper ? wrapper : "EMPTY!") : "NULL!"));
+ lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n",
+ nonempty (wrapper));
if ((wrapper == NULL) || (*wrapper == '\0'))
return NULL;
@@ -3719,7 +4685,8 @@ find_executable (const char *wrapper)
{
/* empty path: current directory */
if (getcwd (tmp, LT_PATHMAX) == NULL)
- lt_fatal ("getcwd failed");
+ lt_fatal (__FILE__, __LINE__, "getcwd failed: %s",
+ nonnull (strerror (errno)));
tmp_len = strlen (tmp);
concat_name =
XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
@@ -3744,7 +4711,8 @@ find_executable (const char *wrapper)
}
/* Relative path | not found in path: prepend cwd */
if (getcwd (tmp, LT_PATHMAX) == NULL)
- lt_fatal ("getcwd failed");
+ lt_fatal (__FILE__, __LINE__, "getcwd failed: %s",
+ nonnull (strerror (errno)));
tmp_len = strlen (tmp);
concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
memcpy (concat_name, tmp, tmp_len);
@@ -3770,8 +4738,9 @@ chase_symlinks (const char *pathspec)
int has_symlinks = 0;
while (strlen (tmp_pathspec) && !has_symlinks)
{
- LTWRAPPER_DEBUGPRINTF (("checking path component for symlinks: %s\n",
- tmp_pathspec));
+ lt_debugprintf (__FILE__, __LINE__,
+ "checking path component for symlinks: %s\n",
+ tmp_pathspec);
if (lstat (tmp_pathspec, &s) == 0)
{
if (S_ISLNK (s.st_mode) != 0)
@@ -3793,8 +4762,9 @@ chase_symlinks (const char *pathspec)
}
else
{
- char *errstr = strerror (errno);
- lt_fatal ("Error accessing file %s (%s)", tmp_pathspec, errstr);
+ lt_fatal (__FILE__, __LINE__,
+ "error accessing file \"%s\": %s",
+ tmp_pathspec, nonnull (strerror (errno)));
}
}
XFREE (tmp_pathspec);
@@ -3807,7 +4777,8 @@ chase_symlinks (const char *pathspec)
tmp_pathspec = realpath (pathspec, buf);
if (tmp_pathspec == 0)
{
- lt_fatal ("Could not follow symlinks for %s", pathspec);
+ lt_fatal (__FILE__, __LINE__,
+ "could not follow symlinks for %s", pathspec);
}
return xstrdup (tmp_pathspec);
#endif
@@ -3833,11 +4804,25 @@ strendzap (char *str, const char *pat)
return str;
}
+void
+lt_debugprintf (const char *file, int line, const char *fmt, ...)
+{
+ va_list args;
+ if (lt_debug)
+ {
+ (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line);
+ va_start (args, fmt);
+ (void) vfprintf (stderr, fmt, args);
+ va_end (args);
+ }
+}
+
static void
-lt_error_core (int exit_status, const char *mode,
+lt_error_core (int exit_status, const char *file,
+ int line, const char *mode,
const char *message, va_list ap)
{
- fprintf (stderr, "%s: %s: ", program_name, mode);
+ fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode);
vfprintf (stderr, message, ap);
fprintf (stderr, ".\n");
@@ -3846,20 +4831,32 @@ lt_error_core (int exit_status, const char *mode,
}
void
-lt_fatal (const char *message, ...)
+lt_fatal (const char *file, int line, const char *message, ...)
{
va_list ap;
va_start (ap, message);
- lt_error_core (EXIT_FAILURE, "FATAL", message, ap);
+ lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap);
va_end (ap);
}
+static const char *
+nonnull (const char *s)
+{
+ return s ? s : "(null)";
+}
+
+static const char *
+nonempty (const char *s)
+{
+ return (s && !*s) ? "(empty)" : nonnull (s);
+}
+
void
lt_setenv (const char *name, const char *value)
{
- LTWRAPPER_DEBUGPRINTF (("(lt_setenv) setting '%s' to '%s'\n",
- (name ? name : "<NULL>"),
- (value ? value : "<NULL>")));
+ lt_debugprintf (__FILE__, __LINE__,
+ "(lt_setenv) setting '%s' to '%s'\n",
+ nonnull (name), nonnull (value));
{
#ifdef HAVE_SETENV
/* always make a copy, for consistency with !HAVE_SETENV */
@@ -3904,95 +4901,12 @@ lt_extend_str (const char *orig_value, const char *add, int to_end)
return new_value;
}
-int
-lt_split_name_value (const char *arg, char** name, char** value)
-{
- const char *p;
- int len;
- if (!arg || !*arg)
- return 1;
-
- p = strchr (arg, (int)'=');
-
- if (!p)
- return 1;
-
- *value = xstrdup (++p);
-
- len = strlen (arg) - strlen (*value);
- *name = XMALLOC (char, len);
- strncpy (*name, arg, len-1);
- (*name)[len - 1] = '\0';
-
- return 0;
-}
-
-void
-lt_opt_process_env_set (const char *arg)
-{
- char *name = NULL;
- char *value = NULL;
-
- if (lt_split_name_value (arg, &name, &value) != 0)
- {
- XFREE (name);
- XFREE (value);
- lt_fatal ("bad argument for %s: '%s'", env_set_opt, arg);
- }
-
- lt_setenv (name, value);
- XFREE (name);
- XFREE (value);
-}
-
-void
-lt_opt_process_env_prepend (const char *arg)
-{
- char *name = NULL;
- char *value = NULL;
- char *new_value = NULL;
-
- if (lt_split_name_value (arg, &name, &value) != 0)
- {
- XFREE (name);
- XFREE (value);
- lt_fatal ("bad argument for %s: '%s'", env_prepend_opt, arg);
- }
-
- new_value = lt_extend_str (getenv (name), value, 0);
- lt_setenv (name, new_value);
- XFREE (new_value);
- XFREE (name);
- XFREE (value);
-}
-
-void
-lt_opt_process_env_append (const char *arg)
-{
- char *name = NULL;
- char *value = NULL;
- char *new_value = NULL;
-
- if (lt_split_name_value (arg, &name, &value) != 0)
- {
- XFREE (name);
- XFREE (value);
- lt_fatal ("bad argument for %s: '%s'", env_append_opt, arg);
- }
-
- new_value = lt_extend_str (getenv (name), value, 1);
- lt_setenv (name, new_value);
- XFREE (new_value);
- XFREE (name);
- XFREE (value);
-}
-
void
lt_update_exe_path (const char *name, const char *value)
{
- LTWRAPPER_DEBUGPRINTF (("(lt_update_exe_path) modifying '%s' by prepending '%s'\n",
- (name ? name : "<NULL>"),
- (value ? value : "<NULL>")));
+ lt_debugprintf (__FILE__, __LINE__,
+ "(lt_update_exe_path) modifying '%s' by prepending '%s'\n",
+ nonnull (name), nonnull (value));
if (name && *name && value && *value)
{
@@ -4011,9 +4925,9 @@ lt_update_exe_path (const char *name, const char *value)
void
lt_update_lib_path (const char *name, const char *value)
{
- LTWRAPPER_DEBUGPRINTF (("(lt_update_lib_path) modifying '%s' by prepending '%s'\n",
- (name ? name : "<NULL>"),
- (value ? value : "<NULL>")));
+ lt_debugprintf (__FILE__, __LINE__,
+ "(lt_update_lib_path) modifying '%s' by prepending '%s'\n",
+ nonnull (name), nonnull (value));
if (name && *name && value && *value)
{
@@ -4023,11 +4937,158 @@ lt_update_lib_path (const char *name, const char *value)
}
}
+EOF
+ case $host_os in
+ mingw*)
+ cat <<"EOF"
+
+/* Prepares an argument vector before calling spawn().
+ Note that spawn() does not by itself call the command interpreter
+ (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") :
+ ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+ GetVersionEx(&v);
+ v.dwPlatformId == VER_PLATFORM_WIN32_NT;
+ }) ? "cmd.exe" : "command.com").
+ Instead it simply concatenates the arguments, separated by ' ', and calls
+ CreateProcess(). We must quote the arguments since Win32 CreateProcess()
+ interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a
+ special way:
+ - Space and tab are interpreted as delimiters. They are not treated as
+ delimiters if they are surrounded by double quotes: "...".
+ - Unescaped double quotes are removed from the input. Their only effect is
+ that within double quotes, space and tab are treated like normal
+ characters.
+ - Backslashes not followed by double quotes are not special.
+ - But 2*n+1 backslashes followed by a double quote become
+ n backslashes followed by a double quote (n >= 0):
+ \" -> "
+ \\\" -> \"
+ \\\\\" -> \\"
+ */
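+/* Editor's illustration, not part of the upstream patch: with the rules
+   above, the argument
+       say "hi"
+   needs surrounding quotes (it contains a space) and each embedded quote
+   gains one backslash, yielding
+       "say \"hi\""
+   while an argument that needs quotes and ends in a backslash, e.g.
+       dir one\
+   has that trailing backslash doubled before the closing quote:
+       "dir one\\"                                                        */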
+#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037"
+#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037"
+char **
+prepare_spawn (char **argv)
+{
+ size_t argc;
+ char **new_argv;
+ size_t i;
+
+ /* Count number of arguments. */
+ for (argc = 0; argv[argc] != NULL; argc++)
+ ;
+
+ /* Allocate new argument vector. */
+ new_argv = XMALLOC (char *, argc + 1);
+
+ /* Put quoted arguments into the new argument vector. */
+ for (i = 0; i < argc; i++)
+ {
+ const char *string = argv[i];
+ if (string[0] == '\0')
+ new_argv[i] = xstrdup ("\"\"");
+ else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL)
+ {
+ int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL);
+ size_t length;
+ unsigned int backslashes;
+ const char *s;
+ char *quoted_string;
+ char *p;
+
+ length = 0;
+ backslashes = 0;
+ if (quote_around)
+ length++;
+ for (s = string; *s != '\0'; s++)
+ {
+ char c = *s;
+ if (c == '"')
+ length += backslashes + 1;
+ length++;
+ if (c == '\\')
+ backslashes++;
+ else
+ backslashes = 0;
+ }
+ if (quote_around)
+ length += backslashes + 1;
+
+ quoted_string = XMALLOC (char, length + 1);
+
+ p = quoted_string;
+ backslashes = 0;
+ if (quote_around)
+ *p++ = '"';
+ for (s = string; *s != '\0'; s++)
+ {
+ char c = *s;
+ if (c == '"')
+ {
+ unsigned int j;
+ for (j = backslashes + 1; j > 0; j--)
+ *p++ = '\\';
+ }
+ *p++ = c;
+ if (c == '\\')
+ backslashes++;
+ else
+ backslashes = 0;
+ }
+ if (quote_around)
+ {
+ unsigned int j;
+ for (j = backslashes; j > 0; j--)
+ *p++ = '\\';
+ *p++ = '"';
+ }
+ *p = '\0';
+
+ new_argv[i] = quoted_string;
+ }
+ else
+ new_argv[i] = (char *) string;
+ }
+ new_argv[argc] = NULL;
+
+ return new_argv;
+}
+EOF
+ ;;
+ esac
+
+ cat <<"EOF"
+void lt_dump_script (FILE* f)
+{
+EOF
+ func_emit_wrapper yes |
+ $SED -n -e '
+s/^\(.\{79\}\)\(..*\)/\1\
+\2/
+h
+s/\([\\"]\)/\\\1/g
+s/$/\\n/
+s/\([^\n]*\).*/ fputs ("\1", f);/p
+g
+D'
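(Editor's note, an assumed illustration.) The sed program above converts each line of the emitted wrapper script into a C statement for lt_dump_script: backslashes and double quotes are escaped, a trailing \n is appended, and lines longer than 79 columns are split. For instance, a wrapper line such as

    # Run the actual program with our arguments.

would come out as

     fputs ("# Run the actual program with our arguments.\n", f);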
+ cat <<"EOF"
+}
EOF
}
# end: func_emit_cwrapperexe_src
+# func_win32_import_lib_p ARG
+# True if ARG is an import lib, as indicated by $file_magic_cmd
+func_win32_import_lib_p ()
+{
+ $opt_debug
+ case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in
+ *import*) : ;;
+ *) false ;;
+ esac
+}
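(Editor's note, illustrative only; the library name is hypothetical.) The predicate above is what gates the import-library branch of the -dlpreopen handling shown earlier in this diff:

    if func_win32_import_lib_p "libbar.dll.a"; then
      # resolve the DLL behind the import library
      $sharedlib_from_linklib_cmd "libbar.dll.a"
    fi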
+
# func_mode_link arg...
func_mode_link ()
{
@@ -4072,6 +5133,7 @@ func_mode_link ()
new_inherited_linker_flags=
avoid_version=no
+ bindir=
dlfiles=
dlprefiles=
dlself=no
@@ -4164,6 +5226,11 @@ func_mode_link ()
esac
case $prev in
+ bindir)
+ bindir="$arg"
+ prev=
+ continue
+ ;;
dlfiles|dlprefiles)
if test "$preload" = no; then
# Add the symbol object into the linking commands.
@@ -4195,9 +5262,9 @@ func_mode_link ()
;;
*)
if test "$prev" = dlfiles; then
- dlfiles="$dlfiles $arg"
+ func_append dlfiles " $arg"
else
- dlprefiles="$dlprefiles $arg"
+ func_append dlprefiles " $arg"
fi
prev=
continue
@@ -4221,7 +5288,7 @@ func_mode_link ()
*-*-darwin*)
case "$deplibs " in
*" $qarg.ltframework "*) ;;
- *) deplibs="$deplibs $qarg.ltframework" # this is fixed later
+ *) func_append deplibs " $qarg.ltframework" # this is fixed later
;;
esac
;;
@@ -4240,7 +5307,7 @@ func_mode_link ()
moreargs=
for fil in `cat "$save_arg"`
do
-# moreargs="$moreargs $fil"
+# func_append moreargs " $fil"
arg=$fil
# A libtool-controlled object.
@@ -4269,7 +5336,7 @@ func_mode_link ()
if test "$prev" = dlfiles; then
if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
- dlfiles="$dlfiles $pic_object"
+ func_append dlfiles " $pic_object"
prev=
continue
else
@@ -4281,7 +5348,7 @@ func_mode_link ()
# CHECK ME: I think I busted this. -Ossama
if test "$prev" = dlprefiles; then
# Preload the old-style object.
- dlprefiles="$dlprefiles $pic_object"
+ func_append dlprefiles " $pic_object"
prev=
fi
@@ -4351,12 +5418,12 @@ func_mode_link ()
if test "$prev" = rpath; then
case "$rpath " in
*" $arg "*) ;;
- *) rpath="$rpath $arg" ;;
+ *) func_append rpath " $arg" ;;
esac
else
case "$xrpath " in
*" $arg "*) ;;
- *) xrpath="$xrpath $arg" ;;
+ *) func_append xrpath " $arg" ;;
esac
fi
prev=
@@ -4368,28 +5435,28 @@ func_mode_link ()
continue
;;
weak)
- weak_libs="$weak_libs $arg"
+ func_append weak_libs " $arg"
prev=
continue
;;
xcclinker)
- linker_flags="$linker_flags $qarg"
- compiler_flags="$compiler_flags $qarg"
+ func_append linker_flags " $qarg"
+ func_append compiler_flags " $qarg"
prev=
func_append compile_command " $qarg"
func_append finalize_command " $qarg"
continue
;;
xcompiler)
- compiler_flags="$compiler_flags $qarg"
+ func_append compiler_flags " $qarg"
prev=
func_append compile_command " $qarg"
func_append finalize_command " $qarg"
continue
;;
xlinker)
- linker_flags="$linker_flags $qarg"
- compiler_flags="$compiler_flags $wl$qarg"
+ func_append linker_flags " $qarg"
+ func_append compiler_flags " $wl$qarg"
prev=
func_append compile_command " $wl$qarg"
func_append finalize_command " $wl$qarg"
@@ -4425,6 +5492,11 @@ func_mode_link ()
continue
;;
+ -bindir)
+ prev=bindir
+ continue
+ ;;
+
-dlopen)
prev=dlfiles
continue
@@ -4475,15 +5547,16 @@ func_mode_link ()
;;
-L*)
- func_stripname '-L' '' "$arg"
- dir=$func_stripname_result
- if test -z "$dir"; then
+ func_stripname "-L" '' "$arg"
+ if test -z "$func_stripname_result"; then
if test "$#" -gt 0; then
func_fatal_error "require no space between \`-L' and \`$1'"
else
func_fatal_error "need path for \`-L' option"
fi
fi
+ func_resolve_sysroot "$func_stripname_result"
+ dir=$func_resolve_sysroot_result
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
@@ -4495,24 +5568,30 @@ func_mode_link ()
;;
esac
case "$deplibs " in
- *" -L$dir "*) ;;
+ *" -L$dir "* | *" $arg "*)
+ # Will only happen for absolute or sysroot arguments
+ ;;
*)
- deplibs="$deplibs -L$dir"
- lib_search_path="$lib_search_path $dir"
+ # Preserve sysroot, but never include relative directories
+ case $dir in
+ [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;;
+ *) func_append deplibs " -L$dir" ;;
+ esac
+ func_append lib_search_path " $dir"
;;
esac
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
- testbindir=`$ECHO "X$dir" | $Xsed -e 's*/lib$*/bin*'`
+ testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'`
case :$dllsearchpath: in
*":$dir:"*) ;;
::) dllsearchpath=$dir;;
- *) dllsearchpath="$dllsearchpath:$dir";;
+ *) func_append dllsearchpath ":$dir";;
esac
case :$dllsearchpath: in
*":$testbindir:"*) ;;
::) dllsearchpath=$testbindir;;
- *) dllsearchpath="$dllsearchpath:$testbindir";;
+ *) func_append dllsearchpath ":$testbindir";;
esac
;;
esac
@@ -4522,7 +5601,7 @@ func_mode_link ()
-l*)
if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then
case $host in
- *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc*)
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*)
# These systems don't actually have a C or math library (as such)
continue
;;
@@ -4536,7 +5615,7 @@ func_mode_link ()
;;
*-*-rhapsody* | *-*-darwin1.[012])
# Rhapsody C and math libraries are in the System framework
- deplibs="$deplibs System.ltframework"
+ func_append deplibs " System.ltframework"
continue
;;
*-*-sco3.2v5* | *-*-sco5v6*)
@@ -4556,7 +5635,7 @@ func_mode_link ()
;;
esac
fi
- deplibs="$deplibs $arg"
+ func_append deplibs " $arg"
continue
;;
@@ -4568,21 +5647,22 @@ func_mode_link ()
# Tru64 UNIX uses -model [arg] to determine the layout of C++
# classes, name mangling, and exception handling.
# Darwin uses the -arch flag to determine output architecture.
- -model|-arch|-isysroot)
- compiler_flags="$compiler_flags $arg"
+ -model|-arch|-isysroot|--sysroot)
+ func_append compiler_flags " $arg"
func_append compile_command " $arg"
func_append finalize_command " $arg"
prev=xcompiler
continue
;;
- -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads)
- compiler_flags="$compiler_flags $arg"
+ -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \
+ |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*)
+ func_append compiler_flags " $arg"
func_append compile_command " $arg"
func_append finalize_command " $arg"
case "$new_inherited_linker_flags " in
*" $arg "*) ;;
- * ) new_inherited_linker_flags="$new_inherited_linker_flags $arg" ;;
+ * ) func_append new_inherited_linker_flags " $arg" ;;
esac
continue
;;
@@ -4649,13 +5729,17 @@ func_mode_link ()
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
+ =*)
+ func_stripname '=' '' "$dir"
+ dir=$lt_sysroot$func_stripname_result
+ ;;
*)
func_fatal_error "only absolute run-paths are allowed"
;;
esac
case "$xrpath " in
*" $dir "*) ;;
- *) xrpath="$xrpath $dir" ;;
+ *) func_append xrpath " $dir" ;;
esac
continue
;;
@@ -4708,8 +5792,8 @@ func_mode_link ()
for flag in $args; do
IFS="$save_ifs"
func_quote_for_eval "$flag"
- arg="$arg $wl$func_quote_for_eval_result"
- compiler_flags="$compiler_flags $func_quote_for_eval_result"
+ func_append arg " $func_quote_for_eval_result"
+ func_append compiler_flags " $func_quote_for_eval_result"
done
IFS="$save_ifs"
func_stripname ' ' '' "$arg"
@@ -4724,9 +5808,9 @@ func_mode_link ()
for flag in $args; do
IFS="$save_ifs"
func_quote_for_eval "$flag"
- arg="$arg $wl$func_quote_for_eval_result"
- compiler_flags="$compiler_flags $wl$func_quote_for_eval_result"
- linker_flags="$linker_flags $func_quote_for_eval_result"
+ func_append arg " $wl$func_quote_for_eval_result"
+ func_append compiler_flags " $wl$func_quote_for_eval_result"
+ func_append linker_flags " $func_quote_for_eval_result"
done
IFS="$save_ifs"
func_stripname ' ' '' "$arg"
@@ -4754,23 +5838,27 @@ func_mode_link ()
arg="$func_quote_for_eval_result"
;;
- # -64, -mips[0-9] enable 64-bit mode on the SGI compiler
- # -r[0-9][0-9]* specifies the processor on the SGI compiler
- # -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler
- # +DA*, +DD* enable 64-bit mode on the HP compiler
- # -q* pass through compiler args for the IBM compiler
- # -m*, -t[45]*, -txscale* pass through architecture-specific
- # compiler args for GCC
- # -F/path gives path to uninstalled frameworks, gcc on darwin
- # -p, -pg, --coverage, -fprofile-* pass through profiling flag for GCC
- # @file GCC response files
+ # Flags to be passed through unchanged, with rationale:
+ # -64, -mips[0-9] enable 64-bit mode for the SGI compiler
+ # -r[0-9][0-9]* specify processor for the SGI compiler
+ # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler
+ # +DA*, +DD* enable 64-bit mode for the HP compiler
+ # -q* compiler args for the IBM compiler
+ # -m*, -t[45]*, -txscale* architecture-specific flags for GCC
+ # -F/path path to uninstalled frameworks, gcc on darwin
+ # -p, -pg, --coverage, -fprofile-* profiling flags for GCC
+ # @file GCC response files
+ # -tp=* Portland pgcc target processor selection
+ # --sysroot=* for sysroot support
+ # -O*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization
-64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \
- -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*)
+ -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \
+ -O*|-flto*|-fwhopr*|-fuse-linker-plugin)
func_quote_for_eval "$arg"
arg="$func_quote_for_eval_result"
func_append compile_command " $arg"
func_append finalize_command " $arg"
- compiler_flags="$compiler_flags $arg"
+ func_append compiler_flags " $arg"
continue
;;
@@ -4782,7 +5870,7 @@ func_mode_link ()
*.$objext)
# A standard object.
- objs="$objs $arg"
+ func_append objs " $arg"
;;
*.lo)
@@ -4813,7 +5901,7 @@ func_mode_link ()
if test "$prev" = dlfiles; then
if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
- dlfiles="$dlfiles $pic_object"
+ func_append dlfiles " $pic_object"
prev=
continue
else
@@ -4825,7 +5913,7 @@ func_mode_link ()
# CHECK ME: I think I busted this. -Ossama
if test "$prev" = dlprefiles; then
# Preload the old-style object.
- dlprefiles="$dlprefiles $pic_object"
+ func_append dlprefiles " $pic_object"
prev=
fi
@@ -4870,24 +5958,25 @@ func_mode_link ()
*.$libext)
# An archive.
- deplibs="$deplibs $arg"
- old_deplibs="$old_deplibs $arg"
+ func_append deplibs " $arg"
+ func_append old_deplibs " $arg"
continue
;;
*.la)
# A libtool-controlled library.
+ func_resolve_sysroot "$arg"
if test "$prev" = dlfiles; then
# This library was specified with -dlopen.
- dlfiles="$dlfiles $arg"
+ func_append dlfiles " $func_resolve_sysroot_result"
prev=
elif test "$prev" = dlprefiles; then
# The library was specified with -dlpreopen.
- dlprefiles="$dlprefiles $arg"
+ func_append dlprefiles " $func_resolve_sysroot_result"
prev=
else
- deplibs="$deplibs $arg"
+ func_append deplibs " $func_resolve_sysroot_result"
fi
continue
;;
@@ -4925,7 +6014,7 @@ func_mode_link ()
if test -n "$shlibpath_var"; then
# get the directories listed in $shlibpath_var
- eval shlib_search_path=\`\$ECHO \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\`
+ eval shlib_search_path=\`\$ECHO \"\${$shlibpath_var}\" \| \$SED \'s/:/ /g\'\`
else
shlib_search_path=
fi
@@ -4934,6 +6023,8 @@ func_mode_link ()
func_dirname "$output" "/" ""
output_objdir="$func_dirname_result$objdir"
+ func_to_tool_file "$output_objdir/"
+ tool_output_objdir=$func_to_tool_file_result
# Create the object directory.
func_mkdir_p "$output_objdir"
@@ -4954,12 +6045,12 @@ func_mode_link ()
# Find all interdependent deplibs by searching for libraries
# that are linked more than once (e.g. -la -lb -la)
for deplib in $deplibs; do
- if $opt_duplicate_deps ; then
+ if $opt_preserve_dup_deps ; then
case "$libs " in
- *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ *" $deplib "*) func_append specialdeplibs " $deplib" ;;
esac
fi
- libs="$libs $deplib"
+ func_append libs " $deplib"
done
if test "$linkmode" = lib; then
@@ -4972,9 +6063,9 @@ func_mode_link ()
if $opt_duplicate_compiler_generated_deps; then
for pre_post_dep in $predeps $postdeps; do
case "$pre_post_deps " in
- *" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;;
+ *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;;
esac
- pre_post_deps="$pre_post_deps $pre_post_dep"
+ func_append pre_post_deps " $pre_post_dep"
done
fi
pre_post_deps=
@@ -5041,17 +6132,19 @@ func_mode_link ()
for lib in $dlprefiles; do
# Ignore non-libtool-libs
dependency_libs=
+ func_resolve_sysroot "$lib"
case $lib in
- *.la) func_source "$lib" ;;
+ *.la) func_source "$func_resolve_sysroot_result" ;;
esac
# Collect preopened libtool deplibs, except any this library
# has declared as weak libs
for deplib in $dependency_libs; do
- deplib_base=`$ECHO "X$deplib" | $Xsed -e "$basename"`
+ func_basename "$deplib"
+ deplib_base=$func_basename_result
case " $weak_libs " in
*" $deplib_base "*) ;;
- *) deplibs="$deplibs $deplib" ;;
+ *) func_append deplibs " $deplib" ;;
esac
done
done
@@ -5067,16 +6160,17 @@ func_mode_link ()
lib=
found=no
case $deplib in
- -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads)
+ -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \
+ |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*)
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
- compiler_flags="$compiler_flags $deplib"
+ func_append compiler_flags " $deplib"
if test "$linkmode" = lib ; then
case "$new_inherited_linker_flags " in
*" $deplib "*) ;;
- * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;;
+ * ) func_append new_inherited_linker_flags " $deplib" ;;
esac
fi
fi
@@ -5161,7 +6255,7 @@ func_mode_link ()
if test "$linkmode" = lib ; then
case "$new_inherited_linker_flags " in
*" $deplib "*) ;;
- * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;;
+ * ) func_append new_inherited_linker_flags " $deplib" ;;
esac
fi
fi
@@ -5174,7 +6268,8 @@ func_mode_link ()
test "$pass" = conv && continue
newdependency_libs="$deplib $newdependency_libs"
func_stripname '-L' '' "$deplib"
- newlib_search_path="$newlib_search_path $func_stripname_result"
+ func_resolve_sysroot "$func_stripname_result"
+ func_append newlib_search_path " $func_resolve_sysroot_result"
;;
prog)
if test "$pass" = conv; then
@@ -5188,7 +6283,8 @@ func_mode_link ()
finalize_deplibs="$deplib $finalize_deplibs"
fi
func_stripname '-L' '' "$deplib"
- newlib_search_path="$newlib_search_path $func_stripname_result"
+ func_resolve_sysroot "$func_stripname_result"
+ func_append newlib_search_path " $func_resolve_sysroot_result"
;;
*)
func_warning "\`-L' is ignored for archives/objects"
@@ -5199,17 +6295,21 @@ func_mode_link ()
-R*)
if test "$pass" = link; then
func_stripname '-R' '' "$deplib"
- dir=$func_stripname_result
+ func_resolve_sysroot "$func_stripname_result"
+ dir=$func_resolve_sysroot_result
# Make sure the xrpath contains only unique directories.
case "$xrpath " in
*" $dir "*) ;;
- *) xrpath="$xrpath $dir" ;;
+ *) func_append xrpath " $dir" ;;
esac
fi
deplibs="$deplib $deplibs"
continue
;;
- *.la) lib="$deplib" ;;
+ *.la)
+ func_resolve_sysroot "$deplib"
+ lib=$func_resolve_sysroot_result
+ ;;
*.$libext)
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
@@ -5227,7 +6327,7 @@ func_mode_link ()
match_pattern*)
set dummy $deplibs_check_method; shift
match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
- if eval "\$ECHO \"X$deplib\"" 2>/dev/null | $Xsed -e 10q \
+ if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \
| $EGREP "$match_pattern_regex" > /dev/null; then
valid_a_lib=yes
fi
@@ -5237,15 +6337,15 @@ func_mode_link ()
;;
esac
if test "$valid_a_lib" != yes; then
- $ECHO
+ echo
$ECHO "*** Warning: Trying to link with static lib archive $deplib."
- $ECHO "*** I have the capability to make that library automatically link in when"
- $ECHO "*** you link to this library. But I can only do this if you have a"
- $ECHO "*** shared version of the library, which you do not appear to have"
- $ECHO "*** because the file extensions .$libext of this argument makes me believe"
- $ECHO "*** that it is just a static archive that I should not use here."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have"
+ echo "*** because the file extensions .$libext of this argument makes me believe"
+ echo "*** that it is just a static archive that I should not use here."
else
- $ECHO
+ echo
$ECHO "*** Warning: Linking the shared library $output against the"
$ECHO "*** static library $deplib is not portable!"
deplibs="$deplib $deplibs"
@@ -5272,11 +6372,11 @@ func_mode_link ()
if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
# If there is no dlopen support or we're linking statically,
# we need to preload.
- newdlprefiles="$newdlprefiles $deplib"
+ func_append newdlprefiles " $deplib"
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
- newdlfiles="$newdlfiles $deplib"
+ func_append newdlfiles " $deplib"
fi
fi
continue
@@ -5318,20 +6418,20 @@ func_mode_link ()
# Convert "-framework foo" to "foo.ltframework"
if test -n "$inherited_linker_flags"; then
- tmp_inherited_linker_flags=`$ECHO "X$inherited_linker_flags" | $Xsed -e 's/-framework \([^ $]*\)/\1.ltframework/g'`
+ tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'`
for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do
case " $new_inherited_linker_flags " in
*" $tmp_inherited_linker_flag "*) ;;
- *) new_inherited_linker_flags="$new_inherited_linker_flags $tmp_inherited_linker_flag";;
+ *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";;
esac
done
fi
- dependency_libs=`$ECHO "X $dependency_libs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
+ dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
if test "$linkmode,$pass" = "lib,link" ||
test "$linkmode,$pass" = "prog,scan" ||
{ test "$linkmode" != prog && test "$linkmode" != lib; }; then
- test -n "$dlopen" && dlfiles="$dlfiles $dlopen"
- test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen"
+ test -n "$dlopen" && func_append dlfiles " $dlopen"
+ test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen"
fi
if test "$pass" = conv; then
@@ -5342,20 +6442,20 @@ func_mode_link ()
func_fatal_error "cannot find name of link library for \`$lib'"
fi
# It is a libtool convenience library, so add in its objects.
- convenience="$convenience $ladir/$objdir/$old_library"
- old_convenience="$old_convenience $ladir/$objdir/$old_library"
+ func_append convenience " $ladir/$objdir/$old_library"
+ func_append old_convenience " $ladir/$objdir/$old_library"
elif test "$linkmode" != prog && test "$linkmode" != lib; then
func_fatal_error "\`$lib' is not a convenience library"
fi
tmp_libs=
for deplib in $dependency_libs; do
deplibs="$deplib $deplibs"
- if $opt_duplicate_deps ; then
+ if $opt_preserve_dup_deps ; then
case "$tmp_libs " in
- *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ *" $deplib "*) func_append specialdeplibs " $deplib" ;;
esac
fi
- tmp_libs="$tmp_libs $deplib"
+ func_append tmp_libs " $deplib"
done
continue
fi # $pass = conv
@@ -5363,9 +6463,15 @@ func_mode_link ()
# Get the name of the library we link against.
linklib=
- for l in $old_library $library_names; do
- linklib="$l"
- done
+ if test -n "$old_library" &&
+ { test "$prefer_static_libs" = yes ||
+ test "$prefer_static_libs,$installed" = "built,no"; }; then
+ linklib=$old_library
+ else
+ for l in $old_library $library_names; do
+ linklib="$l"
+ done
+ fi
if test -z "$linklib"; then
func_fatal_error "cannot find name of link library for \`$lib'"
fi
@@ -5382,9 +6488,9 @@ func_mode_link ()
# statically, we need to preload. We also need to preload any
# dependent libraries so libltdl's deplib preloader doesn't
# bomb out in the load deplibs phase.
- dlprefiles="$dlprefiles $lib $dependency_libs"
+ func_append dlprefiles " $lib $dependency_libs"
else
- newdlfiles="$newdlfiles $lib"
+ func_append newdlfiles " $lib"
fi
continue
fi # $pass = dlopen
@@ -5406,14 +6512,14 @@ func_mode_link ()
# Find the relevant object directory and library name.
if test "X$installed" = Xyes; then
- if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
+ if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
func_warning "library \`$lib' was moved."
dir="$ladir"
absdir="$abs_ladir"
libdir="$abs_ladir"
else
- dir="$libdir"
- absdir="$libdir"
+ dir="$lt_sysroot$libdir"
+ absdir="$lt_sysroot$libdir"
fi
test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes
else
@@ -5421,12 +6527,12 @@ func_mode_link ()
dir="$ladir"
absdir="$abs_ladir"
# Remove this search path later
- notinst_path="$notinst_path $abs_ladir"
+ func_append notinst_path " $abs_ladir"
else
dir="$ladir/$objdir"
absdir="$abs_ladir/$objdir"
# Remove this search path later
- notinst_path="$notinst_path $abs_ladir"
+ func_append notinst_path " $abs_ladir"
fi
fi # $installed = yes
func_stripname 'lib' '.la' "$laname"
@@ -5437,20 +6543,46 @@ func_mode_link ()
if test -z "$libdir" && test "$linkmode" = prog; then
func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'"
fi
- # Prefer using a static library (so that no silly _DYNAMIC symbols
- # are required to link).
- if test -n "$old_library"; then
- newdlprefiles="$newdlprefiles $dir/$old_library"
- # Keep a list of preopened convenience libraries to check
- # that they are being used correctly in the link pass.
- test -z "$libdir" && \
- dlpreconveniencelibs="$dlpreconveniencelibs $dir/$old_library"
- # Otherwise, use the dlname, so that lt_dlopen finds it.
- elif test -n "$dlname"; then
- newdlprefiles="$newdlprefiles $dir/$dlname"
- else
- newdlprefiles="$newdlprefiles $dir/$linklib"
- fi
+ case "$host" in
+ # special handling for platforms with PE-DLLs.
+ *cygwin* | *mingw* | *cegcc* )
+ # Linker will automatically link against shared library if both
+ # static and shared are present. Therefore, ensure we extract
+ # symbols from the import library if a shared library is present
+ # (otherwise, the dlopen module name will be incorrect). We do
+ # this by putting the import library name into $newdlprefiles.
+ # We recover the dlopen module name by 'saving' the la file
+ # name in a special purpose variable, and (later) extracting the
+ # dlname from the la file.
+ if test -n "$dlname"; then
+ func_tr_sh "$dir/$linklib"
+ eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname"
+ func_append newdlprefiles " $dir/$linklib"
+ else
+ func_append newdlprefiles " $dir/$old_library"
+ # Keep a list of preopened convenience libraries to check
+ # that they are being used correctly in the link pass.
+ test -z "$libdir" && \
+ func_append dlpreconveniencelibs " $dir/$old_library"
+ fi
+ ;;
+ * )
+ # Prefer using a static library (so that no silly _DYNAMIC symbols
+ # are required to link).
+ if test -n "$old_library"; then
+ func_append newdlprefiles " $dir/$old_library"
+ # Keep a list of preopened convenience libraries to check
+ # that they are being used correctly in the link pass.
+ test -z "$libdir" && \
+ func_append dlpreconveniencelibs " $dir/$old_library"
+ # Otherwise, use the dlname, so that lt_dlopen finds it.
+ elif test -n "$dlname"; then
+ func_append newdlprefiles " $dir/$dlname"
+ else
+ func_append newdlprefiles " $dir/$linklib"
+ fi
+ ;;
+ esac
fi # $pass = dlpreopen
if test -z "$libdir"; then
@@ -5468,7 +6600,7 @@ func_mode_link ()
if test "$linkmode" = prog && test "$pass" != link; then
- newlib_search_path="$newlib_search_path $ladir"
+ func_append newlib_search_path " $ladir"
deplibs="$lib $deplibs"
linkalldeplibs=no
@@ -5481,7 +6613,8 @@ func_mode_link ()
for deplib in $dependency_libs; do
case $deplib in
-L*) func_stripname '-L' '' "$deplib"
- newlib_search_path="$newlib_search_path $func_stripname_result"
+ func_resolve_sysroot "$func_stripname_result"
+ func_append newlib_search_path " $func_resolve_sysroot_result"
;;
esac
# Need to link against all dependency_libs?
@@ -5492,12 +6625,12 @@ func_mode_link ()
# or/and link against static libraries
newdependency_libs="$deplib $newdependency_libs"
fi
- if $opt_duplicate_deps ; then
+ if $opt_preserve_dup_deps ; then
case "$tmp_libs " in
- *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ *" $deplib "*) func_append specialdeplibs " $deplib" ;;
esac
fi
- tmp_libs="$tmp_libs $deplib"
+ func_append tmp_libs " $deplib"
done # for deplib
continue
fi # $linkmode = prog...
@@ -5512,7 +6645,7 @@ func_mode_link ()
# Make sure the rpath contains only unique directories.
case "$temp_rpath:" in
*"$absdir:"*) ;;
- *) temp_rpath="$temp_rpath$absdir:" ;;
+ *) func_append temp_rpath "$absdir:" ;;
esac
fi
@@ -5524,7 +6657,7 @@ func_mode_link ()
*)
case "$compile_rpath " in
*" $absdir "*) ;;
- *) compile_rpath="$compile_rpath $absdir"
+ *) func_append compile_rpath " $absdir" ;;
esac
;;
esac
@@ -5533,7 +6666,7 @@ func_mode_link ()
*)
case "$finalize_rpath " in
*" $libdir "*) ;;
- *) finalize_rpath="$finalize_rpath $libdir"
+ *) func_append finalize_rpath " $libdir" ;;
esac
;;
esac
@@ -5558,12 +6691,12 @@ func_mode_link ()
case $host in
*cygwin* | *mingw* | *cegcc*)
# No point in relinking DLLs because paths are not encoded
- notinst_deplibs="$notinst_deplibs $lib"
+ func_append notinst_deplibs " $lib"
need_relink=no
;;
*)
if test "$installed" = no; then
- notinst_deplibs="$notinst_deplibs $lib"
+ func_append notinst_deplibs " $lib"
need_relink=yes
fi
;;
@@ -5580,7 +6713,7 @@ func_mode_link ()
fi
done
if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then
- $ECHO
+ echo
if test "$linkmode" = prog; then
$ECHO "*** Warning: Linking the executable $output against the loadable module"
else
@@ -5598,7 +6731,7 @@ func_mode_link ()
*)
case "$compile_rpath " in
*" $absdir "*) ;;
- *) compile_rpath="$compile_rpath $absdir"
+ *) func_append compile_rpath " $absdir" ;;
esac
;;
esac
@@ -5607,7 +6740,7 @@ func_mode_link ()
*)
case "$finalize_rpath " in
*" $libdir "*) ;;
- *) finalize_rpath="$finalize_rpath $libdir"
+ *) func_append finalize_rpath " $libdir" ;;
esac
;;
esac
@@ -5661,7 +6794,7 @@ func_mode_link ()
linklib=$newlib
fi # test -n "$old_archive_from_expsyms_cmds"
- if test "$linkmode" = prog || test "$mode" != relink; then
+ if test "$linkmode" = prog || test "$opt_mode" != relink; then
add_shlibpath=
add_dir=
add=
@@ -5683,9 +6816,9 @@ func_mode_link ()
if test "X$dlopenmodule" != "X$lib"; then
$ECHO "*** Warning: lib $linklib is a module, not a shared library"
if test -z "$old_library" ; then
- $ECHO
- $ECHO "*** And there doesn't seem to be a static archive available"
- $ECHO "*** The link will probably fail, sorry"
+ echo
+ echo "*** And there doesn't seem to be a static archive available"
+ echo "*** The link will probably fail, sorry"
else
add="$dir/$old_library"
fi
@@ -5712,12 +6845,12 @@ func_mode_link ()
test "$hardcode_direct_absolute" = no; then
add="$dir/$linklib"
elif test "$hardcode_minus_L" = yes; then
- add_dir="-L$dir"
+ add_dir="-L$absdir"
# Try looking first in the location we're being installed to.
if test -n "$inst_prefix_dir"; then
case $libdir in
[\\/]*)
- add_dir="$add_dir -L$inst_prefix_dir$libdir"
+ func_append add_dir " -L$inst_prefix_dir$libdir"
;;
esac
fi
@@ -5739,7 +6872,7 @@ func_mode_link ()
if test -n "$add_shlibpath"; then
case :$compile_shlibpath: in
*":$add_shlibpath:"*) ;;
- *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;;
+ *) func_append compile_shlibpath "$add_shlibpath:" ;;
esac
fi
if test "$linkmode" = prog; then
@@ -5753,13 +6886,13 @@ func_mode_link ()
test "$hardcode_shlibpath_var" = yes; then
case :$finalize_shlibpath: in
*":$libdir:"*) ;;
- *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
+ *) func_append finalize_shlibpath "$libdir:" ;;
esac
fi
fi
fi
- if test "$linkmode" = prog || test "$mode" = relink; then
+ if test "$linkmode" = prog || test "$opt_mode" = relink; then
add_shlibpath=
add_dir=
add=
@@ -5773,7 +6906,7 @@ func_mode_link ()
elif test "$hardcode_shlibpath_var" = yes; then
case :$finalize_shlibpath: in
*":$libdir:"*) ;;
- *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
+ *) func_append finalize_shlibpath "$libdir:" ;;
esac
add="-l$name"
elif test "$hardcode_automatic" = yes; then
@@ -5790,7 +6923,7 @@ func_mode_link ()
if test -n "$inst_prefix_dir"; then
case $libdir in
[\\/]*)
- add_dir="$add_dir -L$inst_prefix_dir$libdir"
+ func_append add_dir " -L$inst_prefix_dir$libdir"
;;
esac
fi
@@ -5825,21 +6958,21 @@ func_mode_link ()
# Just print a warning and add the library to dependency_libs so
# that the program can be linked against the static library.
- $ECHO
+ echo
$ECHO "*** Warning: This system can not link to static lib archive $lib."
- $ECHO "*** I have the capability to make that library automatically link in when"
- $ECHO "*** you link to this library. But I can only do this if you have a"
- $ECHO "*** shared version of the library, which you do not appear to have."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have."
if test "$module" = yes; then
- $ECHO "*** But as you try to build a module library, libtool will still create "
- $ECHO "*** a static module, that should work as long as the dlopening application"
- $ECHO "*** is linked with the -dlopen flag to resolve symbols at runtime."
+ echo "*** But as you try to build a module library, libtool will still create "
+ echo "*** a static module, that should work as long as the dlopening application"
+ echo "*** is linked with the -dlopen flag to resolve symbols at runtime."
if test -z "$global_symbol_pipe"; then
- $ECHO
- $ECHO "*** However, this would only work if libtool was able to extract symbol"
- $ECHO "*** lists from a program, using \`nm' or equivalent, but libtool could"
- $ECHO "*** not find such a program. So, this module is probably useless."
- $ECHO "*** \`nm' from GNU binutils and a full rebuild may help."
+ echo
+ echo "*** However, this would only work if libtool was able to extract symbol"
+ echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
+ echo "*** not find such a program. So, this module is probably useless."
+ echo "*** \`nm' from GNU binutils and a full rebuild may help."
fi
if test "$build_old_libs" = no; then
build_libtool_libs=module
@@ -5867,37 +7000,46 @@ func_mode_link ()
temp_xrpath=$func_stripname_result
case " $xrpath " in
*" $temp_xrpath "*) ;;
- *) xrpath="$xrpath $temp_xrpath";;
+ *) func_append xrpath " $temp_xrpath";;
esac;;
- *) temp_deplibs="$temp_deplibs $libdir";;
+ *) func_append temp_deplibs " $libdir";;
esac
done
dependency_libs="$temp_deplibs"
fi
- newlib_search_path="$newlib_search_path $absdir"
+ func_append newlib_search_path " $absdir"
# Link against this library
test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs"
# ... and its dependency_libs
tmp_libs=
for deplib in $dependency_libs; do
newdependency_libs="$deplib $newdependency_libs"
- if $opt_duplicate_deps ; then
+ case $deplib in
+ -L*) func_stripname '-L' '' "$deplib"
+ func_resolve_sysroot "$func_stripname_result";;
+ *) func_resolve_sysroot "$deplib" ;;
+ esac
+ if $opt_preserve_dup_deps ; then
case "$tmp_libs " in
- *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
+ *" $func_resolve_sysroot_result "*)
+ func_append specialdeplibs " $func_resolve_sysroot_result" ;;
esac
fi
- tmp_libs="$tmp_libs $deplib"
+ func_append tmp_libs " $func_resolve_sysroot_result"
done
if test "$link_all_deplibs" != no; then
# Add the search paths of all dependency libraries
for deplib in $dependency_libs; do
+ path=
case $deplib in
-L*) path="$deplib" ;;
*.la)
+ func_resolve_sysroot "$deplib"
+ deplib=$func_resolve_sysroot_result
func_dirname "$deplib" "" "."
- dir="$func_dirname_result"
+ dir=$func_dirname_result
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;;
@@ -5924,8 +7066,8 @@ func_mode_link ()
if test -z "$darwin_install_name"; then
darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'`
fi
- compiler_flags="$compiler_flags ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}"
- linker_flags="$linker_flags -dylib_file ${darwin_install_name}:${depdepl}"
+ func_append compiler_flags " ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}"
+ func_append linker_flags " -dylib_file ${darwin_install_name}:${depdepl}"
path=
fi
fi
@@ -5958,7 +7100,7 @@ func_mode_link ()
compile_deplibs="$new_inherited_linker_flags $compile_deplibs"
finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs"
else
- compiler_flags="$compiler_flags "`$ECHO "X $new_inherited_linker_flags" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
+ compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
fi
fi
dependency_libs="$newdependency_libs"
@@ -5975,7 +7117,7 @@ func_mode_link ()
for dir in $newlib_search_path; do
case "$lib_search_path " in
*" $dir "*) ;;
- *) lib_search_path="$lib_search_path $dir" ;;
+ *) func_append lib_search_path " $dir" ;;
esac
done
newlib_search_path=
@@ -6033,10 +7175,10 @@ func_mode_link ()
-L*)
case " $tmp_libs " in
*" $deplib "*) ;;
- *) tmp_libs="$tmp_libs $deplib" ;;
+ *) func_append tmp_libs " $deplib" ;;
esac
;;
- *) tmp_libs="$tmp_libs $deplib" ;;
+ *) func_append tmp_libs " $deplib" ;;
esac
done
eval $var=\"$tmp_libs\"
@@ -6052,7 +7194,7 @@ func_mode_link ()
;;
esac
if test -n "$i" ; then
- tmp_libs="$tmp_libs $i"
+ func_append tmp_libs " $i"
fi
done
dependency_libs=$tmp_libs
@@ -6093,7 +7235,7 @@ func_mode_link ()
# Now set the variables for building old libraries.
build_libtool_libs=no
oldlibs="$output"
- objs="$objs$old_deplibs"
+ func_append objs "$old_deplibs"
;;
lib)
@@ -6126,10 +7268,10 @@ func_mode_link ()
if test "$deplibs_check_method" != pass_all; then
func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs"
else
- $ECHO
+ echo
$ECHO "*** Warning: Linking the shared library $output against the non-libtool"
$ECHO "*** objects $objs is not portable!"
- libobjs="$libobjs $objs"
+ func_append libobjs " $objs"
fi
fi
@@ -6188,13 +7330,14 @@ func_mode_link ()
# which has an extra 1 added just for fun
#
case $version_type in
+ # correct linux to gnu/linux during the next big refactor
darwin|linux|osf|windows|none)
func_arith $number_major + $number_minor
current=$func_arith_result
age="$number_minor"
revision="$number_revision"
;;
- freebsd-aout|freebsd-elf|sunos)
+ freebsd-aout|freebsd-elf|qnx|sunos)
current="$number_major"
revision="$number_minor"
age="0"
@@ -6304,7 +7447,7 @@ func_mode_link ()
versuffix="$major.$revision"
;;
- linux)
+ linux) # correct to gnu/linux during the next big refactor
func_arith $current - $age
major=.$func_arith_result
versuffix="$major.$age.$revision"
@@ -6327,7 +7470,7 @@ func_mode_link ()
done
# Make executables depend on our current version.
- verstring="$verstring:${current}.0"
+ func_append verstring ":${current}.0"
;;
qnx)
@@ -6395,10 +7538,10 @@ func_mode_link ()
fi
func_generate_dlsyms "$libname" "$libname" "yes"
- libobjs="$libobjs $symfileobj"
+ func_append libobjs " $symfileobj"
test "X$libobjs" = "X " && libobjs=
- if test "$mode" != relink; then
+ if test "$opt_mode" != relink; then
# Remove our outputs, but don't remove object files since they
# may have been created when compiling PIC objects.
removelist=
@@ -6414,7 +7557,7 @@ func_mode_link ()
continue
fi
fi
- removelist="$removelist $p"
+ func_append removelist " $p"
;;
*) ;;
esac
@@ -6425,27 +7568,28 @@ func_mode_link ()
# Now set the variables for building old libraries.
if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then
- oldlibs="$oldlibs $output_objdir/$libname.$libext"
+ func_append oldlibs " $output_objdir/$libname.$libext"
# Transform .lo files to .o files.
- oldobjs="$objs "`$ECHO "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP`
+ oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; $lo2o" | $NL2SP`
fi
# Eliminate all temporary directories.
#for path in $notinst_path; do
- # lib_search_path=`$ECHO "X$lib_search_path " | $Xsed -e "s% $path % %g"`
- # deplibs=`$ECHO "X$deplibs " | $Xsed -e "s% -L$path % %g"`
- # dependency_libs=`$ECHO "X$dependency_libs " | $Xsed -e "s% -L$path % %g"`
+ # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"`
+ # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"`
+ # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"`
#done
if test -n "$xrpath"; then
# If the user specified any rpath flags, then add them.
temp_xrpath=
for libdir in $xrpath; do
- temp_xrpath="$temp_xrpath -R$libdir"
+ func_replace_sysroot "$libdir"
+ func_append temp_xrpath " -R$func_replace_sysroot_result"
case "$finalize_rpath " in
*" $libdir "*) ;;
- *) finalize_rpath="$finalize_rpath $libdir" ;;
+ *) func_append finalize_rpath " $libdir" ;;
esac
done
if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then
@@ -6459,7 +7603,7 @@ func_mode_link ()
for lib in $old_dlfiles; do
case " $dlprefiles $dlfiles " in
*" $lib "*) ;;
- *) dlfiles="$dlfiles $lib" ;;
+ *) func_append dlfiles " $lib" ;;
esac
done
@@ -6469,19 +7613,19 @@ func_mode_link ()
for lib in $old_dlprefiles; do
case "$dlprefiles " in
*" $lib "*) ;;
- *) dlprefiles="$dlprefiles $lib" ;;
+ *) func_append dlprefiles " $lib" ;;
esac
done
if test "$build_libtool_libs" = yes; then
if test -n "$rpath"; then
case $host in
- *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc*)
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*)
# these systems don't actually have a c library (as such)!
;;
*-*-rhapsody* | *-*-darwin1.[012])
# Rhapsody C library is in the System framework
- deplibs="$deplibs System.ltframework"
+ func_append deplibs " System.ltframework"
;;
*-*-netbsd*)
# Don't link with libc until the a.out ld.so is fixed.
@@ -6498,7 +7642,7 @@ func_mode_link ()
*)
# Add libc to deplibs on all other systems if necessary.
if test "$build_libtool_need_lc" = "yes"; then
- deplibs="$deplibs -lc"
+ func_append deplibs " -lc"
fi
;;
esac
@@ -6547,7 +7691,7 @@ EOF
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $i "*)
- newdeplibs="$newdeplibs $i"
+ func_append newdeplibs " $i"
i=""
;;
esac
@@ -6558,21 +7702,21 @@ EOF
set dummy $deplib_matches; shift
deplib_match=$1
if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
- newdeplibs="$newdeplibs $i"
+ func_append newdeplibs " $i"
else
droppeddeps=yes
- $ECHO
+ echo
$ECHO "*** Warning: dynamic linker does not accept needed library $i."
- $ECHO "*** I have the capability to make that library automatically link in when"
- $ECHO "*** you link to this library. But I can only do this if you have a"
- $ECHO "*** shared version of the library, which I believe you do not have"
- $ECHO "*** because a test_compile did reveal that the linker did not use it for"
- $ECHO "*** its dynamic dependency list that programs get resolved with at runtime."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which I believe you do not have"
+ echo "*** because a test_compile did reveal that the linker did not use it for"
+ echo "*** its dynamic dependency list that programs get resolved with at runtime."
fi
fi
;;
*)
- newdeplibs="$newdeplibs $i"
+ func_append newdeplibs " $i"
;;
esac
done
@@ -6590,7 +7734,7 @@ EOF
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $i "*)
- newdeplibs="$newdeplibs $i"
+ func_append newdeplibs " $i"
i=""
;;
esac
@@ -6601,29 +7745,29 @@ EOF
set dummy $deplib_matches; shift
deplib_match=$1
if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
- newdeplibs="$newdeplibs $i"
+ func_append newdeplibs " $i"
else
droppeddeps=yes
- $ECHO
+ echo
$ECHO "*** Warning: dynamic linker does not accept needed library $i."
- $ECHO "*** I have the capability to make that library automatically link in when"
- $ECHO "*** you link to this library. But I can only do this if you have a"
- $ECHO "*** shared version of the library, which you do not appear to have"
- $ECHO "*** because a test_compile did reveal that the linker did not use this one"
- $ECHO "*** as a dynamic dependency that programs can get resolved with at runtime."
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have"
+ echo "*** because a test_compile did reveal that the linker did not use this one"
+ echo "*** as a dynamic dependency that programs can get resolved with at runtime."
fi
fi
else
droppeddeps=yes
- $ECHO
+ echo
$ECHO "*** Warning! Library $i is needed by this library but I was not able to"
- $ECHO "*** make it link in! You will probably need to install it or some"
- $ECHO "*** library that it depends on before this library will be fully"
- $ECHO "*** functional. Installing it before continuing would be even better."
+ echo "*** make it link in! You will probably need to install it or some"
+ echo "*** library that it depends on before this library will be fully"
+ echo "*** functional. Installing it before continuing would be even better."
fi
;;
*)
- newdeplibs="$newdeplibs $i"
+ func_append newdeplibs " $i"
;;
esac
done
@@ -6640,15 +7784,27 @@ EOF
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $a_deplib "*)
- newdeplibs="$newdeplibs $a_deplib"
+ func_append newdeplibs " $a_deplib"
a_deplib=""
;;
esac
fi
if test -n "$a_deplib" ; then
libname=`eval "\\$ECHO \"$libname_spec\""`
+ if test -n "$file_magic_glob"; then
+ libnameglob=`func_echo_all "$libname" | $SED -e $file_magic_glob`
+ else
+ libnameglob=$libname
+ fi
+ test "$want_nocaseglob" = yes && nocaseglob=`shopt -p nocaseglob`
for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
- potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
+ if test "$want_nocaseglob" = yes; then
+ shopt -s nocaseglob
+ potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null`
+ $nocaseglob
+ else
+ potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null`
+ fi
for potent_lib in $potential_libs; do
# Follow soft links.
if ls -lLd "$potent_lib" 2>/dev/null |
@@ -6665,13 +7821,13 @@ EOF
potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'`
case $potliblink in
[\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";;
- *) potlib=`$ECHO "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";;
+ *) potlib=`$ECHO "$potlib" | $SED 's,[^/]*$,,'`"$potliblink";;
esac
done
if eval $file_magic_cmd \"\$potlib\" 2>/dev/null |
$SED -e 10q |
$EGREP "$file_magic_regex" > /dev/null; then
- newdeplibs="$newdeplibs $a_deplib"
+ func_append newdeplibs " $a_deplib"
a_deplib=""
break 2
fi
@@ -6680,12 +7836,12 @@ EOF
fi
if test -n "$a_deplib" ; then
droppeddeps=yes
- $ECHO
+ echo
$ECHO "*** Warning: linker path does not have real file for library $a_deplib."
- $ECHO "*** I have the capability to make that library automatically link in when"
- $ECHO "*** you link to this library. But I can only do this if you have a"
- $ECHO "*** shared version of the library, which you do not appear to have"
- $ECHO "*** because I did check the linker path looking for a file starting"
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have"
+ echo "*** because I did check the linker path looking for a file starting"
if test -z "$potlib" ; then
$ECHO "*** with $libname but no candidates were found. (...for file magic test)"
else
@@ -6696,7 +7852,7 @@ EOF
;;
*)
# Add a -L argument.
- newdeplibs="$newdeplibs $a_deplib"
+ func_append newdeplibs " $a_deplib"
;;
esac
done # Gone through all deplibs.
@@ -6712,7 +7868,7 @@ EOF
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $a_deplib "*)
- newdeplibs="$newdeplibs $a_deplib"
+ func_append newdeplibs " $a_deplib"
a_deplib=""
;;
esac
@@ -6723,9 +7879,9 @@ EOF
potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
for potent_lib in $potential_libs; do
potlib="$potent_lib" # see symlink-check above in file_magic test
- if eval "\$ECHO \"X$potent_lib\"" 2>/dev/null | $Xsed -e 10q | \
+ if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \
$EGREP "$match_pattern_regex" > /dev/null; then
- newdeplibs="$newdeplibs $a_deplib"
+ func_append newdeplibs " $a_deplib"
a_deplib=""
break 2
fi
@@ -6734,12 +7890,12 @@ EOF
fi
if test -n "$a_deplib" ; then
droppeddeps=yes
- $ECHO
+ echo
$ECHO "*** Warning: linker path does not have real file for library $a_deplib."
- $ECHO "*** I have the capability to make that library automatically link in when"
- $ECHO "*** you link to this library. But I can only do this if you have a"
- $ECHO "*** shared version of the library, which you do not appear to have"
- $ECHO "*** because I did check the linker path looking for a file starting"
+ echo "*** I have the capability to make that library automatically link in when"
+ echo "*** you link to this library. But I can only do this if you have a"
+ echo "*** shared version of the library, which you do not appear to have"
+ echo "*** because I did check the linker path looking for a file starting"
if test -z "$potlib" ; then
$ECHO "*** with $libname but no candidates were found. (...for regex pattern test)"
else
@@ -6750,32 +7906,32 @@ EOF
;;
*)
# Add a -L argument.
- newdeplibs="$newdeplibs $a_deplib"
+ func_append newdeplibs " $a_deplib"
;;
esac
done # Gone through all deplibs.
;;
none | unknown | *)
newdeplibs=""
- tmp_deplibs=`$ECHO "X $deplibs" | $Xsed \
- -e 's/ -lc$//' -e 's/ -[LR][^ ]*//g'`
+ tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'`
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
for i in $predeps $postdeps ; do
# can't use Xsed below, because $i might contain '/'
- tmp_deplibs=`$ECHO "X $tmp_deplibs" | $Xsed -e "s,$i,,"`
+ tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s,$i,,"`
done
fi
- if $ECHO "X $tmp_deplibs" | $Xsed -e 's/[ ]//g' |
- $GREP . >/dev/null; then
- $ECHO
+ case $tmp_deplibs in
+ *[!\ \ ]*)
+ echo
if test "X$deplibs_check_method" = "Xnone"; then
- $ECHO "*** Warning: inter-library dependencies are not supported in this platform."
+ echo "*** Warning: inter-library dependencies are not supported in this platform."
else
- $ECHO "*** Warning: inter-library dependencies are not known to be supported."
+ echo "*** Warning: inter-library dependencies are not known to be supported."
fi
- $ECHO "*** All declared inter-library dependencies are being dropped."
+ echo "*** All declared inter-library dependencies are being dropped."
droppeddeps=yes
- fi
+ ;;
+ esac
;;
esac
versuffix=$versuffix_save
@@ -6787,23 +7943,23 @@ EOF
case $host in
*-*-rhapsody* | *-*-darwin1.[012])
# On Rhapsody replace the C library with the System framework
- newdeplibs=`$ECHO "X $newdeplibs" | $Xsed -e 's/ -lc / System.ltframework /'`
+ newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'`
;;
esac
if test "$droppeddeps" = yes; then
if test "$module" = yes; then
- $ECHO
- $ECHO "*** Warning: libtool could not satisfy all declared inter-library"
+ echo
+ echo "*** Warning: libtool could not satisfy all declared inter-library"
$ECHO "*** dependencies of module $libname. Therefore, libtool will create"
- $ECHO "*** a static module, that should work as long as the dlopening"
- $ECHO "*** application is linked with the -dlopen flag."
+ echo "*** a static module, that should work as long as the dlopening"
+ echo "*** application is linked with the -dlopen flag."
if test -z "$global_symbol_pipe"; then
- $ECHO
- $ECHO "*** However, this would only work if libtool was able to extract symbol"
- $ECHO "*** lists from a program, using \`nm' or equivalent, but libtool could"
- $ECHO "*** not find such a program. So, this module is probably useless."
- $ECHO "*** \`nm' from GNU binutils and a full rebuild may help."
+ echo
+ echo "*** However, this would only work if libtool was able to extract symbol"
+ echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
+ echo "*** not find such a program. So, this module is probably useless."
+ echo "*** \`nm' from GNU binutils and a full rebuild may help."
fi
if test "$build_old_libs" = no; then
oldlibs="$output_objdir/$libname.$libext"
@@ -6813,16 +7969,16 @@ EOF
build_libtool_libs=no
fi
else
- $ECHO "*** The inter-library dependencies that have been dropped here will be"
- $ECHO "*** automatically added whenever a program is linked with this library"
- $ECHO "*** or is declared to -dlopen it."
+ echo "*** The inter-library dependencies that have been dropped here will be"
+ echo "*** automatically added whenever a program is linked with this library"
+ echo "*** or is declared to -dlopen it."
if test "$allow_undefined" = no; then
- $ECHO
- $ECHO "*** Since this library must not contain undefined symbols,"
- $ECHO "*** because either the platform does not support them or"
- $ECHO "*** it was explicitly requested with -no-undefined,"
- $ECHO "*** libtool will only create a static version of it."
+ echo
+ echo "*** Since this library must not contain undefined symbols,"
+ echo "*** because either the platform does not support them or"
+ echo "*** it was explicitly requested with -no-undefined,"
+ echo "*** libtool will only create a static version of it."
if test "$build_old_libs" = no; then
oldlibs="$output_objdir/$libname.$libext"
build_libtool_libs=module
@@ -6839,9 +7995,9 @@ EOF
# Time to change all our "foo.ltframework" stuff back to "-framework foo"
case $host in
*-*-darwin*)
- newdeplibs=`$ECHO "X $newdeplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
- new_inherited_linker_flags=`$ECHO "X $new_inherited_linker_flags" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
- deplibs=`$ECHO "X $deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
+ newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+ new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+ deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
;;
esac
@@ -6854,7 +8010,7 @@ EOF
*)
case " $deplibs " in
*" -L$path/$objdir "*)
- new_libs="$new_libs -L$path/$objdir" ;;
+ func_append new_libs " -L$path/$objdir" ;;
esac
;;
esac
@@ -6864,10 +8020,10 @@ EOF
-L*)
case " $new_libs " in
*" $deplib "*) ;;
- *) new_libs="$new_libs $deplib" ;;
+ *) func_append new_libs " $deplib" ;;
esac
;;
- *) new_libs="$new_libs $deplib" ;;
+ *) func_append new_libs " $deplib" ;;
esac
done
deplibs="$new_libs"
@@ -6879,15 +8035,22 @@ EOF
# Test again, we may have decided not to build it any more
if test "$build_libtool_libs" = yes; then
+ # Remove ${wl} instances when linking with ld.
+ # FIXME: should test the right _cmds variable.
+ case $archive_cmds in
+ *\$LD\ *) wl= ;;
+ esac
if test "$hardcode_into_libs" = yes; then
# Hardcode the library paths
hardcode_libdirs=
dep_rpath=
rpath="$finalize_rpath"
- test "$mode" != relink && rpath="$compile_rpath$rpath"
+ test "$opt_mode" != relink && rpath="$compile_rpath$rpath"
for libdir in $rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
+ func_replace_sysroot "$libdir"
+ libdir=$func_replace_sysroot_result
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
@@ -6896,18 +8059,18 @@ EOF
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
- hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
+ func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
- dep_rpath="$dep_rpath $flag"
+ func_append dep_rpath " $flag"
fi
elif test -n "$runpath_var"; then
case "$perm_rpath " in
*" $libdir "*) ;;
- *) perm_rpath="$perm_rpath $libdir" ;;
+ *) func_append perm_rpath " $libdir" ;;
esac
fi
done
@@ -6915,17 +8078,13 @@ EOF
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
- if test -n "$hardcode_libdir_flag_spec_ld"; then
- eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\"
- else
- eval dep_rpath=\"$hardcode_libdir_flag_spec\"
- fi
+ eval "dep_rpath=\"$hardcode_libdir_flag_spec\""
fi
if test -n "$runpath_var" && test -n "$perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $perm_rpath; do
- rpath="$rpath$dir:"
+ func_append rpath "$dir:"
done
eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
fi
@@ -6933,7 +8092,7 @@ EOF
fi
shlibpath="$finalize_shlibpath"
- test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath"
+ test "$opt_mode" != relink && shlibpath="$compile_shlibpath$shlibpath"
if test -n "$shlibpath"; then
eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
fi
@@ -6959,18 +8118,18 @@ EOF
linknames=
for link
do
- linknames="$linknames $link"
+ func_append linknames " $link"
done
# Use standard objects if they are pic
- test -z "$pic_flag" && libobjs=`$ECHO "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP`
test "X$libobjs" = "X " && libobjs=
delfiles=
if test -n "$export_symbols" && test -n "$include_expsyms"; then
$opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp"
export_symbols="$output_objdir/$libname.uexp"
- delfiles="$delfiles $export_symbols"
+ func_append delfiles " $export_symbols"
fi
orig_export_symbols=
@@ -7001,13 +8160,45 @@ EOF
$opt_dry_run || $RM $export_symbols
cmds=$export_symbols_cmds
save_ifs="$IFS"; IFS='~'
- for cmd in $cmds; do
+ for cmd1 in $cmds; do
IFS="$save_ifs"
- eval cmd=\"$cmd\"
- func_len " $cmd"
- len=$func_len_result
- if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
+ # Take the normal branch if the nm_file_list_spec branch
+ # doesn't work or if tool conversion is not needed.
+ case $nm_file_list_spec~$to_tool_file_cmd in
+ *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*)
+ try_normal_branch=yes
+ eval cmd=\"$cmd1\"
+ func_len " $cmd"
+ len=$func_len_result
+ ;;
+ *)
+ try_normal_branch=no
+ ;;
+ esac
+ if test "$try_normal_branch" = yes \
+ && { test "$len" -lt "$max_cmd_len" \
+ || test "$max_cmd_len" -le -1; }
+ then
+ func_show_eval "$cmd" 'exit $?'
+ skipped_export=false
+ elif test -n "$nm_file_list_spec"; then
+ func_basename "$output"
+ output_la=$func_basename_result
+ save_libobjs=$libobjs
+ save_output=$output
+ output=${output_objdir}/${output_la}.nm
+ func_to_tool_file "$output"
+ libobjs=$nm_file_list_spec$func_to_tool_file_result
+ func_append delfiles " $output"
+ func_verbose "creating $NM input file list: $output"
+ for obj in $save_libobjs; do
+ func_to_tool_file "$obj"
+ $ECHO "$func_to_tool_file_result"
+ done > "$output"
+ eval cmd=\"$cmd1\"
func_show_eval "$cmd" 'exit $?'
+ output=$save_output
+ libobjs=$save_libobjs
skipped_export=false
else
# The command line is too long to execute in one step.
@@ -7029,7 +8220,7 @@ EOF
if test -n "$export_symbols" && test -n "$include_expsyms"; then
tmp_export_symbols="$export_symbols"
test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols"
- $opt_dry_run || eval '$ECHO "X$include_expsyms" | $Xsed | $SP2NL >> "$tmp_export_symbols"'
+ $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"'
fi
if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then
@@ -7041,7 +8232,7 @@ EOF
# global variables. join(1) would be nice here, but unfortunately
# isn't a blessed tool.
$opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
- delfiles="$delfiles $export_symbols $output_objdir/$libname.filter"
+ func_append delfiles " $export_symbols $output_objdir/$libname.filter"
export_symbols=$output_objdir/$libname.def
$opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
fi
@@ -7051,7 +8242,7 @@ EOF
case " $convenience " in
*" $test_deplib "*) ;;
*)
- tmp_deplibs="$tmp_deplibs $test_deplib"
+ func_append tmp_deplibs " $test_deplib"
;;
esac
done
@@ -7071,21 +8262,21 @@ EOF
test "X$libobjs" = "X " && libobjs=
else
gentop="$output_objdir/${outputname}x"
- generated="$generated $gentop"
+ func_append generated " $gentop"
func_extract_archives $gentop $convenience
- libobjs="$libobjs $func_extract_archives_result"
+ func_append libobjs " $func_extract_archives_result"
test "X$libobjs" = "X " && libobjs=
fi
fi
if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then
eval flag=\"$thread_safe_flag_spec\"
- linker_flags="$linker_flags $flag"
+ func_append linker_flags " $flag"
fi
# Make a backup of the uninstalled library when relinking
- if test "$mode" = relink; then
+ if test "$opt_mode" = relink; then
$opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $?
fi
@@ -7130,7 +8321,8 @@ EOF
save_libobjs=$libobjs
fi
save_output=$output
- output_la=`$ECHO "X$output" | $Xsed -e "$basename"`
+ func_basename "$output"
+ output_la=$func_basename_result
# Clear the reloadable object creation command queue and
# initialize k to one.
@@ -7143,13 +8335,16 @@ EOF
if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then
output=${output_objdir}/${output_la}.lnkscript
func_verbose "creating GNU ld script: $output"
- $ECHO 'INPUT (' > $output
+ echo 'INPUT (' > $output
for obj in $save_libobjs
do
- $ECHO "$obj" >> $output
+ func_to_tool_file "$obj"
+ $ECHO "$func_to_tool_file_result" >> $output
done
- $ECHO ')' >> $output
- delfiles="$delfiles $output"
+ echo ')' >> $output
+ func_append delfiles " $output"
+ func_to_tool_file "$output"
+ output=$func_to_tool_file_result
elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then
output=${output_objdir}/${output_la}.lnk
func_verbose "creating linker input file list: $output"
@@ -7163,10 +8358,12 @@ EOF
fi
for obj
do
- $ECHO "$obj" >> $output
+ func_to_tool_file "$obj"
+ $ECHO "$func_to_tool_file_result" >> $output
done
- delfiles="$delfiles $output"
- output=$firstobj\"$file_list_spec$output\"
+ func_append delfiles " $output"
+ func_to_tool_file "$output"
+ output=$firstobj\"$file_list_spec$func_to_tool_file_result\"
else
if test -n "$save_libobjs"; then
func_verbose "creating reloadable object files..."
@@ -7190,17 +8387,19 @@ EOF
# command to the queue.
if test "$k" -eq 1 ; then
# The first file doesn't have a previous command to add.
- eval concat_cmds=\"$reload_cmds $objlist $last_robj\"
+ reload_objs=$objlist
+ eval concat_cmds=\"$reload_cmds\"
else
# All subsequent reloadable object files will link in
# the last one created.
- eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj~\$RM $last_robj\"
+ reload_objs="$objlist $last_robj"
+ eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\"
fi
last_robj=$output_objdir/$output_la-${k}.$objext
func_arith $k + 1
k=$func_arith_result
output=$output_objdir/$output_la-${k}.$objext
- objlist=$obj
+ objlist=" $obj"
func_len " $last_robj"
func_arith $len0 + $func_len_result
len=$func_arith_result
@@ -7210,11 +8409,12 @@ EOF
# reloadable object file. All subsequent reloadable object
# files will link in the last one created.
test -z "$concat_cmds" || concat_cmds=$concat_cmds~
- eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\"
+ reload_objs="$objlist $last_robj"
+ eval concat_cmds=\"\${concat_cmds}$reload_cmds\"
if test -n "$last_robj"; then
eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\"
fi
- delfiles="$delfiles $output"
+ func_append delfiles " $output"
else
output=
@@ -7248,7 +8448,7 @@ EOF
lt_exit=$?
# Restore the uninstalled library and exit
- if test "$mode" = relink; then
+ if test "$opt_mode" = relink; then
( cd "$output_objdir" && \
$RM "${realname}T" && \
$MV "${realname}U" "$realname" )
@@ -7269,7 +8469,7 @@ EOF
if test -n "$export_symbols" && test -n "$include_expsyms"; then
tmp_export_symbols="$export_symbols"
test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols"
- $opt_dry_run || eval '$ECHO "X$include_expsyms" | $Xsed | $SP2NL >> "$tmp_export_symbols"'
+ $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"'
fi
if test -n "$orig_export_symbols"; then
@@ -7281,7 +8481,7 @@ EOF
# global variables. join(1) would be nice here, but unfortunately
# isn't a blessed tool.
$opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
- delfiles="$delfiles $export_symbols $output_objdir/$libname.filter"
+ func_append delfiles " $export_symbols $output_objdir/$libname.filter"
export_symbols=$output_objdir/$libname.def
$opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
fi
@@ -7322,10 +8522,10 @@ EOF
# Add any objects from preloaded convenience libraries
if test -n "$dlprefiles"; then
gentop="$output_objdir/${outputname}x"
- generated="$generated $gentop"
+ func_append generated " $gentop"
func_extract_archives $gentop $dlprefiles
- libobjs="$libobjs $func_extract_archives_result"
+ func_append libobjs " $func_extract_archives_result"
test "X$libobjs" = "X " && libobjs=
fi
@@ -7341,7 +8541,7 @@ EOF
lt_exit=$?
# Restore the uninstalled library and exit
- if test "$mode" = relink; then
+ if test "$opt_mode" = relink; then
( cd "$output_objdir" && \
$RM "${realname}T" && \
$MV "${realname}U" "$realname" )
@@ -7353,7 +8553,7 @@ EOF
IFS="$save_ifs"
# Restore the uninstalled library and exit
- if test "$mode" = relink; then
+ if test "$opt_mode" = relink; then
$opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $?
if test -n "$convenience"; then
@@ -7434,18 +8634,21 @@ EOF
if test -n "$convenience"; then
if test -n "$whole_archive_flag_spec"; then
eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\"
- reload_conv_objs=$reload_objs\ `$ECHO "X$tmp_whole_archive_flags" | $Xsed -e 's|,| |g'`
+ reload_conv_objs=$reload_objs\ `$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'`
else
gentop="$output_objdir/${obj}x"
- generated="$generated $gentop"
+ func_append generated " $gentop"
func_extract_archives $gentop $convenience
reload_conv_objs="$reload_objs $func_extract_archives_result"
fi
fi
+ # If we're not building shared, we need to use non_pic_objs
+ test "$build_libtool_libs" != yes && libobjs="$non_pic_objects"
+
# Create the old-style object.
- reload_objs="$objs$old_deplibs "`$ECHO "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test
+ reload_objs="$objs$old_deplibs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; /\.lib$/d; $lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test
output="$obj"
func_execute_cmds "$reload_cmds" 'exit $?'
@@ -7505,8 +8708,8 @@ EOF
case $host in
*-*-rhapsody* | *-*-darwin1.[012])
# On Rhapsody replace the C library with the System framework
- compile_deplibs=`$ECHO "X $compile_deplibs" | $Xsed -e 's/ -lc / System.ltframework /'`
- finalize_deplibs=`$ECHO "X $finalize_deplibs" | $Xsed -e 's/ -lc / System.ltframework /'`
+ compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'`
+ finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'`
;;
esac
@@ -7517,14 +8720,14 @@ EOF
if test "$tagname" = CXX ; then
case ${MACOSX_DEPLOYMENT_TARGET-10.0} in
10.[0123])
- compile_command="$compile_command ${wl}-bind_at_load"
- finalize_command="$finalize_command ${wl}-bind_at_load"
+ func_append compile_command " ${wl}-bind_at_load"
+ func_append finalize_command " ${wl}-bind_at_load"
;;
esac
fi
# Time to change all our "foo.ltframework" stuff back to "-framework foo"
- compile_deplibs=`$ECHO "X $compile_deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
- finalize_deplibs=`$ECHO "X $finalize_deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
+ compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+ finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
;;
esac
@@ -7538,7 +8741,7 @@ EOF
*)
case " $compile_deplibs " in
*" -L$path/$objdir "*)
- new_libs="$new_libs -L$path/$objdir" ;;
+ func_append new_libs " -L$path/$objdir" ;;
esac
;;
esac
@@ -7548,17 +8751,17 @@ EOF
-L*)
case " $new_libs " in
*" $deplib "*) ;;
- *) new_libs="$new_libs $deplib" ;;
+ *) func_append new_libs " $deplib" ;;
esac
;;
- *) new_libs="$new_libs $deplib" ;;
+ *) func_append new_libs " $deplib" ;;
esac
done
compile_deplibs="$new_libs"
- compile_command="$compile_command $compile_deplibs"
- finalize_command="$finalize_command $finalize_deplibs"
+ func_append compile_command " $compile_deplibs"
+ func_append finalize_command " $finalize_deplibs"
if test -n "$rpath$xrpath"; then
# If the user specified any rpath flags, then add them.
@@ -7566,7 +8769,7 @@ EOF
# This is the magic to use -rpath.
case "$finalize_rpath " in
*" $libdir "*) ;;
- *) finalize_rpath="$finalize_rpath $libdir" ;;
+ *) func_append finalize_rpath " $libdir" ;;
esac
done
fi
@@ -7585,18 +8788,18 @@ EOF
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
- hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
+ func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
- rpath="$rpath $flag"
+ func_append rpath " $flag"
fi
elif test -n "$runpath_var"; then
case "$perm_rpath " in
*" $libdir "*) ;;
- *) perm_rpath="$perm_rpath $libdir" ;;
+ *) func_append perm_rpath " $libdir" ;;
esac
fi
case $host in
@@ -7605,12 +8808,12 @@ EOF
case :$dllsearchpath: in
*":$libdir:"*) ;;
::) dllsearchpath=$libdir;;
- *) dllsearchpath="$dllsearchpath:$libdir";;
+ *) func_append dllsearchpath ":$libdir";;
esac
case :$dllsearchpath: in
*":$testbindir:"*) ;;
::) dllsearchpath=$testbindir;;
- *) dllsearchpath="$dllsearchpath:$testbindir";;
+ *) func_append dllsearchpath ":$testbindir";;
esac
;;
esac
@@ -7636,18 +8839,18 @@ EOF
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
- hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
+ func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
- rpath="$rpath $flag"
+ func_append rpath " $flag"
fi
elif test -n "$runpath_var"; then
case "$finalize_perm_rpath " in
*" $libdir "*) ;;
- *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;;
+ *) func_append finalize_perm_rpath " $libdir" ;;
esac
fi
done
@@ -7661,8 +8864,8 @@ EOF
if test -n "$libobjs" && test "$build_old_libs" = yes; then
# Transform all the library objects into standard objects.
- compile_command=`$ECHO "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
- finalize_command=`$ECHO "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
+ compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP`
+ finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP`
fi
func_generate_dlsyms "$outputname" "@PROGRAM@" "no"
@@ -7674,15 +8877,15 @@ EOF
wrappers_required=yes
case $host in
+ *cegcc* | *mingw32ce*)
+ # Disable wrappers for cegcc and mingw32ce hosts; we are cross-compiling anyway.
+ wrappers_required=no
+ ;;
*cygwin* | *mingw* )
if test "$build_libtool_libs" != yes; then
wrappers_required=no
fi
;;
- *cegcc)
- # Disable wrappers for cegcc, we are cross compiling anyway.
- wrappers_required=no
- ;;
*)
if test "$need_relink" = no || test "$build_libtool_libs" != yes; then
wrappers_required=no
@@ -7691,13 +8894,19 @@ EOF
esac
if test "$wrappers_required" = no; then
# Replace the output file specification.
- compile_command=`$ECHO "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
+ compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'`
link_command="$compile_command$compile_rpath"
# We have no uninstalled library dependencies, so finalize right now.
exit_status=0
func_show_eval "$link_command" 'exit_status=$?'
+ if test -n "$postlink_cmds"; then
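+ # Run any postlink commands defined by the toolchain (used, for example,
+ # to embed manifests with some Windows toolchains), substituting @OUTPUT@
+ # and @TOOL_OUTPUT@ with the build-side and tool-side output paths.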
+ func_to_tool_file "$output"
+ postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'`
+ func_execute_cmds "$postlink_cmds" 'exit $?'
+ fi
+
# Delete the generated files.
if test -f "$output_objdir/${outputname}S.${objext}"; then
func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"'
@@ -7720,7 +8929,7 @@ EOF
# We should set the runpath_var.
rpath=
for dir in $perm_rpath; do
- rpath="$rpath$dir:"
+ func_append rpath "$dir:"
done
compile_var="$runpath_var=\"$rpath\$$runpath_var\" "
fi
@@ -7728,7 +8937,7 @@ EOF
# We should set the runpath_var.
rpath=
for dir in $finalize_perm_rpath; do
- rpath="$rpath$dir:"
+ func_append rpath "$dir:"
done
finalize_var="$runpath_var=\"$rpath\$$runpath_var\" "
fi
@@ -7738,11 +8947,18 @@ EOF
# We don't need to create a wrapper script.
link_command="$compile_var$compile_command$compile_rpath"
# Replace the output file specification.
- link_command=`$ECHO "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
+ link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'`
# Delete the old output file.
$opt_dry_run || $RM $output
# Link the executable and exit
func_show_eval "$link_command" 'exit $?'
+
+ if test -n "$postlink_cmds"; then
+ func_to_tool_file "$output"
+ postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'`
+ func_execute_cmds "$postlink_cmds" 'exit $?'
+ fi
+
exit $EXIT_SUCCESS
fi
@@ -7757,7 +8973,7 @@ EOF
if test "$fast_install" != no; then
link_command="$finalize_var$compile_command$finalize_rpath"
if test "$fast_install" = yes; then
- relink_command=`$ECHO "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'`
+ relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'`
else
# fast_install is set to needless
relink_command=
@@ -7769,13 +8985,19 @@ EOF
fi
# Replace the output file specification.
- link_command=`$ECHO "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
+ link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
# Delete the old output files.
$opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname
func_show_eval "$link_command" 'exit $?'
+ if test -n "$postlink_cmds"; then
+ func_to_tool_file "$output_objdir/$outputname"
+ postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'`
+ func_execute_cmds "$postlink_cmds" 'exit $?'
+ fi
+
# Now create the wrapper script.
func_verbose "creating $output"
@@ -7793,18 +9015,7 @@ EOF
fi
done
relink_command="(cd `pwd`; $relink_command)"
- relink_command=`$ECHO "X$relink_command" | $Xsed -e "$sed_quote_subst"`
- fi
-
- # Quote $ECHO for shipping.
- if test "X$ECHO" = "X$SHELL $progpath --fallback-echo"; then
- case $progpath in
- [\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";;
- *) qecho="$SHELL `pwd`/$progpath --fallback-echo";;
- esac
- qecho=`$ECHO "X$qecho" | $Xsed -e "$sed_quote_subst"`
- else
- qecho=`$ECHO "X$ECHO" | $Xsed -e "$sed_quote_subst"`
+ relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"`
fi
# Only actually do things if not in dry run mode.
@@ -7884,7 +9095,7 @@ EOF
else
oldobjs="$old_deplibs $non_pic_objects"
if test "$preload" = yes && test -f "$symfileobj"; then
- oldobjs="$oldobjs $symfileobj"
+ func_append oldobjs " $symfileobj"
fi
fi
addlibs="$old_convenience"
@@ -7892,10 +9103,10 @@ EOF
if test -n "$addlibs"; then
gentop="$output_objdir/${outputname}x"
- generated="$generated $gentop"
+ func_append generated " $gentop"
func_extract_archives $gentop $addlibs
- oldobjs="$oldobjs $func_extract_archives_result"
+ func_append oldobjs " $func_extract_archives_result"
fi
# Do each command in the archive commands.
@@ -7906,10 +9117,10 @@ EOF
# Add any objects from preloaded convenience libraries
if test -n "$dlprefiles"; then
gentop="$output_objdir/${outputname}x"
- generated="$generated $gentop"
+ func_append generated " $gentop"
func_extract_archives $gentop $dlprefiles
- oldobjs="$oldobjs $func_extract_archives_result"
+ func_append oldobjs " $func_extract_archives_result"
fi
# POSIX demands no paths to be encoded in archives. We have
@@ -7925,9 +9136,9 @@ EOF
done | sort | sort -uc >/dev/null 2>&1); then
:
else
- $ECHO "copying selected object files to avoid basename conflicts..."
+ echo "copying selected object files to avoid basename conflicts..."
gentop="$output_objdir/${outputname}x"
- generated="$generated $gentop"
+ func_append generated " $gentop"
func_mkdir_p "$gentop"
save_oldobjs=$oldobjs
oldobjs=
@@ -7951,18 +9162,30 @@ EOF
esac
done
func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj"
- oldobjs="$oldobjs $gentop/$newobj"
+ func_append oldobjs " $gentop/$newobj"
;;
- *) oldobjs="$oldobjs $obj" ;;
+ *) func_append oldobjs " $obj" ;;
esac
done
fi
+ func_to_tool_file "$oldlib" func_convert_file_msys_to_w32
+ tool_oldlib=$func_to_tool_file_result
eval cmds=\"$old_archive_cmds\"
func_len " $cmds"
len=$func_len_result
if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
cmds=$old_archive_cmds
+ elif test -n "$archiver_list_spec"; then
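+ # The archiver accepts an argument-list file ($archiver_list_spec is
+ # typically "@" in that case): write the object names to a file and
+ # pass that file instead of listing every object on the command line.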
+ func_verbose "using command file archive linking..."
+ for obj in $oldobjs
+ do
+ func_to_tool_file "$obj"
+ $ECHO "$func_to_tool_file_result"
+ done > $output_objdir/$libname.libcmd
+ func_to_tool_file "$output_objdir/$libname.libcmd"
+ oldobjs=" $archiver_list_spec$func_to_tool_file_result"
+ cmds=$old_archive_cmds
else
# the command line is too long to link in one step, link in parts
func_verbose "using piecewise archive linking..."
@@ -8036,7 +9259,7 @@ EOF
done
# Quote the link command for shipping.
relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)"
- relink_command=`$ECHO "X$relink_command" | $Xsed -e "$sed_quote_subst"`
+ relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"`
if test "$hardcode_automatic" = yes ; then
relink_command=
fi
@@ -8056,12 +9279,23 @@ EOF
*.la)
func_basename "$deplib"
name="$func_basename_result"
- eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
+ func_resolve_sysroot "$deplib"
+ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result`
test -z "$libdir" && \
func_fatal_error "\`$deplib' is not a valid libtool archive"
- newdependency_libs="$newdependency_libs $libdir/$name"
+ func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name"
+ ;;
+ -L*)
+ func_stripname -L '' "$deplib"
+ func_replace_sysroot "$func_stripname_result"
+ func_append newdependency_libs " -L$func_replace_sysroot_result"
+ ;;
+ -R*)
+ func_stripname -R '' "$deplib"
+ func_replace_sysroot "$func_stripname_result"
+ func_append newdependency_libs " -R$func_replace_sysroot_result"
;;
- *) newdependency_libs="$newdependency_libs $deplib" ;;
+ *) func_append newdependency_libs " $deplib" ;;
esac
done
dependency_libs="$newdependency_libs"
@@ -8075,9 +9309,9 @@ EOF
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
test -z "$libdir" && \
func_fatal_error "\`$lib' is not a valid libtool archive"
- newdlfiles="$newdlfiles $libdir/$name"
+ func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name"
;;
- *) newdlfiles="$newdlfiles $lib" ;;
+ *) func_append newdlfiles " $lib" ;;
esac
done
dlfiles="$newdlfiles"
@@ -8094,7 +9328,7 @@ EOF
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
test -z "$libdir" && \
func_fatal_error "\`$lib' is not a valid libtool archive"
- newdlprefiles="$newdlprefiles $libdir/$name"
+ func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name"
;;
esac
done
@@ -8106,7 +9340,7 @@ EOF
[\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
*) abs=`pwd`"/$lib" ;;
esac
- newdlfiles="$newdlfiles $abs"
+ func_append newdlfiles " $abs"
done
dlfiles="$newdlfiles"
newdlprefiles=
@@ -8115,15 +9349,33 @@ EOF
[\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
*) abs=`pwd`"/$lib" ;;
esac
- newdlprefiles="$newdlprefiles $abs"
+ func_append newdlprefiles " $abs"
done
dlprefiles="$newdlprefiles"
fi
$RM $output
# place dlname in correct position for cygwin
+ # In fact, it would be nice if we could use this code for all target
+ # systems that can't hard-code library paths into their executables
+ # and that have no shared library path variable independent of PATH,
+ # but it turns out we can't easily determine that from inspecting
+ # libtool variables, so we have to hard-code the OSs to which it
+ # applies here; at the moment, that means platforms that use the PE
+ # object format with DLL files. See the long comment at the top of
+ # tests/bindir.at for full details.
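+ # Illustrative example (not part of the patch): with libdir=/usr/lib and
+ # --bindir=/usr/bin, func_relative_path yields "../bin/", so the installed
+ # .la file records tdlname=../bin/libfoo-1.dll (libfoo-1.dll being a
+ # hypothetical dlname), matching the ../bin heuristic used when no -bindir
+ # was given.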
tdlname=$dlname
case $host,$output,$installed,$module,$dlname in
- *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;;
+ *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll)
+ # If a -bindir argument was supplied, place the dll there.
+ if test "x$bindir" != x ;
+ then
+ func_relative_path "$install_libdir" "$bindir"
+ tdlname=$func_relative_path_result$dlname
+ else
+ # Otherwise fall back on heuristic.
+ tdlname=../bin/$dlname
+ fi
+ ;;
esac
$ECHO > $output "\
# $outputname - a libtool library file
@@ -8182,7 +9434,7 @@ relink_command=\"$relink_command\""
exit $EXIT_SUCCESS
}
-{ test "$mode" = link || test "$mode" = relink; } &&
+{ test "$opt_mode" = link || test "$opt_mode" = relink; } &&
func_mode_link ${1+"$@"}
@@ -8202,9 +9454,9 @@ func_mode_uninstall ()
for arg
do
case $arg in
- -f) RM="$RM $arg"; rmforce=yes ;;
- -*) RM="$RM $arg" ;;
- *) files="$files $arg" ;;
+ -f) func_append RM " $arg"; rmforce=yes ;;
+ -*) func_append RM " $arg" ;;
+ *) func_append files " $arg" ;;
esac
done
@@ -8213,24 +9465,23 @@ func_mode_uninstall ()
rmdirs=
- origobjdir="$objdir"
for file in $files; do
func_dirname "$file" "" "."
dir="$func_dirname_result"
if test "X$dir" = X.; then
- objdir="$origobjdir"
+ odir="$objdir"
else
- objdir="$dir/$origobjdir"
+ odir="$dir/$objdir"
fi
func_basename "$file"
name="$func_basename_result"
- test "$mode" = uninstall && objdir="$dir"
+ test "$opt_mode" = uninstall && odir="$dir"
- # Remember objdir for removal later, being careful to avoid duplicates
- if test "$mode" = clean; then
+ # Remember odir for removal later, being careful to avoid duplicates
+ if test "$opt_mode" = clean; then
case " $rmdirs " in
- *" $objdir "*) ;;
- *) rmdirs="$rmdirs $objdir" ;;
+ *" $odir "*) ;;
+ *) func_append rmdirs " $odir" ;;
esac
fi
@@ -8256,18 +9507,17 @@ func_mode_uninstall ()
# Delete the libtool libraries and symlinks.
for n in $library_names; do
- rmfiles="$rmfiles $objdir/$n"
+ func_append rmfiles " $odir/$n"
done
- test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library"
+ test -n "$old_library" && func_append rmfiles " $odir/$old_library"
- case "$mode" in
+ case "$opt_mode" in
clean)
- case " $library_names " in
- # " " in the beginning catches empty $dlname
+ case " $library_names " in
*" $dlname "*) ;;
- *) rmfiles="$rmfiles $objdir/$dlname" ;;
+ *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;;
esac
- test -n "$libdir" && rmfiles="$rmfiles $objdir/$name $objdir/${name}i"
+ test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i"
;;
uninstall)
if test -n "$library_names"; then
@@ -8295,19 +9545,19 @@ func_mode_uninstall ()
# Add PIC object to the list of files to remove.
if test -n "$pic_object" &&
test "$pic_object" != none; then
- rmfiles="$rmfiles $dir/$pic_object"
+ func_append rmfiles " $dir/$pic_object"
fi
# Add non-PIC object to the list of files to remove.
if test -n "$non_pic_object" &&
test "$non_pic_object" != none; then
- rmfiles="$rmfiles $dir/$non_pic_object"
+ func_append rmfiles " $dir/$non_pic_object"
fi
fi
;;
*)
- if test "$mode" = clean ; then
+ if test "$opt_mode" = clean ; then
noexename=$name
case $file in
*.exe)
@@ -8317,7 +9567,7 @@ func_mode_uninstall ()
noexename=$func_stripname_result
# $file with .exe has already been added to rmfiles,
# add $file without .exe
- rmfiles="$rmfiles $file"
+ func_append rmfiles " $file"
;;
esac
# Do a test to see if this is a libtool program.
@@ -8326,7 +9576,7 @@ func_mode_uninstall ()
func_ltwrapper_scriptname "$file"
relink_command=
func_source $func_ltwrapper_scriptname_result
- rmfiles="$rmfiles $func_ltwrapper_scriptname_result"
+ func_append rmfiles " $func_ltwrapper_scriptname_result"
else
relink_command=
func_source $dir/$noexename
@@ -8334,12 +9584,12 @@ func_mode_uninstall ()
# note $name still contains .exe if it was in $file originally
# as does the version of $file that was added into $rmfiles
- rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}"
+ func_append rmfiles " $odir/$name $odir/${name}S.${objext}"
if test "$fast_install" = yes && test -n "$relink_command"; then
- rmfiles="$rmfiles $objdir/lt-$name"
+ func_append rmfiles " $odir/lt-$name"
fi
if test "X$noexename" != "X$name" ; then
- rmfiles="$rmfiles $objdir/lt-${noexename}.c"
+ func_append rmfiles " $odir/lt-${noexename}.c"
fi
fi
fi
@@ -8347,7 +9597,6 @@ func_mode_uninstall ()
esac
func_show_eval "$RM $rmfiles" 'exit_status=1'
done
- objdir="$origobjdir"
# Try to remove the ${objdir}s in the directories where we deleted files
for dir in $rmdirs; do
@@ -8359,16 +9608,16 @@ func_mode_uninstall ()
exit $exit_status
}
-{ test "$mode" = uninstall || test "$mode" = clean; } &&
+{ test "$opt_mode" = uninstall || test "$opt_mode" = clean; } &&
func_mode_uninstall ${1+"$@"}
-test -z "$mode" && {
+test -z "$opt_mode" && {
help="$generic_help"
func_fatal_help "you must specify a MODE"
}
test -z "$exec_cmd" && \
- func_fatal_help "invalid operation mode \`$mode'"
+ func_fatal_help "invalid operation mode \`$opt_mode'"
if test -n "$exec_cmd"; then
eval exec "$exec_cmd"
diff --git a/Modules/_ctypes/libffi/m4/asmcfi.m4 b/Modules/_ctypes/libffi/m4/asmcfi.m4
new file mode 100644
index 0000000..dbf73a0
--- /dev/null
+++ b/Modules/_ctypes/libffi/m4/asmcfi.m4
@@ -0,0 +1,13 @@
+AC_DEFUN([GCC_AS_CFI_PSEUDO_OP],
+[AC_CACHE_CHECK([assembler .cfi pseudo-op support],
+ gcc_cv_as_cfi_pseudo_op, [
+ gcc_cv_as_cfi_pseudo_op=unknown
+ AC_TRY_COMPILE([asm (".cfi_startproc\n\t.cfi_endproc");],,
+ [gcc_cv_as_cfi_pseudo_op=yes],
+ [gcc_cv_as_cfi_pseudo_op=no])
+ ])
+ if test "x$gcc_cv_as_cfi_pseudo_op" = xyes; then
+ AC_DEFINE(HAVE_AS_CFI_PSEUDO_OP, 1,
+ [Define if your assembler supports .cfi_* directives.])
+ fi
+])
diff --git a/Modules/_ctypes/libffi/m4/ax_append_flag.m4 b/Modules/_ctypes/libffi/m4/ax_append_flag.m4
new file mode 100644
index 0000000..1d38b76
--- /dev/null
+++ b/Modules/_ctypes/libffi/m4/ax_append_flag.m4
@@ -0,0 +1,69 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_append_flag.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_APPEND_FLAG(FLAG, [FLAGS-VARIABLE])
+#
+# DESCRIPTION
+#
+# FLAG is appended to the FLAGS-VARIABLE shell variable, with a space
+# added in between.
+#
+# If FLAGS-VARIABLE is not specified, the current language's flags variable
+# (e.g. CFLAGS) is used. FLAGS-VARIABLE is not changed if it already contains
+# FLAG. If FLAGS-VARIABLE is unset in the shell, it is set to exactly
+# FLAG.
+#
+# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION.
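+#
+# EXAMPLE (editorial sketch, not part of the upstream macro; DEBUG_FLAGS is
+# only an illustrative variable name):
+#
+#   AX_APPEND_FLAG([-Wall])             dnl appends -Wall to CFLAGS (current language)
+#   AX_APPEND_FLAG([-Wall])             dnl second call is a no-op, -Wall already present
+#   AX_APPEND_FLAG([-g], [DEBUG_FLAGS]) dnl sets DEBUG_FLAGS to "-g" if it was unset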
+#
+# LICENSE
+#
+# Copyright (c) 2008 Guido U. Draheim <guidod@gmx.de>
+# Copyright (c) 2011 Maarten Bosmans <mkbosmans@gmail.com>
+#
+# This program is free software: you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 2
+
+AC_DEFUN([AX_APPEND_FLAG],
+[AC_PREREQ(2.59)dnl for _AC_LANG_PREFIX
+AS_VAR_PUSHDEF([FLAGS], [m4_default($2,_AC_LANG_PREFIX[FLAGS])])dnl
+AS_VAR_SET_IF(FLAGS,
+ [case " AS_VAR_GET(FLAGS) " in
+ *" $1 "*)
+ AC_RUN_LOG([: FLAGS already contains $1])
+ ;;
+ *)
+ AC_RUN_LOG([: FLAGS="$FLAGS $1"])
+ AS_VAR_SET(FLAGS, ["AS_VAR_GET(FLAGS) $1"])
+ ;;
+ esac],
+ [AS_VAR_SET(FLAGS,["$1"])])
+AS_VAR_POPDEF([FLAGS])dnl
+])dnl AX_APPEND_FLAG
diff --git a/Modules/_ctypes/libffi/m4/ax_cc_maxopt.m4 b/Modules/_ctypes/libffi/m4/ax_cc_maxopt.m4
new file mode 100644
index 0000000..62e3b53
--- /dev/null
+++ b/Modules/_ctypes/libffi/m4/ax_cc_maxopt.m4
@@ -0,0 +1,181 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_cc_maxopt.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CC_MAXOPT
+#
+# DESCRIPTION
+#
+# Try to turn on "good" C optimization flags for various compilers and
+# architectures, for some definition of "good". (In our case, good for
+# FFTW and hopefully for other scientific codes. Modify as needed.)
+#
+# The user can override the flags by setting the CFLAGS environment
+# variable. The user can also specify --enable-portable-binary in order to
+# disable any optimization flags that might result in a binary that only
+# runs on the host architecture.
+#
+# Note also that the flags assume that ANSI C aliasing rules are followed
+# by the code (e.g. for gcc's -fstrict-aliasing), and that floating-point
+# computations can be re-ordered as needed.
+#
+# Requires macros: AX_CHECK_COMPILE_FLAG, AX_COMPILER_VENDOR,
+# AX_GCC_ARCHFLAG, AX_GCC_X86_CPUID.
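+#
+# EXAMPLE (editorial sketch, not part of the upstream macro):
+#
+#   AC_PROG_CC
+#   AX_CC_MAXOPT
+#
+# and, to override the guessed flags when configuring:
+#
+#   ./configure CFLAGS="-O2 -g" --enable-portable-binary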
+#
+# LICENSE
+#
+# Copyright (c) 2008 Steven G. Johnson <stevenj@alum.mit.edu>
+# Copyright (c) 2008 Matteo Frigo
+#
+# This program is free software: you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 13
+
+AC_DEFUN([AX_CC_MAXOPT],
+[
+AC_REQUIRE([AC_PROG_CC])
+AC_REQUIRE([AX_COMPILER_VENDOR])
+AC_REQUIRE([AC_CANONICAL_HOST])
+
+AC_ARG_ENABLE(portable-binary, [AS_HELP_STRING([--enable-portable-binary], [disable compiler optimizations that would produce unportable binaries])],
+ acx_maxopt_portable=$enableval, acx_maxopt_portable=no)
+
+# Try to determine "good" native compiler flags if none specified via CFLAGS
+if test "$ac_test_CFLAGS" != "set"; then
+ CFLAGS=""
+ case $ax_cv_c_compiler_vendor in
+ dec) CFLAGS="-newc -w0 -O5 -ansi_alias -ansi_args -fp_reorder -tune host"
+ if test "x$acx_maxopt_portable" = xno; then
+ CFLAGS="$CFLAGS -arch host"
+ fi;;
+
+ sun) CFLAGS="-native -fast -xO5 -dalign"
+ if test "x$acx_maxopt_portable" = xyes; then
+ CFLAGS="$CFLAGS -xarch=generic"
+ fi;;
+
+ hp) CFLAGS="+Oall +Optrs_ansi +DSnative"
+ if test "x$acx_maxopt_portable" = xyes; then
+ CFLAGS="$CFLAGS +DAportable"
+ fi;;
+
+ ibm) if test "x$acx_maxopt_portable" = xno; then
+ xlc_opt="-qarch=auto -qtune=auto"
+ else
+ xlc_opt="-qtune=auto"
+ fi
+ AX_CHECK_COMPILE_FLAG($xlc_opt,
+ CFLAGS="-O3 -qansialias -w $xlc_opt",
+ [CFLAGS="-O3 -qansialias -w"
+ echo "******************************************************"
+ echo "* You seem to have the IBM C compiler. It is *"
+ echo "* recommended for best performance that you use: *"
+ echo "* *"
+ echo "* CFLAGS=-O3 -qarch=xxx -qtune=xxx -qansialias -w *"
+ echo "* ^^^ ^^^ *"
+ echo "* where xxx is pwr2, pwr3, 604, or whatever kind of *"
+ echo "* CPU you have. (Set the CFLAGS environment var. *"
+ echo "* and re-run configure.) For more info, man cc. *"
+ echo "******************************************************"])
+ ;;
+
+ intel) CFLAGS="-O3 -ansi_alias"
+ if test "x$acx_maxopt_portable" = xno; then
+ icc_archflag=unknown
+ icc_flags=""
+ case $host_cpu in
+ i686*|x86_64*)
+ # icc accepts gcc assembly syntax, so these should work:
+ AX_GCC_X86_CPUID(0)
+ AX_GCC_X86_CPUID(1)
+ case $ax_cv_gcc_x86_cpuid_0 in # see AX_GCC_ARCHFLAG
+ *:756e6547:*:*) # Intel
+ case $ax_cv_gcc_x86_cpuid_1 in
+ *6a?:*[[234]]:*:*|*6[[789b]]?:*:*:*) icc_flags="-xK";;
+ *f3[[347]]:*:*:*|*f4[[1347]]:*:*:*) icc_flags="-xP -xN -xW -xK";;
+ *f??:*:*:*) icc_flags="-xN -xW -xK";;
+ esac ;;
+ esac ;;
+ esac
+ if test "x$icc_flags" != x; then
+ for flag in $icc_flags; do
+ AX_CHECK_COMPILE_FLAG($flag, [icc_archflag=$flag; break])
+ done
+ fi
+ AC_MSG_CHECKING([for icc architecture flag])
+ AC_MSG_RESULT($icc_archflag)
+ if test "x$icc_archflag" != xunknown; then
+ CFLAGS="$CFLAGS $icc_archflag"
+ fi
+ fi
+ ;;
+
+ gnu)
+ # default optimization flags for gcc on all systems
+ CFLAGS="-O3 -fomit-frame-pointer"
+
+ # -malign-double for x86 systems
+ # LIBFFI -- DON'T DO THIS - CHANGES ABI
+ # AX_CHECK_COMPILE_FLAG(-malign-double, CFLAGS="$CFLAGS -malign-double")
+
+ # -fstrict-aliasing for gcc-2.95+
+ AX_CHECK_COMPILE_FLAG(-fstrict-aliasing,
+ CFLAGS="$CFLAGS -fstrict-aliasing")
+
+ # note that we enable "unsafe" fp optimization with other compilers, too
+ AX_CHECK_COMPILE_FLAG(-ffast-math, CFLAGS="$CFLAGS -ffast-math")
+
+ AX_GCC_ARCHFLAG($acx_maxopt_portable)
+ ;;
+ esac
+
+ if test -z "$CFLAGS"; then
+ echo ""
+ echo "********************************************************"
+ echo "* WARNING: Don't know the best CFLAGS for this system *"
+ echo "* Use ./configure CFLAGS=... to specify your own flags *"
+ echo "* (otherwise, a default of CFLAGS=-O3 will be used) *"
+ echo "********************************************************"
+ echo ""
+ CFLAGS="-O3"
+ fi
+
+ AX_CHECK_COMPILE_FLAG($CFLAGS, [], [
+ echo ""
+ echo "********************************************************"
+ echo "* WARNING: The guessed CFLAGS don't seem to work with *"
+ echo "* your compiler. *"
+ echo "* Use ./configure CFLAGS=... to specify your own flags *"
+ echo "********************************************************"
+ echo ""
+ CFLAGS=""
+ ])
+
+fi
+])
diff --git a/Modules/_ctypes/libffi/m4/ax_cflags_warn_all.m4 b/Modules/_ctypes/libffi/m4/ax_cflags_warn_all.m4
new file mode 100644
index 0000000..0fa3e18
--- /dev/null
+++ b/Modules/_ctypes/libffi/m4/ax_cflags_warn_all.m4
@@ -0,0 +1,122 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_cflags_warn_all.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])]
+# AX_CXXFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])]
+# AX_FCFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])]
+#
+# DESCRIPTION
+#
+# Try to find a compiler option that enables most reasonable warnings.
+#
+# For the GNU compiler it will be -Wall (and -ansi -pedantic). The result
+# is added to the shellvar, which is CFLAGS, CXXFLAGS, or FCFLAGS by default.
+#
+# Currently this macro knows about the GCC, Solaris, Digital Unix, AIX,
+# HP-UX, IRIX, NEC SX-5 (Super-UX 10), Cray J90 (Unicos 10.0.0.8), and
+# Intel compilers. For a given compiler, the Fortran flags are much more
+# experimental than their C equivalents.
+#
+# - $1 shell-variable-to-add-to : CFLAGS, CXXFLAGS, or FCFLAGS
+# - $2 add-value-if-not-found : nothing
+# - $3 action-if-found : add value to shellvariable
+# - $4 action-if-not-found : nothing
+#
+# NOTE: These macros depend on AX_APPEND_FLAG.
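+#
+# EXAMPLE (editorial sketch, not part of the upstream macro; WARN_CFLAGS is
+# only an illustrative variable name):
+#
+#   AX_CFLAGS_WARN_ALL                  dnl appends e.g. "-Wall" to CFLAGS for gcc
+#   dnl ... or, to collect the warning flag in a separate variable instead:
+#   AX_CFLAGS_WARN_ALL([WARN_CFLAGS])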
+#
+# LICENSE
+#
+# Copyright (c) 2008 Guido U. Draheim <guidod@gmx.de>
+# Copyright (c) 2010 Rhys Ulerich <rhys.ulerich@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 14
+
+AC_DEFUN([AX_FLAGS_WARN_ALL],[dnl
+AS_VAR_PUSHDEF([FLAGS],[_AC_LANG_PREFIX[]FLAGS])dnl
+AS_VAR_PUSHDEF([VAR],[ac_cv_[]_AC_LANG_ABBREV[]flags_warn_all])dnl
+AC_CACHE_CHECK([m4_ifval($1,$1,FLAGS) for maximum warnings],
+VAR,[VAR="no, unknown"
+ac_save_[]FLAGS="$[]FLAGS"
+for ac_arg dnl
+in "-warn all % -warn all" dnl Intel
+ "-pedantic % -Wall" dnl GCC
+ "-xstrconst % -v" dnl Solaris C
+ "-std1 % -verbose -w0 -warnprotos" dnl Digital Unix
+ "-qlanglvl=ansi % -qsrcmsg -qinfo=all:noppt:noppc:noobs:nocnd" dnl AIX
+ "-ansi -ansiE % -fullwarn" dnl IRIX
+ "+ESlit % +w1" dnl HP-UX C
+ "-Xc % -pvctl[,]fullmsg" dnl NEC SX-5 (Super-UX 10)
+ "-h conform % -h msglevel 2" dnl Cray C (Unicos)
+ #
+do FLAGS="$ac_save_[]FLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'`
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM],
+ [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break])
+done
+FLAGS="$ac_save_[]FLAGS"
+])
+AS_VAR_POPDEF([FLAGS])dnl
+AC_REQUIRE([AX_APPEND_FLAG])
+case ".$VAR" in
+ .ok|.ok,*) m4_ifvaln($3,$3) ;;
+ .|.no|.no,*) m4_default($4,[m4_ifval($2,[AX_APPEND_FLAG([$2], [$1])])]) ;;
+ *) m4_default($3,[AX_APPEND_FLAG([$VAR], [$1])]) ;;
+esac
+AS_VAR_POPDEF([VAR])dnl
+])dnl AX_FLAGS_WARN_ALL
+dnl implementation tactics:
+dnl the for-argument contains a list of options. The first part of each
+dnl entry exists only to detect the compiler - usually it is a global
+dnl option that enables -ansi or extra warnings, and all other compilers
+dnl will fail on it. That is needed because many compilers give false
+dnl positives for option syntax like -Woption or -Xoption, treating it
+dnl as a pass-through to later compile stages. The "%" is used as a
+dnl delimiter. A non-option comment can be given after a "%%" mark;
+dnl it will be shown but not added to the respective C/CXXFLAGS.
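+dnl
+dnl Worked example (added for illustration): the entry "-pedantic % -Wall"
+dnl makes the macro probe the compiler with "-pedantic -Wall"; if that
+dnl probe compiles, only the part after the "%", i.e. "-Wall", is recorded
+dnl as the warning flag.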
+
+AC_DEFUN([AX_CFLAGS_WARN_ALL],[dnl
+AC_LANG_PUSH([C])
+AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4])
+AC_LANG_POP([C])
+])
+
+AC_DEFUN([AX_CXXFLAGS_WARN_ALL],[dnl
+AC_LANG_PUSH([C++])
+AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4])
+AC_LANG_POP([C++])
+])
+
+AC_DEFUN([AX_FCFLAGS_WARN_ALL],[dnl
+AC_LANG_PUSH([Fortran])
+AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4])
+AC_LANG_POP([Fortran])
+])
diff --git a/Modules/_ctypes/libffi/m4/ax_check_compile_flag.m4 b/Modules/_ctypes/libffi/m4/ax_check_compile_flag.m4
new file mode 100644
index 0000000..c3a8d69
--- /dev/null
+++ b/Modules/_ctypes/libffi/m4/ax_check_compile_flag.m4
@@ -0,0 +1,72 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS])
+#
+# DESCRIPTION
+#
+# Check whether the given FLAG works with the current language's compiler
+# or gives an error. (Warnings, however, are ignored.)
+#
+# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on
+# success/failure.
+#
+# If EXTRA-FLAGS is defined, it is added to the current language's default
+# flags (e.g. CFLAGS) when the check is done. The check is thus made with
+# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to
+# force the compiler to issue an error when a bad flag is given.
+#
+# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this
+# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG.
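+#
+# EXAMPLE (editorial sketch, not part of the upstream macro):
+#
+#   AX_CHECK_COMPILE_FLAG([-fvisibility=hidden],
+#     [CFLAGS="$CFLAGS -fvisibility=hidden"],
+#     [AC_MSG_WARN([compiler does not accept -fvisibility=hidden])],
+#     [-Werror])
+#
+# The trailing [-Werror] is the EXTRA-FLAGS argument; it makes compilers
+# that only warn about unknown flags fail the check instead.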
+#
+# LICENSE
+#
+# Copyright (c) 2008 Guido U. Draheim <guidod@gmx.de>
+# Copyright (c) 2011 Maarten Bosmans <mkbosmans@gmail.com>
+#
+# This program is free software: you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 2
+
+AC_DEFUN([AX_CHECK_COMPILE_FLAG],
+[AC_PREREQ(2.59)dnl for _AC_LANG_PREFIX
+AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl
+AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [
+ ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS
+ _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1"
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM()],
+ [AS_VAR_SET(CACHEVAR,[yes])],
+ [AS_VAR_SET(CACHEVAR,[no])])
+ _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags])
+AS_IF([test x"AS_VAR_GET(CACHEVAR)" = xyes],
+ [m4_default([$2], :)],
+ [m4_default([$3], :)])
+AS_VAR_POPDEF([CACHEVAR])dnl
+])dnl AX_CHECK_COMPILE_FLAG
diff --git a/Modules/_ctypes/libffi/m4/ax_compiler_vendor.m4 b/Modules/_ctypes/libffi/m4/ax_compiler_vendor.m4
new file mode 100644
index 0000000..73e32ea
--- /dev/null
+++ b/Modules/_ctypes/libffi/m4/ax_compiler_vendor.m4
@@ -0,0 +1,84 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_compiler_vendor.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_COMPILER_VENDOR
+#
+# DESCRIPTION
+#
+# Determine the vendor of the C/C++ compiler, e.g., gnu, intel, ibm, sun,
+# hp, borland, comeau, dec, cray, kai, lcc, metrowerks, sgi, microsoft,
+# watcom, etc. The vendor is returned in the cache variable
+# $ax_cv_c_compiler_vendor for C and $ax_cv_cxx_compiler_vendor for C++.
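+#
+# EXAMPLE (editorial sketch, not part of the upstream macro):
+#
+#   AC_PROG_CC
+#   AX_COMPILER_VENDOR
+#   AS_IF([test "x$ax_cv_c_compiler_vendor" = xintel],
+#         [CFLAGS="$CFLAGS -ansi_alias"])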
+#
+# LICENSE
+#
+# Copyright (c) 2008 Steven G. Johnson <stevenj@alum.mit.edu>
+# Copyright (c) 2008 Matteo Frigo
+#
+# This program is free software: you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 11
+
+AC_DEFUN([AX_COMPILER_VENDOR],
+[AC_CACHE_CHECK([for _AC_LANG compiler vendor], ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor,
+ [# note: don't check for gcc first since some other compilers define __GNUC__
+ vendors="intel: __ICC,__ECC,__INTEL_COMPILER
+ ibm: __xlc__,__xlC__,__IBMC__,__IBMCPP__
+ pathscale: __PATHCC__,__PATHSCALE__
+ clang: __clang__
+ gnu: __GNUC__
+ sun: __SUNPRO_C,__SUNPRO_CC
+ hp: __HP_cc,__HP_aCC
+ dec: __DECC,__DECCXX,__DECC_VER,__DECCXX_VER
+ borland: __BORLANDC__,__TURBOC__
+ comeau: __COMO__
+ cray: _CRAYC
+ kai: __KCC
+ lcc: __LCC__
+ sgi: __sgi,sgi
+ microsoft: _MSC_VER
+ metrowerks: __MWERKS__
+ watcom: __WATCOMC__
+ portland: __PGI
+ unknown: UNKNOWN"
+ for ventest in $vendors; do
+ case $ventest in
+ *:) vendor=$ventest; continue ;;
+ *) vencpp="defined("`echo $ventest | sed 's/,/) || defined(/g'`")" ;;
+ esac
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM(,[
+ #if !($vencpp)
+ thisisanerror;
+ #endif
+ ])], [break])
+ done
+ ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor=`echo $vendor | cut -d: -f1`
+ ])
+])
diff --git a/Modules/_ctypes/libffi/m4/ax_configure_args.m4 b/Modules/_ctypes/libffi/m4/ax_configure_args.m4
new file mode 100644
index 0000000..0726b1b
--- /dev/null
+++ b/Modules/_ctypes/libffi/m4/ax_configure_args.m4
@@ -0,0 +1,70 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_configure_args.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CONFIGURE_ARGS
+#
+# DESCRIPTION
+#
+# Helper macro for AX_ENABLE_BUILDDIR.
+#
+# The traditional way of starting a subdir-configure is running the script
+# with ${1+"$@"}, but since autoconf 2.60 this is broken. Instead we have
+# to rely on eval'ing $ac_configure_args; however, some old autoconf
+# versions do not provide that. To ensure maximum portability of autoconf
+# extension macros, this helper can be AC_REQUIRE'd so that
+# $ac_configure_args will always be present.
+#
+# Sadly, the traditional "exec $SHELL" of the enable_builddir macros is
+# spoiled now and must be replaced by "eval + exit $?".
+#
+# Example:
+#
+# AC_DEFUN([AX_ENABLE_SUBDIR],[dnl
+# AC_REQUIRE([AX_CONFIGURE_ARGS])dnl
+# eval $SHELL $ac_configure_args || exit $?
+# ...])
+#
+# LICENSE
+#
+# Copyright (c) 2008 Guido U. Draheim <guidod@gmx.de>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 9
+
+AC_DEFUN([AX_CONFIGURE_ARGS],[
+ # [$]@ is unusable in 2.60+ but earlier autoconf had no ac_configure_args
+ if test "${ac_configure_args+set}" != "set" ; then
+ ac_configure_args=
+ for ac_arg in ${1+"[$]@"}; do
+ ac_configure_args="$ac_configure_args '$ac_arg'"
+ done
+ fi
+])
diff --git a/Modules/_ctypes/libffi/m4/ax_enable_builddir.m4 b/Modules/_ctypes/libffi/m4/ax_enable_builddir.m4
new file mode 100644
index 0000000..3fb8731
--- /dev/null
+++ b/Modules/_ctypes/libffi/m4/ax_enable_builddir.m4
@@ -0,0 +1,300 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_enable_builddir.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_ENABLE_BUILDDIR [(dirstring-or-command [,Makefile.mk [,-all]])]
+#
+# DESCRIPTION
+#
+# If the current configure was run within the srcdir then we move all
+# configure-files into a subdir and let the configure steps continue
+# there. We provide an option --disable-builddir to suppress the move into
+# a separate builddir.
+#
+# Defaults:
+#
+# $1 = $host (overridden with $HOST)
+# $2 = Makefile.mk
+# $3 = -all
+#
+# This macro must be called before AM_INIT_AUTOMAKE. It creates a default
+# toplevel srcdir Makefile from the information found in the created
+# toplevel builddir Makefile. It just copies the variables and
+# rule-targets, each extended with a default rule-execution that recurses
+# into the build directory of the current "HOST". You can override the
+# auto-dection through `config.guess` and build-time of course, as in
+#
+# make HOST=i386-mingw-cross
+#
+# which can of course set at configure time as well using
+#
+# configure --host=i386-mingw-cross
+#
+# After the default has been created, additional rules can be appended
+# that do not just recurse into the subdirectories and that only ever
+# exist in the srcdir toplevel makefile - these parts are read from the
+# $2 = Makefile.mk file.
+#
+# The automatic rules usually scan the toplevel Makefile for lines like
+# '#### $host |$builddir' to recognize the place to recurse into.
+# Usually, the last one is the only one used. However, almost all
+# targets have an additional "*-all" rule which makes the script
+# recurse into _all_ variants of the current HOST (!!) setting. The
+# "-all" suffix can be overridden for the macro as well.
+#
+# A special rule is only given for things like "dist" that will copy the
+# tarball from the builddir to the sourcedir (or $(PUB)) for reasons of
+# convenience.
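+#
+# EXAMPLE (editorial sketch, not part of the upstream macro; "mypackage" is
+# only an illustrative name):
+#
+#   AC_INIT([mypackage], [1.0])
+#   AX_ENABLE_BUILDDIR          dnl must appear before AM_INIT_AUTOMAKE
+#   AM_INIT_AUTOMAKE
+#
+# Running ./configure inside the source tree then re-executes configure in a
+# per-host subdirectory, and a toplevel srcdir Makefile is generated that
+# recurses into that builddir.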
+#
+# LICENSE
+#
+# Copyright (c) 2009 Guido U. Draheim <guidod@gmx.de>
+# Copyright (c) 2009 Alan Jenkins <alan-jenkins@tuffmail.co.uk>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 23
+
+AC_DEFUN([AX_ENABLE_BUILDDIR],[
+AC_REQUIRE([AC_CANONICAL_HOST])[]dnl
+AC_REQUIRE([AX_CONFIGURE_ARGS])[]dnl
+AC_REQUIRE([AM_AUX_DIR_EXPAND])[]dnl
+AC_BEFORE([$0],[AM_INIT_AUTOMAKE])dnl
+AS_VAR_PUSHDEF([SUB],[ax_enable_builddir])dnl
+AS_VAR_PUSHDEF([AUX],[ax_enable_builddir_auxdir])dnl
+AS_VAR_PUSHDEF([SED],[ax_enable_builddir_sed])dnl
+SUB="."
+AC_ARG_ENABLE([builddir], AS_HELP_STRING(
+ [--disable-builddir],[disable automatic build in subdir of sources])
+ ,[SUB="$enableval"], [SUB="auto"])
+if test ".$ac_srcdir_defaulted" != ".no" ; then
+if test ".$srcdir" = ".." ; then
+ if test -f config.status ; then
+ AC_MSG_NOTICE(toplevel srcdir already configured... skipping subdir build)
+ else
+ test ".$SUB" = "." && SUB="."
+ test ".$SUB" = ".no" && SUB="."
+ test ".$TARGET" = "." && TARGET="$target"
+ test ".$SUB" = ".auto" && SUB="m4_ifval([$1], [$1],[$TARGET])"
+ if test ".$SUB" != ".." ; then # we know where to go and
+ AS_MKDIR_P([$SUB])
+ echo __.$SUB.__ > $SUB/conftest.tmp
+ cd $SUB
+ if grep __.$SUB.__ conftest.tmp >/dev/null 2>/dev/null ; then
+ rm conftest.tmp
+ AC_MSG_RESULT([continue configure in default builddir "./$SUB"])
+ else
+ AC_MSG_ERROR([could not change to default builddir "./$SUB"])
+ fi
+ srcdir=`echo "$SUB" |
+ sed -e 's,^\./,,;s,[[^/]]$,&/,;s,[[^/]]*/,../,g;s,[[/]]$,,;'`
+ # going to restart from subdirectory location
+ test -f $srcdir/config.log && mv $srcdir/config.log .
+ test -f $srcdir/confdefs.h && mv $srcdir/confdefs.h .
+ test -f $srcdir/conftest.log && mv $srcdir/conftest.log .
+ test -f $srcdir/$cache_file && mv $srcdir/$cache_file .
+ AC_MSG_RESULT(....exec $SHELL $srcdir/[$]0 "--srcdir=$srcdir" "--enable-builddir=$SUB" ${1+"[$]@"})
+ case "[$]0" in # restart
+ [/\\]*) eval $SHELL "'[$]0'" "'--srcdir=$srcdir'" "'--enable-builddir=$SUB'" $ac_configure_args ;;
+ *) eval $SHELL "'$srcdir/[$]0'" "'--srcdir=$srcdir'" "'--enable-builddir=$SUB'" $ac_configure_args ;;
+ esac ; exit $?
+ fi
+ fi
+fi fi
+test ".$SUB" = ".auto" && SUB="."
+dnl ac_path_prog uses "set dummy" to override $@ which would defeat the "exec"
+AC_PATH_PROG(SED,gsed sed, sed)
+AUX="$am_aux_dir"
+AS_VAR_POPDEF([SED])dnl
+AS_VAR_POPDEF([AUX])dnl
+AS_VAR_POPDEF([SUB])dnl
+AC_CONFIG_COMMANDS([buildir],[dnl .............. config.status ..............
+AS_VAR_PUSHDEF([SUB],[ax_enable_builddir])dnl
+AS_VAR_PUSHDEF([TOP],[top_srcdir])dnl
+AS_VAR_PUSHDEF([SRC],[ac_top_srcdir])dnl
+AS_VAR_PUSHDEF([AUX],[ax_enable_builddir_auxdir])dnl
+AS_VAR_PUSHDEF([SED],[ax_enable_builddir_sed])dnl
+pushdef([END],[Makefile.mk])dnl
+pushdef([_ALL],[ifelse([$3],,[-all],[$3])])dnl
+ SRC="$ax_enable_builddir_srcdir"
+ if test ".$SUB" = ".." ; then
+ if test -f "$TOP/Makefile" ; then
+ AC_MSG_NOTICE([skipping TOP/Makefile - left untouched])
+ else
+ AC_MSG_NOTICE([skipping TOP/Makefile - not created])
+ fi
+ else
+ if test -f "$SRC/Makefile" ; then
+ a=`grep "^VERSION " "$SRC/Makefile"` ; b=`grep "^VERSION " Makefile`
+ test "$a" != "$b" && rm "$SRC/Makefile"
+ fi
+ if test -f "$SRC/Makefile" ; then
+ echo "$SRC/Makefile : $SRC/Makefile.in" > $tmp/conftemp.mk
+ echo " []@ echo 'REMOVED,,,' >\$[]@" >> $tmp/conftemp.mk
+ eval "${MAKE-make} -f $tmp/conftemp.mk 2>/dev/null >/dev/null"
+ if grep '^REMOVED,,,' "$SRC/Makefile" >/dev/null
+ then rm $SRC/Makefile ; fi
+ cp $tmp/conftemp.mk $SRC/makefiles.mk~ ## DEBUGGING
+ fi
+ if test ! -f "$SRC/Makefile" ; then
+ AC_MSG_NOTICE([create TOP/Makefile guessed from local Makefile])
+ x='`' ; cat >$tmp/conftemp.sed <<_EOF
+/^\$/n
+x
+/^\$/bS
+x
+/\\\\\$/{H;d;}
+{H;s/.*//;x;}
+bM
+:S
+x
+/\\\\\$/{h;d;}
+{h;s/.*//;x;}
+:M
+s/\\(\\n\\) /\\1 /g
+/^ /d
+/^[[ ]]*[[\\#]]/d
+/^VPATH *=/d
+s/^srcdir *=.*/srcdir = ./
+s/^top_srcdir *=.*/top_srcdir = ./
+/[[:=]]/!d
+/^\\./d
+dnl Now handle rules (i.e. lines containing ":" but not " = ").
+/ = /b
+/ .= /b
+/:/!b
+s/:.*/:/
+s/ / /g
+s/ \\([[a-z]][[a-z-]]*[[a-zA-Z0-9]]\\)\\([[ :]]\\)/ \\1 \\1[]_ALL\\2/g
+s/^\\([[a-z]][[a-z-]]*[[a-zA-Z0-9]]\\)\\([[ :]]\\)/\\1 \\1[]_ALL\\2/
+s/ / /g
+/^all all[]_ALL[[ :]]/i\\
+all-configured : all[]_ALL
+dnl dist-all exists... and would make for dist-all-all
+s/ [[a-zA-Z0-9-]]*[]_ALL [[a-zA-Z0-9-]]*[]_ALL[]_ALL//g
+/[]_ALL[]_ALL/d
+a\\
+ @ HOST="\$(HOST)\" \\\\\\
+ ; test ".\$\$HOST" = "." && HOST=$x sh $AUX/config.guess $x \\\\\\
+ ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\
+ ; use=$x basename "\$\@" _ALL $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\
+ ; echo "MAKE \$\$HOST : \$\$n * \$\@"; if test "\$\$n" -eq "0" ; then : \\\\\\
+ ; BUILD=$x grep "^####.*|" Makefile |tail -1| sed -e 's/.*|//' $x ; fi \\\\\\
+ ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\
+ ; test "\$\$use" = "\$\@" && BUILD=$x echo "\$\$BUILD" | tail -1 $x \\\\\\
+ ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\
+ ; (cd "\$\$i" && test ! -f configure && \$(MAKE) \$\$use) || exit; done
+dnl special rule add-on: "dist" copies the tarball to $(PUB). (source tree)
+/dist[]_ALL *:/a\\
+ @ HOST="\$(HOST)\" \\\\\\
+ ; test ".\$\$HOST" = "." && HOST=$x sh $AUX/config.guess $x \\\\\\
+ ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\
+ ; found=$x echo \$\$BUILD | wc -w $x \\\\\\
+ ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).tar.*" \\\\\\
+ ; if test "\$\$found" -eq "0" ; then : \\\\\\
+ ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\
+ ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\
+ ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).tar.* \\\\\\
+ ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done
+dnl special rule add-on: "dist-foo" copies all the archives to $(PUB). (source tree)
+/dist-[[a-zA-Z0-9]]*[]_ALL *:/a\\
+ @ HOST="\$(HOST)\" \\\\\\
+ ; test ".\$\$HOST" = "." && HOST=$x sh ./config.guess $x \\\\\\
+ ; BUILD=$x grep "^#### \$\$HOST " Makefile | sed -e 's/.*|//' $x \\\\\\
+ ; found=$x echo \$\$BUILD | wc -w $x \\\\\\
+ ; echo "MAKE \$\$HOST : \$\$found \$(PACKAGE)-\$(VERSION).*" \\\\\\
+ ; if test "\$\$found" -eq "0" ; then : \\\\\\
+ ; BUILD=$x grep "^#### .*|" Makefile |tail -1| sed -e 's/.*|//' $x \\\\\\
+ ; fi ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\
+ ; for f in \$\$i/\$(PACKAGE)-\$(VERSION).* \\\\\\
+ ; do test -f "\$\$f" && mv "\$\$f" \$(PUB). ; done ; break ; done
+dnl special rule add-on: "distclean" removes all local builddirs completely
+/distclean[]_ALL *:/a\\
+ @ HOST="\$(HOST)\" \\\\\\
+ ; test ".\$\$HOST" = "." && HOST=$x sh $AUX/config.guess $x \\\\\\
+ ; BUILD=$x grep "^#### .*|" Makefile | sed -e 's/.*|//' $x \\\\\\
+ ; use=$x basename "\$\@" _ALL $x; n=$x echo \$\$BUILD | wc -w $x \\\\\\
+ ; echo "MAKE \$\$HOST : \$\$n * \$\@ (all local builds)" \\\\\\
+ ; test ".\$\$BUILD" = "." && BUILD="." \\\\\\
+ ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\
+ ; echo "# rm -r \$\$i"; done ; echo "# (sleep 3)" ; sleep 3 \\\\\\
+ ; for i in \$\$BUILD ; do test ".\$\$i" = "." && continue \\\\\\
+ ; echo "\$\$i" | grep "^/" > /dev/null && continue \\\\\\
+ ; echo "\$\$i" | grep "^../" > /dev/null && continue \\\\\\
+ ; echo "rm -r \$\$i"; (rm -r "\$\$i") ; done ; rm Makefile
+_EOF
+ cp "$tmp/conftemp.sed" "$SRC/makefile.sed~" ## DEBUGGING
+ $SED -f $tmp/conftemp.sed Makefile >$SRC/Makefile
+ if test -f "$SRC/m4_ifval([$2],[$2],[END])" ; then
+ AC_MSG_NOTICE([extend TOP/Makefile with TOP/m4_ifval([$2],[$2],[END])])
+ cat $SRC/END >>$SRC/Makefile
+ fi ; xxxx="####"
+ echo "$xxxx CONFIGURATIONS FOR TOPLEVEL MAKEFILE: " >>$SRC/Makefile
+ # sanity check
+ if grep '^; echo "MAKE ' $SRC/Makefile >/dev/null ; then
+ AC_MSG_NOTICE([buggy sed found - it deletes tab in "a" text parts])
+ $SED -e '/^@ HOST=/s/^/ /' -e '/^; /s/^/ /' $SRC/Makefile \
+ >$SRC/Makefile~
+ (test -s $SRC/Makefile~ && mv $SRC/Makefile~ $SRC/Makefile) 2>/dev/null
+ fi
+ else
+ xxxx="\\#\\#\\#\\#"
+ # echo "/^$xxxx *$ax_enable_builddir_host /d" >$tmp/conftemp.sed
+ echo "s!^$xxxx [[^|]]* | *$SUB *\$!$xxxx ...... $SUB!" >$tmp/conftemp.sed
+ $SED -f "$tmp/conftemp.sed" "$SRC/Makefile" >$tmp/mkfile.tmp
+ cp "$tmp/conftemp.sed" "$SRC/makefiles.sed~" ## DEBUGGING
+ cp "$tmp/mkfile.tmp" "$SRC/makefiles.out~" ## DEBUGGING
+ if cmp -s "$SRC/Makefile" "$tmp/mkfile.tmp" 2>/dev/null ; then
+ AC_MSG_NOTICE([keeping TOP/Makefile from earlier configure])
+ rm "$tmp/mkfile.tmp"
+ else
+ AC_MSG_NOTICE([reusing TOP/Makefile from earlier configure])
+ mv "$tmp/mkfile.tmp" "$SRC/Makefile"
+ fi
+ fi
+ AC_MSG_NOTICE([build in $SUB (HOST=$ax_enable_builddir_host)])
+ xxxx="####"
+ echo "$xxxx" "$ax_enable_builddir_host" "|$SUB" >>$SRC/Makefile
+ fi
+popdef([END])dnl
+AS_VAR_POPDEF([SED])dnl
+AS_VAR_POPDEF([AUX])dnl
+AS_VAR_POPDEF([SRC])dnl
+AS_VAR_POPDEF([TOP])dnl
+AS_VAR_POPDEF([SUB])dnl
+],[dnl
+ax_enable_builddir_srcdir="$srcdir" # $srcdir
+ax_enable_builddir_host="$HOST" # $HOST / $host
+ax_enable_builddir_version="$VERSION" # $VERSION
+ax_enable_builddir_package="$PACKAGE" # $PACKAGE
+ax_enable_builddir_auxdir="$ax_enable_builddir_auxdir" # $AUX
+ax_enable_builddir_sed="$ax_enable_builddir_sed" # $SED
+ax_enable_builddir="$ax_enable_builddir" # $SUB
+])dnl
+])
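
For orientation, a minimal sketch (not part of the imported file): the sed program above rewrites the generated top-level Makefile so that each forwarding target ("dist-all", "distclean-all" and friends, as the dnl comments call them) loops over the build directories registered at the bottom of that Makefile. Configure appends one "#### <host> |<subdir>" registration line per configured host, and every add-on rule recovers its builddir list by grepping that line and stripping everything up to the "|". The host triple and the build/ subdirectory below are hypothetical examples.

# Sketch of the builddir lookup performed by the generated add-on rules.
printf '%s\n' '#### CONFIGURATIONS FOR TOPLEVEL MAKEFILE:' \
  '#### x86_64-unknown-linux-gnu |build/' > Makefile.demo
HOST=x86_64-unknown-linux-gnu
BUILD=`grep "^#### $HOST " Makefile.demo | sed -e 's/.*|//'`
echo "forward the target into: $BUILD"   # prints: build/
rm -f Makefile.demo
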
diff --git a/Modules/_ctypes/libffi/m4/ax_gcc_archflag.m4 b/Modules/_ctypes/libffi/m4/ax_gcc_archflag.m4
new file mode 100644
index 0000000..3fd050e
--- /dev/null
+++ b/Modules/_ctypes/libffi/m4/ax_gcc_archflag.m4
@@ -0,0 +1,225 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_gcc_archflag.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_GCC_ARCHFLAG([PORTABLE?], [ACTION-SUCCESS], [ACTION-FAILURE])
+#
+# DESCRIPTION
+#
+# This macro tries to guess the "native" arch corresponding to the target
+# architecture for use with gcc's -march=arch or -mtune=arch flags. If
+# found, the cache variable $ax_cv_gcc_archflag is set to this flag and
+# ACTION-SUCCESS is executed; otherwise $ax_cv_gcc_archflag is set to
+# "unknown" and ACTION-FAILURE is executed. The default ACTION-SUCCESS is
+# to add $ax_cv_gcc_archflag to the end of $CFLAGS.
+#
+# PORTABLE? should be either [yes] (default) or [no]. In the former case,
+# the flag is set to -mtune (or equivalent) so that the architecture is
+# only used for tuning, but the instruction set used is still portable. In
+# the latter case, the flag is set to -march (or equivalent) so that
+# architecture-specific instructions are enabled.
+#
+# The user can specify --with-gcc-arch=<arch> in order to override the
+# macro's choice of architecture, or --without-gcc-arch to disable this.
+#
+# When cross-compiling, or if $CC is not gcc, then ACTION-FAILURE is
+# called unless the user specified --with-gcc-arch manually.
+#
+# Requires macros: AX_CHECK_COMPILE_FLAG, AX_GCC_X86_CPUID
+#
+# (The main emphasis here is on recent CPUs, on the principle that doing
+# high-performance computing on old hardware is uncommon.)
+#
+# LICENSE
+#
+# Copyright (c) 2008 Steven G. Johnson <stevenj@alum.mit.edu>
+# Copyright (c) 2008 Matteo Frigo
+# Copyright (c) 2012 Tsukasa Oi
+#
+# This program is free software: you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 11
+
+AC_DEFUN([AX_GCC_ARCHFLAG],
+[AC_REQUIRE([AC_PROG_CC])
+AC_REQUIRE([AC_CANONICAL_HOST])
+
+AC_ARG_WITH(gcc-arch, [AS_HELP_STRING([--with-gcc-arch=<arch>], [use architecture <arch> for gcc -march/-mtune, instead of guessing])],
+ ax_gcc_arch=$withval, ax_gcc_arch=yes)
+
+AC_MSG_CHECKING([for gcc architecture flag])
+AC_MSG_RESULT([])
+AC_CACHE_VAL(ax_cv_gcc_archflag,
+[
+ax_cv_gcc_archflag="unknown"
+
+if test "$GCC" = yes; then
+
+if test "x$ax_gcc_arch" = xyes; then
+ax_gcc_arch=""
+if test "$cross_compiling" = no; then
+case $host_cpu in
+ i[[3456]]86*|x86_64*) # use cpuid codes
+ AX_GCC_X86_CPUID(0)
+ AX_GCC_X86_CPUID(1)
+ case $ax_cv_gcc_x86_cpuid_0 in
+ *:756e6547:*:*) # Intel
+ case $ax_cv_gcc_x86_cpuid_1 in
+ *5[[48]]?:*:*:*) ax_gcc_arch="pentium-mmx pentium" ;;
+ *5??:*:*:*) ax_gcc_arch=pentium ;;
+ *0?6[[3456]]?:*:*:*) ax_gcc_arch="pentium2 pentiumpro" ;;
+ *0?6a?:*[[01]]:*:*) ax_gcc_arch="pentium2 pentiumpro" ;;
+ *0?6a?:*[[234]]:*:*) ax_gcc_arch="pentium3 pentiumpro" ;;
+ *0?6[[9de]]?:*:*:*) ax_gcc_arch="pentium-m pentium3 pentiumpro" ;;
+ *0?6[[78b]]?:*:*:*) ax_gcc_arch="pentium3 pentiumpro" ;;
+ *0?6f?:*:*:*|*1?66?:*:*:*) ax_gcc_arch="core2 pentium-m pentium3 pentiumpro" ;;
+ *1?6[[7d]]?:*:*:*) ax_gcc_arch="penryn core2 pentium-m pentium3 pentiumpro" ;;
+ *1?6[[aef]]?:*:*:*|*2?6[[5cef]]?:*:*:*) ax_gcc_arch="corei7 core2 pentium-m pentium3 pentiumpro" ;;
+ *1?6c?:*:*:*|*[[23]]?66?:*:*:*) ax_gcc_arch="atom core2 pentium-m pentium3 pentiumpro" ;;
+ *2?6[[ad]]?:*:*:*) ax_gcc_arch="corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;;
+ *0?6??:*:*:*) ax_gcc_arch=pentiumpro ;;
+ *6??:*:*:*) ax_gcc_arch="core2 pentiumpro" ;;
+ ?000?f3[[347]]:*:*:*|?000?f4[1347]:*:*:*|?000?f6?:*:*:*)
+ case $host_cpu in
+ x86_64*) ax_gcc_arch="nocona pentium4 pentiumpro" ;;
+ *) ax_gcc_arch="prescott pentium4 pentiumpro" ;;
+ esac ;;
+ ?000?f??:*:*:*) ax_gcc_arch="pentium4 pentiumpro";;
+ esac ;;
+ *:68747541:*:*) # AMD
+ case $ax_cv_gcc_x86_cpuid_1 in
+ *5[[67]]?:*:*:*) ax_gcc_arch=k6 ;;
+ *5[[8d]]?:*:*:*) ax_gcc_arch="k6-2 k6" ;;
+ *5[[9]]?:*:*:*) ax_gcc_arch="k6-3 k6" ;;
+ *60?:*:*:*) ax_gcc_arch=k7 ;;
+ *6[[12]]?:*:*:*) ax_gcc_arch="athlon k7" ;;
+ *6[[34]]?:*:*:*) ax_gcc_arch="athlon-tbird k7" ;;
+ *67?:*:*:*) ax_gcc_arch="athlon-4 athlon k7" ;;
+ *6[[68a]]?:*:*:*)
+ AX_GCC_X86_CPUID(0x80000006) # L2 cache size
+ case $ax_cv_gcc_x86_cpuid_0x80000006 in
+ *:*:*[[1-9a-f]]??????:*) # (L2 = ecx >> 16) >= 256
+ ax_gcc_arch="athlon-xp athlon-4 athlon k7" ;;
+ *) ax_gcc_arch="athlon-4 athlon k7" ;;
+ esac ;;
+ ?00??f[[4cef8b]]?:*:*:*) ax_gcc_arch="athlon64 k8" ;;
+ ?00??f5?:*:*:*) ax_gcc_arch="opteron k8" ;;
+ ?00??f7?:*:*:*) ax_gcc_arch="athlon-fx opteron k8" ;;
+ ?00??f??:*:*:*) ax_gcc_arch="k8" ;;
+ ?05??f??:*:*:*) ax_gcc_arch="btver1 amdfam10 k8" ;;
+ ?06??f??:*:*:*) ax_gcc_arch="bdver1 amdfam10 k8" ;;
+ *f??:*:*:*) ax_gcc_arch="amdfam10 k8" ;;
+ esac ;;
+ *:746e6543:*:*) # IDT
+ case $ax_cv_gcc_x86_cpuid_1 in
+ *54?:*:*:*) ax_gcc_arch=winchip-c6 ;;
+ *58?:*:*:*) ax_gcc_arch=winchip2 ;;
+ *6[[78]]?:*:*:*) ax_gcc_arch=c3 ;;
+ *69?:*:*:*) ax_gcc_arch="c3-2 c3" ;;
+ esac ;;
+ esac
+ if test x"$ax_gcc_arch" = x; then # fallback
+ case $host_cpu in
+ i586*) ax_gcc_arch=pentium ;;
+ i686*) ax_gcc_arch=pentiumpro ;;
+ esac
+ fi
+ ;;
+
+ sparc*)
+ AC_PATH_PROG([PRTDIAG], [prtdiag], [prtdiag], [$PATH:/usr/platform/`uname -i`/sbin/:/usr/platform/`uname -m`/sbin/])
+ cputype=`(((grep cpu /proc/cpuinfo | cut -d: -f2) ; ($PRTDIAG -v |grep -i sparc) ; grep -i cpu /var/run/dmesg.boot ) | head -n 1) 2> /dev/null`
+ cputype=`echo "$cputype" | tr -d ' -' |tr $as_cr_LETTERS $as_cr_letters`
+ case $cputype in
+ *ultrasparciv*) ax_gcc_arch="ultrasparc4 ultrasparc3 ultrasparc v9" ;;
+ *ultrasparciii*) ax_gcc_arch="ultrasparc3 ultrasparc v9" ;;
+ *ultrasparc*) ax_gcc_arch="ultrasparc v9" ;;
+ *supersparc*|*tms390z5[[05]]*) ax_gcc_arch="supersparc v8" ;;
+ *hypersparc*|*rt62[[056]]*) ax_gcc_arch="hypersparc v8" ;;
+ *cypress*) ax_gcc_arch=cypress ;;
+ esac ;;
+
+ alphaev5) ax_gcc_arch=ev5 ;;
+ alphaev56) ax_gcc_arch=ev56 ;;
+ alphapca56) ax_gcc_arch="pca56 ev56" ;;
+ alphapca57) ax_gcc_arch="pca57 pca56 ev56" ;;
+ alphaev6) ax_gcc_arch=ev6 ;;
+ alphaev67) ax_gcc_arch=ev67 ;;
+ alphaev68) ax_gcc_arch="ev68 ev67" ;;
+ alphaev69) ax_gcc_arch="ev69 ev68 ev67" ;;
+ alphaev7) ax_gcc_arch="ev7 ev69 ev68 ev67" ;;
+ alphaev79) ax_gcc_arch="ev79 ev7 ev69 ev68 ev67" ;;
+
+ powerpc*)
+ cputype=`((grep cpu /proc/cpuinfo | head -n 1 | cut -d: -f2 | cut -d, -f1 | sed 's/ //g') ; /usr/bin/machine ; /bin/machine; grep CPU /var/run/dmesg.boot | head -n 1 | cut -d" " -f2) 2> /dev/null`
+ cputype=`echo $cputype | sed -e 's/ppc//g;s/ *//g'`
+ case $cputype in
+ *750*) ax_gcc_arch="750 G3" ;;
+ *740[[0-9]]*) ax_gcc_arch="$cputype 7400 G4" ;;
+ *74[[4-5]][[0-9]]*) ax_gcc_arch="$cputype 7450 G4" ;;
+ *74[[0-9]][[0-9]]*) ax_gcc_arch="$cputype G4" ;;
+ *970*) ax_gcc_arch="970 G5 power4";;
+ *POWER4*|*power4*|*gq*) ax_gcc_arch="power4 970";;
+ *POWER5*|*power5*|*gr*|*gs*) ax_gcc_arch="power5 power4 970";;
+ 603ev|8240) ax_gcc_arch="$cputype 603e 603";;
+ *) ax_gcc_arch=$cputype ;;
+ esac
+ ax_gcc_arch="$ax_gcc_arch powerpc"
+ ;;
+esac
+fi # not cross-compiling
+fi # guess arch
+
+if test "x$ax_gcc_arch" != x -a "x$ax_gcc_arch" != xno; then
+for arch in $ax_gcc_arch; do
+ if test "x[]m4_default([$1],yes)" = xyes; then # if we require portable code
+ flags="-mtune=$arch"
+ # -mcpu=$arch and m$arch generate nonportable code on every arch except
+ # x86. And some other arches (e.g. Alpha) don't accept -mtune. Grrr.
+ case $host_cpu in i*86|x86_64*) flags="$flags -mcpu=$arch -m$arch";; esac
+ else
+ flags="-march=$arch -mcpu=$arch -m$arch"
+ fi
+ for flag in $flags; do
+ AX_CHECK_COMPILE_FLAG($flag, [ax_cv_gcc_archflag=$flag; break])
+ done
+ test "x$ax_cv_gcc_archflag" = xunknown || break
+done
+fi
+
+fi # $GCC=yes
+])
+AC_MSG_CHECKING([for gcc architecture flag])
+AC_MSG_RESULT($ax_cv_gcc_archflag)
+if test "x$ax_cv_gcc_archflag" = xunknown; then
+ m4_default([$3],:)
+else
+ m4_default([$2], [CFLAGS="$CFLAGS $ax_cv_gcc_archflag"])
+fi
+])
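
For orientation, a minimal configure.ac sketch (not part of the imported file) of the usage documented in the SYNOPSIS above. It assumes ax_gcc_archflag.m4, ax_gcc_x86_cpuid.m4 and ax_check_compile_flag.m4 are on the aclocal include path; the package name, the [no] portability setting and the warning text are hypothetical.

AC_INIT([demo], [1.0])
AC_PROG_CC
dnl [no]: ask for -march (architecture-specific code); the success action
dnl shown here just restates the macro's default of appending the flag to CFLAGS.
AX_GCC_ARCHFLAG([no],
  [CFLAGS="$CFLAGS $ax_cv_gcc_archflag"],
  [AC_MSG_WARN([no usable architecture flag found; building generic code])])
AC_OUTPUT

At configure time the guess can still be overridden with --with-gcc-arch=<arch> or disabled with --without-gcc-arch, exactly as described above.
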
diff --git a/Modules/_ctypes/libffi/m4/ax_gcc_x86_cpuid.m4 b/Modules/_ctypes/libffi/m4/ax_gcc_x86_cpuid.m4
new file mode 100644
index 0000000..7d46fee
--- /dev/null
+++ b/Modules/_ctypes/libffi/m4/ax_gcc_x86_cpuid.m4
@@ -0,0 +1,79 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_gcc_x86_cpuid.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_GCC_X86_CPUID(OP)
+#
+# DESCRIPTION
+#
+# On Pentium and later x86 processors, with gcc or a compiler that has a
+# compatible syntax for inline assembly instructions, run a small program
+# that executes the cpuid instruction with input OP. This can be used to
+# detect the CPU type.
+#
+# On output, the values of the eax, ebx, ecx, and edx registers are stored
+# as hexadecimal strings as "eax:ebx:ecx:edx" in the cache variable
+# ax_cv_gcc_x86_cpuid_OP.
+#
+# If the cpuid instruction fails (because you are running a
+# cross-compiler, or because you are not using gcc, or because you are on
+# a processor that doesn't have this instruction), ax_cv_gcc_x86_cpuid_OP
+# is set to the string "unknown".
+#
+# This macro mainly exists to be used in AX_GCC_ARCHFLAG.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Steven G. Johnson <stevenj@alum.mit.edu>
+# Copyright (c) 2008 Matteo Frigo
+#
+# This program is free software: you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 7
+
+AC_DEFUN([AX_GCC_X86_CPUID],
+[AC_REQUIRE([AC_PROG_CC])
+AC_LANG_PUSH([C])
+AC_CACHE_CHECK(for x86 cpuid $1 output, ax_cv_gcc_x86_cpuid_$1,
+ [AC_RUN_IFELSE([AC_LANG_PROGRAM([#include <stdio.h>], [
+ int op = $1, eax, ebx, ecx, edx;
+ FILE *f;
+ __asm__("cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a" (op));
+ f = fopen("conftest_cpuid", "w"); if (!f) return 1;
+ fprintf(f, "%x:%x:%x:%x\n", eax, ebx, ecx, edx);
+ fclose(f);
+ return 0;
+])],
+ [ax_cv_gcc_x86_cpuid_$1=`cat conftest_cpuid`; rm -f conftest_cpuid],
+ [ax_cv_gcc_x86_cpuid_$1=unknown; rm -f conftest_cpuid],
+ [ax_cv_gcc_x86_cpuid_$1=unknown])])
+AC_LANG_POP([C])
+])
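
For orientation, a minimal sketch (not part of the imported file) of calling the macro from a configure.ac that has already run AC_PROG_CC, and reading the eax:ebx:ecx:edx cache variable documented above. The vendor-id patterns are the same ones ax_gcc_archflag.m4 matches; the notice texts are hypothetical.

AX_GCC_X86_CPUID([0])
AS_CASE([$ax_cv_gcc_x86_cpuid_0],
  [unknown],        [AC_MSG_NOTICE([cpuid not usable on this host/compiler])],
  [*:756e6547:*:*], [AC_MSG_NOTICE([GenuineIntel vendor id])],
  [*:68747541:*:*], [AC_MSG_NOTICE([AuthenticAMD vendor id])],
  [AC_MSG_NOTICE([cpuid 0 returned $ax_cv_gcc_x86_cpuid_0])])
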
diff --git a/Modules/_ctypes/libffi/m4/libtool.m4 b/Modules/_ctypes/libffi/m4/libtool.m4
index 671cde1..3318f27 100644
--- a/Modules/_ctypes/libffi/m4/libtool.m4
+++ b/Modules/_ctypes/libffi/m4/libtool.m4
@@ -1,7 +1,8 @@
# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*-
#
# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
-# 2006, 2007, 2008 Free Software Foundation, Inc.
+# 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
# Written by Gordon Matzigkeit, 1996
#
# This file is free software; the Free Software Foundation gives
@@ -10,7 +11,8 @@
m4_define([_LT_COPYING], [dnl
# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
-# 2006, 2007, 2008 Free Software Foundation, Inc.
+# 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
# Written by Gordon Matzigkeit, 1996
#
# This file is part of GNU Libtool.
@@ -37,7 +39,7 @@ m4_define([_LT_COPYING], [dnl
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
])
-# serial 56 LT_INIT
+# serial 57 LT_INIT
# LT_PREREQ(VERSION)
@@ -66,6 +68,7 @@ esac
# ------------------
AC_DEFUN([LT_INIT],
[AC_PREREQ([2.58])dnl We use AC_INCLUDES_DEFAULT
+AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl
AC_BEFORE([$0], [LT_LANG])dnl
AC_BEFORE([$0], [LT_OUTPUT])dnl
AC_BEFORE([$0], [LTDL_INIT])dnl
@@ -82,6 +85,8 @@ AC_REQUIRE([LTVERSION_VERSION])dnl
AC_REQUIRE([LTOBSOLETE_VERSION])dnl
m4_require([_LT_PROG_LTMAIN])dnl
+_LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}])
+
dnl Parse OPTIONS
_LT_SET_OPTIONS([$0], [$1])
@@ -118,7 +123,7 @@ m4_defun([_LT_CC_BASENAME],
*) break;;
esac
done
-cc_basename=`$ECHO "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"`
+cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
])
@@ -138,6 +143,11 @@ m4_defun([_LT_FILEUTILS_DEFAULTS],
m4_defun([_LT_SETUP],
[AC_REQUIRE([AC_CANONICAL_HOST])dnl
AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl
+AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl
+
+_LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl
+dnl
_LT_DECL([], [host_alias], [0], [The host system])dnl
_LT_DECL([], [host], [0])dnl
_LT_DECL([], [host_os], [0])dnl
@@ -160,10 +170,13 @@ _LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl
dnl
m4_require([_LT_FILEUTILS_DEFAULTS])dnl
m4_require([_LT_CHECK_SHELL_FEATURES])dnl
+m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl
m4_require([_LT_CMD_RELOAD])dnl
m4_require([_LT_CHECK_MAGIC_METHOD])dnl
+m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl
m4_require([_LT_CMD_OLD_ARCHIVE])dnl
m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl
+m4_require([_LT_WITH_SYSROOT])dnl
_LT_CONFIG_LIBTOOL_INIT([
# See if we are running on zsh, and set the options which allow our
@@ -179,7 +192,6 @@ fi
_LT_CHECK_OBJDIR
m4_require([_LT_TAG_COMPILER])dnl
-_LT_PROG_ECHO_BACKSLASH
case $host_os in
aix3*)
@@ -193,23 +205,6 @@ aix3*)
;;
esac
-# Sed substitution that helps us do robust quoting. It backslashifies
-# metacharacters that are still active within double-quoted strings.
-sed_quote_subst='s/\([["`$\\]]\)/\\\1/g'
-
-# Same as above, but do not quote variable references.
-double_quote_subst='s/\([["`\\]]\)/\\\1/g'
-
-# Sed substitution to delay expansion of an escaped shell variable in a
-# double_quote_subst'ed string.
-delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
-
-# Sed substitution to delay expansion of an escaped single quote.
-delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g'
-
-# Sed substitution to avoid accidental globbing in evaled expressions
-no_glob_subst='s/\*/\\\*/g'
-
# Global variables:
ofile=libtool
can_build_shared=yes
@@ -250,6 +245,28 @@ _LT_CONFIG_COMMANDS
])# _LT_SETUP
+# _LT_PREPARE_SED_QUOTE_VARS
+# --------------------------
+# Define a few sed substitution that help us do robust quoting.
+m4_defun([_LT_PREPARE_SED_QUOTE_VARS],
+[# Backslashify metacharacters that are still active within
+# double-quoted strings.
+sed_quote_subst='s/\([["`$\\]]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\([["`\\]]\)/\\\1/g'
+
+# Sed substitution to delay expansion of an escaped shell variable in a
+# double_quote_subst'ed string.
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+
+# Sed substitution to delay expansion of an escaped single quote.
+delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g'
+
+# Sed substitution to avoid accidental globbing in evaled expressions
+no_glob_subst='s/\*/\\\*/g'
+])
+
# _LT_PROG_LTMAIN
# ---------------
# Note that this code is called both from `configure', and `config.status'
@@ -408,7 +425,7 @@ m4_define([_lt_decl_all_varnames],
# declaration there will have the same value as in `configure'. VARNAME
# must have a single quote delimited value for this to work.
m4_define([_LT_CONFIG_STATUS_DECLARE],
-[$1='`$ECHO "X$][$1" | $Xsed -e "$delay_single_quote_subst"`'])
+[$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`'])
# _LT_CONFIG_STATUS_DECLARATIONS
@@ -418,7 +435,7 @@ m4_define([_LT_CONFIG_STATUS_DECLARE],
# embedded single quotes properly. In configure, this macro expands
# each variable declared with _LT_DECL (and _LT_TAGDECL) into:
#
-# <var>='`$ECHO "X$<var>" | $Xsed -e "$delay_single_quote_subst"`'
+# <var>='`$ECHO "$<var>" | $SED "$delay_single_quote_subst"`'
m4_defun([_LT_CONFIG_STATUS_DECLARATIONS],
[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames),
[m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])])
@@ -517,12 +534,20 @@ LTCC='$LTCC'
LTCFLAGS='$LTCFLAGS'
compiler='$compiler_DEFAULT'
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+ eval 'cat <<_LTECHO_EOF
+\$[]1
+_LTECHO_EOF'
+}
+
# Quote evaled strings.
for var in lt_decl_all_varnames([[ \
]], lt_decl_quote_varnames); do
- case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in
+ case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
*[[\\\\\\\`\\"\\\$]]*)
- eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$sed_quote_subst\\"\\\`\\\\\\""
+ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\""
;;
*)
eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
@@ -533,9 +558,9 @@ done
# Double-quote double-evaled strings.
for var in lt_decl_all_varnames([[ \
]], lt_decl_dquote_varnames); do
- case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in
+ case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
*[[\\\\\\\`\\"\\\$]]*)
- eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\""
+ eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\""
;;
*)
eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
@@ -543,16 +568,38 @@ for var in lt_decl_all_varnames([[ \
esac
done
-# Fix-up fallback echo if it was mangled by the above quoting rules.
-case \$lt_ECHO in
-*'\\\[$]0 --fallback-echo"')dnl "
- lt_ECHO=\`\$ECHO "X\$lt_ECHO" | \$Xsed -e 's/\\\\\\\\\\\\\\\[$]0 --fallback-echo"\[$]/\[$]0 --fallback-echo"/'\`
- ;;
-esac
-
_LT_OUTPUT_LIBTOOL_INIT
])
+# _LT_GENERATED_FILE_INIT(FILE, [COMMENT])
+# ------------------------------------
+# Generate a child script FILE with all initialization necessary to
+# reuse the environment learned by the parent script, and make the
+# file executable. If COMMENT is supplied, it is inserted after the
+# `#!' sequence but before initialization text begins. After this
+# macro, additional text can be appended to FILE to form the body of
+# the child script. The macro ends with non-zero status if the
+# file could not be fully written (such as if the disk is full).
+m4_ifdef([AS_INIT_GENERATED],
+[m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])],
+[m4_defun([_LT_GENERATED_FILE_INIT],
+[m4_require([AS_PREPARE])]dnl
+[m4_pushdef([AS_MESSAGE_LOG_FD])]dnl
+[lt_write_fail=0
+cat >$1 <<_ASEOF || lt_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+$2
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
+cat >>$1 <<\_ASEOF || lt_write_fail=1
+AS_SHELL_SANITIZE
+_AS_PREPARE
+exec AS_MESSAGE_FD>&1
+_ASEOF
+test $lt_write_fail = 0 && chmod +x $1[]dnl
+m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT
# LT_OUTPUT
# ---------
@@ -562,20 +609,11 @@ _LT_OUTPUT_LIBTOOL_INIT
AC_DEFUN([LT_OUTPUT],
[: ${CONFIG_LT=./config.lt}
AC_MSG_NOTICE([creating $CONFIG_LT])
-cat >"$CONFIG_LT" <<_LTEOF
-#! $SHELL
-# Generated by $as_me.
-# Run this file to recreate a libtool stub with the current configuration.
-
-lt_cl_silent=false
-SHELL=\${CONFIG_SHELL-$SHELL}
-_LTEOF
+_LT_GENERATED_FILE_INIT(["$CONFIG_LT"],
+[# Run this file to recreate a libtool stub with the current configuration.])
cat >>"$CONFIG_LT" <<\_LTEOF
-AS_SHELL_SANITIZE
-_AS_PREPARE
-
-exec AS_MESSAGE_FD>&1
+lt_cl_silent=false
exec AS_MESSAGE_LOG_FD>>config.log
{
echo
@@ -601,7 +639,7 @@ m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl
m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION])
configured by $[0], generated by m4_PACKAGE_STRING.
-Copyright (C) 2008 Free Software Foundation, Inc.
+Copyright (C) 2011 Free Software Foundation, Inc.
This config.lt script is free software; the Free Software Foundation
gives unlimited permision to copy, distribute and modify it."
@@ -646,15 +684,13 @@ chmod +x "$CONFIG_LT"
# appending to config.log, which fails on DOS, as config.log is still kept
# open by configure. Here we exec the FD to /dev/null, effectively closing
# config.log, so it can be properly (re)opened and appended to by config.lt.
-if test "$no_create" != yes; then
- lt_cl_success=:
- test "$silent" = yes &&
- lt_config_lt_args="$lt_config_lt_args --quiet"
- exec AS_MESSAGE_LOG_FD>/dev/null
- $SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false
- exec AS_MESSAGE_LOG_FD>>config.log
- $lt_cl_success || AS_EXIT(1)
-fi
+lt_cl_success=:
+test "$silent" = yes &&
+ lt_config_lt_args="$lt_config_lt_args --quiet"
+exec AS_MESSAGE_LOG_FD>/dev/null
+$SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false
+exec AS_MESSAGE_LOG_FD>>config.log
+$lt_cl_success || AS_EXIT(1)
])# LT_OUTPUT
@@ -717,15 +753,12 @@ _LT_EOF
# if finds mixed CR/LF and LF-only lines. Since sed operates in
# text mode, it properly converts lines to CR/LF. This bash problem
# is reportedly fixed, but why not run on old versions too?
- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \
- || (rm -f "$cfgfile"; exit 1)
-
- _LT_PROG_XSI_SHELLFNS
+ sed '$q' "$ltmain" >> "$cfgfile" \
+ || (rm -f "$cfgfile"; exit 1)
- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \
- || (rm -f "$cfgfile"; exit 1)
+ _LT_PROG_REPLACE_SHELLFNS
- mv -f "$cfgfile" "$ofile" ||
+ mv -f "$cfgfile" "$ofile" ||
(rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile")
chmod +x "$ofile"
],
@@ -770,6 +803,7 @@ AC_DEFUN([LT_LANG],
m4_case([$1],
[C], [_LT_LANG(C)],
[C++], [_LT_LANG(CXX)],
+ [Go], [_LT_LANG(GO)],
[Java], [_LT_LANG(GCJ)],
[Fortran 77], [_LT_LANG(F77)],
[Fortran], [_LT_LANG(FC)],
@@ -791,6 +825,31 @@ m4_defun([_LT_LANG],
])# _LT_LANG
+m4_ifndef([AC_PROG_GO], [
+############################################################
+# NOTE: This macro has been submitted for inclusion into #
+# GNU Autoconf as AC_PROG_GO. When it is available in #
+# a released version of Autoconf we should remove this #
+# macro and use it instead. #
+############################################################
+m4_defun([AC_PROG_GO],
+[AC_LANG_PUSH(Go)dnl
+AC_ARG_VAR([GOC], [Go compiler command])dnl
+AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl
+_AC_ARG_VAR_LDFLAGS()dnl
+AC_CHECK_TOOL(GOC, gccgo)
+if test -z "$GOC"; then
+ if test -n "$ac_tool_prefix"; then
+ AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo])
+ fi
+fi
+if test -z "$GOC"; then
+ AC_CHECK_PROG(GOC, gccgo, gccgo, false)
+fi
+])#m4_defun
+])#m4_ifndef
+
+
# _LT_LANG_DEFAULT_CONFIG
# -----------------------
m4_defun([_LT_LANG_DEFAULT_CONFIG],
@@ -821,6 +880,10 @@ AC_PROVIDE_IFELSE([AC_PROG_GCJ],
m4_ifdef([LT_PROG_GCJ],
[m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])])
+AC_PROVIDE_IFELSE([AC_PROG_GO],
+ [LT_LANG(GO)],
+ [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])])
+
AC_PROVIDE_IFELSE([LT_PROG_RC],
[LT_LANG(RC)],
[m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])])
@@ -831,11 +894,13 @@ AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)])
AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)])
AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)])
AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)])
+AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)])
dnl aclocal-1.4 backwards compatibility:
dnl AC_DEFUN([AC_LIBTOOL_CXX], [])
dnl AC_DEFUN([AC_LIBTOOL_F77], [])
dnl AC_DEFUN([AC_LIBTOOL_FC], [])
dnl AC_DEFUN([AC_LIBTOOL_GCJ], [])
+dnl AC_DEFUN([AC_LIBTOOL_RC], [])
# _LT_TAG_COMPILER
@@ -921,7 +986,13 @@ m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[
$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
-dynamiclib -Wl,-single_module conftest.c 2>conftest.err
_lt_result=$?
- if test -f libconftest.dylib && test ! -s conftest.err && test $_lt_result = 0; then
+ # If there is a non-empty error log, and "single_module"
+ # appears in it, assume the flag caused a linker warning
+ if test -s conftest.err && $GREP single_module conftest.err; then
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ # Otherwise, if the output was created with a 0 exit code from
+ # the compiler, it worked.
+ elif test -f libconftest.dylib && test $_lt_result -eq 0; then
lt_cv_apple_cc_single_mod=yes
else
cat conftest.err >&AS_MESSAGE_LOG_FD
@@ -929,6 +1000,7 @@ m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[
rm -rf libconftest.dylib*
rm -f conftest.*
fi])
+
AC_CACHE_CHECK([for -exported_symbols_list linker flag],
[lt_cv_ld_exported_symbols_list],
[lt_cv_ld_exported_symbols_list=no
@@ -940,6 +1012,34 @@ m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[
[lt_cv_ld_exported_symbols_list=no])
LDFLAGS="$save_LDFLAGS"
])
+
+ AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load],
+ [lt_cv_ld_force_load=no
+ cat > conftest.c << _LT_EOF
+int forced_loaded() { return 2;}
+_LT_EOF
+ echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD
+ $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD
+ echo "$AR cru libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD
+ $AR cru libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD
+ echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD
+ $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD
+ cat > conftest.c << _LT_EOF
+int main() { return 0;}
+_LT_EOF
+ echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD
+ $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err
+ _lt_result=$?
+ if test -s conftest.err && $GREP force_load conftest.err; then
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then
+ lt_cv_ld_force_load=yes
+ else
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ fi
+ rm -f conftest.err libconftest.a conftest conftest.c
+ rm -rf conftest.dSYM
+ ])
case $host_os in
rhapsody* | darwin1.[[012]])
_lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;;
@@ -967,7 +1067,7 @@ m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[
else
_lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}'
fi
- if test "$DSYMUTIL" != ":"; then
+ if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then
_lt_dsymutil='~$DSYMUTIL $lib || :'
else
_lt_dsymutil=
@@ -977,8 +1077,8 @@ m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[
])
-# _LT_DARWIN_LINKER_FEATURES
-# --------------------------
+# _LT_DARWIN_LINKER_FEATURES([TAG])
+# ---------------------------------
# Checks for linker and compiler features on darwin
m4_defun([_LT_DARWIN_LINKER_FEATURES],
[
@@ -987,7 +1087,13 @@ m4_defun([_LT_DARWIN_LINKER_FEATURES],
_LT_TAGVAR(hardcode_direct, $1)=no
_LT_TAGVAR(hardcode_automatic, $1)=yes
_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
- _LT_TAGVAR(whole_archive_flag_spec, $1)=''
+ if test "$lt_cv_ld_force_load" = "yes"; then
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+ m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes],
+ [FC], [_LT_TAGVAR(compiler_needs_object, $1)=yes])
+ else
+ _LT_TAGVAR(whole_archive_flag_spec, $1)=''
+ fi
_LT_TAGVAR(link_all_deplibs, $1)=yes
_LT_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined"
case $cc_basename in
@@ -995,7 +1101,7 @@ m4_defun([_LT_DARWIN_LINKER_FEATURES],
*) _lt_dar_can_shared=$GCC ;;
esac
if test "$_lt_dar_can_shared" = "yes"; then
- output_verbose_link_cmd=echo
+ output_verbose_link_cmd=func_echo_all
_LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}"
_LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}"
_LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}"
@@ -1011,203 +1117,142 @@ m4_defun([_LT_DARWIN_LINKER_FEATURES],
fi
])
-# _LT_SYS_MODULE_PATH_AIX
-# -----------------------
+# _LT_SYS_MODULE_PATH_AIX([TAGNAME])
+# ----------------------------------
# Links a minimal program and checks the executable
# for the system default hardcoded library path. In most cases,
# this is /usr/lib:/lib, but when the MPI compilers are used
# the location of the communication and MPI libs are included too.
# If we don't find anything, use the default library path according
# to the aix ld manual.
+# Store the results from the different compilers for each TAGNAME.
+# Allow to override them for all tags through lt_cv_aix_libpath.
m4_defun([_LT_SYS_MODULE_PATH_AIX],
[m4_require([_LT_DECL_SED])dnl
-AC_LINK_IFELSE(AC_LANG_PROGRAM,[
-lt_aix_libpath_sed='
- /Import File Strings/,/^$/ {
- /^0/ {
- s/^0 *\(.*\)$/\1/
- p
- }
- }'
-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
-# Check for a 64-bit object if we didn't find anything.
-if test -z "$aix_libpath"; then
- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
-fi],[])
-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
+if test "${lt_cv_aix_libpath+set}" = set; then
+ aix_libpath=$lt_cv_aix_libpath
+else
+ AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM],[
+ lt_aix_libpath_sed='[
+ /Import File Strings/,/^$/ {
+ /^0/ {
+ s/^0 *\([^ ]*\) *$/\1/
+ p
+ }
+ }]'
+ _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ # Check for a 64-bit object if we didn't find anything.
+ if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then
+ _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ fi],[])
+ if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then
+ _LT_TAGVAR([lt_cv_aix_libpath_], [$1])="/usr/lib:/lib"
+ fi
+ ])
+ aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])
+fi
])# _LT_SYS_MODULE_PATH_AIX
# _LT_SHELL_INIT(ARG)
# -------------------
m4_define([_LT_SHELL_INIT],
-[ifdef([AC_DIVERSION_NOTICE],
- [AC_DIVERT_PUSH(AC_DIVERSION_NOTICE)],
- [AC_DIVERT_PUSH(NOTICE)])
-$1
-AC_DIVERT_POP
-])# _LT_SHELL_INIT
+[m4_divert_text([M4SH-INIT], [$1
+])])# _LT_SHELL_INIT
+
# _LT_PROG_ECHO_BACKSLASH
# -----------------------
-# Add some code to the start of the generated configure script which
-# will find an echo command which doesn't interpret backslashes.
+# Find how we can fake an echo command that does not interpret backslash.
+# In particular, with Autoconf 2.60 or later we add some code to the start
+# of the generated configure script which will find a shell with a builtin
+# printf (which we can use as an echo command).
m4_defun([_LT_PROG_ECHO_BACKSLASH],
-[_LT_SHELL_INIT([
-# Check that we are running under the correct shell.
-SHELL=${CONFIG_SHELL-/bin/sh}
-
-case X$lt_ECHO in
-X*--fallback-echo)
- # Remove one level of quotation (which was required for Make).
- ECHO=`echo "$lt_ECHO" | sed 's,\\\\\[$]\\[$]0,'[$]0','`
- ;;
-esac
-
-ECHO=${lt_ECHO-echo}
-if test "X[$]1" = X--no-reexec; then
- # Discard the --no-reexec flag, and continue.
- shift
-elif test "X[$]1" = X--fallback-echo; then
- # Avoid inline document here, it may be left over
- :
-elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' ; then
- # Yippee, $ECHO works!
- :
+[ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
+
+AC_MSG_CHECKING([how to print strings])
+# Test print first, because it will be a builtin if present.
+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \
+ test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then
+ ECHO='print -r --'
+elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then
+ ECHO='printf %s\n'
else
- # Restart under the correct shell.
- exec $SHELL "[$]0" --no-reexec ${1+"[$]@"}
-fi
-
-if test "X[$]1" = X--fallback-echo; then
- # used as fallback echo
- shift
- cat <<_LT_EOF
-[$]*
-_LT_EOF
- exit 0
+ # Use this function as a fallback that always works.
+ func_fallback_echo ()
+ {
+ eval 'cat <<_LTECHO_EOF
+$[]1
+_LTECHO_EOF'
+ }
+ ECHO='func_fallback_echo'
fi
-# The HP-UX ksh and POSIX shell print the target directory to stdout
-# if CDPATH is set.
-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
-
-if test -z "$lt_ECHO"; then
- if test "X${echo_test_string+set}" != Xset; then
- # find a string as large as possible, as long as the shell can cope with it
- for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do
- # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ...
- if { echo_test_string=`eval $cmd`; } 2>/dev/null &&
- { test "X$echo_test_string" = "X$echo_test_string"; } 2>/dev/null
- then
- break
- fi
- done
- fi
-
- if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' &&
- echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` &&
- test "X$echo_testing_string" = "X$echo_test_string"; then
- :
- else
- # The Solaris, AIX, and Digital Unix default echo programs unquote
- # backslashes. This makes it impossible to quote backslashes using
- # echo "$something" | sed 's/\\/\\\\/g'
- #
- # So, first we look for a working echo in the user's PATH.
-
- lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
- for dir in $PATH /usr/ucb; do
- IFS="$lt_save_ifs"
- if (test -f $dir/echo || test -f $dir/echo$ac_exeext) &&
- test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' &&
- echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` &&
- test "X$echo_testing_string" = "X$echo_test_string"; then
- ECHO="$dir/echo"
- break
- fi
- done
- IFS="$lt_save_ifs"
-
- if test "X$ECHO" = Xecho; then
- # We didn't find a better echo, so look for alternatives.
- if test "X`{ print -r '\t'; } 2>/dev/null`" = 'X\t' &&
- echo_testing_string=`{ print -r "$echo_test_string"; } 2>/dev/null` &&
- test "X$echo_testing_string" = "X$echo_test_string"; then
- # This shell has a builtin print -r that does the trick.
- ECHO='print -r'
- elif { test -f /bin/ksh || test -f /bin/ksh$ac_exeext; } &&
- test "X$CONFIG_SHELL" != X/bin/ksh; then
- # If we have ksh, try running configure again with it.
- ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh}
- export ORIGINAL_CONFIG_SHELL
- CONFIG_SHELL=/bin/ksh
- export CONFIG_SHELL
- exec $CONFIG_SHELL "[$]0" --no-reexec ${1+"[$]@"}
- else
- # Try using printf.
- ECHO='printf %s\n'
- if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' &&
- echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` &&
- test "X$echo_testing_string" = "X$echo_test_string"; then
- # Cool, printf works
- :
- elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` &&
- test "X$echo_testing_string" = 'X\t' &&
- echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
- test "X$echo_testing_string" = "X$echo_test_string"; then
- CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL
- export CONFIG_SHELL
- SHELL="$CONFIG_SHELL"
- export SHELL
- ECHO="$CONFIG_SHELL [$]0 --fallback-echo"
- elif echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` &&
- test "X$echo_testing_string" = 'X\t' &&
- echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` &&
- test "X$echo_testing_string" = "X$echo_test_string"; then
- ECHO="$CONFIG_SHELL [$]0 --fallback-echo"
- else
- # maybe with a smaller string...
- prev=:
+# func_echo_all arg...
+# Invoke $ECHO with all args, space-separated.
+func_echo_all ()
+{
+ $ECHO "$*"
+}
- for cmd in 'echo test' 'sed 2q "[$]0"' 'sed 10q "[$]0"' 'sed 20q "[$]0"' 'sed 50q "[$]0"'; do
- if { test "X$echo_test_string" = "X`eval $cmd`"; } 2>/dev/null
- then
- break
- fi
- prev="$cmd"
- done
+case "$ECHO" in
+ printf*) AC_MSG_RESULT([printf]) ;;
+ print*) AC_MSG_RESULT([print -r]) ;;
+ *) AC_MSG_RESULT([cat]) ;;
+esac
- if test "$prev" != 'sed 50q "[$]0"'; then
- echo_test_string=`eval $prev`
- export echo_test_string
- exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "[$]0" ${1+"[$]@"}
- else
- # Oops. We lost completely, so just stick with echo.
- ECHO=echo
- fi
- fi
- fi
- fi
- fi
-fi
+m4_ifdef([_AS_DETECT_SUGGESTED],
+[_AS_DETECT_SUGGESTED([
+ test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || (
+ ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
+ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
+ PATH=/empty FPATH=/empty; export PATH FPATH
+ test "X`printf %s $ECHO`" = "X$ECHO" \
+ || test "X`print -r -- $ECHO`" = "X$ECHO" )])])
-# Copy echo and quote the copy suitably for passing to libtool from
-# the Makefile, instead of quoting the original, which is used later.
-lt_ECHO=$ECHO
-if test "X$lt_ECHO" = "X$CONFIG_SHELL [$]0 --fallback-echo"; then
- lt_ECHO="$CONFIG_SHELL \\\$\[$]0 --fallback-echo"
-fi
-
-AC_SUBST(lt_ECHO)
-])
_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts])
-_LT_DECL([], [ECHO], [1],
- [An echo program that does not interpret backslashes])
+_LT_DECL([], [ECHO], [1], [An echo program that protects backslashes])
])# _LT_PROG_ECHO_BACKSLASH
+# _LT_WITH_SYSROOT
+# ----------------
+AC_DEFUN([_LT_WITH_SYSROOT],
+[AC_MSG_CHECKING([for sysroot])
+AC_ARG_WITH([sysroot],
+[ --with-sysroot[=DIR] Search for dependent libraries within DIR
+ (or the compiler's sysroot if not specified).],
+[], [with_sysroot=no])
+
+dnl lt_sysroot will always be passed unquoted. We quote it here
+dnl in case the user passed a directory name.
+lt_sysroot=
+case ${with_sysroot} in #(
+ yes)
+ if test "$GCC" = yes; then
+ lt_sysroot=`$CC --print-sysroot 2>/dev/null`
+ fi
+ ;; #(
+ /*)
+ lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"`
+ ;; #(
+ no|'')
+ ;; #(
+ *)
+ AC_MSG_RESULT([${with_sysroot}])
+ AC_MSG_ERROR([The sysroot must be an absolute path.])
+ ;;
+esac
+
+ AC_MSG_RESULT([${lt_sysroot:-no}])
+_LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl
+[dependent libraries, and in which our libraries should be installed.])])
+
# _LT_ENABLE_LOCK
# ---------------
m4_defun([_LT_ENABLE_LOCK],
@@ -1236,7 +1281,7 @@ ia64-*-hpux*)
;;
*-*-irix6*)
# Find out which ABI we are using.
- echo '[#]line __oline__ "configure"' > conftest.$ac_ext
+ echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext
if AC_TRY_EVAL(ac_compile); then
if test "$lt_cv_prog_gnu_ld" = yes; then
case `/usr/bin/file conftest.$ac_objext` in
@@ -1279,7 +1324,14 @@ s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
LD="${LD-ld} -m elf_i386_fbsd"
;;
x86_64-*linux*)
- LD="${LD-ld} -m elf_i386"
+ case `/usr/bin/file conftest.o` in
+ *x86-64*)
+ LD="${LD-ld} -m elf32_x86_64"
+ ;;
+ *)
+ LD="${LD-ld} -m elf_i386"
+ ;;
+ esac
;;
ppc64-*linux*|powerpc64-*linux*)
LD="${LD-ld} -m elf32ppclinux"
@@ -1329,14 +1381,27 @@ s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
CFLAGS="$SAVE_CFLAGS"
fi
;;
-sparc*-*solaris*)
+*-*solaris*)
# Find out which ABI we are using.
echo 'int i;' > conftest.$ac_ext
if AC_TRY_EVAL(ac_compile); then
case `/usr/bin/file conftest.o` in
*64-bit*)
case $lt_cv_prog_gnu_ld in
- yes*) LD="${LD-ld} -m elf64_sparc" ;;
+ yes*)
+ case $host in
+ i?86-*-solaris*)
+ LD="${LD-ld} -m elf_x86_64"
+ ;;
+ sparc*-*-solaris*)
+ LD="${LD-ld} -m elf64_sparc"
+ ;;
+ esac
+ # GNU ld 2.21 introduced _sol2 emulations. Use them if available.
+ if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then
+ LD="${LD-ld}_sol2"
+ fi
+ ;;
*)
if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then
LD="${LD-ld} -64"
@@ -1354,14 +1419,47 @@ need_locks="$enable_libtool_lock"
])# _LT_ENABLE_LOCK
+# _LT_PROG_AR
+# -----------
+m4_defun([_LT_PROG_AR],
+[AC_CHECK_TOOLS(AR, [ar], false)
+: ${AR=ar}
+: ${AR_FLAGS=cru}
+_LT_DECL([], [AR], [1], [The archiver])
+_LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive])
+
+AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file],
+ [lt_cv_ar_at_file=no
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM],
+ [echo conftest.$ac_objext > conftest.lst
+ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD'
+ AC_TRY_EVAL([lt_ar_try])
+ if test "$ac_status" -eq 0; then
+ # Ensure the archiver fails upon bogus file names.
+ rm -f conftest.$ac_objext libconftest.a
+ AC_TRY_EVAL([lt_ar_try])
+ if test "$ac_status" -ne 0; then
+ lt_cv_ar_at_file=@
+ fi
+ fi
+ rm -f conftest.* libconftest.a
+ ])
+ ])
+
+if test "x$lt_cv_ar_at_file" = xno; then
+ archiver_list_spec=
+else
+ archiver_list_spec=$lt_cv_ar_at_file
+fi
+_LT_DECL([], [archiver_list_spec], [1],
+ [How to feed a file listing to the archiver])
+])# _LT_PROG_AR
+
+
# _LT_CMD_OLD_ARCHIVE
# -------------------
m4_defun([_LT_CMD_OLD_ARCHIVE],
-[AC_CHECK_TOOL(AR, ar, false)
-test -z "$AR" && AR=ar
-test -z "$AR_FLAGS" && AR_FLAGS=cru
-_LT_DECL([], [AR], [1], [The archiver])
-_LT_DECL([], [AR_FLAGS], [1])
+[_LT_PROG_AR
AC_CHECK_TOOL(STRIP, strip, :)
test -z "$STRIP" && STRIP=:
@@ -1380,18 +1478,27 @@ old_postuninstall_cmds=
if test -n "$RANLIB"; then
case $host_os in
openbsd*)
- old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib"
+ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib"
;;
*)
- old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib"
+ old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib"
;;
esac
- old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib"
+ old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib"
fi
+
+case $host_os in
+ darwin*)
+ lock_old_archive_extraction=yes ;;
+ *)
+ lock_old_archive_extraction=no ;;
+esac
_LT_DECL([], [old_postinstall_cmds], [2])
_LT_DECL([], [old_postuninstall_cmds], [2])
_LT_TAGDECL([], [old_archive_cmds], [2],
[Commands used to build an old-style archive])
+_LT_DECL([], [lock_old_archive_extraction], [0],
+ [Whether to use a lock for old archive extraction])
])# _LT_CMD_OLD_ARCHIVE
@@ -1416,15 +1523,15 @@ AC_CACHE_CHECK([$1], [$2],
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
+ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&AS_MESSAGE_LOG_FD
- echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+ echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
- $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
$SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
$2=yes
@@ -1464,7 +1571,7 @@ AC_CACHE_CHECK([$1], [$2],
if test -s conftest.err; then
# Append any errors to the config.log.
cat conftest.err 1>&AS_MESSAGE_LOG_FD
- $ECHO "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp
+ $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
$SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
if diff conftest.exp conftest.er2 >/dev/null; then
$2=yes
@@ -1527,6 +1634,11 @@ AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl
lt_cv_sys_max_cmd_len=8192;
;;
+ mint*)
+ # On MiNT this can take a long time and run out of memory.
+ lt_cv_sys_max_cmd_len=8192;
+ ;;
+
amigaos*)
# On AmigaOS with pdksh, this test takes hours, literally.
# So we just punt and use a minimum line length of 8192.
@@ -1552,6 +1664,11 @@ AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl
lt_cv_sys_max_cmd_len=196608
;;
+ os2*)
+ # The test takes a long time on OS/2.
+ lt_cv_sys_max_cmd_len=8192
+ ;;
+
osf*)
# Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
# due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
@@ -1578,7 +1695,8 @@ AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl
;;
*)
lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
- if test -n "$lt_cv_sys_max_cmd_len"; then
+ if test -n "$lt_cv_sys_max_cmd_len" && \
+ test undefined != "$lt_cv_sys_max_cmd_len"; then
lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
else
@@ -1591,8 +1709,8 @@ AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl
# If test is not a shell built-in, we'll probably end up computing a
# maximum length that is only half of the actual maximum length, but
# we can't tell.
- while { test "X"`$SHELL [$]0 --fallback-echo "X$teststring$teststring" 2>/dev/null` \
- = "XX$teststring$teststring"; } >/dev/null 2>&1 &&
+ while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \
+ = "X$teststring$teststring"; } >/dev/null 2>&1 &&
test $i != 17 # 1/2 MB should be enough
do
i=`expr $i + 1`
@@ -1643,7 +1761,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-[#line __oline__ "configure"
+[#line $LINENO "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -1684,7 +1802,13 @@ else
# endif
#endif
-void fnord() { int i=42;}
+/* When -fvisbility=hidden is used, assume the code has been annotated
+ correspondingly for the symbols needed. */
+#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
int main ()
{
void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
@@ -1693,7 +1817,11 @@ int main ()
if (self)
{
if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
- else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ else
+ {
+ if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ else puts (dlerror ());
+ }
/* dlclose (self); */
}
else
@@ -1869,16 +1997,16 @@ AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext],
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
- (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
+ (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&AS_MESSAGE_LOG_FD
- echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+ echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings
- $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp
+ $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
$SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
@@ -2037,6 +2165,7 @@ m4_require([_LT_DECL_EGREP])dnl
m4_require([_LT_FILEUTILS_DEFAULTS])dnl
m4_require([_LT_DECL_OBJDUMP])dnl
m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_CHECK_SHELL_FEATURES])dnl
AC_MSG_CHECKING([dynamic linker characteristics])
m4_if([$1],
[], [
@@ -2045,16 +2174,23 @@ if test "$GCC" = yes; then
darwin*) lt_awk_arg="/^libraries:/,/LR/" ;;
*) lt_awk_arg="/^libraries:/" ;;
esac
- lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e "s,=/,/,g"`
- if $ECHO "$lt_search_path_spec" | $GREP ';' >/dev/null ; then
+ case $host_os in
+ mingw* | cegcc*) lt_sed_strip_eq="s,=\([[A-Za-z]]:\),\1,g" ;;
+ *) lt_sed_strip_eq="s,=/,/,g" ;;
+ esac
+ lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq`
+ case $lt_search_path_spec in
+ *\;*)
# if the path contains ";" then we assume it to be the separator
# otherwise default to the standard path separator (i.e. ":") - it is
# assumed that no part of a normal pathname contains ";" but that should
# okay in the real world where ";" in dirpaths is itself problematic.
- lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e 's/;/ /g'`
- else
- lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
- fi
+ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'`
+ ;;
+ *)
+ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"`
+ ;;
+ esac
# Ok, now we have the path, separated by spaces, we can step through it
# and add multilib dir if necessary.
lt_tmp_lt_search_path_spec=
@@ -2067,7 +2203,7 @@ if test "$GCC" = yes; then
lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path"
fi
done
- lt_search_path_spec=`$ECHO $lt_tmp_lt_search_path_spec | awk '
+ lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk '
BEGIN {RS=" "; FS="/|\n";} {
lt_foo="";
lt_count=0;
@@ -2087,7 +2223,13 @@ BEGIN {RS=" "; FS="/|\n";} {
if (lt_foo != "") { lt_freq[[lt_foo]]++; }
if (lt_freq[[lt_foo]] == 1) { print lt_foo; }
}'`
- sys_lib_search_path_spec=`$ECHO $lt_search_path_spec`
+ # AWK program above erroneously prepends '/' to C:/dos/paths
+ # for these hosts.
+ case $host_os in
+ mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\
+ $SED 's,/\([[A-Za-z]]:\),\1,g'` ;;
+ esac
+ sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP`
else
sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
fi])
@@ -2113,7 +2255,7 @@ need_version=unknown
case $host_os in
aix3*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a'
shlibpath_var=LIBPATH
@@ -2122,7 +2264,7 @@ aix3*)
;;
aix[[4-9]]*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
need_lib_prefix=no
need_version=no
hardcode_into_libs=yes
@@ -2175,7 +2317,7 @@ amigaos*)
m68k)
library_names_spec='$libname.ixlibrary $libname.a'
# Create ${libname}_ixlibrary.a entries in /sys/libs.
- finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$ECHO "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
;;
esac
;;
@@ -2187,7 +2329,7 @@ beos*)
;;
bsdi[[45]]*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
need_version=no
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
soname_spec='${libname}${release}${shared_ext}$major'
@@ -2206,8 +2348,9 @@ cygwin* | mingw* | pw32* | cegcc*)
need_version=no
need_lib_prefix=no
- case $GCC,$host_os in
- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*)
+ case $GCC,$cc_basename in
+ yes,*)
+ # gcc
library_names_spec='$libname.dll.a'
# DLL is installed to $(libdir)/../bin by postinstall_cmds
postinstall_cmds='base_file=`basename \${file}`~
@@ -2228,36 +2371,83 @@ cygwin* | mingw* | pw32* | cegcc*)
cygwin*)
# Cygwin DLLs use 'cyg' prefix rather than 'lib'
soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
- sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib"
+m4_if([$1], [],[
+ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"])
;;
mingw* | cegcc*)
# MinGW DLLs use traditional 'lib' prefix
soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
- sys_lib_search_path_spec=`$CC -print-search-dirs | $GREP "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"`
- if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then
- # It is most probably a Windows format PATH printed by
- # mingw gcc, but we are running on Cygwin. Gcc prints its search
- # path with ; separators, and with drive letters. We can handle the
- # drive letters (cygwin fileutils understands them), so leave them,
- # especially as we might pass files found there to a mingw objdump,
- # which wouldn't understand a cygwinified path. Ahh.
- sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
- else
- sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
- fi
;;
pw32*)
# pw32 DLLs use 'pw' prefix rather than 'lib'
library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
;;
esac
+ dynamic_linker='Win32 ld.exe'
+ ;;
+
+ *,cl*)
+ # Native MSVC
+ libname_spec='$name'
+ soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
+ library_names_spec='${libname}.dll.lib'
+
+ case $build_os in
+ mingw*)
+ sys_lib_search_path_spec=
+ lt_save_ifs=$IFS
+ IFS=';'
+ for lt_path in $LIB
+ do
+ IFS=$lt_save_ifs
+ # Let DOS variable expansion print the short 8.3 style file name.
+ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
+ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
+ done
+ IFS=$lt_save_ifs
+ # Convert to MSYS style.
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'`
+ ;;
+ cygwin*)
+ # Convert to unix form, then to dos form, then back to unix form
+ # but this time dos style (no spaces!) so that the unix form looks
+ # like /cygdrive/c/PROGRA~1:/cygdr...
+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
+ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
+ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+ ;;
+ *)
+ sys_lib_search_path_spec="$LIB"
+ if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then
+ # It is most probably a Windows format PATH.
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+ else
+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+ fi
+ # FIXME: find the short name or the path components, as spaces are
+ # common. (e.g. "Program Files" -> "PROGRA~1")
+ ;;
+ esac
+
+ # DLL is installed to $(libdir)/../bin by postinstall_cmds
+ postinstall_cmds='base_file=`basename \${file}`~
+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+ dldir=$destdir/`dirname \$dlpath`~
+ test -d \$dldir || mkdir -p \$dldir~
+ $install_prog $dir/$dlname \$dldir/$dlname'
+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+ dlpath=$dir/\$dldll~
+ $RM \$dlpath'
+ shlibpath_overrides_runpath=yes
+ dynamic_linker='Win32 link.exe'
;;
*)
+ # Assume MSVC wrapper
library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib'
+ dynamic_linker='Win32 ld.exe'
;;
esac
- dynamic_linker='Win32 ld.exe'
# FIXME: first we should search . and the directory the executable is in
shlibpath_var=PATH
;;
@@ -2278,7 +2468,7 @@ m4_if([$1], [],[
;;
dgux*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
need_lib_prefix=no
need_version=no
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext'
@@ -2286,10 +2476,6 @@ dgux*)
shlibpath_var=LD_LIBRARY_PATH
;;
-freebsd1*)
- dynamic_linker=no
- ;;
-
freebsd* | dragonfly*)
# DragonFly does not have aout. When/if they implement a new
# versioning mechanism, adjust this.
@@ -2297,7 +2483,7 @@ freebsd* | dragonfly*)
objformat=`/usr/bin/objformat`
else
case $host_os in
- freebsd[[123]]*) objformat=aout ;;
+ freebsd[[23]].*) objformat=aout ;;
*) objformat=elf ;;
esac
fi
@@ -2315,7 +2501,7 @@ freebsd* | dragonfly*)
esac
shlibpath_var=LD_LIBRARY_PATH
case $host_os in
- freebsd2*)
+ freebsd2.*)
shlibpath_overrides_runpath=yes
;;
freebsd3.[[01]]* | freebsdelf3.[[01]]*)
@@ -2335,12 +2521,26 @@ freebsd* | dragonfly*)
;;
gnu*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
need_lib_prefix=no
need_version=no
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
soname_spec='${libname}${release}${shared_ext}$major'
shlibpath_var=LD_LIBRARY_PATH
+ shlibpath_overrides_runpath=no
+ hardcode_into_libs=yes
+ ;;
+
+haiku*)
+ version_type=linux # correct to gnu/linux during the next big refactor
+ need_lib_prefix=no
+ need_version=no
+ dynamic_linker="$host_os runtime_loader"
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
+ soname_spec='${libname}${release}${shared_ext}$major'
+ shlibpath_var=LIBRARY_PATH
+ shlibpath_overrides_runpath=yes
+ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
hardcode_into_libs=yes
;;
@@ -2386,12 +2586,14 @@ hpux9* | hpux10* | hpux11*)
soname_spec='${libname}${release}${shared_ext}$major'
;;
esac
- # HP-UX runs *really* slowly unless shared libraries are mode 555.
+ # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
postinstall_cmds='chmod 555 $lib'
+ # or fails outright, so override atomically:
+ install_override_mode=555
;;
interix[[3-9]]*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
need_lib_prefix=no
need_version=no
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
@@ -2407,7 +2609,7 @@ irix5* | irix6* | nonstopux*)
nonstopux*) version_type=nonstopux ;;
*)
if test "$lt_cv_prog_gnu_ld" = yes; then
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
else
version_type=irix
fi ;;
@@ -2444,9 +2646,9 @@ linux*oldld* | linux*aout* | linux*coff*)
dynamic_linker=no
;;
-# This must be Linux ELF.
-linux* | k*bsd*-gnu)
- version_type=linux
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu)
+ version_type=linux # correct to gnu/linux during the next big refactor
need_lib_prefix=no
need_version=no
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
@@ -2454,16 +2656,21 @@ linux* | k*bsd*-gnu)
finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
shlibpath_var=LD_LIBRARY_PATH
shlibpath_overrides_runpath=no
+
# Some binutils ld are patched to set DT_RUNPATH
- save_LDFLAGS=$LDFLAGS
- save_libdir=$libdir
- eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \
- LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\""
- AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])],
- [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null],
- [shlibpath_overrides_runpath=yes])])
- LDFLAGS=$save_LDFLAGS
- libdir=$save_libdir
+ AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath],
+ [lt_cv_shlibpath_overrides_runpath=no
+ save_LDFLAGS=$LDFLAGS
+ save_libdir=$libdir
+ eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \
+ LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\""
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])],
+ [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null],
+ [lt_cv_shlibpath_overrides_runpath=yes])])
+ LDFLAGS=$save_LDFLAGS
+ libdir=$save_libdir
+ ])
+ shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
# This implies no fast_install, which is unacceptable.
# Some rework will be needed to allow for fast_install
@@ -2475,8 +2682,9 @@ linux* | k*bsd*-gnu)
# Append ld.so.conf contents to the search path
if test -f /etc/ld.so.conf; then
- lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '`
+ lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra"
+
fi
# We used to test for /lib/ld.so.1 and disable shared libraries on
@@ -2507,7 +2715,7 @@ netbsd*)
;;
newsos6)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
shlibpath_var=LD_LIBRARY_PATH
shlibpath_overrides_runpath=yes
@@ -2576,7 +2784,7 @@ rdos*)
;;
solaris*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
need_lib_prefix=no
need_version=no
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
@@ -2601,7 +2809,7 @@ sunos4*)
;;
sysv4 | sysv4.3*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
soname_spec='${libname}${release}${shared_ext}$major'
shlibpath_var=LD_LIBRARY_PATH
@@ -2625,7 +2833,7 @@ sysv4 | sysv4.3*)
sysv4*MP*)
if test -d /usr/nec ;then
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}'
soname_spec='$libname${shared_ext}.$major'
shlibpath_var=LD_LIBRARY_PATH
@@ -2656,7 +2864,7 @@ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
tpf*)
# TPF is a cross-target only. Preferred cross-host = GNU/Linux.
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
need_lib_prefix=no
need_version=no
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
@@ -2666,7 +2874,7 @@ tpf*)
;;
uts4*)
- version_type=linux
+ version_type=linux # correct to gnu/linux during the next big refactor
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
soname_spec='${libname}${release}${shared_ext}$major'
shlibpath_var=LD_LIBRARY_PATH
@@ -2708,6 +2916,8 @@ _LT_DECL([], [library_names_spec], [1],
The last name is the one that the linker finds with -lNAME]])
_LT_DECL([], [soname_spec], [1],
[[The coded name of the library, if different from the real name]])
+_LT_DECL([], [install_override_mode], [1],
+ [Permission mode override for installation of shared libraries])
_LT_DECL([], [postinstall_cmds], [2],
[Command to use after installation of a shared archive])
_LT_DECL([], [postuninstall_cmds], [2],
@@ -2820,6 +3030,7 @@ AC_REQUIRE([AC_CANONICAL_HOST])dnl
AC_REQUIRE([AC_CANONICAL_BUILD])dnl
m4_require([_LT_DECL_SED])dnl
m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_PROG_ECHO_BACKSLASH])dnl
AC_ARG_WITH([gnu-ld],
[AS_HELP_STRING([--with-gnu-ld],
@@ -2941,6 +3152,11 @@ case $reload_flag in
esac
reload_cmds='$LD$reload_flag -o $output$reload_objs'
case $host_os in
+ cygwin* | mingw* | pw32* | cegcc*)
+ if test "$GCC" != yes; then
+ reload_cmds=false
+ fi
+ ;;
darwin*)
if test "$GCC" = yes; then
reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs'
@@ -2949,8 +3165,8 @@ case $host_os in
fi
;;
esac
-_LT_DECL([], [reload_flag], [1], [How to create reloadable object files])dnl
-_LT_DECL([], [reload_cmds], [2])dnl
+_LT_TAGDECL([], [reload_flag], [1], [How to create reloadable object files])dnl
+_LT_TAGDECL([], [reload_cmds], [2])dnl
])# _LT_CMD_RELOAD
@@ -3002,16 +3218,18 @@ mingw* | pw32*)
# Base MSYS/MinGW do not provide the 'file' command needed by
# func_win32_libid shell function, so use a weaker test based on 'objdump',
# unless we find 'file', for example because we are cross-compiling.
- if ( file / ) >/dev/null 2>&1; then
+ # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin.
+ if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then
lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
lt_cv_file_magic_cmd='func_win32_libid'
else
- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?'
+ # Keep this pattern in sync with the one in func_win32_libid.
+ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)'
lt_cv_file_magic_cmd='$OBJDUMP -f'
fi
;;
-cegcc)
+cegcc*)
# use the weaker test based on 'objdump'. See mingw*.
lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?'
lt_cv_file_magic_cmd='$OBJDUMP -f'
@@ -3041,6 +3259,10 @@ gnu*)
lt_cv_deplibs_check_method=pass_all
;;
+haiku*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
hpux10.20* | hpux11*)
lt_cv_file_magic_cmd=/usr/bin/file
case $host_cpu in
@@ -3049,11 +3271,11 @@ hpux10.20* | hpux11*)
lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so
;;
hppa*64*)
- [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]']
+ [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]']
lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl
;;
*)
- lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]].[[0-9]]) shared library'
+ lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library'
lt_cv_file_magic_test_file=/usr/lib/libc.sl
;;
esac
@@ -3074,8 +3296,8 @@ irix5* | irix6* | nonstopux*)
lt_cv_deplibs_check_method=pass_all
;;
-# This must be Linux ELF.
-linux* | k*bsd*-gnu)
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu)
lt_cv_deplibs_check_method=pass_all
;;
@@ -3153,6 +3375,21 @@ tpf*)
;;
esac
])
+
+file_magic_glob=
+want_nocaseglob=no
+if test "$build" = "$host"; then
+ case $host_os in
+ mingw* | pw32*)
+ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then
+ want_nocaseglob=yes
+ else
+ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"`
+ fi
+ ;;
+ esac
+fi
+
file_magic_cmd=$lt_cv_file_magic_cmd
deplibs_check_method=$lt_cv_deplibs_check_method
test -z "$deplibs_check_method" && deplibs_check_method=unknown
@@ -3160,7 +3397,11 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown
_LT_DECL([], [deplibs_check_method], [1],
[Method to check whether dependent libraries are shared objects])
_LT_DECL([], [file_magic_cmd], [1],
- [Command to use when deplibs_check_method == "file_magic"])
+ [Command to use when deplibs_check_method = "file_magic"])
+_LT_DECL([], [file_magic_glob], [1],
+ [How to find potential files when deplibs_check_method = "file_magic"])
+_LT_DECL([], [want_nocaseglob], [1],
+ [Find potential files using nocaseglob when deplibs_check_method = "file_magic"])
])# _LT_CHECK_MAGIC_METHOD
@@ -3217,7 +3458,19 @@ if test "$lt_cv_path_NM" != "no"; then
NM="$lt_cv_path_NM"
else
# Didn't find any BSD compatible name lister, look for dumpbin.
- AC_CHECK_TOOLS(DUMPBIN, ["dumpbin -symbols" "link -dump -symbols"], :)
+ if test -n "$DUMPBIN"; then :
+ # Let the user override the test.
+ else
+ AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :)
+ case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in
+ *COFF*)
+ DUMPBIN="$DUMPBIN -symbols"
+ ;;
+ *)
+ DUMPBIN=:
+ ;;
+ esac
+ fi
AC_SUBST([DUMPBIN])
if test "$DUMPBIN" != ":"; then
NM="$DUMPBIN"
@@ -3230,13 +3483,13 @@ _LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl
AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface],
[lt_cv_nm_interface="BSD nm"
echo "int some_variable = 0;" > conftest.$ac_ext
- (eval echo "\"\$as_me:__oline__: $ac_compile\"" >&AS_MESSAGE_LOG_FD)
+ (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD)
(eval "$ac_compile" 2>conftest.err)
cat conftest.err >&AS_MESSAGE_LOG_FD
- (eval echo "\"\$as_me:__oline__: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD)
+ (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD)
(eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
cat conftest.err >&AS_MESSAGE_LOG_FD
- (eval echo "\"\$as_me:__oline__: output\"" >&AS_MESSAGE_LOG_FD)
+ (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD)
cat conftest.out >&AS_MESSAGE_LOG_FD
if $GREP 'External.*some_variable' conftest.out > /dev/null; then
lt_cv_nm_interface="MS dumpbin"
@@ -3251,6 +3504,67 @@ dnl aclocal-1.4 backwards compatibility:
dnl AC_DEFUN([AM_PROG_NM], [])
dnl AC_DEFUN([AC_PROG_NM], [])
+# _LT_CHECK_SHAREDLIB_FROM_LINKLIB
+# --------------------------------
+# how to determine the name of the shared library
+# associated with a specific link library.
+# -- PORTME fill in with the dynamic library characteristics
+m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB],
+[m4_require([_LT_DECL_EGREP])
+m4_require([_LT_DECL_OBJDUMP])
+m4_require([_LT_DECL_DLLTOOL])
+AC_CACHE_CHECK([how to associate runtime and link libraries],
+lt_cv_sharedlib_from_linklib_cmd,
+[lt_cv_sharedlib_from_linklib_cmd='unknown'
+
+case $host_os in
+cygwin* | mingw* | pw32* | cegcc*)
+ # two different shell functions defined in ltmain.sh
+ # decide which to use based on capabilities of $DLLTOOL
+ case `$DLLTOOL --help 2>&1` in
+ *--identify-strict*)
+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib
+ ;;
+ *)
+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback
+ ;;
+ esac
+ ;;
+*)
+ # fallback: assume linklib IS sharedlib
+ lt_cv_sharedlib_from_linklib_cmd="$ECHO"
+ ;;
+esac
+])
+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd
+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO
+
+_LT_DECL([], [sharedlib_from_linklib_cmd], [1],
+ [Command to associate shared and link libraries])
+])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB
+
+
+# _LT_PATH_MANIFEST_TOOL
+# ----------------------
+# locate the manifest tool
+m4_defun([_LT_PATH_MANIFEST_TOOL],
+[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :)
+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt
+AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool],
+ [lt_cv_path_mainfest_tool=no
+ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD
+ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ if $GREP 'Manifest Tool' conftest.out > /dev/null; then
+ lt_cv_path_mainfest_tool=yes
+ fi
+ rm -f conftest*])
+if test "x$lt_cv_path_mainfest_tool" != xyes; then
+ MANIFEST_TOOL=:
+fi
+_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl
+])# _LT_PATH_MANIFEST_TOOL
+
# LT_LIB_M
# --------
@@ -3259,7 +3573,7 @@ AC_DEFUN([LT_LIB_M],
[AC_REQUIRE([AC_CANONICAL_HOST])dnl
LIBM=
case $host in
-*-*-beos* | *-*-cygwin* | *-*-pw32* | *-*-darwin*)
+*-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*)
# These systems don't have libm, or don't need it
;;
*-ncr-sysv4.3*)
@@ -3287,7 +3601,12 @@ m4_defun([_LT_COMPILER_NO_RTTI],
_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=
if test "$GCC" = yes; then
- _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin'
+ case $cc_basename in
+ nvcc*)
+ _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;;
+ esac
_LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions],
lt_cv_prog_compiler_rtti_exceptions,
@@ -3304,6 +3623,7 @@ _LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1],
m4_defun([_LT_CMD_GLOBAL_SYMBOLS],
[AC_REQUIRE([AC_CANONICAL_HOST])dnl
AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_PROG_AWK])dnl
AC_REQUIRE([LT_PATH_NM])dnl
AC_REQUIRE([LT_PATH_LD])dnl
m4_require([_LT_DECL_SED])dnl
@@ -3371,8 +3691,8 @@ esac
lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
# Transform an extracted symbol line into symbol name and symbol address
-lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'"
-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'"
+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'"
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'"
# Handle CRLF in mingw tool chain
opt_cr=
@@ -3396,6 +3716,7 @@ for ac_symprfx in "" "_"; do
# which start with @ or ?.
lt_cv_sys_global_symbol_pipe="$AWK ['"\
" {last_section=section; section=\$ 3};"\
+" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\
" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\
" \$ 0!~/External *\|/{next};"\
" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\
@@ -3408,6 +3729,7 @@ for ac_symprfx in "" "_"; do
else
lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
fi
+ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'"
# Check to see that the pipe works correctly.
pipe_works=no
@@ -3429,7 +3751,7 @@ _LT_EOF
if AC_TRY_EVAL(ac_compile); then
# Now try to grab the symbols.
nlist=conftest.nm
- if AC_TRY_EVAL(NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) && test -s "$nlist"; then
+ if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then
# Try sorting and uniquifying the output.
if sort "$nlist" | uniq > "$nlist"T; then
mv -f "$nlist"T "$nlist"
@@ -3441,6 +3763,18 @@ _LT_EOF
if $GREP ' nm_test_var$' "$nlist" >/dev/null; then
if $GREP ' nm_test_func$' "$nlist" >/dev/null; then
cat <<_LT_EOF > conftest.$ac_ext
+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */
+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE)
+/* DATA imports from DLLs on WIN32 can't be const, because runtime
+ relocations are performed -- see ld's documentation on pseudo-relocs. */
+# define LT@&t@_DLSYM_CONST
+#elif defined(__osf__)
+/* This system does not cope well with relocations in const data. */
+# define LT@&t@_DLSYM_CONST
+#else
+# define LT@&t@_DLSYM_CONST const
+#endif
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -3452,7 +3786,7 @@ _LT_EOF
cat <<_LT_EOF >> conftest.$ac_ext
/* The mapping between symbol names and symbols. */
-const struct {
+LT@&t@_DLSYM_CONST struct {
const char *name;
void *address;
}
@@ -3478,15 +3812,15 @@ static const void *lt_preloaded_setup() {
_LT_EOF
# Now try linking the two files.
mv conftest.$ac_objext conftstm.$ac_objext
- lt_save_LIBS="$LIBS"
- lt_save_CFLAGS="$CFLAGS"
+ lt_globsym_save_LIBS=$LIBS
+ lt_globsym_save_CFLAGS=$CFLAGS
LIBS="conftstm.$ac_objext"
CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)"
if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then
pipe_works=yes
fi
- LIBS="$lt_save_LIBS"
- CFLAGS="$lt_save_CFLAGS"
+ LIBS=$lt_globsym_save_LIBS
+ CFLAGS=$lt_globsym_save_CFLAGS
else
echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD
fi
@@ -3519,6 +3853,13 @@ else
AC_MSG_RESULT(ok)
fi
+# Response file support.
+if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+ nm_file_list_spec='@'
+elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then
+ nm_file_list_spec='@'
+fi
+
_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1],
[Take the output of nm and produce a listing of raw symbols and C names])
_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1],
@@ -3529,6 +3870,8 @@ _LT_DECL([global_symbol_to_c_name_address],
_LT_DECL([global_symbol_to_c_name_address_lib_prefix],
[lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1],
[Transform the output of nm in a C name address pair when lib prefix is needed])
+_LT_DECL([], [nm_file_list_spec], [1],
+ [Specify filename containing input files for $NM])
]) # _LT_CMD_GLOBAL_SYMBOLS
@@ -3540,7 +3883,6 @@ _LT_TAGVAR(lt_prog_compiler_wl, $1)=
_LT_TAGVAR(lt_prog_compiler_pic, $1)=
_LT_TAGVAR(lt_prog_compiler_static, $1)=
-AC_MSG_CHECKING([for $compiler option to produce PIC])
m4_if([$1], [CXX], [
# C++ specific cases for pic, static, wl, etc.
if test "$GXX" = yes; then
@@ -3591,6 +3933,11 @@ m4_if([$1], [CXX], [
# DJGPP does not support shared libraries at all
_LT_TAGVAR(lt_prog_compiler_pic, $1)=
;;
+ haiku*)
+ # PIC is the default for Haiku.
+ # The "-static" flag exists, but is broken.
+ _LT_TAGVAR(lt_prog_compiler_static, $1)=
+ ;;
interix[[3-9]]*)
# Interix 3.x gcc -fpic/-fPIC options generate broken code.
# Instead, we relocate shared libraries at runtime.
@@ -3640,6 +3987,12 @@ m4_if([$1], [CXX], [
;;
esac
;;
+ mingw* | cygwin* | os2* | pw32* | cegcc*)
+ # This hack is so that the source file can tell whether it is being
+ # built for inclusion in a dll (and should export symbols for example).
+ m4_if([$1], [GCJ], [],
+ [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+ ;;
dgux*)
case $cc_basename in
ec++*)
@@ -3696,7 +4049,7 @@ m4_if([$1], [CXX], [
;;
esac
;;
- linux* | k*bsd*-gnu)
+ linux* | k*bsd*-gnu | kopensolaris*-gnu)
case $cc_basename in
KCC*)
# KAI C++ Compiler
@@ -3729,8 +4082,8 @@ m4_if([$1], [CXX], [
_LT_TAGVAR(lt_prog_compiler_pic, $1)=
_LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
;;
- xlc* | xlC*)
- # IBM XL 8.0 on PPC
+ xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*)
+ # IBM XL 8.0, 9.0 on PPC and BlueGene
_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
_LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic'
_LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink'
@@ -3792,7 +4145,7 @@ m4_if([$1], [CXX], [
;;
solaris*)
case $cc_basename in
- CC*)
+ CC* | sunCC*)
# Sun C++ 4.2, 5.x and Centerline C++
_LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
@@ -3896,6 +4249,12 @@ m4_if([$1], [CXX], [
_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
;;
+ haiku*)
+ # PIC is the default for Haiku.
+ # The "-static" flag exists, but is broken.
+ _LT_TAGVAR(lt_prog_compiler_static, $1)=
+ ;;
+
hpux*)
# PIC is the default for 64-bit PA HP-UX, but not for 32-bit
# PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag
@@ -3938,6 +4297,15 @@ m4_if([$1], [CXX], [
_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
;;
esac
+
+ case $cc_basename in
+ nvcc*) # Cuda Compiler Driver 2.2
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker '
+ if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)"
+ fi
+ ;;
+ esac
else
# PORTME Check for flag to pass linker flags through the system compiler.
case $host_os in
@@ -3980,7 +4348,7 @@ m4_if([$1], [CXX], [
_LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
;;
- linux* | k*bsd*-gnu)
+ linux* | k*bsd*-gnu | kopensolaris*-gnu)
case $cc_basename in
# old Intel for x86_64 which still supported -KPIC.
ecc*)
@@ -4001,7 +4369,13 @@ m4_if([$1], [CXX], [
_LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared'
_LT_TAGVAR(lt_prog_compiler_static, $1)='--static'
;;
- pgcc* | pgf77* | pgf90* | pgf95*)
+ nagfor*)
+ # NAG Fortran compiler
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ ;;
+ pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*)
# Portland Group compilers (*not* the Pentium gcc compiler,
# which looks to be a dead project)
_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
@@ -4013,25 +4387,40 @@ m4_if([$1], [CXX], [
# All Alpha code is PIC.
_LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
;;
- xl*)
- # IBM XL C 8.0/Fortran 10.1 on PPC
+ xl* | bgxl* | bgf* | mpixl*)
+ # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene
_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
_LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic'
_LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink'
;;
*)
case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*)
+ # Sun Fortran 8.3 passes all unrecognized flags to the linker
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)=''
+ ;;
+ *Sun\ F* | *Sun*Fortran*)
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+ ;;
*Sun\ C*)
# Sun C 5.9
_LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
;;
- *Sun\ F*)
- # Sun Fortran 8.3 passes all unrecognized flags to the linker
- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+ *Intel*\ [[CF]]*Compiler*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+ ;;
+ *Portland\ Group*)
+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
- _LT_TAGVAR(lt_prog_compiler_wl, $1)=''
;;
esac
;;
@@ -4063,7 +4452,7 @@ m4_if([$1], [CXX], [
_LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
case $cc_basename in
- f77* | f90* | f95*)
+ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*)
_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';;
*)
_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';;
@@ -4120,9 +4509,11 @@ case $host_os in
_LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])"
;;
esac
-AC_MSG_RESULT([$_LT_TAGVAR(lt_prog_compiler_pic, $1)])
-_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1],
- [How to pass a linker flag through the compiler])
+
+AC_CACHE_CHECK([for $compiler option to produce PIC],
+ [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)],
+ [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)])
+_LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)
#
# Check to make sure the PIC flag actually works.
@@ -4141,6 +4532,8 @@ fi
_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1],
[Additional compiler flags for building library objects])
+_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1],
+ [How to pass a linker flag through the compiler])
#
# Check to make sure the static flag actually works.
#
@@ -4161,6 +4554,7 @@ _LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1],
m4_defun([_LT_LINKER_SHLIBS],
[AC_REQUIRE([LT_PATH_LD])dnl
AC_REQUIRE([LT_PATH_NM])dnl
+m4_require([_LT_PATH_MANIFEST_TOOL])dnl
m4_require([_LT_FILEUTILS_DEFAULTS])dnl
m4_require([_LT_DECL_EGREP])dnl
m4_require([_LT_DECL_SED])dnl
@@ -4169,27 +4563,37 @@ m4_require([_LT_TAG_COMPILER])dnl
AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries])
m4_if([$1], [CXX], [
_LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+ _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*']
case $host_os in
aix[[4-9]]*)
# If we're using GNU nm, then we don't want the "-C" option.
# -C means demangle to AIX nm, but means don't demangle with GNU nm
+ # Also, AIX nm treats weak defined symbols like other global defined
+ # symbols, whereas GNU nm marks them as "W".
if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
- _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
else
_LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
fi
;;
pw32*)
_LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds"
- ;;
+ ;;
cygwin* | mingw* | cegcc*)
- _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;/^.*[[ ]]__nm__/s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols'
- ;;
+ case $cc_basename in
+ cl*)
+ _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+ ;;
+ *)
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols'
+ _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname']
+ ;;
+ esac
+ ;;
*)
_LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
- ;;
+ ;;
esac
- _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*']
], [
runpath_var=
_LT_TAGVAR(allow_undefined_flag, $1)=
@@ -4204,7 +4608,6 @@ m4_if([$1], [CXX], [
_LT_TAGVAR(hardcode_direct, $1)=no
_LT_TAGVAR(hardcode_direct_absolute, $1)=no
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
- _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)=
_LT_TAGVAR(hardcode_libdir_separator, $1)=
_LT_TAGVAR(hardcode_minus_L, $1)=no
_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
@@ -4252,7 +4655,33 @@ dnl Note also adjust exclude_expsyms for C++ above.
esac
_LT_TAGVAR(ld_shlibs, $1)=yes
+
+ # On some targets, GNU ld is compatible enough with the native linker
+ # that we're better off using the native interface for both.
+ lt_use_gnu_ld_interface=no
if test "$with_gnu_ld" = yes; then
+ case $host_os in
+ aix*)
+ # The AIX port of GNU ld has always aspired to compatibility
+ # with the native linker. However, as the warning in the GNU ld
+ # block says, versions before 2.19.5* couldn't really create working
+ # shared libraries, regardless of the interface used.
+ case `$LD -v 2>&1` in
+ *\ \(GNU\ Binutils\)\ 2.19.5*) ;;
+ *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;;
+ *\ \(GNU\ Binutils\)\ [[3-9]]*) ;;
+ *)
+ lt_use_gnu_ld_interface=yes
+ ;;
+ esac
+ ;;
+ *)
+ lt_use_gnu_ld_interface=yes
+ ;;
+ esac
+ fi
+
+ if test "$lt_use_gnu_ld_interface" = yes; then
# If archive_cmds runs LD, not CC, wlarc should be empty
wlarc='${wl}'
@@ -4270,6 +4699,7 @@ dnl Note also adjust exclude_expsyms for C++ above.
fi
supports_anon_versioning=no
case `$LD -v 2>&1` in
+ *GNU\ gold*) supports_anon_versioning=yes ;;
*\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11
*\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
*\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
@@ -4285,11 +4715,12 @@ dnl Note also adjust exclude_expsyms for C++ above.
_LT_TAGVAR(ld_shlibs, $1)=no
cat <<_LT_EOF 1>&2
-*** Warning: the GNU linker, at least up to release 2.9.1, is reported
+*** Warning: the GNU linker, at least up to release 2.19, is reported
*** to be unable to reliably create shared libraries on AIX.
*** Therefore, libtool is disabling shared libraries support. If you
-*** really care for shared libraries, you may want to modify your PATH
-*** so that a non-GNU linker is found, and then restart.
+*** really care for shared libraries, you may want to install binutils
+*** 2.20 or above, or modify your PATH so that a non-GNU linker is found.
+*** You will then need to restart the configuration process.
_LT_EOF
fi
@@ -4325,10 +4756,12 @@ _LT_EOF
# _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless,
# as there is no search path for DLLs.
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols'
_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
_LT_TAGVAR(always_export_symbols, $1)=no
_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
- _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols'
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols'
+ _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname']
if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
_LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
@@ -4346,6 +4779,11 @@ _LT_EOF
fi
;;
+ haiku*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ ;;
+
interix[[3-9]]*)
_LT_TAGVAR(hardcode_direct, $1)=no
_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
@@ -4361,7 +4799,7 @@ _LT_EOF
_LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
;;
- gnu* | linux* | tpf* | k*bsd*-gnu)
+ gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu)
tmp_diet=no
if test "$host_os" = linux-dietlibc; then
case $cc_basename in
@@ -4371,15 +4809,16 @@ _LT_EOF
if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
&& test "$tmp_diet" = no
then
- tmp_addflag=
+ tmp_addflag=' $pic_flag'
tmp_sharedflag='-shared'
case $cc_basename,$host_cpu in
pgcc*) # Portland Group C compiler
- _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
tmp_addflag=' $pic_flag'
;;
- pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers
- _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ pgf77* | pgf90* | pgf95* | pgfortran*)
+ # Portland Group f77 and f90 compilers
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
tmp_addflag=' $pic_flag -Mnomain' ;;
ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64
tmp_addflag=' -i_dynamic' ;;
@@ -4390,13 +4829,17 @@ _LT_EOF
lf95*) # Lahey Fortran 8.1
_LT_TAGVAR(whole_archive_flag_spec, $1)=
tmp_sharedflag='--shared' ;;
- xl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below)
+ xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below)
tmp_sharedflag='-qmkshrobj'
tmp_addflag= ;;
+ nvcc*) # Cuda Compiler Driver 2.2
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ _LT_TAGVAR(compiler_needs_object, $1)=yes
+ ;;
esac
case `$CC -V 2>&1 | sed 5q` in
*Sun\ C*) # Sun C 5.9
- _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
_LT_TAGVAR(compiler_needs_object, $1)=yes
tmp_sharedflag='-G' ;;
*Sun\ F*) # Sun Fortran 8.3
@@ -4412,17 +4855,16 @@ _LT_EOF
fi
case $cc_basename in
- xlf*)
+ xlf* | bgf* | bgxlf* | mpixlf*)
# IBM XL Fortran 10.1 on PPC cannot create shared libs itself
_LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive'
- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
- _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='-rpath $libdir'
- _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib'
if test "x$supports_anon_versioning" = xyes; then
_LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
echo "local: *; };" >> $output_objdir/$libname.ver~
- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
+ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
fi
;;
esac
@@ -4436,8 +4878,8 @@ _LT_EOF
_LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
wlarc=
else
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
fi
;;
@@ -4455,8 +4897,8 @@ _LT_EOF
_LT_EOF
elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
else
_LT_TAGVAR(ld_shlibs, $1)=no
fi
@@ -4502,8 +4944,8 @@ _LT_EOF
*)
if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
else
_LT_TAGVAR(ld_shlibs, $1)=no
fi
@@ -4543,8 +4985,10 @@ _LT_EOF
else
# If we're using GNU nm, then we don't want the "-C" option.
# -C means demangle to AIX nm, but means don't demangle with GNU nm
+ # Also, AIX nm treats weak defined symbols like other global
+ # defined symbols, whereas GNU nm marks them as "W".
if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
- _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
else
_LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
fi
@@ -4631,9 +5075,9 @@ _LT_EOF
_LT_TAGVAR(allow_undefined_flag, $1)='-berok'
# Determine the default libpath from the value encoded in an
# empty executable.
- _LT_SYS_MODULE_PATH_AIX
+ _LT_SYS_MODULE_PATH_AIX([$1])
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
else
if test "$host_cpu" = ia64; then
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib'
@@ -4642,14 +5086,19 @@ _LT_EOF
else
# Determine the default libpath from the value encoded in an
# empty executable.
- _LT_SYS_MODULE_PATH_AIX
+ _LT_SYS_MODULE_PATH_AIX([$1])
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
# Warning - without using the other run time loading flags,
# -berok will link without error, but may produce a broken library.
_LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok'
_LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok'
- # Exported symbols can be pulled into shared objects from archives
- _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
+ if test "$with_gnu_ld" = yes; then
+ # We only use this code for GNU lds that support --whole-archive.
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+ else
+ # Exported symbols can be pulled into shared objects from archives
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
+ fi
_LT_TAGVAR(archive_cmds_need_lc, $1)=yes
# This is similar to how AIX traditionally builds its shared libraries.
_LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
@@ -4681,20 +5130,64 @@ _LT_EOF
# Microsoft Visual C++.
# hardcode_libdir_flag_spec is actually meaningless, as there is
# no search path for DLLs.
- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
- # Tell ltmain to make .lib files, not .a files.
- libext=lib
- # Tell ltmain to make .dll files, not .so files.
- shrext_cmds=".dll"
- # FIXME: Setting linknames here is a bad hack.
- _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `$ECHO "X$deplibs" | $Xsed -e '\''s/ -lc$//'\''` -link -dll~linknames='
- # The linker will automatically build a .lib file if we build a DLL.
- _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
- # FIXME: Should let the user specify the lib program.
- _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs'
- _LT_TAGVAR(fix_srcfile_path, $1)='`cygpath -w "$srcfile"`'
- _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+ case $cc_basename in
+ cl*)
+ # Native MSVC
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ _LT_TAGVAR(always_export_symbols, $1)=yes
+ _LT_TAGVAR(file_list_spec, $1)='@'
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # Tell ltmain to make .dll files, not .so files.
+ shrext_cmds=".dll"
+ # FIXME: Setting linknames here is a bad hack.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames='
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp;
+ else
+ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp;
+ fi~
+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+ linknames='
+ # The linker will not automatically build a static lib if we build a DLL.
+ # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+ _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols'
+ # Don't use ranlib
+ _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib'
+ _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~
+ lt_tool_outputfile="@TOOL_OUTPUT@"~
+ case $lt_outputfile in
+ *.exe|*.EXE) ;;
+ *)
+ lt_outputfile="$lt_outputfile.exe"
+ lt_tool_outputfile="$lt_tool_outputfile.exe"
+ ;;
+ esac~
+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then
+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+ $RM "$lt_outputfile.manifest";
+ fi'
+ ;;
+ *)
+ # Assume MSVC wrapper
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # Tell ltmain to make .dll files, not .so files.
+ shrext_cmds=".dll"
+ # FIXME: Setting linknames here is a bad hack.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames='
+ # The linker will automatically build a .lib file if we build a DLL.
+ _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+ # FIXME: Should let the user specify the lib program.
+ _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs'
+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+ ;;
+ esac
;;
darwin* | rhapsody*)
@@ -4707,10 +5200,6 @@ _LT_EOF
_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
;;
- freebsd1*)
- _LT_TAGVAR(ld_shlibs, $1)=no
- ;;
-
# FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
# support. Future versions do this automatically, but an explicit c++rt0.o
# does not break anything, and helps significantly (at the cost of a little
@@ -4723,7 +5212,7 @@ _LT_EOF
;;
# Unfortunately, older versions of FreeBSD 2 do not have this feature.
- freebsd2*)
+ freebsd2.*)
_LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
_LT_TAGVAR(hardcode_direct, $1)=yes
_LT_TAGVAR(hardcode_minus_L, $1)=yes
@@ -4732,7 +5221,7 @@ _LT_EOF
# FreeBSD 3 and greater uses gcc -shared to do shared libraries.
freebsd* | dragonfly*)
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
_LT_TAGVAR(hardcode_direct, $1)=yes
_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
@@ -4740,7 +5229,7 @@ _LT_EOF
hpux9*)
if test "$GCC" = yes; then
- _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
else
_LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
fi
@@ -4755,14 +5244,13 @@ _LT_EOF
;;
hpux10*)
- if test "$GCC" = yes -a "$with_gnu_ld" = no; then
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ if test "$GCC" = yes && test "$with_gnu_ld" = no; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
else
_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
fi
if test "$with_gnu_ld" = no; then
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
- _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir'
_LT_TAGVAR(hardcode_libdir_separator, $1)=:
_LT_TAGVAR(hardcode_direct, $1)=yes
_LT_TAGVAR(hardcode_direct_absolute, $1)=yes
@@ -4774,16 +5262,16 @@ _LT_EOF
;;
hpux11*)
- if test "$GCC" = yes -a "$with_gnu_ld" = no; then
+ if test "$GCC" = yes && test "$with_gnu_ld" = no; then
case $host_cpu in
hppa*64*)
_LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
;;
ia64*)
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
;;
*)
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
;;
esac
else
@@ -4795,7 +5283,14 @@ _LT_EOF
_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
;;
*)
- _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ m4_if($1, [], [
+ # Older versions of the 11.00 compiler do not understand -b yet
+ # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does)
+ _LT_LINKER_OPTION([if $CC understands -b],
+ _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b],
+ [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'],
+ [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])],
+ [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'])
;;
esac
fi
@@ -4823,19 +5318,34 @@ _LT_EOF
irix5* | irix6* | nonstopux*)
if test "$GCC" = yes; then
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
# Try to use the -exported_symbol ld option, if it does not
# work, assume that -exports_file does not work either and
# implicitly export all symbols.
- save_LDFLAGS="$LDFLAGS"
- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null"
- AC_LINK_IFELSE(int foo(void) {},
- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib'
- )
- LDFLAGS="$save_LDFLAGS"
+ # This should be the same for all languages, so no per-tag cache variable.
+ AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol],
+ [lt_cv_irix_exported_symbol],
+ [save_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null"
+ AC_LINK_IFELSE(
+ [AC_LANG_SOURCE(
+ [AC_LANG_CASE([C], [[int foo (void) { return 0; }]],
+ [C++], [[int foo (void) { return 0; }]],
+ [Fortran 77], [[
+ subroutine foo
+ end]],
+ [Fortran], [[
+ subroutine foo
+ end]])])],
+ [lt_cv_irix_exported_symbol=yes],
+ [lt_cv_irix_exported_symbol=no])
+ LDFLAGS="$save_LDFLAGS"])
+ if test "$lt_cv_irix_exported_symbol" = yes; then
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib'
+ fi
else
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib'
fi
_LT_TAGVAR(archive_cmds_need_lc, $1)='no'
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
@@ -4897,17 +5407,17 @@ _LT_EOF
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
_LT_TAGVAR(hardcode_minus_L, $1)=yes
_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
- _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$ECHO DATA >> $output_objdir/$libname.def~$ECHO " SINGLE NONSHARED" >> $output_objdir/$libname.def~$ECHO EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
+ _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
_LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def'
;;
osf3*)
if test "$GCC" = yes; then
_LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
else
_LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
fi
_LT_TAGVAR(archive_cmds_need_lc, $1)='no'
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
@@ -4917,13 +5427,13 @@ _LT_EOF
osf4* | osf5*) # as osf3* with the addition of -msym flag
if test "$GCC" = yes; then
_LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
else
_LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
_LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~
- $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp'
+ $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp'
# Both c and cxx compiler support -rpath directly
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
@@ -4936,9 +5446,9 @@ _LT_EOF
_LT_TAGVAR(no_undefined_flag, $1)=' -z defs'
if test "$GCC" = yes; then
wlarc='${wl}'
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
_LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
else
case `$CC -V 2>&1` in
*"Compilers 5.0"*)
@@ -5114,36 +5624,38 @@ x|xyes)
# Test whether the compiler implicitly links with -lc since on some
# systems, -lgcc has to come before -lc. If gcc already passes -lc
# to ld, don't add -lc before -lgcc.
- AC_MSG_CHECKING([whether -lc should be explicitly linked in])
- $RM conftest*
- echo "$lt_simple_compile_test_code" > conftest.$ac_ext
-
- if AC_TRY_EVAL(ac_compile) 2>conftest.err; then
- soname=conftest
- lib=conftest
- libobjs=conftest.$ac_objext
- deplibs=
- wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1)
- pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1)
- compiler_flags=-v
- linker_flags=-v
- verstring=
- output_objdir=.
- libname=conftest
- lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1)
- _LT_TAGVAR(allow_undefined_flag, $1)=
- if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1)
- then
- _LT_TAGVAR(archive_cmds_need_lc, $1)=no
- else
- _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
- fi
- _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag
- else
- cat conftest.err 1>&5
- fi
- $RM conftest*
- AC_MSG_RESULT([$_LT_TAGVAR(archive_cmds_need_lc, $1)])
+ AC_CACHE_CHECK([whether -lc should be explicitly linked in],
+ [lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1),
+ [$RM conftest*
+ echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+ if AC_TRY_EVAL(ac_compile) 2>conftest.err; then
+ soname=conftest
+ lib=conftest
+ libobjs=conftest.$ac_objext
+ deplibs=
+ wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1)
+ pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1)
+ compiler_flags=-v
+ linker_flags=-v
+ verstring=
+ output_objdir=.
+ libname=conftest
+ lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1)
+ _LT_TAGVAR(allow_undefined_flag, $1)=
+ if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1)
+ then
+ lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ else
+ lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+ fi
+ _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag
+ else
+ cat conftest.err 1>&5
+ fi
+ $RM conftest*
+ ])
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)
;;
esac
fi
@@ -5180,9 +5692,6 @@ _LT_TAGDECL([], [no_undefined_flag], [1],
_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1],
[Flag to hardcode $libdir into a binary during linking.
This must work even if $libdir does not exist])
-_LT_TAGDECL([], [hardcode_libdir_flag_spec_ld], [1],
- [[If ld is used when linking, flag to hardcode $libdir into a binary
- during linking. This must work even if $libdir does not exist]])
_LT_TAGDECL([], [hardcode_libdir_separator], [1],
[Whether we need a single "-rpath" flag with a separated argument])
_LT_TAGDECL([], [hardcode_direct], [0],
@@ -5208,8 +5717,6 @@ _LT_TAGDECL([], [inherit_rpath], [0],
to runtime path list])
_LT_TAGDECL([], [link_all_deplibs], [0],
[Whether libtool must link a program against all its dependency libraries])
-_LT_TAGDECL([], [fix_srcfile_path], [1],
- [Fix the shell variable $srcfile for the compiler])
_LT_TAGDECL([], [always_export_symbols], [0],
[Set to "yes" if exported symbols are required])
_LT_TAGDECL([], [export_symbols_cmds], [2],
@@ -5220,6 +5727,8 @@ _LT_TAGDECL([], [include_expsyms], [1],
[Symbols that must always be exported])
_LT_TAGDECL([], [prelink_cmds], [2],
[Commands necessary for linking programs (against libraries) with templates])
+_LT_TAGDECL([], [postlink_cmds], [2],
+ [Commands necessary for finishing linking programs])
_LT_TAGDECL([], [file_list_spec], [1],
[Specify filename containing input files])
dnl FIXME: Not yet implemented
@@ -5313,37 +5822,22 @@ CC="$lt_save_CC"
])# _LT_LANG_C_CONFIG
-# _LT_PROG_CXX
-# ------------
-# Since AC_PROG_CXX is broken, in that it returns g++ if there is no c++
-# compiler, we have our own version here.
-m4_defun([_LT_PROG_CXX],
-[
-pushdef([AC_MSG_ERROR], [_lt_caught_CXX_error=yes])
-AC_PROG_CXX
-if test -n "$CXX" && ( test "X$CXX" != "Xno" &&
- ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) ||
- (test "X$CXX" != "Xg++"))) ; then
- AC_PROG_CXXCPP
-else
- _lt_caught_CXX_error=yes
-fi
-popdef([AC_MSG_ERROR])
-])# _LT_PROG_CXX
-
-dnl aclocal-1.4 backwards compatibility:
-dnl AC_DEFUN([_LT_PROG_CXX], [])
-
-
# _LT_LANG_CXX_CONFIG([TAG])
# --------------------------
# Ensure that the configuration variables for a C++ compiler are suitably
# defined. These variables are subsequently used by _LT_CONFIG to write
# the compiler configuration to `libtool'.
m4_defun([_LT_LANG_CXX_CONFIG],
-[AC_REQUIRE([_LT_PROG_CXX])dnl
-m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_PATH_MANIFEST_TOOL])dnl
+if test -n "$CXX" && ( test "X$CXX" != "Xno" &&
+ ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) ||
+ (test "X$CXX" != "Xg++"))) ; then
+ AC_PROG_CXXCPP
+else
+ _lt_caught_CXX_error=yes
+fi
AC_LANG_PUSH(C++)
_LT_TAGVAR(archive_cmds_need_lc, $1)=no
@@ -5355,7 +5849,6 @@ _LT_TAGVAR(export_dynamic_flag_spec, $1)=
_LT_TAGVAR(hardcode_direct, $1)=no
_LT_TAGVAR(hardcode_direct_absolute, $1)=no
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
-_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)=
_LT_TAGVAR(hardcode_libdir_separator, $1)=
_LT_TAGVAR(hardcode_minus_L, $1)=no
_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
@@ -5365,6 +5858,8 @@ _LT_TAGVAR(module_cmds, $1)=
_LT_TAGVAR(module_expsym_cmds, $1)=
_LT_TAGVAR(link_all_deplibs, $1)=unknown
_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
_LT_TAGVAR(no_undefined_flag, $1)=
_LT_TAGVAR(whole_archive_flag_spec, $1)=
_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
@@ -5396,6 +5891,7 @@ if test "$_lt_caught_CXX_error" != yes; then
# Allow CC to be a program name with arguments.
lt_save_CC=$CC
+ lt_save_CFLAGS=$CFLAGS
lt_save_LD=$LD
lt_save_GCC=$GCC
GCC=$GXX
@@ -5413,6 +5909,7 @@ if test "$_lt_caught_CXX_error" != yes; then
fi
test -z "${LDCXX+set}" || LD=$LDCXX
CC=${CXX-"c++"}
+ CFLAGS=$CXXFLAGS
compiler=$CC
_LT_TAGVAR(compiler, $1)=$CC
_LT_CC_BASENAME([$compiler])
@@ -5434,8 +5931,8 @@ if test "$_lt_caught_CXX_error" != yes; then
# Check if GNU C++ uses GNU ld as the underlying linker, since the
# archiving commands below assume that GNU ld is being used.
if test "$with_gnu_ld" = yes; then
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
_LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
@@ -5467,7 +5964,7 @@ if test "$_lt_caught_CXX_error" != yes; then
# Commands to make compiler produce verbose output that lists
# what "hidden" libraries, object files and flags are used when
# linking a shared library.
- output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"'
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
else
GXX=no
@@ -5576,10 +6073,10 @@ if test "$_lt_caught_CXX_error" != yes; then
_LT_TAGVAR(allow_undefined_flag, $1)='-berok'
# Determine the default libpath from the value encoded in an empty
# executable.
- _LT_SYS_MODULE_PATH_AIX
+ _LT_SYS_MODULE_PATH_AIX([$1])
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
else
if test "$host_cpu" = ia64; then
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib'
@@ -5588,14 +6085,19 @@ if test "$_lt_caught_CXX_error" != yes; then
else
# Determine the default libpath from the value encoded in an
# empty executable.
- _LT_SYS_MODULE_PATH_AIX
+ _LT_SYS_MODULE_PATH_AIX([$1])
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
# Warning - without using the other run time loading flags,
# -berok will link without error, but may produce a broken library.
_LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok'
_LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok'
- # Exported symbols can be pulled into shared objects from archives
- _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
+ if test "$with_gnu_ld" = yes; then
+ # We only use this code for GNU lds that support --whole-archive.
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+ else
+ # Exported symbols can be pulled into shared objects from archives
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
+ fi
_LT_TAGVAR(archive_cmds_need_lc, $1)=yes
# This is similar to how AIX traditionally builds its shared
# libraries.
@@ -5625,28 +6127,75 @@ if test "$_lt_caught_CXX_error" != yes; then
;;
cygwin* | mingw* | pw32* | cegcc*)
- # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless,
- # as there is no search path for DLLs.
- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
- _LT_TAGVAR(always_export_symbols, $1)=no
- _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
-
- if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
- # If the export-symbols file already is a .def file (1st line
- # is EXPORTS), use it as is; otherwise, prepend...
- _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
- cp $export_symbols $output_objdir/$soname.def;
- else
- echo EXPORTS > $output_objdir/$soname.def;
- cat $export_symbols >> $output_objdir/$soname.def;
- fi~
- $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
- else
- _LT_TAGVAR(ld_shlibs, $1)=no
- fi
- ;;
+ case $GXX,$cc_basename in
+ ,cl* | no,cl*)
+ # Native MSVC
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ _LT_TAGVAR(always_export_symbols, $1)=yes
+ _LT_TAGVAR(file_list_spec, $1)='@'
+ # Tell ltmain to make .lib files, not .a files.
+ libext=lib
+ # Tell ltmain to make .dll files, not .so files.
+ shrext_cmds=".dll"
+ # FIXME: Setting linknames here is a bad hack.
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames='
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+ $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp;
+ else
+ $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp;
+ fi~
+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+ linknames='
+ # The linker will not automatically build a static lib if we build a DLL.
+ # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+ # Don't use ranlib
+ _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib'
+ _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~
+ lt_tool_outputfile="@TOOL_OUTPUT@"~
+ case $lt_outputfile in
+ *.exe|*.EXE) ;;
+ *)
+ lt_outputfile="$lt_outputfile.exe"
+ lt_tool_outputfile="$lt_tool_outputfile.exe"
+ ;;
+ esac~
+ func_to_tool_file "$lt_outputfile"~
+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then
+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+ $RM "$lt_outputfile.manifest";
+ fi'
+ ;;
+ *)
+ # g++
+ # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless,
+ # as there is no search path for DLLs.
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols'
+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+ _LT_TAGVAR(always_export_symbols, $1)=no
+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+
+ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ # If the export-symbols file already is a .def file (1st line
+ # is EXPORTS), use it as is; otherwise, prepend...
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+ cp $export_symbols $output_objdir/$soname.def;
+ else
+ echo EXPORTS > $output_objdir/$soname.def;
+ cat $export_symbols >> $output_objdir/$soname.def;
+ fi~
+ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+ else
+ _LT_TAGVAR(ld_shlibs, $1)=no
+ fi
+ ;;
+ esac
+ ;;
darwin* | rhapsody*)
_LT_DARWIN_LINKER_FEATURES($1)
;;
@@ -5669,7 +6218,7 @@ if test "$_lt_caught_CXX_error" != yes; then
esac
;;
- freebsd[[12]]*)
+ freebsd2.*)
# C++ shared libraries reported to be fairly broken before
# switch to ELF
_LT_TAGVAR(ld_shlibs, $1)=no
@@ -5688,6 +6237,11 @@ if test "$_lt_caught_CXX_error" != yes; then
gnu*)
;;
+ haiku*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(link_all_deplibs, $1)=yes
+ ;;
+
hpux9*)
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
_LT_TAGVAR(hardcode_libdir_separator, $1)=:
@@ -5712,11 +6266,11 @@ if test "$_lt_caught_CXX_error" != yes; then
# explicitly linking system object files so we need to strip them
# from the output so that they don't get included in the library
# dependencies.
- output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed'
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
;;
*)
if test "$GXX" = yes; then
- _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
else
# FIXME: insert proper C++ library support
_LT_TAGVAR(ld_shlibs, $1)=no
@@ -5777,7 +6331,7 @@ if test "$_lt_caught_CXX_error" != yes; then
# explicitly linking system object files so we need to strip them
# from the output so that they don't get included in the library
# dependencies.
- output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed'
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
;;
*)
if test "$GXX" = yes; then
@@ -5787,10 +6341,10 @@ if test "$_lt_caught_CXX_error" != yes; then
_LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
;;
ia64*)
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
;;
*)
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
;;
esac
fi
@@ -5820,7 +6374,7 @@ if test "$_lt_caught_CXX_error" != yes; then
case $cc_basename in
CC*)
# SGI C++
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
# Archives containing C++ object files must be created using
# "CC -ar", where "CC" is the IRIX C++ compiler. This is
@@ -5831,9 +6385,9 @@ if test "$_lt_caught_CXX_error" != yes; then
*)
if test "$GXX" = yes; then
if test "$with_gnu_ld" = no; then
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
else
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib'
fi
fi
_LT_TAGVAR(link_all_deplibs, $1)=yes
@@ -5844,7 +6398,7 @@ if test "$_lt_caught_CXX_error" != yes; then
_LT_TAGVAR(inherit_rpath, $1)=yes
;;
- linux* | k*bsd*-gnu)
+ linux* | k*bsd*-gnu | kopensolaris*-gnu)
case $cc_basename in
KCC*)
# Kuck and Associates, Inc. (KAI) C++ Compiler
@@ -5862,7 +6416,7 @@ if test "$_lt_caught_CXX_error" != yes; then
# explicitly linking system object files so we need to strip them
# from the output so that they don't get included in the library
# dependencies.
- output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed'
+ output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
_LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
@@ -5899,26 +6453,26 @@ if test "$_lt_caught_CXX_error" != yes; then
pgCC* | pgcpp*)
# Portland Group C++ compiler
case `$CC -V` in
- *pgCC\ [[1-5]]* | *pgcpp\ [[1-5]]*)
+ *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*)
_LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~
rm -rf $tpldir~
$CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~
- compile_command="$compile_command `find $tpldir -name \*.o | $NL2SP`"'
+ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"'
_LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~
rm -rf $tpldir~
$CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~
- $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | $NL2SP`~
+ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~
$RANLIB $oldlib'
_LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~
rm -rf $tpldir~
$CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
- $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
+ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
_LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~
rm -rf $tpldir~
$CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
- $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
;;
- *) # Version 6 will use weak symbols
+ *) # Version 6 and above use weak symbols
_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
;;
@@ -5926,7 +6480,7 @@ if test "$_lt_caught_CXX_error" != yes; then
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir'
_LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
- _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
;;
cxx*)
# Compaq C++
@@ -5945,9 +6499,9 @@ if test "$_lt_caught_CXX_error" != yes; then
# explicitly linking system object files so we need to strip them
# from the output so that they don't get included in the library
# dependencies.
- output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed'
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed'
;;
- xl*)
+ xl* | mpixl* | bgxl*)
# IBM XL 8.0 on PPC, with GNU ld
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
_LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
@@ -5967,13 +6521,13 @@ if test "$_lt_caught_CXX_error" != yes; then
_LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols'
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
- _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive'
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
_LT_TAGVAR(compiler_needs_object, $1)=yes
# Not sure whether something based on
# $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1
# would be better.
- output_verbose_link_cmd='echo'
+ output_verbose_link_cmd='func_echo_all'
# Archives containing C++ object files must be created using
# "CC -xar", where "CC" is the Sun C++ compiler. This is
@@ -6042,7 +6596,7 @@ if test "$_lt_caught_CXX_error" != yes; then
_LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
_LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
fi
- output_verbose_link_cmd=echo
+ output_verbose_link_cmd=func_echo_all
else
_LT_TAGVAR(ld_shlibs, $1)=no
fi
@@ -6077,15 +6631,15 @@ if test "$_lt_caught_CXX_error" != yes; then
case $host in
osf3*)
_LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && $ECHO "X${wl}-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
;;
*)
_LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
_LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~
echo "-hidden">> $lib.exp~
- $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~
+ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~
$RM $lib.exp'
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
;;
@@ -6101,17 +6655,17 @@ if test "$_lt_caught_CXX_error" != yes; then
# explicitly linking system object files so we need to strip them
# from the output so that they don't get included in the library
# dependencies.
- output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed'
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
;;
*)
if test "$GXX" = yes && test "$with_gnu_ld" = no; then
_LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
case $host in
osf3*)
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
;;
*)
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
;;
esac
@@ -6121,7 +6675,7 @@ if test "$_lt_caught_CXX_error" != yes; then
# Commands to make compiler produce verbose output that lists
# what "hidden" libraries, object files and flags are used when
# linking a shared library.
- output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"'
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
else
# FIXME: insert proper C++ library support
@@ -6157,7 +6711,7 @@ if test "$_lt_caught_CXX_error" != yes; then
solaris*)
case $cc_basename in
- CC*)
+ CC* | sunCC*)
# Sun C++ 4.2, 5.x and Centerline C++
_LT_TAGVAR(archive_cmds_need_lc,$1)=yes
_LT_TAGVAR(no_undefined_flag, $1)=' -zdefs'
@@ -6178,7 +6732,7 @@ if test "$_lt_caught_CXX_error" != yes; then
esac
_LT_TAGVAR(link_all_deplibs, $1)=yes
- output_verbose_link_cmd='echo'
+ output_verbose_link_cmd='func_echo_all'
# Archives containing C++ object files must be created using
# "CC -xar", where "CC" is the Sun C++ compiler. This is
@@ -6198,14 +6752,14 @@ if test "$_lt_caught_CXX_error" != yes; then
if test "$GXX" = yes && test "$with_gnu_ld" = no; then
_LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs'
if $CC --version | $GREP -v '^2\.7' > /dev/null; then
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
_LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
- $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+ $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
# Commands to make compiler produce verbose output that lists
# what "hidden" libraries, object files and flags are used when
# linking a shared library.
- output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"'
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
else
# g++ 2.7 appears to require `-G' NOT `-shared' on this
# platform.
@@ -6216,7 +6770,7 @@ if test "$_lt_caught_CXX_error" != yes; then
# Commands to make compiler produce verbose output that lists
# what "hidden" libraries, object files and flags are used when
# linking a shared library.
- output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"'
+ output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
fi
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir'
@@ -6270,6 +6824,10 @@ if test "$_lt_caught_CXX_error" != yes; then
CC*)
_LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~
+ '"$_LT_TAGVAR(old_archive_cmds, $1)"
+ _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~
+ '"$_LT_TAGVAR(reload_cmds, $1)"
;;
*)
_LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
@@ -6325,6 +6883,7 @@ if test "$_lt_caught_CXX_error" != yes; then
fi # test -n "$compiler"
CC=$lt_save_CC
+ CFLAGS=$lt_save_CFLAGS
LDCXX=$LD
LD=$lt_save_LD
GCC=$lt_save_GCC
@@ -6339,6 +6898,29 @@ AC_LANG_POP
])# _LT_LANG_CXX_CONFIG
+# _LT_FUNC_STRIPNAME_CNF
+# ----------------------
+# func_stripname_cnf prefix suffix name
+# strip PREFIX and SUFFIX off of NAME.
+# PREFIX and SUFFIX must not contain globbing or regex special
+# characters, hashes, percent signs, but SUFFIX may contain a leading
+# dot (in which case that matches only a dot).
+#
+# This function is identical to the (non-XSI) version of func_stripname,
+# except this one can be used by m4 code that may be executed by configure,
+# rather than the libtool script.
+m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl
+AC_REQUIRE([_LT_DECL_SED])
+AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])
+func_stripname_cnf ()
+{
+ case ${2} in
+ .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;;
+ *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;;
+ esac
+} # func_stripname_cnf
+])# _LT_FUNC_STRIPNAME_CNF
+
# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME])
# ---------------------------------
# Figure out "hidden" library dependencies from verbose
@@ -6347,6 +6929,7 @@ AC_LANG_POP
# objects, libraries and library flags.
m4_defun([_LT_SYS_HIDDEN_LIBDEPS],
[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl
# Dependencies to place before and after the object being linked:
_LT_TAGVAR(predep_objects, $1)=
_LT_TAGVAR(postdep_objects, $1)=
@@ -6396,7 +6979,20 @@ public class foo {
}
};
_LT_EOF
+], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF
+package foo
+func foo() {
+}
+_LT_EOF
])
+
+_lt_libdeps_save_CFLAGS=$CFLAGS
+case "$CC $CFLAGS " in #(
+*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;;
+*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;;
+*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;;
+esac
+
dnl Parse the compiler output and extract the necessary
dnl objects, libraries and library flags.
if AC_TRY_EVAL(ac_compile); then
@@ -6408,7 +7004,7 @@ if AC_TRY_EVAL(ac_compile); then
pre_test_object_deps_done=no
for p in `eval "$output_verbose_link_cmd"`; do
- case $p in
+ case ${prev}${p} in
-L* | -R* | -l*)
# Some compilers place space between "-{L,R}" and the path.
@@ -6417,13 +7013,22 @@ if AC_TRY_EVAL(ac_compile); then
test $p = "-R"; then
prev=$p
continue
- else
- prev=
fi
+ # Expand the sysroot to ease extracting the directories later.
+ if test -z "$prev"; then
+ case $p in
+ -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
+ -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
+ -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
+ esac
+ fi
+ case $p in
+ =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
+ esac
if test "$pre_test_object_deps_done" = no; then
- case $p in
- -L* | -R*)
+ case ${prev} in
+ -L | -R)
# Internal compiler library paths should come after those
# provided the user. The postdeps already come after the
# user supplied libs so there is no need to process them.
@@ -6443,8 +7048,10 @@ if AC_TRY_EVAL(ac_compile); then
_LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}"
fi
fi
+ prev=
;;
+ *.lto.$objext) ;; # Ignore GCC LTO objects
*.$objext)
# This assumes that the test object file only shows up
# once in the compiler output.
@@ -6480,6 +7087,7 @@ else
fi
$RM -f confest.$objext
+CFLAGS=$_lt_libdeps_save_CFLAGS
# PORTME: override above test on systems where it is broken
m4_if([$1], [CXX],
@@ -6516,7 +7124,7 @@ linux*)
solaris*)
case $cc_basename in
- CC*)
+ CC* | sunCC*)
# The more standards-conforming stlport4 library is
# incompatible with the Cstd library. Avoid specifying
# it if it's in CXXFLAGS. Ignore libCrun as
@@ -6560,32 +7168,16 @@ _LT_TAGDECL([], [compiler_lib_search_path], [1],
])# _LT_SYS_HIDDEN_LIBDEPS
-# _LT_PROG_F77
-# ------------
-# Since AC_PROG_F77 is broken, in that it returns the empty string
-# if there is no fortran compiler, we have our own version here.
-m4_defun([_LT_PROG_F77],
-[
-pushdef([AC_MSG_ERROR], [_lt_disable_F77=yes])
-AC_PROG_F77
-if test -z "$F77" || test "X$F77" = "Xno"; then
- _lt_disable_F77=yes
-fi
-popdef([AC_MSG_ERROR])
-])# _LT_PROG_F77
-
-dnl aclocal-1.4 backwards compatibility:
-dnl AC_DEFUN([_LT_PROG_F77], [])
-
-
# _LT_LANG_F77_CONFIG([TAG])
# --------------------------
# Ensure that the configuration variables for a Fortran 77 compiler are
# suitably defined. These variables are subsequently used by _LT_CONFIG
# to write the compiler configuration to `libtool'.
m4_defun([_LT_LANG_F77_CONFIG],
-[AC_REQUIRE([_LT_PROG_F77])dnl
-AC_LANG_PUSH(Fortran 77)
+[AC_LANG_PUSH(Fortran 77)
+if test -z "$F77" || test "X$F77" = "Xno"; then
+ _lt_disable_F77=yes
+fi
_LT_TAGVAR(archive_cmds_need_lc, $1)=no
_LT_TAGVAR(allow_undefined_flag, $1)=
@@ -6595,7 +7187,6 @@ _LT_TAGVAR(export_dynamic_flag_spec, $1)=
_LT_TAGVAR(hardcode_direct, $1)=no
_LT_TAGVAR(hardcode_direct_absolute, $1)=no
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
-_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)=
_LT_TAGVAR(hardcode_libdir_separator, $1)=
_LT_TAGVAR(hardcode_minus_L, $1)=no
_LT_TAGVAR(hardcode_automatic, $1)=no
@@ -6604,6 +7195,8 @@ _LT_TAGVAR(module_cmds, $1)=
_LT_TAGVAR(module_expsym_cmds, $1)=
_LT_TAGVAR(link_all_deplibs, $1)=unknown
_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
_LT_TAGVAR(no_undefined_flag, $1)=
_LT_TAGVAR(whole_archive_flag_spec, $1)=
_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
@@ -6643,7 +7236,9 @@ if test "$_lt_disable_F77" != yes; then
# Allow CC to be a program name with arguments.
lt_save_CC="$CC"
lt_save_GCC=$GCC
+ lt_save_CFLAGS=$CFLAGS
CC=${F77-"f77"}
+ CFLAGS=$FFLAGS
compiler=$CC
_LT_TAGVAR(compiler, $1)=$CC
_LT_CC_BASENAME([$compiler])
@@ -6697,38 +7292,24 @@ if test "$_lt_disable_F77" != yes; then
GCC=$lt_save_GCC
CC="$lt_save_CC"
+ CFLAGS="$lt_save_CFLAGS"
fi # test "$_lt_disable_F77" != yes
AC_LANG_POP
])# _LT_LANG_F77_CONFIG
-# _LT_PROG_FC
-# -----------
-# Since AC_PROG_FC is broken, in that it returns the empty string
-# if there is no fortran compiler, we have our own version here.
-m4_defun([_LT_PROG_FC],
-[
-pushdef([AC_MSG_ERROR], [_lt_disable_FC=yes])
-AC_PROG_FC
-if test -z "$FC" || test "X$FC" = "Xno"; then
- _lt_disable_FC=yes
-fi
-popdef([AC_MSG_ERROR])
-])# _LT_PROG_FC
-
-dnl aclocal-1.4 backwards compatibility:
-dnl AC_DEFUN([_LT_PROG_FC], [])
-
-
# _LT_LANG_FC_CONFIG([TAG])
# -------------------------
# Ensure that the configuration variables for a Fortran compiler are
# suitably defined. These variables are subsequently used by _LT_CONFIG
# to write the compiler configuration to `libtool'.
m4_defun([_LT_LANG_FC_CONFIG],
-[AC_REQUIRE([_LT_PROG_FC])dnl
-AC_LANG_PUSH(Fortran)
+[AC_LANG_PUSH(Fortran)
+
+if test -z "$FC" || test "X$FC" = "Xno"; then
+ _lt_disable_FC=yes
+fi
_LT_TAGVAR(archive_cmds_need_lc, $1)=no
_LT_TAGVAR(allow_undefined_flag, $1)=
@@ -6738,7 +7319,6 @@ _LT_TAGVAR(export_dynamic_flag_spec, $1)=
_LT_TAGVAR(hardcode_direct, $1)=no
_LT_TAGVAR(hardcode_direct_absolute, $1)=no
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
-_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)=
_LT_TAGVAR(hardcode_libdir_separator, $1)=
_LT_TAGVAR(hardcode_minus_L, $1)=no
_LT_TAGVAR(hardcode_automatic, $1)=no
@@ -6747,6 +7327,8 @@ _LT_TAGVAR(module_cmds, $1)=
_LT_TAGVAR(module_expsym_cmds, $1)=
_LT_TAGVAR(link_all_deplibs, $1)=unknown
_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
_LT_TAGVAR(no_undefined_flag, $1)=
_LT_TAGVAR(whole_archive_flag_spec, $1)=
_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
@@ -6786,7 +7368,9 @@ if test "$_lt_disable_FC" != yes; then
# Allow CC to be a program name with arguments.
lt_save_CC="$CC"
lt_save_GCC=$GCC
+ lt_save_CFLAGS=$CFLAGS
CC=${FC-"f95"}
+ CFLAGS=$FCFLAGS
compiler=$CC
GCC=$ac_cv_fc_compiler_gnu
@@ -6842,7 +7426,8 @@ if test "$_lt_disable_FC" != yes; then
fi # test -n "$compiler"
GCC=$lt_save_GCC
- CC="$lt_save_CC"
+ CC=$lt_save_CC
+ CFLAGS=$lt_save_CFLAGS
fi # test "$_lt_disable_FC" != yes
AC_LANG_POP
@@ -6879,10 +7464,12 @@ _LT_COMPILER_BOILERPLATE
_LT_LINKER_BOILERPLATE
# Allow CC to be a program name with arguments.
-lt_save_CC="$CC"
+lt_save_CC=$CC
+lt_save_CFLAGS=$CFLAGS
lt_save_GCC=$GCC
GCC=yes
CC=${GCJ-"gcj"}
+CFLAGS=$GCJFLAGS
compiler=$CC
_LT_TAGVAR(compiler, $1)=$CC
_LT_TAGVAR(LD, $1)="$LD"
@@ -6892,6 +7479,8 @@ _LT_CC_BASENAME([$compiler])
_LT_TAGVAR(archive_cmds_need_lc, $1)=no
_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
## CAVEAT EMPTOR:
## There is no encapsulation within the following macros, do not change
@@ -6911,10 +7500,82 @@ fi
AC_LANG_RESTORE
GCC=$lt_save_GCC
-CC="$lt_save_CC"
+CC=$lt_save_CC
+CFLAGS=$lt_save_CFLAGS
])# _LT_LANG_GCJ_CONFIG
+# _LT_LANG_GO_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for the GNU Go compiler
+# are suitably defined. These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_GO_CONFIG],
+[AC_REQUIRE([LT_PROG_GO])dnl
+AC_LANG_SAVE
+
+# Source file extension for Go test sources.
+ac_ext=go
+
+# Object file extension for compiled Go test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="package main; func main() { }"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='package main; func main() { }'
+
+# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+_LT_TAG_COMPILER
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+# Allow CC to be a program name with arguments.
+lt_save_CC=$CC
+lt_save_CFLAGS=$CFLAGS
+lt_save_GCC=$GCC
+GCC=yes
+CC=${GOC-"gccgo"}
+CFLAGS=$GOFLAGS
+compiler=$CC
+_LT_TAGVAR(compiler, $1)=$CC
+_LT_TAGVAR(LD, $1)="$LD"
+_LT_CC_BASENAME([$compiler])
+
+# Go did not exist at the time GCC didn't implicitly link libc in.
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+
+## CAVEAT EMPTOR:
+## There is no encapsulation within the following macros, do not change
+## the running order or otherwise move them around unless you know exactly
+## what you are doing...
+if test -n "$compiler"; then
+ _LT_COMPILER_NO_RTTI($1)
+ _LT_COMPILER_PIC($1)
+ _LT_COMPILER_C_O($1)
+ _LT_COMPILER_FILE_LOCKS($1)
+ _LT_LINKER_SHLIBS($1)
+ _LT_LINKER_HARDCODE_LIBPATH($1)
+
+ _LT_CONFIG($1)
+fi
+
+AC_LANG_RESTORE
+
+GCC=$lt_save_GCC
+CC=$lt_save_CC
+CFLAGS=$lt_save_CFLAGS
+])# _LT_LANG_GO_CONFIG
+
+
# _LT_LANG_RC_CONFIG([TAG])
# -------------------------
# Ensure that the configuration variables for the Windows resource compiler
@@ -6946,9 +7607,11 @@ _LT_LINKER_BOILERPLATE
# Allow CC to be a program name with arguments.
lt_save_CC="$CC"
+lt_save_CFLAGS=$CFLAGS
lt_save_GCC=$GCC
GCC=
CC=${RC-"windres"}
+CFLAGS=
compiler=$CC
_LT_TAGVAR(compiler, $1)=$CC
_LT_CC_BASENAME([$compiler])
@@ -6961,7 +7624,8 @@ fi
GCC=$lt_save_GCC
AC_LANG_RESTORE
-CC="$lt_save_CC"
+CC=$lt_save_CC
+CFLAGS=$lt_save_CFLAGS
])# _LT_LANG_RC_CONFIG
@@ -6981,6 +7645,13 @@ dnl aclocal-1.4 backwards compatibility:
dnl AC_DEFUN([LT_AC_PROG_GCJ], [])
+# LT_PROG_GO
+# ----------
+AC_DEFUN([LT_PROG_GO],
+[AC_CHECK_TOOL(GOC, gccgo,)
+])
+
+
# LT_PROG_RC
# ----------
AC_DEFUN([LT_PROG_RC],
@@ -7020,6 +7691,15 @@ _LT_DECL([], [OBJDUMP], [1], [An object symbol dumper])
AC_SUBST([OBJDUMP])
])
+# _LT_DECL_DLLTOOL
+# ----------------
+# Ensure DLLTOOL variable is set.
+m4_defun([_LT_DECL_DLLTOOL],
+[AC_CHECK_TOOL(DLLTOOL, dlltool, false)
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+_LT_DECL([], [DLLTOOL], [1], [DLL creation program])
+AC_SUBST([DLLTOOL])
+])
# _LT_DECL_SED
# ------------
@@ -7113,8 +7793,8 @@ m4_defun([_LT_CHECK_SHELL_FEATURES],
# Try some XSI features
xsi_shell=no
( _lt_dummy="a/b/c"
- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \
- = c,a/b,, \
+ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \
+ = c,a/b,b/c, \
&& eval 'test $(( 1 + 1 )) -eq 2 \
&& test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \
&& xsi_shell=yes
@@ -7153,208 +7833,162 @@ _LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl
])# _LT_CHECK_SHELL_FEATURES
-# _LT_PROG_XSI_SHELLFNS
-# ---------------------
-# Bourne and XSI compatible variants of some useful shell functions.
-m4_defun([_LT_PROG_XSI_SHELLFNS],
-[case $xsi_shell in
- yes)
- cat << \_LT_EOF >> "$cfgfile"
-
-# func_dirname file append nondir_replacement
-# Compute the dirname of FILE. If nonempty, add APPEND to the result,
-# otherwise set result to NONDIR_REPLACEMENT.
-func_dirname ()
-{
- case ${1} in
- */*) func_dirname_result="${1%/*}${2}" ;;
- * ) func_dirname_result="${3}" ;;
- esac
-}
-
-# func_basename file
-func_basename ()
-{
- func_basename_result="${1##*/}"
-}
-
-# func_dirname_and_basename file append nondir_replacement
-# perform func_basename and func_dirname in a single function
-# call:
-# dirname: Compute the dirname of FILE. If nonempty,
-# add APPEND to the result, otherwise set result
-# to NONDIR_REPLACEMENT.
-# value returned in "$func_dirname_result"
-# basename: Compute filename of FILE.
-# value retuned in "$func_basename_result"
-# Implementation must be kept synchronized with func_dirname
-# and func_basename. For efficiency, we do not delegate to
-# those functions but instead duplicate the functionality here.
-func_dirname_and_basename ()
-{
- case ${1} in
- */*) func_dirname_result="${1%/*}${2}" ;;
- * ) func_dirname_result="${3}" ;;
- esac
- func_basename_result="${1##*/}"
-}
-
-# func_stripname prefix suffix name
-# strip PREFIX and SUFFIX off of NAME.
-# PREFIX and SUFFIX must not contain globbing or regex special
-# characters, hashes, percent signs, but SUFFIX may contain a leading
-# dot (in which case that matches only a dot).
-func_stripname ()
-{
- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are
- # positional parameters, so assign one to ordinary parameter first.
- func_stripname_result=${3}
- func_stripname_result=${func_stripname_result#"${1}"}
- func_stripname_result=${func_stripname_result%"${2}"}
-}
-
-# func_opt_split
-func_opt_split ()
-{
- func_opt_split_opt=${1%%=*}
- func_opt_split_arg=${1#*=}
-}
-
-# func_lo2o object
-func_lo2o ()
-{
- case ${1} in
- *.lo) func_lo2o_result=${1%.lo}.${objext} ;;
- *) func_lo2o_result=${1} ;;
- esac
-}
-
-# func_xform libobj-or-source
-func_xform ()
-{
- func_xform_result=${1%.*}.lo
-}
-
-# func_arith arithmetic-term...
-func_arith ()
-{
- func_arith_result=$(( $[*] ))
-}
-
-# func_len string
-# STRING may not start with a hyphen.
-func_len ()
-{
- func_len_result=${#1}
-}
+# _LT_PROG_FUNCTION_REPLACE (FUNCNAME, REPLACEMENT-BODY)
+# ------------------------------------------------------
+# In `$cfgfile', look for function FUNCNAME delimited by `^FUNCNAME ()$' and
+# '^} FUNCNAME ', and replace its body with REPLACEMENT-BODY.
+m4_defun([_LT_PROG_FUNCTION_REPLACE],
+[dnl {
+sed -e '/^$1 ()$/,/^} # $1 /c\
+$1 ()\
+{\
+m4_bpatsubsts([$2], [$], [\\], [^\([ ]\)], [\\\1])
+} # Extended-shell $1 implementation' "$cfgfile" > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+])
-_LT_EOF
- ;;
- *) # Bourne compatible functions.
- cat << \_LT_EOF >> "$cfgfile"
-# func_dirname file append nondir_replacement
-# Compute the dirname of FILE. If nonempty, add APPEND to the result,
-# otherwise set result to NONDIR_REPLACEMENT.
-func_dirname ()
-{
- # Extract subdirectory from the argument.
- func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"`
- if test "X$func_dirname_result" = "X${1}"; then
- func_dirname_result="${3}"
- else
- func_dirname_result="$func_dirname_result${2}"
- fi
-}
+# _LT_PROG_REPLACE_SHELLFNS
+# -------------------------
+# Replace existing portable implementations of several shell functions with
+# equivalent extended shell implementations where those features are available.
+m4_defun([_LT_PROG_REPLACE_SHELLFNS],
+[if test x"$xsi_shell" = xyes; then
+ _LT_PROG_FUNCTION_REPLACE([func_dirname], [dnl
+ case ${1} in
+ */*) func_dirname_result="${1%/*}${2}" ;;
+ * ) func_dirname_result="${3}" ;;
+ esac])
+
+ _LT_PROG_FUNCTION_REPLACE([func_basename], [dnl
+ func_basename_result="${1##*/}"])
+
+ _LT_PROG_FUNCTION_REPLACE([func_dirname_and_basename], [dnl
+ case ${1} in
+ */*) func_dirname_result="${1%/*}${2}" ;;
+ * ) func_dirname_result="${3}" ;;
+ esac
+ func_basename_result="${1##*/}"])
-# func_basename file
-func_basename ()
-{
- func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"`
-}
+ _LT_PROG_FUNCTION_REPLACE([func_stripname], [dnl
+ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are
+ # positional parameters, so assign one to ordinary parameter first.
+ func_stripname_result=${3}
+ func_stripname_result=${func_stripname_result#"${1}"}
+ func_stripname_result=${func_stripname_result%"${2}"}])
-dnl func_dirname_and_basename
-dnl A portable version of this function is already defined in general.m4sh
-dnl so there is no need for it here.
+ _LT_PROG_FUNCTION_REPLACE([func_split_long_opt], [dnl
+ func_split_long_opt_name=${1%%=*}
+ func_split_long_opt_arg=${1#*=}])
-# func_stripname prefix suffix name
-# strip PREFIX and SUFFIX off of NAME.
-# PREFIX and SUFFIX must not contain globbing or regex special
-# characters, hashes, percent signs, but SUFFIX may contain a leading
-# dot (in which case that matches only a dot).
-# func_strip_suffix prefix name
-func_stripname ()
-{
- case ${2} in
- .*) func_stripname_result=`$ECHO "X${3}" \
- | $Xsed -e "s%^${1}%%" -e "s%\\\\${2}\$%%"`;;
- *) func_stripname_result=`$ECHO "X${3}" \
- | $Xsed -e "s%^${1}%%" -e "s%${2}\$%%"`;;
- esac
-}
+ _LT_PROG_FUNCTION_REPLACE([func_split_short_opt], [dnl
+ func_split_short_opt_arg=${1#??}
+ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}])
-# sed scripts:
-my_sed_long_opt='1s/^\(-[[^=]]*\)=.*/\1/;q'
-my_sed_long_arg='1s/^-[[^=]]*=//'
+ _LT_PROG_FUNCTION_REPLACE([func_lo2o], [dnl
+ case ${1} in
+ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;
+ *) func_lo2o_result=${1} ;;
+ esac])
-# func_opt_split
-func_opt_split ()
-{
- func_opt_split_opt=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_opt"`
- func_opt_split_arg=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_arg"`
-}
+ _LT_PROG_FUNCTION_REPLACE([func_xform], [ func_xform_result=${1%.*}.lo])
-# func_lo2o object
-func_lo2o ()
-{
- func_lo2o_result=`$ECHO "X${1}" | $Xsed -e "$lo2o"`
-}
+ _LT_PROG_FUNCTION_REPLACE([func_arith], [ func_arith_result=$(( $[*] ))])
-# func_xform libobj-or-source
-func_xform ()
-{
- func_xform_result=`$ECHO "X${1}" | $Xsed -e 's/\.[[^.]]*$/.lo/'`
-}
+ _LT_PROG_FUNCTION_REPLACE([func_len], [ func_len_result=${#1}])
+fi
-# func_arith arithmetic-term...
-func_arith ()
-{
- func_arith_result=`expr "$[@]"`
-}
+if test x"$lt_shell_append" = xyes; then
+ _LT_PROG_FUNCTION_REPLACE([func_append], [ eval "${1}+=\\${2}"])
-# func_len string
-# STRING may not start with a hyphen.
-func_len ()
-{
- func_len_result=`expr "$[1]" : ".*" 2>/dev/null || echo $max_cmd_len`
-}
+ _LT_PROG_FUNCTION_REPLACE([func_append_quoted], [dnl
+ func_quote_for_eval "${2}"
+dnl m4 expansion turns \\\\ into \\, and then the shell eval turns that into \
+ eval "${1}+=\\\\ \\$func_quote_for_eval_result"])
-_LT_EOF
-esac
+ # Save a `func_append' function call where possible by direct use of '+='
+ sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+ test 0 -eq $? || _lt_function_replace_fail=:
+else
+ # Save a `func_append' function call even when '+=' is not available
+ sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \
+ && mv -f "$cfgfile.tmp" "$cfgfile" \
+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+ test 0 -eq $? || _lt_function_replace_fail=:
+fi
-case $lt_shell_append in
- yes)
- cat << \_LT_EOF >> "$cfgfile"
+if test x"$_lt_function_replace_fail" = x":"; then
+ AC_MSG_WARN([Unable to substitute extended shell functions in $ofile])
+fi
+])
-# func_append var value
-# Append VALUE to the end of shell variable VAR.
-func_append ()
-{
- eval "$[1]+=\$[2]"
-}
-_LT_EOF
+# _LT_PATH_CONVERSION_FUNCTIONS
+# -----------------------------
+# Determine which file name conversion functions should be used by
+# func_to_host_file (and, implicitly, by func_to_host_path). These are needed
+# for certain cross-compile configurations and native mingw.
+m4_defun([_LT_PATH_CONVERSION_FUNCTIONS],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+AC_MSG_CHECKING([how to convert $build file names to $host format])
+AC_CACHE_VAL(lt_cv_to_host_file_cmd,
+[case $host in
+ *-*-mingw* )
+ case $build in
+ *-*-mingw* ) # actually msys
+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32
+ ;;
+ *-*-cygwin* )
+ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32
+ ;;
+ * ) # otherwise, assume *nix
+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32
+ ;;
+ esac
;;
- *)
- cat << \_LT_EOF >> "$cfgfile"
-
-# func_append var value
-# Append VALUE to the end of shell variable VAR.
-func_append ()
-{
- eval "$[1]=\$$[1]\$[2]"
-}
-
-_LT_EOF
+ *-*-cygwin* )
+ case $build in
+ *-*-mingw* ) # actually msys
+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin
+ ;;
+ *-*-cygwin* )
+ lt_cv_to_host_file_cmd=func_convert_file_noop
+ ;;
+ * ) # otherwise, assume *nix
+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin
+ ;;
+ esac
;;
- esac
+ * ) # unhandled hosts (and "normal" native builds)
+ lt_cv_to_host_file_cmd=func_convert_file_noop
+ ;;
+esac
+])
+to_host_file_cmd=$lt_cv_to_host_file_cmd
+AC_MSG_RESULT([$lt_cv_to_host_file_cmd])
+_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd],
+ [0], [convert $build file names to $host format])dnl
+
+AC_MSG_CHECKING([how to convert $build file names to toolchain format])
+AC_CACHE_VAL(lt_cv_to_tool_file_cmd,
+[#assume ordinary cross tools, or native build.
+lt_cv_to_tool_file_cmd=func_convert_file_noop
+case $host in
+ *-*-mingw* )
+ case $build in
+ *-*-mingw* ) # actually msys
+ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32
+ ;;
+ esac
+ ;;
+esac
])
+to_tool_file_cmd=$lt_cv_to_tool_file_cmd
+AC_MSG_RESULT([$lt_cv_to_tool_file_cmd])
+_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd],
+ [0], [convert $build files to toolchain format])dnl
+])# _LT_PATH_CONVERSION_FUNCTIONS
diff --git a/Modules/_ctypes/libffi/m4/ltoptions.m4 b/Modules/_ctypes/libffi/m4/ltoptions.m4
index 34151a3..5d9acd8 100644
--- a/Modules/_ctypes/libffi/m4/ltoptions.m4
+++ b/Modules/_ctypes/libffi/m4/ltoptions.m4
@@ -1,13 +1,14 @@
# Helper functions for option handling. -*- Autoconf -*-
#
-# Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc.
+# Copyright (C) 2004, 2005, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
# Written by Gary V. Vaughan, 2004
#
# This file is free software; the Free Software Foundation gives
# unlimited permission to copy and/or distribute it, with or without
# modifications, as long as this notice is preserved.
-# serial 6 ltoptions.m4
+# serial 7 ltoptions.m4
# This is to help aclocal find these macros, as it can't see m4_define.
AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])])
@@ -125,7 +126,7 @@ LT_OPTION_DEFINE([LT_INIT], [win32-dll],
[enable_win32_dll=yes
case $host in
-*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-cegcc*)
+*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*)
AC_CHECK_TOOL(AS, as, false)
AC_CHECK_TOOL(DLLTOOL, dlltool, false)
AC_CHECK_TOOL(OBJDUMP, objdump, false)
@@ -133,13 +134,13 @@ case $host in
esac
test -z "$AS" && AS=as
-_LT_DECL([], [AS], [0], [Assembler program])dnl
+_LT_DECL([], [AS], [1], [Assembler program])dnl
test -z "$DLLTOOL" && DLLTOOL=dlltool
-_LT_DECL([], [DLLTOOL], [0], [DLL creation program])dnl
+_LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl
test -z "$OBJDUMP" && OBJDUMP=objdump
-_LT_DECL([], [OBJDUMP], [0], [Object dumper program])dnl
+_LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl
])# win32-dll
AU_DEFUN([AC_LIBTOOL_WIN32_DLL],
@@ -325,9 +326,24 @@ dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], [])
# MODE is either `yes' or `no'. If omitted, it defaults to `both'.
m4_define([_LT_WITH_PIC],
[AC_ARG_WITH([pic],
- [AS_HELP_STRING([--with-pic],
+ [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@],
[try to use only PIC/non-PIC objects @<:@default=use both@:>@])],
- [pic_mode="$withval"],
+ [lt_p=${PACKAGE-default}
+ case $withval in
+ yes|no) pic_mode=$withval ;;
+ *)
+ pic_mode=default
+ # Look at the argument we got. We use all the common list separators.
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+ for lt_pkg in $withval; do
+ IFS="$lt_save_ifs"
+ if test "X$lt_pkg" = "X$lt_p"; then
+ pic_mode=yes
+ fi
+ done
+ IFS="$lt_save_ifs"
+ ;;
+ esac],
[pic_mode=default])
test -z "$pic_mode" && pic_mode=m4_default([$1], [default])
diff --git a/Modules/_ctypes/libffi/m4/ltversion.m4 b/Modules/_ctypes/libffi/m4/ltversion.m4
index b8e154f..07a8602 100644
--- a/Modules/_ctypes/libffi/m4/ltversion.m4
+++ b/Modules/_ctypes/libffi/m4/ltversion.m4
@@ -7,17 +7,17 @@
# unlimited permission to copy and/or distribute it, with or without
# modifications, as long as this notice is preserved.
-# Generated from ltversion.in.
+# @configure_input@
-# serial 3012 ltversion.m4
+# serial 3337 ltversion.m4
# This file is part of GNU Libtool
-m4_define([LT_PACKAGE_VERSION], [2.2.6])
-m4_define([LT_PACKAGE_REVISION], [1.3012])
+m4_define([LT_PACKAGE_VERSION], [2.4.2])
+m4_define([LT_PACKAGE_REVISION], [1.3337])
AC_DEFUN([LTVERSION_VERSION],
-[macro_version='2.2.6'
-macro_revision='1.3012'
+[macro_version='2.4.2'
+macro_revision='1.3337'
_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?])
_LT_DECL(, macro_revision, 0)
])
diff --git a/Modules/_ctypes/libffi/m4/lt~obsolete.m4 b/Modules/_ctypes/libffi/m4/lt~obsolete.m4
index 637bb20..c573da9 100644
--- a/Modules/_ctypes/libffi/m4/lt~obsolete.m4
+++ b/Modules/_ctypes/libffi/m4/lt~obsolete.m4
@@ -1,13 +1,13 @@
# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*-
#
-# Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc.
+# Copyright (C) 2004, 2005, 2007, 2009 Free Software Foundation, Inc.
# Written by Scott James Remnant, 2004.
#
# This file is free software; the Free Software Foundation gives
# unlimited permission to copy and/or distribute it, with or without
# modifications, as long as this notice is preserved.
-# serial 4 lt~obsolete.m4
+# serial 5 lt~obsolete.m4
# These exist entirely to fool aclocal when bootstrapping libtool.
#
@@ -77,7 +77,6 @@ m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])])
m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])])
m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])])
m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])])
-m4_ifndef([AC_LIBTOOL_RC], [AC_DEFUN([AC_LIBTOOL_RC])])
m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])])
m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])])
m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])])
@@ -90,3 +89,10 @@ m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])])
m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])])
m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])])
m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])])
+m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS], [AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])])
+m4_ifndef([_LT_AC_PROG_CXXCPP], [AC_DEFUN([_LT_AC_PROG_CXXCPP])])
+m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS], [AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])])
+m4_ifndef([_LT_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])])
+m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])])
+m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])])
+m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])])
diff --git a/Modules/_ctypes/libffi/man/Makefile.am b/Modules/_ctypes/libffi/man/Makefile.am
index 2519277..afcbfb6 100644
--- a/Modules/_ctypes/libffi/man/Makefile.am
+++ b/Modules/_ctypes/libffi/man/Makefile.am
@@ -2,7 +2,7 @@
AUTOMAKE_OPTIONS=foreign
-EXTRA_DIST = ffi.3 ffi_call.3 ffi_prep_cif.3
+EXTRA_DIST = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3
-man_MANS = ffi.3 ffi_call.3 ffi_prep_cif.3
+man_MANS = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3
diff --git a/Modules/_ctypes/libffi/man/Makefile.in b/Modules/_ctypes/libffi/man/Makefile.in
index 3e40be2..c02e1f2 100644
--- a/Modules/_ctypes/libffi/man/Makefile.in
+++ b/Modules/_ctypes/libffi/man/Makefile.in
@@ -1,9 +1,8 @@
-# Makefile.in generated by automake 1.11 from Makefile.am.
+# Makefile.in generated by automake 1.12.2 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
-# Inc.
+# Copyright (C) 1994-2012 Free Software Foundation, Inc.
+
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -15,6 +14,23 @@
@SET_MAKE@
VPATH = @srcdir@
+am__make_dryrun = \
+ { \
+ am__dry=no; \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \
+ | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
+ *) \
+ for am__flg in $$MAKEFLAGS; do \
+ case $$am__flg in \
+ *=*|--*) ;; \
+ *n*) am__dry=yes; break;; \
+ esac; \
+ done;; \
+ esac; \
+ test $$am__dry = yes; \
+ }
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -37,7 +53,19 @@ target_triplet = @target@
subdir = man
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
+am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \
+ $(top_srcdir)/m4/ax_append_flag.m4 \
+ $(top_srcdir)/m4/ax_cc_maxopt.m4 \
+ $(top_srcdir)/m4/ax_cflags_warn_all.m4 \
+ $(top_srcdir)/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/m4/ax_compiler_vendor.m4 \
+ $(top_srcdir)/m4/ax_configure_args.m4 \
+ $(top_srcdir)/m4/ax_enable_builddir.m4 \
+ $(top_srcdir)/m4/ax_gcc_archflag.m4 \
+ $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
@@ -47,6 +75,11 @@ CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
SOURCES =
DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
@@ -68,6 +101,12 @@ am__nobase_list = $(am__nobase_strip_setup); \
am__base_list = \
sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+ test -z "$$files" \
+ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+ $(am__cd) "$$dir" && rm -f $$files; }; \
+ }
man3dir = $(mandir)/man3
am__installdirs = "$(DESTDIR)$(man3dir)"
NROFF = nroff
@@ -76,6 +115,7 @@ DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
+AM_LTLDFLAGS = @AM_LTLDFLAGS@
AM_RUNTESTFLAGS = @AM_RUNTESTFLAGS@
AR = @AR@
AUTOCONF = @AUTOCONF@
@@ -93,6 +133,7 @@ CPPFLAGS = @CPPFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
@@ -100,6 +141,7 @@ ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
+FFI_EXEC_TRAMPOLINE_TABLE = @FFI_EXEC_TRAMPOLINE_TABLE@
FGREP = @FGREP@
GREP = @GREP@
HAVE_LONG_DOUBLE = @HAVE_LONG_DOUBLE@
@@ -118,6 +160,7 @@ LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
MAINT = @MAINT@
MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
MKDIR_P = @MKDIR_P@
NM = @NM@
NMEDIT = @NMEDIT@
@@ -130,8 +173,10 @@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
+PRTDIAG = @PRTDIAG@
RANLIB = @RANLIB@
SED = @SED@
SET_MAKE = @SET_MAKE@
@@ -144,6 +189,7 @@ abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
@@ -151,6 +197,7 @@ am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
+ax_enable_builddir_sed = @ax_enable_builddir_sed@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
@@ -176,7 +223,6 @@ libdir = @libdir@
libexecdir = @libexecdir@
localedir = @localedir@
localstatedir = @localstatedir@
-lt_ECHO = @lt_ECHO@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
@@ -187,6 +233,7 @@ psdir = @psdir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
+sys_symbol_underscore = @sys_symbol_underscore@
sysconfdir = @sysconfdir@
target = @target@
target_alias = @target_alias@
@@ -199,8 +246,8 @@ top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
AUTOMAKE_OPTIONS = foreign
-EXTRA_DIST = ffi.3 ffi_call.3 ffi_prep_cif.3
-man_MANS = ffi.3 ffi_call.3 ffi_prep_cif.3
+EXTRA_DIST = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3
+man_MANS = ffi.3 ffi_call.3 ffi_prep_cif.3 ffi_prep_cif_var.3
all: all-am
.SUFFIXES:
@@ -242,11 +289,18 @@ clean-libtool:
-rm -rf .libs _libs
install-man3: $(man_MANS)
@$(NORMAL_INSTALL)
- test -z "$(man3dir)" || $(MKDIR_P) "$(DESTDIR)$(man3dir)"
- @list=''; test -n "$(man3dir)" || exit 0; \
- { for i in $$list; do echo "$$i"; done; \
- l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \
- sed -n '/\.3[a-z]*$$/p'; \
+ @list1=''; \
+ list2='$(man_MANS)'; \
+ test -n "$(man3dir)" \
+ && test -n "`echo $$list1$$list2`" \
+ || exit 0; \
+ echo " $(MKDIR_P) '$(DESTDIR)$(man3dir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(man3dir)" || exit 1; \
+ { for i in $$list1; do echo "$$i"; done; \
+ if test -n "$$list2"; then \
+ for i in $$list2; do echo "$$i"; done \
+ | sed -n '/\.3[a-z]*$$/p'; \
+ fi; \
} | while read p; do \
if test -f $$p; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; echo "$$p"; \
@@ -275,15 +329,15 @@ uninstall-man3:
sed -n '/\.3[a-z]*$$/p'; \
} | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^3][0-9a-z]*$$,3,;x' \
-e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \
- test -z "$$files" || { \
- echo " ( cd '$(DESTDIR)$(man3dir)' && rm -f" $$files ")"; \
- cd "$(DESTDIR)$(man3dir)" && rm -f $$files; }
+ dir='$(DESTDIR)$(man3dir)'; $(am__uninstall_files_from_dir)
tags: TAGS
TAGS:
ctags: CTAGS
CTAGS:
+cscope cscopelist:
+
distdir: $(DISTFILES)
@list='$(MANS)'; if test -n "$$list"; then \
@@ -292,10 +346,10 @@ distdir: $(DISTFILES)
if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \
if test -n "$$list" && \
grep 'ab help2man is required to generate this page' $$list >/dev/null; then \
- echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \
+ echo "error: found man pages containing the 'missing help2man' replacement text:" >&2; \
grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \
echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \
- echo " typically \`make maintainer-clean' will remove them" >&2; \
+ echo " typically 'make maintainer-clean' will remove them" >&2; \
exit 1; \
else :; fi; \
else :; fi
@@ -345,10 +399,15 @@ install-am: all-am
installcheck: installcheck-am
install-strip:
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- `test -z '$(STRIP)' || \
- echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
mostlyclean-generic:
clean-generic:
diff --git a/Modules/_ctypes/libffi/man/ffi.3 b/Modules/_ctypes/libffi/man/ffi.3
index 18b5d5d..1f1d303 100644
--- a/Modules/_ctypes/libffi/man/ffi.3
+++ b/Modules/_ctypes/libffi/man/ffi.3
@@ -16,6 +16,15 @@ libffi, -lffi
.Fa "ffi_type **atypes"
.Fc
.Ft void
+.Fo ffi_prep_cif_var
+.Fa "ffi_cif *cif"
+.Fa "ffi_abi abi"
+.Fa "unsigned int nfixedargs"
+.Fa "unsigned int ntotalargs"
+.Fa "ffi_type *rtype"
+.Fa "ffi_type **atypes"
+.Fc
+.Ft void
.Fo ffi_call
.Fa "ffi_cif *cif"
.Fa "void (*fn)(void)"
@@ -28,4 +37,5 @@ generate a call to another function at runtime without requiring knowledge of
the called function's interface at compile time.
.Sh SEE ALSO
.Xr ffi_prep_cif 3 ,
+.Xr ffi_prep_cif_var 3 ,
.Xr ffi_call 3
diff --git a/Modules/_ctypes/libffi/man/ffi_prep_cif.3 b/Modules/_ctypes/libffi/man/ffi_prep_cif.3
index 9436b31..ab2be8a 100644
--- a/Modules/_ctypes/libffi/man/ffi_prep_cif.3
+++ b/Modules/_ctypes/libffi/man/ffi_prep_cif.3
@@ -37,7 +37,9 @@ structs that describe the data type, size and alignment of each argument.
points to an
.Nm ffi_type
that describes the data type, size and alignment of the
-return value.
+return value. Note that to call a variadic function
+.Nm ffi_prep_cif_var
+must be used instead.
.Sh RETURN VALUES
Upon successful completion,
.Nm ffi_prep_cif
@@ -59,8 +61,8 @@ does not refer to a valid ABI,
.Nm FFI_BAD_ABI
will be returned. Available ABIs are
defined in
-.Nm <ffitarget.h>
-.
+.Nm <ffitarget.h> .
.Sh SEE ALSO
.Xr ffi 3 ,
-.Xr ffi_call 3
+.Xr ffi_call 3 ,
+.Xr ffi_prep_cif_var 3
diff --git a/Modules/_ctypes/libffi/man/ffi_prep_cif_var.3 b/Modules/_ctypes/libffi/man/ffi_prep_cif_var.3
new file mode 100644
index 0000000..7e19d0b
--- /dev/null
+++ b/Modules/_ctypes/libffi/man/ffi_prep_cif_var.3
@@ -0,0 +1,73 @@
+.Dd January 25, 2011
+.Dt ffi_prep_cif_var 3
+.Sh NAME
+.Nm ffi_prep_cif_var
+.Nd Prepare a
+.Nm ffi_cif
+structure for use with
+.Nm ffi_call
+for variadic functions.
+.Sh SYNOPSIS
+.In ffi.h
+.Ft ffi_status
+.Fo ffi_prep_cif_var
+.Fa "ffi_cif *cif"
+.Fa "ffi_abi abi"
+.Fa "unsigned int nfixedargs"
+.Fa "unsigned int ntotalargs"
+.Fa "ffi_type *rtype"
+.Fa "ffi_type **atypes"
+.Fc
+.Sh DESCRIPTION
+The
+.Nm ffi_prep_cif_var
+function prepares a
+.Nm ffi_cif
+structure for use with
+.Nm ffi_call
+for variadic functions.
+.Fa abi
+specifies a set of calling conventions to use.
+.Fa atypes
+is an array of
+.Fa ntotalargs
+pointers to
+.Nm ffi_type
+structs that describe the data type, size and alignment of each argument.
+.Fa rtype
+points to an
+.Nm ffi_type
+that describes the data type, size and alignment of the
+return value.
+.Fa nfixedargs
+must contain the number of fixed (non-variadic) arguments.
+Note that to call a non-variadic function
+.Nm ffi_prep_cif
+must be used.
+.Sh RETURN VALUES
+Upon successful completion,
+.Nm ffi_prep_cif_var
+returns
+.Nm FFI_OK .
+It will return
+.Nm FFI_BAD_TYPEDEF
+if
+.Fa cif
+is
+.Nm NULL
+or
+.Fa atypes
+or
+.Fa rtype
+is malformed. If
+.Fa abi
+does not refer to a valid ABI,
+.Nm FFI_BAD_ABI
+will be returned. Available ABIs are
+defined in
+.Nm <ffitarget.h>
+.
+.Sh SEE ALSO
+.Xr ffi 3 ,
+.Xr ffi_call 3 ,
+.Xr ffi_prep_cif 3
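The new manual page above documents the ffi_prep_cif_var entry point added by this libffi update. A minimal usage sketch of that interface, assuming the standard ffi.h helpers (FFI_DEFAULT_ABI, FFI_FN, the built-in ffi_type objects); the call to printf and its argument values are hypothetical and are not taken from the patch:

#include <ffi.h>
#include <stdio.h>

int main (void)
{
  ffi_cif cif;
  ffi_type *atypes[] = { &ffi_type_pointer, &ffi_type_sint32 };
  char *fmt = "answer: %d\n";          /* hypothetical format string */
  int value = 42;                      /* hypothetical variadic argument */
  void *avalues[] = { &fmt, &value };
  ffi_arg rc;

  /* One fixed argument (the format string), two arguments in total. */
  if (ffi_prep_cif_var (&cif, FFI_DEFAULT_ABI, 1, 2,
                        &ffi_type_sint32, atypes) == FFI_OK)
    ffi_call (&cif, FFI_FN (printf), &rc, avalues);
  return 0;
}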
diff --git a/Modules/_ctypes/libffi/mdate-sh b/Modules/_ctypes/libffi/mdate-sh
index cd916c0..cd916c0 100755..100644
--- a/Modules/_ctypes/libffi/mdate-sh
+++ b/Modules/_ctypes/libffi/mdate-sh
diff --git a/Modules/_ctypes/libffi/missing b/Modules/_ctypes/libffi/missing
index 894e786..28055d2 100755
--- a/Modules/_ctypes/libffi/missing
+++ b/Modules/_ctypes/libffi/missing
@@ -1,10 +1,10 @@
#! /bin/sh
# Common stub for a few missing GNU programs while installing.
-scriptversion=2005-06-08.21
+scriptversion=2009-04-28.21; # UTC
-# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005
-# Free Software Foundation, Inc.
+# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005, 2006,
+# 2008, 2009 Free Software Foundation, Inc.
# Originally by Fran,cois Pinard <pinard@iro.umontreal.ca>, 1996.
# This program is free software; you can redistribute it and/or modify
@@ -18,9 +18,7 @@ scriptversion=2005-06-08.21
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
-# 02110-1301, USA.
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
@@ -33,6 +31,8 @@ if test $# -eq 0; then
fi
run=:
+sed_output='s/.* --output[ =]\([^ ]*\).*/\1/p'
+sed_minuso='s/.* -o \([^ ]*\).*/\1/p'
# In the cases where this matters, `missing' is being run in the
# srcdir already.
@@ -44,7 +44,7 @@ fi
msg="missing on your system"
-case "$1" in
+case $1 in
--run)
# Try to run requested program, and just exit if it succeeds.
run=
@@ -77,6 +77,7 @@ Supported PROGRAM values:
aclocal touch file \`aclocal.m4'
autoconf touch file \`configure'
autoheader touch file \`config.h.in'
+ autom4te touch the output file, or create a stub one
automake touch all \`Makefile.in' files
bison create \`y.tab.[ch]', if possible, from existing .[ch]
flex create \`lex.yy.c', if possible, from existing .c
@@ -86,6 +87,9 @@ Supported PROGRAM values:
tar try tar, gnutar, gtar, then tar without non-portable flags
yacc create \`y.tab.[ch]', if possible, from existing .[ch]
+Version suffixes to PROGRAM as well as the prefixes \`gnu-', \`gnu', and
+\`g' are ignored when checking the name.
+
Send bug reports to <bug-automake@gnu.org>."
exit $?
;;
@@ -103,15 +107,22 @@ Send bug reports to <bug-automake@gnu.org>."
esac
+# normalize program name to check for.
+program=`echo "$1" | sed '
+ s/^gnu-//; t
+ s/^gnu//; t
+ s/^g//; t'`
+
# Now exit if we have it, but it failed. Also exit now if we
# don't have it and --version was passed (most likely to detect
-# the program).
-case "$1" in
- lex|yacc)
+# the program). This is about non-GNU programs, so use $1 not
+# $program.
+case $1 in
+ lex*|yacc*)
# Not GNU programs, they don't have --version.
;;
- tar)
+ tar*)
if test -n "$run"; then
echo 1>&2 "ERROR: \`tar' requires --run"
exit 1
@@ -135,7 +146,7 @@ esac
# If it does not exist, or fails to run (possibly an outdated version),
# try to emulate it.
-case "$1" in
+case $program in
aclocal*)
echo 1>&2 "\
WARNING: \`$1' is $msg. You should only need it if
@@ -145,7 +156,7 @@ WARNING: \`$1' is $msg. You should only need it if
touch aclocal.m4
;;
- autoconf)
+ autoconf*)
echo 1>&2 "\
WARNING: \`$1' is $msg. You should only need it if
you modified \`${configure_ac}'. You might want to install the
@@ -154,7 +165,7 @@ WARNING: \`$1' is $msg. You should only need it if
touch configure
;;
- autoheader)
+ autoheader*)
echo 1>&2 "\
WARNING: \`$1' is $msg. You should only need it if
you modified \`acconfig.h' or \`${configure_ac}'. You might want
@@ -164,7 +175,7 @@ WARNING: \`$1' is $msg. You should only need it if
test -z "$files" && files="config.h"
touch_files=
for f in $files; do
- case "$f" in
+ case $f in
*:*) touch_files="$touch_files "`echo "$f" |
sed -e 's/^[^:]*://' -e 's/:.*//'`;;
*) touch_files="$touch_files $f.in";;
@@ -184,7 +195,7 @@ WARNING: \`$1' is $msg. You should only need it if
while read f; do touch "$f"; done
;;
- autom4te)
+ autom4te*)
echo 1>&2 "\
WARNING: \`$1' is needed, but is $msg.
You might have modified some files without having the
@@ -192,8 +203,8 @@ WARNING: \`$1' is needed, but is $msg.
You can get \`$1' as part of \`Autoconf' from any GNU
archive site."
- file=`echo "$*" | sed -n 's/.*--output[ =]*\([^ ]*\).*/\1/p'`
- test -z "$file" && file=`echo "$*" | sed -n 's/.*-o[ ]*\([^ ]*\).*/\1/p'`
+ file=`echo "$*" | sed -n "$sed_output"`
+ test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
if test -f "$file"; then
touch $file
else
@@ -207,80 +218,78 @@ WARNING: \`$1' is needed, but is $msg.
fi
;;
- bison|yacc)
+ bison*|yacc*)
echo 1>&2 "\
WARNING: \`$1' $msg. You should only need it if
you modified a \`.y' file. You may need the \`Bison' package
in order for those modifications to take effect. You can get
\`Bison' from any GNU archive site."
rm -f y.tab.c y.tab.h
- if [ $# -ne 1 ]; then
+ if test $# -ne 1; then
eval LASTARG="\${$#}"
- case "$LASTARG" in
+ case $LASTARG in
*.y)
SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'`
- if [ -f "$SRCFILE" ]; then
+ if test -f "$SRCFILE"; then
cp "$SRCFILE" y.tab.c
fi
SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'`
- if [ -f "$SRCFILE" ]; then
+ if test -f "$SRCFILE"; then
cp "$SRCFILE" y.tab.h
fi
;;
esac
fi
- if [ ! -f y.tab.h ]; then
+ if test ! -f y.tab.h; then
echo >y.tab.h
fi
- if [ ! -f y.tab.c ]; then
+ if test ! -f y.tab.c; then
echo 'main() { return 0; }' >y.tab.c
fi
;;
- lex|flex)
+ lex*|flex*)
echo 1>&2 "\
WARNING: \`$1' is $msg. You should only need it if
you modified a \`.l' file. You may need the \`Flex' package
in order for those modifications to take effect. You can get
\`Flex' from any GNU archive site."
rm -f lex.yy.c
- if [ $# -ne 1 ]; then
+ if test $# -ne 1; then
eval LASTARG="\${$#}"
- case "$LASTARG" in
+ case $LASTARG in
*.l)
SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'`
- if [ -f "$SRCFILE" ]; then
+ if test -f "$SRCFILE"; then
cp "$SRCFILE" lex.yy.c
fi
;;
esac
fi
- if [ ! -f lex.yy.c ]; then
+ if test ! -f lex.yy.c; then
echo 'main() { return 0; }' >lex.yy.c
fi
;;
- help2man)
+ help2man*)
echo 1>&2 "\
WARNING: \`$1' is $msg. You should only need it if
you modified a dependency of a manual page. You may need the
\`Help2man' package in order for those modifications to take
effect. You can get \`Help2man' from any GNU archive site."
- file=`echo "$*" | sed -n 's/.*-o \([^ ]*\).*/\1/p'`
- if test -z "$file"; then
- file=`echo "$*" | sed -n 's/.*--output=\([^ ]*\).*/\1/p'`
- fi
- if [ -f "$file" ]; then
+ file=`echo "$*" | sed -n "$sed_output"`
+ test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+ if test -f "$file"; then
touch $file
else
test -z "$file" || exec >$file
echo ".ab help2man is required to generate this page"
- exit 1
+ exit $?
fi
;;
- makeinfo)
+ makeinfo*)
echo 1>&2 "\
WARNING: \`$1' is $msg. You should only need it if
you modified a \`.texi' or \`.texinfo' file, or any other file
@@ -289,11 +298,17 @@ WARNING: \`$1' is $msg. You should only need it if
DU, IRIX). You might want to install the \`Texinfo' package or
the \`GNU make' package. Grab either from any GNU archive site."
# The file to touch is that specified with -o ...
- file=`echo "$*" | sed -n 's/.*-o \([^ ]*\).*/\1/p'`
+ file=`echo "$*" | sed -n "$sed_output"`
+ test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
if test -z "$file"; then
# ... or it is the one specified with @setfilename ...
infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'`
- file=`sed -n '/^@setfilename/ { s/.* \([^ ]*\) *$/\1/; p; q; }' $infile`
+ file=`sed -n '
+ /^@setfilename/{
+ s/.* \([^ ]*\) *$/\1/
+ p
+ q
+ }' $infile`
# ... or it is derived from the source name (dir/f.texi becomes f.info)
test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info
fi
@@ -303,7 +318,7 @@ WARNING: \`$1' is $msg. You should only need it if
touch $file
;;
- tar)
+ tar*)
shift
# We have already tried tar in the generic part.
@@ -317,13 +332,13 @@ WARNING: \`$1' is $msg. You should only need it if
fi
firstarg="$1"
if shift; then
- case "$firstarg" in
+ case $firstarg in
*o*)
firstarg=`echo "$firstarg" | sed s/o//`
tar "$firstarg" "$@" && exit 0
;;
esac
- case "$firstarg" in
+ case $firstarg in
*h*)
firstarg=`echo "$firstarg" | sed s/h//`
tar "$firstarg" "$@" && exit 0
@@ -356,5 +371,6 @@ exit 0
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
-# time-stamp-end: "$"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
# End:
diff --git a/Modules/_ctypes/libffi/msvcc.sh b/Modules/_ctypes/libffi/msvcc.sh
index 8301839..dcdbeab 100644..100755
--- a/Modules/_ctypes/libffi/msvcc.sh
+++ b/Modules/_ctypes/libffi/msvcc.sh
@@ -42,13 +42,11 @@
# format and translated into something sensible for cl or ml.
#
-# Disable specific warnings, and enable warnings-as-errors so we catch any
-# mistranslated args.
-nowarn="-wd4127 -wd4820 -wd4706 -wd4100 -wd4255 -wd4668 -wd4053 -wd4324"
-args="-nologo -W3 -WX $nowarn"
+args="-nologo -W3"
md=-MD
cl="cl"
ml="ml"
+safeseh="-safeseh"
output=
while [ $# -gt 0 ]
@@ -66,15 +64,28 @@ do
-m64)
cl="cl" # "$MSVC/x86_amd64/cl"
ml="ml64" # "$MSVC/x86_amd64/ml64"
+ safeseh=
+ shift 1
+ ;;
+ -O0)
+ args="$args -Od"
shift 1
;;
-O*)
- args="$args $1"
+ # If we're optimizing, make sure we explicitly turn on some optimizations
+ # that are implicitly disabled by debug symbols (-Zi).
+ args="$args $1 -OPT:REF -OPT:ICF -INCREMENTAL:NO"
shift 1
;;
-g)
- # Can't specify -RTC1 or -Zi in opt. -Gy is ok. Use -OPT:REF?
- args="$args -D_DEBUG -RTC1 -Zi"
+ # Enable debug symbol generation.
+ args="$args -Zi -DEBUG"
+ shift 1
+ ;;
+ -DFFI_DEBUG)
+ # Link against debug CRT and enable runtime error checks.
+ args="$args -RTC1"
+ defines="$defines $1"
md=-MDd
shift 1
;;
@@ -111,7 +122,8 @@ do
shift 1
;;
-Wall)
- args="$args -Wall"
+ # -Wall on MSVC is overzealous, and we already build with -W3. Nothing
+ # to do here.
shift 1
;;
-Werror)
@@ -166,7 +178,7 @@ if [ -n "$assembly" ]; then
echo "$cl -nologo -EP $includes $defines $src > $ppsrc"
"$cl" -nologo -EP $includes $defines $src > $ppsrc || exit $?
output="$(echo $output | sed 's%/F[dpa][^ ]*%%g')"
- args="-nologo -safeseh $single $output $ppsrc"
+ args="-nologo $safeseh $single $output $ppsrc"
echo "$ml $args"
eval "\"$ml\" $args"
diff --git a/Modules/_ctypes/libffi/src/aarch64/ffi.c b/Modules/_ctypes/libffi/src/aarch64/ffi.c
new file mode 100644
index 0000000..1405665
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/aarch64/ffi.c
@@ -0,0 +1,1076 @@
+/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include <stdio.h>
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+#include <stdlib.h>
+
+/* Stack alignment requirement in bytes */
+#define AARCH64_STACK_ALIGN 16
+
+#define N_X_ARG_REG 8
+#define N_V_ARG_REG 8
+
+#define AARCH64_FFI_WITH_V (1 << AARCH64_FFI_WITH_V_BIT)
+
+union _d
+{
+ UINT64 d;
+ UINT32 s[2];
+};
+
+struct call_context
+{
+ UINT64 x [AARCH64_N_XREG];
+ struct
+ {
+ union _d d[2];
+ } v [AARCH64_N_VREG];
+};
+
+static void *
+get_x_addr (struct call_context *context, unsigned n)
+{
+ return &context->x[n];
+}
+
+static void *
+get_s_addr (struct call_context *context, unsigned n)
+{
+#if defined __AARCH64EB__
+ return &context->v[n].d[1].s[1];
+#else
+ return &context->v[n].d[0].s[0];
+#endif
+}
+
+static void *
+get_d_addr (struct call_context *context, unsigned n)
+{
+#if defined __AARCH64EB__
+ return &context->v[n].d[1];
+#else
+ return &context->v[n].d[0];
+#endif
+}
+
+static void *
+get_v_addr (struct call_context *context, unsigned n)
+{
+ return &context->v[n];
+}
+
+/* Return the memory location at which a basic type would reside
+ were it to have been stored in register n. */
+
+static void *
+get_basic_type_addr (unsigned short type, struct call_context *context,
+ unsigned n)
+{
+ switch (type)
+ {
+ case FFI_TYPE_FLOAT:
+ return get_s_addr (context, n);
+ case FFI_TYPE_DOUBLE:
+ return get_d_addr (context, n);
+ case FFI_TYPE_LONGDOUBLE:
+ return get_v_addr (context, n);
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ return get_x_addr (context, n);
+ default:
+ FFI_ASSERT (0);
+ return NULL;
+ }
+}
+
+/* Return the alignment width for each of the basic types. */
+
+static size_t
+get_basic_type_alignment (unsigned short type)
+{
+ switch (type)
+ {
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ return sizeof (UINT64);
+ case FFI_TYPE_LONGDOUBLE:
+ return sizeof (long double);
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ return sizeof (UINT64);
+
+ default:
+ FFI_ASSERT (0);
+ return 0;
+ }
+}
+
+/* Return the size in bytes for each of the basic types. */
+
+static size_t
+get_basic_type_size (unsigned short type)
+{
+ switch (type)
+ {
+ case FFI_TYPE_FLOAT:
+ return sizeof (UINT32);
+ case FFI_TYPE_DOUBLE:
+ return sizeof (UINT64);
+ case FFI_TYPE_LONGDOUBLE:
+ return sizeof (long double);
+ case FFI_TYPE_UINT8:
+ return sizeof (UINT8);
+ case FFI_TYPE_SINT8:
+ return sizeof (SINT8);
+ case FFI_TYPE_UINT16:
+ return sizeof (UINT16);
+ case FFI_TYPE_SINT16:
+ return sizeof (SINT16);
+ case FFI_TYPE_UINT32:
+ return sizeof (UINT32);
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ return sizeof (SINT32);
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ return sizeof (UINT64);
+ case FFI_TYPE_SINT64:
+ return sizeof (SINT64);
+
+ default:
+ FFI_ASSERT (0);
+ return 0;
+ }
+}
+
+extern void
+ffi_call_SYSV (unsigned (*)(struct call_context *context, unsigned char *,
+ extended_cif *),
+ struct call_context *context,
+ extended_cif *,
+ unsigned,
+ void (*fn)(void));
+
+extern void
+ffi_closure_SYSV (ffi_closure *);
+
+/* Test for an FFI floating point representation. */
+
+static unsigned
+is_floating_type (unsigned short type)
+{
+ return (type == FFI_TYPE_FLOAT || type == FFI_TYPE_DOUBLE
+ || type == FFI_TYPE_LONGDOUBLE);
+}
+
+/* Test for a homogeneous structure. */
+
+static unsigned short
+get_homogeneous_type (ffi_type *ty)
+{
+ if (ty->type == FFI_TYPE_STRUCT && ty->elements)
+ {
+ unsigned i;
+ unsigned short candidate_type
+ = get_homogeneous_type (ty->elements[0]);
+      for (i = 1; ty->elements[i]; i++)
+ {
+ unsigned short iteration_type = 0;
+ /* If we have a nested struct, we must find its homogeneous type.
+ If that fits with our candidate type, we are still
+ homogeneous. */
+ if (ty->elements[i]->type == FFI_TYPE_STRUCT
+ && ty->elements[i]->elements)
+ {
+ iteration_type = get_homogeneous_type (ty->elements[i]);
+ }
+ else
+ {
+ iteration_type = ty->elements[i]->type;
+ }
+
+ /* If we are not homogeneous, return FFI_TYPE_STRUCT. */
+ if (candidate_type != iteration_type)
+ return FFI_TYPE_STRUCT;
+ }
+ return candidate_type;
+ }
+
+ /* Base case, we have no more levels of nesting, so we
+ are a basic type, and so, trivially homogeneous in that type. */
+ return ty->type;
+}
+
+/* Determine the number of elements within a STRUCT.
+
+ Note, we must handle nested structs.
+
+ If ty is not a STRUCT this function will return 0. */
+
+static unsigned
+element_count (ffi_type *ty)
+{
+ if (ty->type == FFI_TYPE_STRUCT && ty->elements)
+ {
+ unsigned n;
+ unsigned elems = 0;
+ for (n = 0; ty->elements[n]; n++)
+ {
+ if (ty->elements[n]->type == FFI_TYPE_STRUCT
+ && ty->elements[n]->elements)
+ elems += element_count (ty->elements[n]);
+ else
+ elems++;
+ }
+ return elems;
+ }
+ return 0;
+}
+
+/* Test for a homogeneous floating point aggregate.
+
+ A homogeneous floating point aggregate is a homogeneous aggregate of
+   a half-, single-, or double-precision floating point type with one
+ to four elements. Note that this includes nested structs of the
+ basic type. */
+
+static int
+is_hfa (ffi_type *ty)
+{
+ if (ty->type == FFI_TYPE_STRUCT
+ && ty->elements[0]
+ && is_floating_type (get_homogeneous_type (ty)))
+ {
+ unsigned n = element_count (ty);
+ return n >= 1 && n <= 4;
+ }
+ return 0;
+}
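/* Illustration only (editorial sketch, not part of the patch): under the
   test above, a struct of two doubles is a homogeneous floating-point
   aggregate, while a struct mixing a double and an int is not.  The
   corresponding libffi descriptors, assuming the standard built-in
   ffi_type objects, with size/alignment left 0 for ffi_prep_cif to fill: */

/* struct { double x, y; } -- homogeneous, two elements: an HFA. */
ffi_type *hfa_elements[] = { &ffi_type_double, &ffi_type_double, NULL };
ffi_type hfa_struct = { 0, 0, FFI_TYPE_STRUCT, hfa_elements };

/* struct { double x; int y; } -- element types differ, so is_hfa() == 0. */
ffi_type *mixed_elements[] = { &ffi_type_double, &ffi_type_sint32, NULL };
ffi_type mixed_struct = { 0, 0, FFI_TYPE_STRUCT, mixed_elements };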
+
+/* Test if an ffi_type is a candidate for passing in a register.
+
+ This test does not check that sufficient registers of the
+ appropriate class are actually available, merely that IFF
+ sufficient registers are available then the argument will be passed
+ in register(s).
+
+ Note that an ffi_type that is deemed to be a register candidate
+ will always be returned in registers.
+
+ Returns 1 if a register candidate else 0. */
+
+static int
+is_register_candidate (ffi_type *ty)
+{
+ switch (ty->type)
+ {
+ case FFI_TYPE_VOID:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT64:
+ return 1;
+
+ case FFI_TYPE_STRUCT:
+ if (is_hfa (ty))
+ {
+ return 1;
+ }
+ else if (ty->size > 16)
+ {
+ /* Too large. Will be replaced with a pointer to memory. The
+ pointer MAY be passed in a register, but the value will
+ not. This test specifically fails since the argument will
+ never be passed by value in registers. */
+ return 0;
+ }
+ else
+ {
+ /* Might be passed in registers depending on the number of
+ registers required. */
+ return (ty->size + 7) / 8 < N_X_ARG_REG;
+ }
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+
+ return 0;
+}
+
+/* Test if an ffi_type argument or result is a candidate for a vector
+ register. */
+
+static int
+is_v_register_candidate (ffi_type *ty)
+{
+ return is_floating_type (ty->type)
+ || (ty->type == FFI_TYPE_STRUCT && is_hfa (ty));
+}
+
+/* Representation of the procedure call argument marshalling
+ state.
+
+ The terse state variable names match the names used in the AARCH64
+ PCS. */
+
+struct arg_state
+{
+ unsigned ngrn; /* Next general-purpose register number. */
+ unsigned nsrn; /* Next vector register number. */
+ unsigned nsaa; /* Next stack offset. */
+};
+
+/* Initialize a procedure call argument marshalling state. */
+static void
+arg_init (struct arg_state *state, unsigned call_frame_size)
+{
+ state->ngrn = 0;
+ state->nsrn = 0;
+ state->nsaa = 0;
+}
+
+/* Return the number of available consecutive core argument
+ registers. */
+
+static unsigned
+available_x (struct arg_state *state)
+{
+ return N_X_ARG_REG - state->ngrn;
+}
+
+/* Return the number of available consecutive vector argument
+ registers. */
+
+static unsigned
+available_v (struct arg_state *state)
+{
+ return N_V_ARG_REG - state->nsrn;
+}
+
+static void *
+allocate_to_x (struct call_context *context, struct arg_state *state)
+{
+ FFI_ASSERT (state->ngrn < N_X_ARG_REG)
+ return get_x_addr (context, (state->ngrn)++);
+}
+
+static void *
+allocate_to_s (struct call_context *context, struct arg_state *state)
+{
+ FFI_ASSERT (state->nsrn < N_V_ARG_REG)
+ return get_s_addr (context, (state->nsrn)++);
+}
+
+static void *
+allocate_to_d (struct call_context *context, struct arg_state *state)
+{
+ FFI_ASSERT (state->nsrn < N_V_ARG_REG)
+ return get_d_addr (context, (state->nsrn)++);
+}
+
+static void *
+allocate_to_v (struct call_context *context, struct arg_state *state)
+{
+ FFI_ASSERT (state->nsrn < N_V_ARG_REG)
+ return get_v_addr (context, (state->nsrn)++);
+}
+
+/* Allocate an aligned slot on the stack and return a pointer to it. */
+static void *
+allocate_to_stack (struct arg_state *state, void *stack, unsigned alignment,
+ unsigned size)
+{
+ void *allocation;
+
+ /* Round up the NSAA to the larger of 8 or the natural
+ alignment of the argument's type. */
+ state->nsaa = ALIGN (state->nsaa, alignment);
+ state->nsaa = ALIGN (state->nsaa, alignment);
+ state->nsaa = ALIGN (state->nsaa, 8);
+
+ allocation = stack + state->nsaa;
+
+ state->nsaa += size;
+ return allocation;
+}
+
+static void
+copy_basic_type (void *dest, void *source, unsigned short type)
+{
+  /* This is necessary to ensure that basic types are copied
+     sign-extended to 64 bits, as libffi expects.  */
+ switch (type)
+ {
+ case FFI_TYPE_FLOAT:
+ *(float *) dest = *(float *) source;
+ break;
+ case FFI_TYPE_DOUBLE:
+ *(double *) dest = *(double *) source;
+ break;
+ case FFI_TYPE_LONGDOUBLE:
+ *(long double *) dest = *(long double *) source;
+ break;
+ case FFI_TYPE_UINT8:
+ *(ffi_arg *) dest = *(UINT8 *) source;
+ break;
+ case FFI_TYPE_SINT8:
+ *(ffi_sarg *) dest = *(SINT8 *) source;
+ break;
+ case FFI_TYPE_UINT16:
+ *(ffi_arg *) dest = *(UINT16 *) source;
+ break;
+ case FFI_TYPE_SINT16:
+ *(ffi_sarg *) dest = *(SINT16 *) source;
+ break;
+ case FFI_TYPE_UINT32:
+ *(ffi_arg *) dest = *(UINT32 *) source;
+ break;
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ *(ffi_sarg *) dest = *(SINT32 *) source;
+ break;
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ *(ffi_arg *) dest = *(UINT64 *) source;
+ break;
+ case FFI_TYPE_SINT64:
+ *(ffi_sarg *) dest = *(SINT64 *) source;
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ }
+}
+
+static void
+copy_hfa_to_reg_or_stack (void *memory,
+ ffi_type *ty,
+ struct call_context *context,
+ unsigned char *stack,
+ struct arg_state *state)
+{
+ unsigned elems = element_count (ty);
+ if (available_v (state) < elems)
+ {
+ /* There are insufficient V registers. Further V register allocations
+ are prevented, the NSAA is adjusted (by allocate_to_stack ())
+ and the argument is copied to memory at the adjusted NSAA. */
+ state->nsrn = N_V_ARG_REG;
+ memcpy (allocate_to_stack (state, stack, ty->alignment, ty->size),
+ memory,
+ ty->size);
+ }
+ else
+ {
+ int i;
+ unsigned short type = get_homogeneous_type (ty);
+ unsigned elems = element_count (ty);
+ for (i = 0; i < elems; i++)
+ {
+ void *reg = allocate_to_v (context, state);
+ copy_basic_type (reg, memory, type);
+ memory += get_basic_type_size (type);
+ }
+ }
+}
+
+/* Either allocate an appropriate register for the argument type, or if
+ none are available, allocate a stack slot and return a pointer
+ to the allocated space. */
+
+static void *
+allocate_to_register_or_stack (struct call_context *context,
+ unsigned char *stack,
+ struct arg_state *state,
+ unsigned short type)
+{
+ size_t alignment = get_basic_type_alignment (type);
+ size_t size = alignment;
+ switch (type)
+ {
+ case FFI_TYPE_FLOAT:
+ /* This is the only case for which the allocated stack size
+ should not match the alignment of the type. */
+ size = sizeof (UINT32);
+ /* Fall through. */
+ case FFI_TYPE_DOUBLE:
+ if (state->nsrn < N_V_ARG_REG)
+ return allocate_to_d (context, state);
+ state->nsrn = N_V_ARG_REG;
+ break;
+ case FFI_TYPE_LONGDOUBLE:
+ if (state->nsrn < N_V_ARG_REG)
+ return allocate_to_v (context, state);
+ state->nsrn = N_V_ARG_REG;
+ break;
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ if (state->ngrn < N_X_ARG_REG)
+ return allocate_to_x (context, state);
+ state->ngrn = N_X_ARG_REG;
+ break;
+ default:
+ FFI_ASSERT (0);
+ }
+
+ return allocate_to_stack (state, stack, alignment, size);
+}
+
+/* Copy a value to an appropriate register, or if none are
+ available, to the stack. */
+
+static void
+copy_to_register_or_stack (struct call_context *context,
+ unsigned char *stack,
+ struct arg_state *state,
+ void *value,
+ unsigned short type)
+{
+ copy_basic_type (
+ allocate_to_register_or_stack (context, stack, state, type),
+ value,
+ type);
+}
+
+/* Marshal the arguments from FFI representation to procedure call
+ context and stack. */
+
+static unsigned
+aarch64_prep_args (struct call_context *context, unsigned char *stack,
+ extended_cif *ecif)
+{
+ int i;
+ struct arg_state state;
+
+ arg_init (&state, ALIGN(ecif->cif->bytes, 16));
+
+ for (i = 0; i < ecif->cif->nargs; i++)
+ {
+ ffi_type *ty = ecif->cif->arg_types[i];
+ switch (ty->type)
+ {
+ case FFI_TYPE_VOID:
+ FFI_ASSERT (0);
+ break;
+
+ /* If the argument is a basic type the argument is allocated to an
+ appropriate register, or if none are available, to the stack. */
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ copy_to_register_or_stack (context, stack, &state,
+ ecif->avalue[i], ty->type);
+ break;
+
+ case FFI_TYPE_STRUCT:
+ if (is_hfa (ty))
+ {
+ copy_hfa_to_reg_or_stack (ecif->avalue[i], ty, context,
+ stack, &state);
+ }
+ else if (ty->size > 16)
+ {
+ /* If the argument is a composite type that is larger than 16
+ bytes, then the argument has been copied to memory, and
+ the argument is replaced by a pointer to the copy. */
+
+ copy_to_register_or_stack (context, stack, &state,
+ &(ecif->avalue[i]), FFI_TYPE_POINTER);
+ }
+ else if (available_x (&state) >= (ty->size + 7) / 8)
+ {
+ /* If the argument is a composite type and the size in
+ double-words is not more than the number of available
+ X registers, then the argument is copied into consecutive
+ X registers. */
+ int j;
+ for (j = 0; j < (ty->size + 7) / 8; j++)
+ {
+ memcpy (allocate_to_x (context, &state),
+ &(((UINT64 *) ecif->avalue[i])[j]),
+ sizeof (UINT64));
+ }
+ }
+ else
+ {
+ /* Otherwise, there are insufficient X registers. Further X
+ register allocations are prevented, the NSAA is adjusted
+ (by allocate_to_stack ()) and the argument is copied to
+ memory at the adjusted NSAA. */
+ state.ngrn = N_X_ARG_REG;
+
+ memcpy (allocate_to_stack (&state, stack, ty->alignment,
+ ty->size), ecif->avalue + i, ty->size);
+ }
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+ }
+
+ return ecif->cif->aarch64_flags;
+}
+
+ffi_status
+ffi_prep_cif_machdep (ffi_cif *cif)
+{
+ /* Round the stack up to a multiple of the stack alignment requirement. */
+ cif->bytes =
+ (cif->bytes + (AARCH64_STACK_ALIGN - 1)) & ~ (AARCH64_STACK_ALIGN - 1);
+
+  /* Initialize our flags.  We are interested in whether this CIF will touch a
+     vector register; if so, we will enable context save and load for
+     those registers, otherwise not.  This is intended to be friendly
+     to lazy float context switching in the kernel.  */
+ cif->aarch64_flags = 0;
+
+ if (is_v_register_candidate (cif->rtype))
+ {
+ cif->aarch64_flags |= AARCH64_FFI_WITH_V;
+ }
+ else
+ {
+ int i;
+ for (i = 0; i < cif->nargs; i++)
+ if (is_v_register_candidate (cif->arg_types[i]))
+ {
+ cif->aarch64_flags |= AARCH64_FFI_WITH_V;
+ break;
+ }
+ }
+
+ return FFI_OK;
+}
+
+/* Call a function with the provided arguments and capture the return
+ value. */
+void
+ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
+{
+ extended_cif ecif;
+
+ ecif.cif = cif;
+ ecif.avalue = avalue;
+ ecif.rvalue = rvalue;
+
+ switch (cif->abi)
+ {
+ case FFI_SYSV:
+ {
+ struct call_context context;
+ unsigned stack_bytes;
+
+ /* Figure out the total amount of stack space we need, the
+        /* Figure out the total amount of stack space we need; the
+           call frame space above needs to be 16-byte aligned to
+           ensure correct alignment of the first object inserted in
+           that space, hence the ALIGN applied to cif->bytes.  */
+
+ memset (&context, 0, sizeof (context));
+ if (is_register_candidate (cif->rtype))
+ {
+ ffi_call_SYSV (aarch64_prep_args, &context, &ecif, stack_bytes, fn);
+ switch (cif->rtype->type)
+ {
+ case FFI_TYPE_VOID:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT64:
+ {
+ void *addr = get_basic_type_addr (cif->rtype->type,
+ &context, 0);
+ copy_basic_type (rvalue, addr, cif->rtype->type);
+ break;
+ }
+
+ case FFI_TYPE_STRUCT:
+ if (is_hfa (cif->rtype))
+ {
+ int j;
+ unsigned short type = get_homogeneous_type (cif->rtype);
+ unsigned elems = element_count (cif->rtype);
+ for (j = 0; j < elems; j++)
+ {
+ void *reg = get_basic_type_addr (type, &context, j);
+ copy_basic_type (rvalue, reg, type);
+ rvalue += get_basic_type_size (type);
+ }
+ }
+ else if ((cif->rtype->size + 7) / 8 < N_X_ARG_REG)
+ {
+ unsigned size = ALIGN (cif->rtype->size, sizeof (UINT64));
+ memcpy (rvalue, get_x_addr (&context, 0), size);
+ }
+ else
+ {
+ FFI_ASSERT (0);
+ }
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+ }
+ else
+ {
+ memcpy (get_x_addr (&context, 8), &rvalue, sizeof (UINT64));
+ ffi_call_SYSV (aarch64_prep_args, &context, &ecif,
+ stack_bytes, fn);
+ }
+ break;
+ }
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+}
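/* Illustration only (editorial sketch, not part of the patch): the ffi_call
   entry point above is driven by a cif prepared with ffi_prep_cif.  A
   minimal non-variadic caller, where add_ints is a hypothetical callee: */

static int add_ints (int a, int b) { return a + b; }

int call_add (void)
{
  ffi_cif cif;
  ffi_type *atypes[] = { &ffi_type_sint32, &ffi_type_sint32 };
  int a = 2, b = 3;
  void *avalues[] = { &a, &b };
  ffi_arg result;                  /* integral results widen to ffi_arg */

  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 2,
                    &ffi_type_sint32, atypes) != FFI_OK)
    return -1;
  ffi_call (&cif, FFI_FN (add_ints), &result, avalues);
  return (int) result;             /* 5 */
}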
+
+static unsigned char trampoline [] =
+{ 0x70, 0x00, 0x00, 0x58, /* ldr x16, 1f */
+ 0x91, 0x00, 0x00, 0x10, /* adr x17, 2f */
+ 0x00, 0x02, 0x1f, 0xd6 /* br x16 */
+};
+
+/* Build a trampoline. */
+
+#define FFI_INIT_TRAMPOLINE(TRAMP,FUN,CTX,FLAGS) \
+ ({unsigned char *__tramp = (unsigned char*)(TRAMP); \
+ UINT64 __fun = (UINT64)(FUN); \
+ UINT64 __ctx = (UINT64)(CTX); \
+ UINT64 __flags = (UINT64)(FLAGS); \
+ memcpy (__tramp, trampoline, sizeof (trampoline)); \
+ memcpy (__tramp + 12, &__fun, sizeof (__fun)); \
+ memcpy (__tramp + 20, &__ctx, sizeof (__ctx)); \
+ memcpy (__tramp + 28, &__flags, sizeof (__flags)); \
+ __clear_cache(__tramp, __tramp + FFI_TRAMPOLINE_SIZE); \
+ })
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure* closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data,
+ void *codeloc)
+{
+ if (cif->abi != FFI_SYSV)
+ return FFI_BAD_ABI;
+
+ FFI_INIT_TRAMPOLINE (&closure->tramp[0], &ffi_closure_SYSV, codeloc,
+ cif->aarch64_flags);
+
+ closure->cif = cif;
+ closure->user_data = user_data;
+ closure->fun = fun;
+
+ return FFI_OK;
+}
+
+/* Primary handler to setup and invoke a function within a closure.
+
+ A closure when invoked enters via the assembler wrapper
+ ffi_closure_SYSV(). The wrapper allocates a call context on the
+ stack, saves the interesting registers (from the perspective of
+ the calling convention) into the context then passes control to
+ ffi_closure_SYSV_inner() passing the saved context and a pointer to
+ the stack at the point ffi_closure_SYSV() was invoked.
+
+ On the return path the assembler wrapper will reload call context
+ registers.
+
+ ffi_closure_SYSV_inner() marshals the call context into ffi value
+ descriptors, invokes the wrapped function, then marshals the return
+ value back into the call context. */
+
+void
+ffi_closure_SYSV_inner (ffi_closure *closure, struct call_context *context,
+ void *stack)
+{
+ ffi_cif *cif = closure->cif;
+ void **avalue = (void**) alloca (cif->nargs * sizeof (void*));
+ void *rvalue = NULL;
+ int i;
+ struct arg_state state;
+
+ arg_init (&state, ALIGN(cif->bytes, 16));
+
+ for (i = 0; i < cif->nargs; i++)
+ {
+ ffi_type *ty = cif->arg_types[i];
+
+ switch (ty->type)
+ {
+ case FFI_TYPE_VOID:
+ FFI_ASSERT (0);
+ break;
+
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ avalue[i] = allocate_to_register_or_stack (context, stack,
+ &state, ty->type);
+ break;
+
+ case FFI_TYPE_STRUCT:
+ if (is_hfa (ty))
+ {
+ unsigned n = element_count (ty);
+ if (available_v (&state) < n)
+ {
+ state.nsrn = N_V_ARG_REG;
+ avalue[i] = allocate_to_stack (&state, stack, ty->alignment,
+ ty->size);
+ }
+ else
+ {
+ switch (get_homogeneous_type (ty))
+ {
+ case FFI_TYPE_FLOAT:
+ {
+ /* Eeek! We need a pointer to the structure,
+ however the homogeneous float elements are
+ being passed in individual S registers,
+ therefore the structure is not represented as
+ a contiguous sequence of bytes in our saved
+ register context. We need to fake up a copy
+ of the structure laid out in memory
+ correctly. The fake can be tossed once the
+ closure function has returned hence alloca()
+ is sufficient. */
+ int j;
+ UINT32 *p = avalue[i] = alloca (ty->size);
+ for (j = 0; j < element_count (ty); j++)
+ memcpy (&p[j],
+ allocate_to_s (context, &state),
+ sizeof (*p));
+ break;
+ }
+
+ case FFI_TYPE_DOUBLE:
+ {
+ /* Eeek! We need a pointer to the structure,
+ however the homogeneous double elements are
+ being passed in individual D registers,
+ therefore the structure is not represented as
+ a contiguous sequence of bytes in our saved
+ register context. We need to fake up a copy
+ of the structure laid out in memory
+ correctly. The fake can be tossed once the
+ closure function has returned, hence alloca()
+ is sufficient. */
+ int j;
+ UINT64 *p = avalue[i] = alloca (ty->size);
+ for (j = 0; j < element_count (ty); j++)
+ memcpy (&p[j],
+ allocate_to_d (context, &state),
+ sizeof (*p));
+ break;
+ }
+
+ case FFI_TYPE_LONGDOUBLE:
+ memcpy (&avalue[i],
+ allocate_to_v (context, &state),
+ sizeof (*avalue));
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+ }
+ }
+ else if (ty->size > 16)
+ {
+ /* Replace a composite type of size greater than 16 bytes with
+ a pointer. */
+ memcpy (&avalue[i],
+ allocate_to_register_or_stack (context, stack,
+ &state, FFI_TYPE_POINTER),
+ sizeof (avalue[i]));
+ }
+ else if (available_x (&state) >= (ty->size + 7) / 8)
+ {
+ avalue[i] = get_x_addr (context, state.ngrn);
+ state.ngrn += (ty->size + 7) / 8;
+ }
+ else
+ {
+ state.ngrn = N_X_ARG_REG;
+
+ avalue[i] = allocate_to_stack (&state, stack, ty->alignment,
+ ty->size);
+ }
+ break;
+
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+ }
+
+ /* Figure out where the return value will be passed, either in
+ registers or in a memory block allocated by the caller and passed
+ in x8. */
+
+ if (is_register_candidate (cif->rtype))
+ {
+ /* Register candidates are *always* returned in registers. */
+
+ /* Allocate a scratchpad for the return value; we will let the
+ callee scribble the result into the scratchpad, then move the
+ contents into the appropriate return value location for the
+ calling convention. */
+ rvalue = alloca (cif->rtype->size);
+ (closure->fun) (cif, rvalue, avalue, closure->user_data);
+
+ /* Copy the return value into the call context so that it is returned
+ as expected to our caller. */
+ switch (cif->rtype->type)
+ {
+ case FFI_TYPE_VOID:
+ break;
+
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_LONGDOUBLE:
+ {
+ void *addr = get_basic_type_addr (cif->rtype->type, context, 0);
+ copy_basic_type (addr, rvalue, cif->rtype->type);
+ break;
+ }
+ case FFI_TYPE_STRUCT:
+ if (is_hfa (cif->rtype))
+ {
+ int i;
+ unsigned short type = get_homogeneous_type (cif->rtype);
+ unsigned elems = element_count (cif->rtype);
+ for (i = 0; i < elems; i++)
+ {
+ void *reg = get_basic_type_addr (type, context, i);
+ copy_basic_type (reg, rvalue, type);
+ rvalue += get_basic_type_size (type);
+ }
+ }
+ else if ((cif->rtype->size + 7) / 8 < N_X_ARG_REG)
+ {
+ unsigned size = ALIGN (cif->rtype->size, sizeof (UINT64)) ;
+ memcpy (get_x_addr (context, 0), rvalue, size);
+ }
+ else
+ {
+ FFI_ASSERT (0);
+ }
+ break;
+ default:
+ FFI_ASSERT (0);
+ break;
+ }
+ }
+ else
+ {
+ memcpy (&rvalue, get_x_addr (context, 8), sizeof (UINT64));
+ (closure->fun) (cif, rvalue, avalue, closure->user_data);
+ }
+}
+
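The ffi_call() implementation above is only reached through a prepared ffi_cif. A minimal caller-side sketch (not part of the patch; it uses only the public libffi entry points ffi_prep_cif(), ffi_call() and the FFI_FN() macro) shows how the port is driven:

    #include <ffi.h>
    #include <stdio.h>

    static int add_ints (int a, int b) { return a + b; }

    int main (void)
    {
      ffi_cif cif;
      ffi_type *arg_types[2] = { &ffi_type_sint, &ffi_type_sint };
      int a = 2, b = 40;
      void *arg_values[2] = { &a, &b };
      ffi_arg result;   /* integral results are widened to ffi_arg */

      /* ffi_prep_cif() calls the per-target ffi_prep_cif_machdep() above,
         which sets cif->aarch64_flags before any call is made.  */
      if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, arg_types) != FFI_OK)
        return 1;

      /* ffi_call() lands in the target ffi_call() above, which marshals the
         arguments into a call_context and enters ffi_call_SYSV from sysv.S.  */
      ffi_call (&cif, FFI_FN (add_ints), &result, arg_values);
      printf ("%d\n", (int) result);
      return 0;
    }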
diff --git a/Modules/_ctypes/libffi/src/aarch64/ffitarget.h b/Modules/_ctypes/libffi/src/aarch64/ffitarget.h
new file mode 100644
index 0000000..6f1a348
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/aarch64/ffitarget.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+#ifndef LIBFFI_ASM
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+
+typedef enum ffi_abi
+ {
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
+ } ffi_abi;
+#endif
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+#define FFI_TRAMPOLINE_SIZE 36
+#define FFI_NATIVE_RAW_API 0
+
+/* ---- Internal ---- */
+
+
+#define FFI_EXTRA_CIF_FIELDS unsigned aarch64_flags
+
+#define AARCH64_FFI_WITH_V_BIT 0
+
+#define AARCH64_N_XREG 32
+#define AARCH64_N_VREG 32
+#define AARCH64_CALL_CONTEXT_SIZE (AARCH64_N_XREG * 8 + AARCH64_N_VREG * 16)
+
+#endif
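The three constants above fix the size of the register save area shared between ffi.c and sysv.S. A sketch of the layout they imply (an illustration for an aarch64 build, not code from the patch):

    #include <ffi.h>   /* pulls in ffitarget.h and the AARCH64_* constants */

    struct aarch64_call_context_sketch
    {
      unsigned long long x[AARCH64_N_XREG];     /* 32 X-register slots, offsets 0 .. 8*32-1 */
      unsigned char      v[AARCH64_N_VREG][16]; /* 32 V-register slots, offsets 8*32 onward */
    };

    /* sizeof (struct aarch64_call_context_sketch) == AARCH64_CALL_CONTEXT_SIZE,
       i.e. 32*8 + 32*16 == 768 bytes.  The "#8*32 + n" literals in the ldp/stp
       instructions of sysv.S address the V-register half of this area, and x8
       (the indirect-result register) lives at offset 8*8.  */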
diff --git a/Modules/_ctypes/libffi/src/aarch64/sysv.S b/Modules/_ctypes/libffi/src/aarch64/sysv.S
new file mode 100644
index 0000000..b8cd421
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/aarch64/sysv.S
@@ -0,0 +1,307 @@
+/* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+``Software''), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+
+#define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off
+#define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off
+#define cfi_restore(reg) .cfi_restore reg
+#define cfi_def_cfa_register(reg) .cfi_def_cfa_register reg
+
+ .text
+ .globl ffi_call_SYSV
+ .type ffi_call_SYSV, #function
+
+/* ffi_call_SYSV()
+
+ Create a stack frame, set up an argument context, call the callee
+ and extract the result.
+
+ The maximum required argument stack size is provided;
+ ffi_call_SYSV() allocates that stack space then calls the
+ prepare_fn to populate the register context and stack. The
+ argument passing registers are loaded from the register
+ context and the callee is called; on return the result
+ passing registers are saved back to the context. Our caller
+ will extract the return value from the final state of the
+ saved register context.
+
+ Prototype:
+
+ extern unsigned
+ ffi_call_SYSV (void (*)(struct call_context *context, unsigned char *,
+ extended_cif *),
+ struct call_context *context,
+ extended_cif *,
+ unsigned required_stack_size,
+ void (*fn)(void));
+
+ Therefore on entry we have:
+
+ x0 prepare_fn
+ x1 &context
+ x2 &ecif
+ x3 bytes
+ x4 fn
+
+ This function uses the following stack frame layout:
+
+ ==
+ saved x30(lr)
+ x29(fp)-> saved x29(fp)
+ saved x24
+ saved x23
+ saved x22
+ sp' -> saved x21
+ ...
+ sp -> (constructed callee stack arguments)
+ ==
+
+ Voila! */
+
+#define ffi_call_SYSV_FS (8 * 4)
+
+ .cfi_startproc
+ffi_call_SYSV:
+ stp x29, x30, [sp, #-16]!
+ cfi_adjust_cfa_offset (16)
+ cfi_rel_offset (x29, 0)
+ cfi_rel_offset (x30, 8)
+
+ mov x29, sp
+ cfi_def_cfa_register (x29)
+ sub sp, sp, #ffi_call_SYSV_FS
+
+ stp x21, x22, [sp, 0]
+ cfi_rel_offset (x21, 0 - ffi_call_SYSV_FS)
+ cfi_rel_offset (x22, 8 - ffi_call_SYSV_FS)
+
+ stp x23, x24, [sp, 16]
+ cfi_rel_offset (x23, 16 - ffi_call_SYSV_FS)
+ cfi_rel_offset (x24, 24 - ffi_call_SYSV_FS)
+
+ mov x21, x1
+ mov x22, x2
+ mov x24, x4
+
+ /* Allocate the stack space for the actual arguments. Many
+ arguments will be passed in registers, but we assume the
+ worst case and allocate sufficient stack for ALL of
+ the arguments. */
+ sub sp, sp, x3
+
+ /* unsigned (*prepare_fn) (struct call_context *context,
+ unsigned char *stack, extended_cif *ecif);
+ */
+ mov x23, x0
+ mov x0, x1
+ mov x1, sp
+ /* x2 already in place */
+ blr x23
+
+ /* Preserve the flags returned. */
+ mov x23, x0
+
+ /* Figure out if we should touch the vector registers. */
+ tbz x23, #AARCH64_FFI_WITH_V_BIT, 1f
+
+ /* Load the vector argument passing registers. */
+ ldp q0, q1, [x21, #8*32 + 0]
+ ldp q2, q3, [x21, #8*32 + 32]
+ ldp q4, q5, [x21, #8*32 + 64]
+ ldp q6, q7, [x21, #8*32 + 96]
+1:
+ /* Load the core argument passing registers. */
+ ldp x0, x1, [x21, #0]
+ ldp x2, x3, [x21, #16]
+ ldp x4, x5, [x21, #32]
+ ldp x6, x7, [x21, #48]
+
+ /* Don't forget x8 which may be holding the address of a return buffer.
+ */
+ ldr x8, [x21, #8*8]
+
+ blr x24
+
+ /* Save the core argument passing registers. */
+ stp x0, x1, [x21, #0]
+ stp x2, x3, [x21, #16]
+ stp x4, x5, [x21, #32]
+ stp x6, x7, [x21, #48]
+
+ /* Note nothing useful ever comes back in x8! */
+
+ /* Figure out if we should touch the vector registers. */
+ tbz x23, #AARCH64_FFI_WITH_V_BIT, 1f
+
+ /* Save the vector argument passing registers. */
+ stp q0, q1, [x21, #8*32 + 0]
+ stp q2, q3, [x21, #8*32 + 32]
+ stp q4, q5, [x21, #8*32 + 64]
+ stp q6, q7, [x21, #8*32 + 96]
+1:
+ /* All done, unwind our stack frame. */
+ ldp x21, x22, [x29, # - ffi_call_SYSV_FS]
+ cfi_restore (x21)
+ cfi_restore (x22)
+
+ ldp x23, x24, [x29, # - ffi_call_SYSV_FS + 16]
+ cfi_restore (x23)
+ cfi_restore (x24)
+
+ mov sp, x29
+ cfi_def_cfa_register (sp)
+
+ ldp x29, x30, [sp], #16
+ cfi_adjust_cfa_offset (-16)
+ cfi_restore (x29)
+ cfi_restore (x30)
+
+ ret
+
+ .cfi_endproc
+ .size ffi_call_SYSV, .-ffi_call_SYSV
+
+#define ffi_closure_SYSV_FS (8 * 2 + AARCH64_CALL_CONTEXT_SIZE)
+
+/* ffi_closure_SYSV
+
+ Closure invocation glue. This is the low level code invoked directly by
+ the closure trampoline to set up and call a closure.
+
+ On entry x17 points to a struct trampoline_data, x16 has been clobbered
+ and all other registers are preserved.
+
+ We allocate a call context and save the argument passing registers,
+ then invoke the generic C ffi_closure_SYSV_inner() function to do all
+ the real work; on return we load the result passing registers back from
+ the call context.
+
+ On entry
+
+ extern void
+ ffi_closure_SYSV (struct trampoline_data *);
+
+ struct trampoline_data
+ {
+ UINT64 *ffi_closure;
+ UINT64 flags;
+ };
+
+ This function uses the following stack frame layout:
+
+ ==
+ saved x30(lr)
+ x29(fp)-> saved x29(fp)
+ saved x22
+ saved x21
+ ...
+ sp -> call_context
+ ==
+
+ Voila! */
+
+ .text
+ .globl ffi_closure_SYSV
+ .cfi_startproc
+ffi_closure_SYSV:
+ stp x29, x30, [sp, #-16]!
+ cfi_adjust_cfa_offset (16)
+ cfi_rel_offset (x29, 0)
+ cfi_rel_offset (x30, 8)
+
+ mov x29, sp
+
+ sub sp, sp, #ffi_closure_SYSV_FS
+ cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
+
+ stp x21, x22, [x29, #-16]
+ cfi_rel_offset (x21, 0)
+ cfi_rel_offset (x22, 8)
+
+ /* Load x21 with &call_context. */
+ mov x21, sp
+ /* Preserve our struct trampoline_data * */
+ mov x22, x17
+
+ /* Save the rest of the argument passing registers. */
+ stp x0, x1, [x21, #0]
+ stp x2, x3, [x21, #16]
+ stp x4, x5, [x21, #32]
+ stp x6, x7, [x21, #48]
+ /* Don't forget we may have been given a result scratch pad address.
+ */
+ str x8, [x21, #64]
+
+ /* Figure out if we should touch the vector registers. */
+ ldr x0, [x22, #8]
+ tbz x0, #AARCH64_FFI_WITH_V_BIT, 1f
+
+ /* Save the argument passing vector registers. */
+ stp q0, q1, [x21, #8*32 + 0]
+ stp q2, q3, [x21, #8*32 + 32]
+ stp q4, q5, [x21, #8*32 + 64]
+ stp q6, q7, [x21, #8*32 + 96]
+1:
+ /* Load &ffi_closure. */
+ ldr x0, [x22, #0]
+ mov x1, x21
+ /* Compute the location of the stack at the point that the
+ trampoline was called. */
+ add x2, x29, #16
+
+ bl ffi_closure_SYSV_inner
+
+ /* Figure out if we should touch the vector registers. */
+ ldr x0, [x22, #8]
+ tbz x0, #AARCH64_FFI_WITH_V_BIT, 1f
+
+ /* Load the result passing vector registers. */
+ ldp q0, q1, [x21, #8*32 + 0]
+ ldp q2, q3, [x21, #8*32 + 32]
+ ldp q4, q5, [x21, #8*32 + 64]
+ ldp q6, q7, [x21, #8*32 + 96]
+1:
+ /* Load the result passing core registers. */
+ ldp x0, x1, [x21, #0]
+ ldp x2, x3, [x21, #16]
+ ldp x4, x5, [x21, #32]
+ ldp x6, x7, [x21, #48]
+ /* Note nothing useful is returned in x8. */
+
+ /* We are done, unwind our frame. */
+ ldp x21, x22, [x29, #-16]
+ cfi_restore (x21)
+ cfi_restore (x22)
+
+ mov sp, x29
+ cfi_adjust_cfa_offset (-ffi_closure_SYSV_FS)
+
+ ldp x29, x30, [sp], #16
+ cfi_adjust_cfa_offset (-16)
+ cfi_restore (x29)
+ cfi_restore (x30)
+
+ ret
+ .cfi_endproc
+ .size ffi_closure_SYSV, .-ffi_closure_SYSV
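The closure half of the port (FFI_INIT_TRAMPOLINE, ffi_prep_closure_loc(), ffi_closure_SYSV and ffi_closure_SYSV_inner()) is exercised through libffi's standard closure API. A minimal sketch (not part of the patch; it assumes the documented ffi_closure_alloc()/ffi_closure_free() allocator):

    #include <ffi.h>
    #include <stdio.h>

    /* Handler with the signature ffi_prep_closure_loc() expects; it receives
       the arguments already unpacked by ffi_closure_SYSV_inner().  */
    static void times_two (ffi_cif *cif, void *ret, void **args, void *user_data)
    {
      (void) cif; (void) user_data;
      *(ffi_arg *) ret = 2 * *(int *) args[0];
    }

    int main (void)
    {
      ffi_cif cif;
      ffi_type *arg_types[1] = { &ffi_type_sint };
      void *code;
      ffi_closure *closure = ffi_closure_alloc (sizeof (ffi_closure), &code);
      int (*fn) (int);

      if (closure == NULL
          || ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, arg_types) != FFI_OK
          || ffi_prep_closure_loc (closure, &cif, times_two, NULL, code) != FFI_OK)
        return 1;

      /* Calling through 'code' enters the trampoline written by
         FFI_INIT_TRAMPOLINE, which branches to ffi_closure_SYSV in sysv.S.  */
      fn = (int (*)(int)) code;
      printf ("%d\n", fn (21));

      ffi_closure_free (closure);
      return 0;
    }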
diff --git a/Modules/_ctypes/libffi/src/alpha/ffi.c b/Modules/_ctypes/libffi/src/alpha/ffi.c
index 8d6b2ba..192f691 100644
--- a/Modules/_ctypes/libffi/src/alpha/ffi.c
+++ b/Modules/_ctypes/libffi/src/alpha/ffi.c
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------------
- ffi.c - Copyright (c) 1998, 2001, 2007, 2008 Red Hat, Inc.
+ ffi.c - Copyright (c) 2012 Anthony Green
+ Copyright (c) 1998, 2001, 2007, 2008 Red Hat, Inc.
Alpha Foreign Function Interface
@@ -178,6 +179,9 @@ ffi_prep_closure_loc (ffi_closure* closure,
{
unsigned int *tramp;
+ if (cif->abi != FFI_OSF)
+ return FFI_BAD_ABI;
+
tramp = (unsigned int *) &closure->tramp[0];
tramp[0] = 0x47fb0401; /* mov $27,$1 */
tramp[1] = 0xa77b0010; /* ldq $27,16($27) */
diff --git a/Modules/_ctypes/libffi/src/alpha/ffitarget.h b/Modules/_ctypes/libffi/src/alpha/ffitarget.h
index 7d06eb0..af145bc 100644
--- a/Modules/_ctypes/libffi/src/alpha/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/alpha/ffitarget.h
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 1996-2003 Red Hat, Inc.
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 1996-2003 Red Hat, Inc.
Target configuration macros for Alpha.
Permission is hereby granted, free of charge, to any person obtaining
@@ -27,6 +28,10 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
#ifndef LIBFFI_ASM
typedef unsigned long ffi_arg;
typedef signed long ffi_sarg;
diff --git a/Modules/_ctypes/libffi/src/alpha/osf.S b/Modules/_ctypes/libffi/src/alpha/osf.S
index d0e7782..6b9f4df 100644
--- a/Modules/_ctypes/libffi/src/alpha/osf.S
+++ b/Modules/_ctypes/libffi/src/alpha/osf.S
@@ -1,5 +1,5 @@
/* -----------------------------------------------------------------------
- osf.S - Copyright (c) 1998, 2001, 2007, 2008 Red Hat
+ osf.S - Copyright (c) 1998, 2001, 2007, 2008, 2011 Red Hat
Alpha/OSF Foreign Function Interface
@@ -299,33 +299,51 @@ $load_table:
#endif
#ifdef __ELF__
+# define UA_SI .4byte
+# define FDE_ENCODING 0x1b /* pcrel sdata4 */
+# define FDE_ENCODE(X) .4byte X-.
+# define FDE_ARANGE(X) .4byte X
+#elif defined __osf__
+# define UA_SI .align 0; .long
+# define FDE_ENCODING 0x50 /* aligned absolute */
+# define FDE_ENCODE(X) .align 3; .quad X
+# define FDE_ARANGE(X) .align 0; .quad X
+#endif
+
+#ifdef __ELF__
.section .eh_frame,EH_FRAME_FLAGS,@progbits
+#elif defined __osf__
+ .data
+ .align 3
+ .globl _GLOBAL__F_ffi_call_osf
+_GLOBAL__F_ffi_call_osf:
+#endif
__FRAME_BEGIN__:
- .4byte $LECIE1-$LSCIE1 # Length of Common Information Entry
+ UA_SI $LECIE1-$LSCIE1 # Length of Common Information Entry
$LSCIE1:
- .4byte 0x0 # CIE Identifier Tag
+ UA_SI 0x0 # CIE Identifier Tag
.byte 0x1 # CIE Version
.ascii "zR\0" # CIE Augmentation
.byte 0x1 # uleb128 0x1; CIE Code Alignment Factor
.byte 0x78 # sleb128 -8; CIE Data Alignment Factor
.byte 26 # CIE RA Column
.byte 0x1 # uleb128 0x1; Augmentation size
- .byte 0x1b # FDE Encoding (pcrel sdata4)
+ .byte FDE_ENCODING # FDE Encoding
.byte 0xc # DW_CFA_def_cfa
.byte 30 # uleb128 column 30
.byte 0 # uleb128 offset 0
.align 3
$LECIE1:
$LSFDE1:
- .4byte $LEFDE1-$LASFDE1 # FDE Length
+ UA_SI $LEFDE1-$LASFDE1 # FDE Length
$LASFDE1:
- .4byte $LASFDE1-__FRAME_BEGIN__ # FDE CIE offset
- .4byte $LFB1-. # FDE initial location
- .4byte $LFE1-$LFB1 # FDE address range
+ UA_SI $LASFDE1-__FRAME_BEGIN__ # FDE CIE offset
+ FDE_ENCODE($LFB1) # FDE initial location
+ FDE_ARANGE($LFE1-$LFB1) # FDE address range
.byte 0x0 # uleb128 0x0; Augmentation size
.byte 0x4 # DW_CFA_advance_loc4
- .4byte $LCFI1-$LFB1
+ UA_SI $LCFI1-$LFB1
.byte 0x9a # DW_CFA_offset, column 26
.byte 4 # uleb128 4*-8
.byte 0x8f # DW_CFA_offset, column 15
@@ -335,32 +353,35 @@ $LASFDE1:
.byte 32 # uleb128 offset 32
.byte 0x4 # DW_CFA_advance_loc4
- .4byte $LCFI2-$LCFI1
+ UA_SI $LCFI2-$LCFI1
.byte 0xda # DW_CFA_restore, column 26
.align 3
$LEFDE1:
$LSFDE3:
- .4byte $LEFDE3-$LASFDE3 # FDE Length
+ UA_SI $LEFDE3-$LASFDE3 # FDE Length
$LASFDE3:
- .4byte $LASFDE3-__FRAME_BEGIN__ # FDE CIE offset
- .4byte $LFB2-. # FDE initial location
- .4byte $LFE2-$LFB2 # FDE address range
+ UA_SI $LASFDE3-__FRAME_BEGIN__ # FDE CIE offset
+ FDE_ENCODE($LFB2) # FDE initial location
+ FDE_ARANGE($LFE2-$LFB2) # FDE address range
.byte 0x0 # uleb128 0x0; Augmentation size
.byte 0x4 # DW_CFA_advance_loc4
- .4byte $LCFI5-$LFB2
+ UA_SI $LCFI5-$LFB2
.byte 0xe # DW_CFA_def_cfa_offset
.byte 0x80,0x1 # uleb128 128
.byte 0x4 # DW_CFA_advance_loc4
- .4byte $LCFI6-$LCFI5
+ UA_SI $LCFI6-$LCFI5
.byte 0x9a # DW_CFA_offset, column 26
.byte 16 # uleb128 offset 16*-8
.align 3
$LEFDE3:
+#if defined __osf__
+ .align 0
+ .long 0 # End of Table
+#endif
-#ifdef __linux__
+#if defined __ELF__ && defined __linux__
.section .note.GNU-stack,"",@progbits
#endif
-#endif
diff --git a/Modules/_ctypes/libffi/src/arm/ffi.c b/Modules/_ctypes/libffi/src/arm/ffi.c
index f6a6475..3ccceb9 100644
--- a/Modules/_ctypes/libffi/src/arm/ffi.c
+++ b/Modules/_ctypes/libffi/src/arm/ffi.c
@@ -1,6 +1,10 @@
/* -----------------------------------------------------------------------
- ffi.c - Copyright (c) 1998, 2008 Red Hat, Inc.
-
+ ffi.c - Copyright (c) 2011 Timothy Wall
+ Copyright (c) 2011 Plausible Labs Cooperative, Inc.
+ Copyright (c) 2011 Anthony Green
+ Copyright (c) 2011 Free Software Foundation
+ Copyright (c) 1998, 2008, 2011 Red Hat, Inc.
+
ARM Foreign Function Interface
Permission is hereby granted, free of charge, to any person obtaining
@@ -29,12 +33,20 @@
#include <stdlib.h>
-/* ffi_prep_args is called by the assembly routine once stack space
- has been allocated for the function's arguments */
+/* Forward declares. */
+static int vfp_type_p (ffi_type *);
+static void layout_vfp_args (ffi_cif *);
-void ffi_prep_args(char *stack, extended_cif *ecif)
+/* ffi_prep_args is called by the assembly routine once stack space
+ has been allocated for the function's arguments
+
+ The vfp_space parameter is the load area for VFP regs, the return
+ value is cif->vfp_used (word bitset of VFP regs used for passing
+ arguments). These are only used for the VFP hard-float ABI.
+*/
+int ffi_prep_args(char *stack, extended_cif *ecif, float *vfp_space)
{
- register unsigned int i;
+ register unsigned int i, vi = 0;
register void **p_argv;
register char *argp;
register ffi_type **p_arg;
@@ -53,10 +65,31 @@ void ffi_prep_args(char *stack, extended_cif *ecif)
i--, p_arg++)
{
size_t z;
+ size_t alignment;
+
+ /* Allocated in VFP registers. */
+ if (ecif->cif->abi == FFI_VFP
+ && vi < ecif->cif->vfp_nargs && vfp_type_p (*p_arg))
+ {
+ float* vfp_slot = vfp_space + ecif->cif->vfp_args[vi++];
+ if ((*p_arg)->type == FFI_TYPE_FLOAT)
+ *((float*)vfp_slot) = *((float*)*p_argv);
+ else if ((*p_arg)->type == FFI_TYPE_DOUBLE)
+ *((double*)vfp_slot) = *((double*)*p_argv);
+ else
+ memcpy(vfp_slot, *p_argv, (*p_arg)->size);
+ p_argv++;
+ continue;
+ }
/* Align if necessary */
- if (((*p_arg)->alignment - 1) & (unsigned) argp) {
- argp = (char *) ALIGN(argp, (*p_arg)->alignment);
+ alignment = (*p_arg)->alignment;
+#ifdef _WIN32_WCE
+ if (alignment > 4)
+ alignment = 4;
+#endif
+ if ((alignment - 1) & (unsigned) argp) {
+ argp = (char *) ALIGN(argp, alignment);
}
if ((*p_arg)->type == FFI_TYPE_STRUCT)
@@ -103,13 +136,15 @@ void ffi_prep_args(char *stack, extended_cif *ecif)
p_argv++;
argp += z;
}
-
- return;
+
+ /* Indicate the VFP registers used. */
+ return ecif->cif->vfp_used;
}
/* Perform machine dependent cif processing */
ffi_status ffi_prep_cif_machdep(ffi_cif *cif)
{
+ int type_code;
/* Round the stack up to a multiple of 8 bytes. This isn't needed
everywhere, but it is on some platforms, and it doesn't harm anything
when it isn't needed. */
@@ -130,7 +165,14 @@ ffi_status ffi_prep_cif_machdep(ffi_cif *cif)
break;
case FFI_TYPE_STRUCT:
- if (cif->rtype->size <= 4)
+ if (cif->abi == FFI_VFP
+ && (type_code = vfp_type_p (cif->rtype)) != 0)
+ {
+ /* A Composite Type passed in VFP registers, either
+ FFI_TYPE_STRUCT_VFP_FLOAT or FFI_TYPE_STRUCT_VFP_DOUBLE. */
+ cif->flags = (unsigned) type_code;
+ }
+ else if (cif->rtype->size <= 4)
/* A Composite Type not larger than 4 bytes is returned in r0. */
cif->flags = (unsigned)FFI_TYPE_INT;
else
@@ -145,11 +187,30 @@ ffi_status ffi_prep_cif_machdep(ffi_cif *cif)
break;
}
+ /* Map out the register placements of VFP register args.
+ The VFP hard-float calling conventions are slightly more sophisticated than
+ the base calling conventions, so we do it here instead of in ffi_prep_args(). */
+ if (cif->abi == FFI_VFP)
+ layout_vfp_args (cif);
+
return FFI_OK;
}
-extern void ffi_call_SYSV(void (*)(char *, extended_cif *), extended_cif *,
- unsigned, unsigned, unsigned *, void (*fn)(void));
+/* Perform machine dependent cif processing for variadic calls */
+ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs)
+{
+ /* VFP variadic calls actually use the SYSV ABI */
+ if (cif->abi == FFI_VFP)
+ cif->abi = FFI_SYSV;
+
+ return ffi_prep_cif_machdep(cif);
+}
+
+/* Prototypes for assembly functions, in sysv.S */
+extern void ffi_call_SYSV (void (*fn)(void), extended_cif *, unsigned, unsigned, unsigned *);
+extern void ffi_call_VFP (void (*fn)(void), extended_cif *, unsigned, unsigned, unsigned *);
void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
{
@@ -157,6 +218,8 @@ void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
int small_struct = (cif->flags == FFI_TYPE_INT
&& cif->rtype->type == FFI_TYPE_STRUCT);
+ int vfp_struct = (cif->flags == FFI_TYPE_STRUCT_VFP_FLOAT
+ || cif->flags == FFI_TYPE_STRUCT_VFP_DOUBLE);
ecif.cif = cif;
ecif.avalue = avalue;
@@ -173,38 +236,53 @@ void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
}
else if (small_struct)
ecif.rvalue = &temp;
+ else if (vfp_struct)
+ {
+ /* Largest case is double x 4. */
+ ecif.rvalue = alloca(32);
+ }
else
ecif.rvalue = rvalue;
switch (cif->abi)
{
case FFI_SYSV:
- ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, cif->flags, ecif.rvalue,
- fn);
+ ffi_call_SYSV (fn, &ecif, cif->bytes, cif->flags, ecif.rvalue);
+ break;
+ case FFI_VFP:
+#ifdef __ARM_EABI__
+ ffi_call_VFP (fn, &ecif, cif->bytes, cif->flags, ecif.rvalue);
break;
+#endif
+
default:
FFI_ASSERT(0);
break;
}
if (small_struct)
memcpy (rvalue, &temp, cif->rtype->size);
+ else if (vfp_struct)
+ memcpy (rvalue, ecif.rvalue, cif->rtype->size);
}
/** private members **/
static void ffi_prep_incoming_args_SYSV (char *stack, void **ret,
- void** args, ffi_cif* cif);
+ void** args, ffi_cif* cif, float *vfp_stack);
void ffi_closure_SYSV (ffi_closure *);
+void ffi_closure_VFP (ffi_closure *);
+
/* This function is jumped to by the trampoline */
unsigned int
-ffi_closure_SYSV_inner (closure, respp, args)
+ffi_closure_SYSV_inner (closure, respp, args, vfp_args)
ffi_closure *closure;
void **respp;
void *args;
+ void *vfp_args;
{
// our various things...
ffi_cif *cif;
@@ -219,7 +297,7 @@ ffi_closure_SYSV_inner (closure, respp, args)
* a structure, it will re-set RESP to point to the
* structure return address. */
- ffi_prep_incoming_args_SYSV(args, respp, arg_area, cif);
+ ffi_prep_incoming_args_SYSV(args, respp, arg_area, cif, vfp_args);
(closure->fun) (cif, *respp, arg_area, closure->user_data);
@@ -229,10 +307,12 @@ ffi_closure_SYSV_inner (closure, respp, args)
/*@-exportheader@*/
static void
ffi_prep_incoming_args_SYSV(char *stack, void **rvalue,
- void **avalue, ffi_cif *cif)
+ void **avalue, ffi_cif *cif,
+ /* Used only under VFP hard-float ABI. */
+ float *vfp_stack)
/*@=exportheader@*/
{
- register unsigned int i;
+ register unsigned int i, vi = 0;
register void **p_argv;
register char *argp;
register ffi_type **p_arg;
@@ -249,10 +329,23 @@ ffi_prep_incoming_args_SYSV(char *stack, void **rvalue,
for (i = cif->nargs, p_arg = cif->arg_types; (i != 0); i--, p_arg++)
{
size_t z;
-
- size_t alignment = (*p_arg)->alignment;
+ size_t alignment;
+
+ if (cif->abi == FFI_VFP
+ && vi < cif->vfp_nargs && vfp_type_p (*p_arg))
+ {
+ *p_argv++ = (void*)(vfp_stack + cif->vfp_args[vi++]);
+ continue;
+ }
+
+ alignment = (*p_arg)->alignment;
if (alignment < 4)
alignment = 4;
+#ifdef _WIN32_WCE
+ else
+ if (alignment > 4)
+ alignment = 4;
+#endif
/* Align if necessary */
if ((alignment - 1) & (unsigned) argp) {
argp = (char *) ALIGN(argp, alignment);
@@ -273,18 +366,237 @@ ffi_prep_incoming_args_SYSV(char *stack, void **rvalue,
/* How to make a trampoline. */
+extern unsigned int ffi_arm_trampoline[3];
+
+#if FFI_EXEC_TRAMPOLINE_TABLE
+
+#include <mach/mach.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+extern void *ffi_closure_trampoline_table_page;
+
+typedef struct ffi_trampoline_table ffi_trampoline_table;
+typedef struct ffi_trampoline_table_entry ffi_trampoline_table_entry;
+
+struct ffi_trampoline_table {
+ /* contiguous writable and executable pages */
+ vm_address_t config_page;
+ vm_address_t trampoline_page;
+
+ /* free list tracking */
+ uint16_t free_count;
+ ffi_trampoline_table_entry *free_list;
+ ffi_trampoline_table_entry *free_list_pool;
+
+ ffi_trampoline_table *prev;
+ ffi_trampoline_table *next;
+};
+
+struct ffi_trampoline_table_entry {
+ void *(*trampoline)();
+ ffi_trampoline_table_entry *next;
+};
+
+/* Override the standard architecture trampoline size */
+// XXX TODO - Fix
+#undef FFI_TRAMPOLINE_SIZE
+#define FFI_TRAMPOLINE_SIZE 12
+
+/* The trampoline configuration is placed at 4080 bytes prior to the trampoline's entry point */
+#define FFI_TRAMPOLINE_CODELOC_CONFIG(codeloc) ((void **) (((uint8_t *) codeloc) - 4080));
+
+/* The first 16 bytes of the config page are unused, as they are unaddressable from the trampoline page. */
+#define FFI_TRAMPOLINE_CONFIG_PAGE_OFFSET 16
+
+/* Total number of trampolines that fit in one trampoline table */
+#define FFI_TRAMPOLINE_COUNT ((PAGE_SIZE - FFI_TRAMPOLINE_CONFIG_PAGE_OFFSET) / FFI_TRAMPOLINE_SIZE)
+
+static pthread_mutex_t ffi_trampoline_lock = PTHREAD_MUTEX_INITIALIZER;
+static ffi_trampoline_table *ffi_trampoline_tables = NULL;
+
+static ffi_trampoline_table *
+ffi_trampoline_table_alloc ()
+{
+ ffi_trampoline_table *table = NULL;
+
+ /* Loop until we can allocate two contiguous pages */
+ while (table == NULL) {
+ vm_address_t config_page = 0x0;
+ kern_return_t kt;
+
+ /* Try to allocate two pages */
+ kt = vm_allocate (mach_task_self (), &config_page, PAGE_SIZE*2, VM_FLAGS_ANYWHERE);
+ if (kt != KERN_SUCCESS) {
+ fprintf(stderr, "vm_allocate() failure: %d at %s:%d\n", kt, __FILE__, __LINE__);
+ break;
+ }
+
+ /* Now drop the second half of the allocation to make room for the trampoline table */
+ vm_address_t trampoline_page = config_page+PAGE_SIZE;
+ kt = vm_deallocate (mach_task_self (), trampoline_page, PAGE_SIZE);
+ if (kt != KERN_SUCCESS) {
+ fprintf(stderr, "vm_deallocate() failure: %d at %s:%d\n", kt, __FILE__, __LINE__);
+ break;
+ }
+
+ /* Remap the trampoline table to directly follow the config page */
+ vm_prot_t cur_prot;
+ vm_prot_t max_prot;
+
+ kt = vm_remap (mach_task_self (), &trampoline_page, PAGE_SIZE, 0x0, FALSE, mach_task_self (), (vm_address_t) &ffi_closure_trampoline_table_page, FALSE, &cur_prot, &max_prot, VM_INHERIT_SHARE);
+
+ /* If we lost access to the destination trampoline page, drop our config allocation mapping and retry */
+ if (kt != KERN_SUCCESS) {
+ /* Log unexpected failures */
+ if (kt != KERN_NO_SPACE) {
+ fprintf(stderr, "vm_remap() failure: %d at %s:%d\n", kt, __FILE__, __LINE__);
+ }
+
+ vm_deallocate (mach_task_self (), config_page, PAGE_SIZE);
+ continue;
+ }
+
+ /* We have valid trampoline and config pages */
+ table = calloc (1, sizeof(ffi_trampoline_table));
+ table->free_count = FFI_TRAMPOLINE_COUNT;
+ table->config_page = config_page;
+ table->trampoline_page = trampoline_page;
+
+ /* Create and initialize the free list */
+ table->free_list_pool = calloc(FFI_TRAMPOLINE_COUNT, sizeof(ffi_trampoline_table_entry));
+
+ uint16_t i;
+ for (i = 0; i < table->free_count; i++) {
+ ffi_trampoline_table_entry *entry = &table->free_list_pool[i];
+ entry->trampoline = (void *) (table->trampoline_page + (i * FFI_TRAMPOLINE_SIZE));
+
+ if (i < table->free_count - 1)
+ entry->next = &table->free_list_pool[i+1];
+ }
+
+ table->free_list = table->free_list_pool;
+ }
+
+ return table;
+}
+
+void *
+ffi_closure_alloc (size_t size, void **code)
+{
+ /* Create the closure */
+ ffi_closure *closure = malloc(size);
+ if (closure == NULL)
+ return NULL;
+
+ pthread_mutex_lock(&ffi_trampoline_lock);
+
+ /* Check for an active trampoline table with available entries. */
+ ffi_trampoline_table *table = ffi_trampoline_tables;
+ if (table == NULL || table->free_list == NULL) {
+ table = ffi_trampoline_table_alloc ();
+ if (table == NULL) {
+ free(closure);
+ return NULL;
+ }
+
+ /* Insert the new table at the top of the list */
+ table->next = ffi_trampoline_tables;
+ if (table->next != NULL)
+ table->next->prev = table;
+
+ ffi_trampoline_tables = table;
+ }
+
+ /* Claim the free entry */
+ ffi_trampoline_table_entry *entry = ffi_trampoline_tables->free_list;
+ ffi_trampoline_tables->free_list = entry->next;
+ ffi_trampoline_tables->free_count--;
+ entry->next = NULL;
+
+ pthread_mutex_unlock(&ffi_trampoline_lock);
+
+ /* Initialize the return values */
+ *code = entry->trampoline;
+ closure->trampoline_table = table;
+ closure->trampoline_table_entry = entry;
+
+ return closure;
+}
+
+void
+ffi_closure_free (void *ptr)
+{
+ ffi_closure *closure = ptr;
+
+ pthread_mutex_lock(&ffi_trampoline_lock);
+
+ /* Fetch the table and entry references */
+ ffi_trampoline_table *table = closure->trampoline_table;
+ ffi_trampoline_table_entry *entry = closure->trampoline_table_entry;
+
+ /* Return the entry to the free list */
+ entry->next = table->free_list;
+ table->free_list = entry;
+ table->free_count++;
+
+ /* If all trampolines within this table are free, and at least one other table exists, deallocate
+ * the table */
+ if (table->free_count == FFI_TRAMPOLINE_COUNT && ffi_trampoline_tables != table) {
+ /* Remove from the list */
+ if (table->prev != NULL)
+ table->prev->next = table->next;
+
+ if (table->next != NULL)
+ table->next->prev = table->prev;
+
+ /* Deallocate pages */
+ kern_return_t kt;
+ kt = vm_deallocate (mach_task_self (), table->config_page, PAGE_SIZE);
+ if (kt != KERN_SUCCESS)
+ fprintf(stderr, "vm_deallocate() failure: %d at %s:%d\n", kt, __FILE__, __LINE__);
+
+ kt = vm_deallocate (mach_task_self (), table->trampoline_page, PAGE_SIZE);
+ if (kt != KERN_SUCCESS)
+ fprintf(stderr, "vm_deallocate() failure: %d at %s:%d\n", kt, __FILE__, __LINE__);
+
+ /* Deallocate free list */
+ free (table->free_list_pool);
+ free (table);
+ } else if (ffi_trampoline_tables != table) {
+ /* Otherwise, bump this table to the top of the list */
+ table->prev = NULL;
+ table->next = ffi_trampoline_tables;
+ if (ffi_trampoline_tables != NULL)
+ ffi_trampoline_tables->prev = table;
+
+ ffi_trampoline_tables = table;
+ }
+
+ pthread_mutex_unlock (&ffi_trampoline_lock);
+
+ /* Free the closure */
+ free (closure);
+}
+
+#else
+
#define FFI_INIT_TRAMPOLINE(TRAMP,FUN,CTX) \
({ unsigned char *__tramp = (unsigned char*)(TRAMP); \
unsigned int __fun = (unsigned int)(FUN); \
unsigned int __ctx = (unsigned int)(CTX); \
- *(unsigned int*) &__tramp[0] = 0xe92d000f; /* stmfd sp!, {r0-r3} */ \
- *(unsigned int*) &__tramp[4] = 0xe59f0000; /* ldr r0, [pc] */ \
- *(unsigned int*) &__tramp[8] = 0xe59ff000; /* ldr pc, [pc] */ \
+ unsigned char *insns = (unsigned char *)(CTX); \
+ memcpy (__tramp, ffi_arm_trampoline, sizeof ffi_arm_trampoline); \
*(unsigned int*) &__tramp[12] = __ctx; \
*(unsigned int*) &__tramp[16] = __fun; \
- __clear_cache((&__tramp[0]), (&__tramp[19])); \
+ __clear_cache((&__tramp[0]), (&__tramp[19])); /* Clear data mapping. */ \
+ __clear_cache(insns, insns + 3 * sizeof (unsigned int)); \
+ /* Clear instruction \
+ mapping. */ \
})
+#endif
/* the cif must already be prep'ed */
@@ -295,15 +607,150 @@ ffi_prep_closure_loc (ffi_closure* closure,
void *user_data,
void *codeloc)
{
- FFI_ASSERT (cif->abi == FFI_SYSV);
-
+ void (*closure_func)(ffi_closure*) = NULL;
+
+ if (cif->abi == FFI_SYSV)
+ closure_func = &ffi_closure_SYSV;
+#ifdef __ARM_EABI__
+ else if (cif->abi == FFI_VFP)
+ closure_func = &ffi_closure_VFP;
+#endif
+ else
+ return FFI_BAD_ABI;
+
+#if FFI_EXEC_TRAMPOLINE_TABLE
+ void **config = FFI_TRAMPOLINE_CODELOC_CONFIG(codeloc);
+ config[0] = closure;
+ config[1] = closure_func;
+#else
FFI_INIT_TRAMPOLINE (&closure->tramp[0], \
- &ffi_closure_SYSV, \
+ closure_func, \
codeloc);
-
+#endif
+
closure->cif = cif;
closure->user_data = user_data;
closure->fun = fun;
return FFI_OK;
}
+
+/* Below are routines for VFP hard-float support. */
+
+static int rec_vfp_type_p (ffi_type *t, int *elt, int *elnum)
+{
+ switch (t->type)
+ {
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ *elt = (int) t->type;
+ *elnum = 1;
+ return 1;
+
+ case FFI_TYPE_STRUCT_VFP_FLOAT:
+ *elt = FFI_TYPE_FLOAT;
+ *elnum = t->size / sizeof (float);
+ return 1;
+
+ case FFI_TYPE_STRUCT_VFP_DOUBLE:
+ *elt = FFI_TYPE_DOUBLE;
+ *elnum = t->size / sizeof (double);
+ return 1;
+
+ case FFI_TYPE_STRUCT:;
+ {
+ int base_elt = 0, total_elnum = 0;
+ ffi_type **el = t->elements;
+ while (*el)
+ {
+ int el_elt = 0, el_elnum = 0;
+ if (! rec_vfp_type_p (*el, &el_elt, &el_elnum)
+ || (base_elt && base_elt != el_elt)
+ || total_elnum + el_elnum > 4)
+ return 0;
+ base_elt = el_elt;
+ total_elnum += el_elnum;
+ el++;
+ }
+ *elnum = total_elnum;
+ *elt = base_elt;
+ return 1;
+ }
+ default: ;
+ }
+ return 0;
+}
+
+static int vfp_type_p (ffi_type *t)
+{
+ int elt, elnum;
+ if (rec_vfp_type_p (t, &elt, &elnum))
+ {
+ if (t->type == FFI_TYPE_STRUCT)
+ {
+ if (elnum == 1)
+ t->type = elt;
+ else
+ t->type = (elt == FFI_TYPE_FLOAT
+ ? FFI_TYPE_STRUCT_VFP_FLOAT
+ : FFI_TYPE_STRUCT_VFP_DOUBLE);
+ }
+ return (int) t->type;
+ }
+ return 0;
+}
+
+static void place_vfp_arg (ffi_cif *cif, ffi_type *t)
+{
+ int reg = cif->vfp_reg_free;
+ int nregs = t->size / sizeof (float);
+ int align = ((t->type == FFI_TYPE_STRUCT_VFP_FLOAT
+ || t->type == FFI_TYPE_FLOAT) ? 1 : 2);
+ /* Align register number. */
+ if ((reg & 1) && align == 2)
+ reg++;
+ while (reg + nregs <= 16)
+ {
+ int s, new_used = 0;
+ for (s = reg; s < reg + nregs; s++)
+ {
+ new_used |= (1 << s);
+ if (cif->vfp_used & (1 << s))
+ {
+ reg += align;
+ goto next_reg;
+ }
+ }
+ /* Found regs to allocate. */
+ cif->vfp_used |= new_used;
+ cif->vfp_args[cif->vfp_nargs++] = reg;
+
+ /* Update vfp_reg_free. */
+ if (cif->vfp_used & (1 << cif->vfp_reg_free))
+ {
+ reg += nregs;
+ while (cif->vfp_used & (1 << reg))
+ reg += 1;
+ cif->vfp_reg_free = reg;
+ }
+ return;
+ next_reg: ;
+ }
+}
+
+static void layout_vfp_args (ffi_cif *cif)
+{
+ int i;
+ /* Init VFP fields */
+ cif->vfp_used = 0;
+ cif->vfp_nargs = 0;
+ cif->vfp_reg_free = 0;
+ memset (cif->vfp_args, -1, 16); /* Init to -1. */
+
+ for (i = 0; i < cif->nargs; i++)
+ {
+ ffi_type *t = cif->arg_types[i];
+ if (vfp_type_p (t))
+ place_vfp_arg (cif, t);
+ }
+}
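To make the back-filling behaviour of place_vfp_arg() concrete, here is a worked example (illustrative only; FFI_VFP exists only in ARM EABI hard-float builds, and the slot numbers below follow from reading the allocator above):

    #include <ffi.h>

    void vfp_layout_example (void)
    {
      static ffi_type *args[3] = { &ffi_type_float, &ffi_type_double,
                                   &ffi_type_float };
      ffi_cif cif;

      /* After preparation, layout_vfp_args() will have recorded:
           arg 0: float  -> s0             (vfp_args[0] == 0)
           arg 1: double -> d1, i.e. s2/s3 (even alignment skips the free s1;
                                            vfp_args[1] == 2)
           arg 2: float  -> s1             (back-filled; vfp_args[2] == 1)
         ffi_prep_args() then copies each value into vfp_space at that slot
         index, and ffi_call_VFP loads d0-d7 from the scratch area.  */
      ffi_prep_cif (&cif, FFI_VFP, 3, &ffi_type_void, args);
    }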
diff --git a/Modules/_ctypes/libffi/src/arm/ffitarget.h b/Modules/_ctypes/libffi/src/arm/ffitarget.h
index a957426..26d494d 100644
--- a/Modules/_ctypes/libffi/src/arm/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/arm/ffitarget.h
@@ -1,5 +1,8 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 1996-2003 Red Hat, Inc.
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 2010 CodeSourcery
+ Copyright (c) 1996-2003 Red Hat, Inc.
+
Target configuration macros for ARM.
Permission is hereby granted, free of charge, to any person obtaining
@@ -27,6 +30,10 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
#ifndef LIBFFI_ASM
typedef unsigned long ffi_arg;
typedef signed long ffi_sarg;
@@ -34,11 +41,27 @@ typedef signed long ffi_sarg;
typedef enum ffi_abi {
FFI_FIRST_ABI = 0,
FFI_SYSV,
+ FFI_VFP,
+ FFI_LAST_ABI,
+#ifdef __ARM_PCS_VFP
+ FFI_DEFAULT_ABI = FFI_VFP,
+#else
FFI_DEFAULT_ABI = FFI_SYSV,
- FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
+#endif
} ffi_abi;
#endif
+#define FFI_EXTRA_CIF_FIELDS \
+ int vfp_used; \
+ short vfp_reg_free, vfp_nargs; \
+ signed char vfp_args[16] \
+
+/* Internally used. */
+#define FFI_TYPE_STRUCT_VFP_FLOAT (FFI_TYPE_LAST + 1)
+#define FFI_TYPE_STRUCT_VFP_DOUBLE (FFI_TYPE_LAST + 2)
+
+#define FFI_TARGET_SPECIFIC_VARIADIC
+
/* ---- Definitions for closures ----------------------------------------- */
#define FFI_CLOSURES 1
@@ -46,4 +69,3 @@ typedef enum ffi_abi {
#define FFI_NATIVE_RAW_API 0
#endif
-
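For reference, ffi.h splices FFI_EXTRA_CIF_FIELDS into the end of the ffi_cif definition, so on ARM every cif carries the VFP layout cached by layout_vfp_args(). A sketch of the resulting structure (field order as in ffi.h; an illustration, not a literal copy):

    typedef struct {
      ffi_abi abi;
      unsigned nargs;
      ffi_type **arg_types;
      ffi_type *rtype;
      unsigned bytes;
      unsigned flags;
      /* expanded from FFI_EXTRA_CIF_FIELDS above: */
      int vfp_used;              /* bitmask of s-registers used for arguments */
      short vfp_reg_free;        /* lowest s-register still free */
      short vfp_nargs;           /* number of VFP-allocated arguments */
      signed char vfp_args[16];  /* s-register slot per argument, or -1 */
    } ffi_cif;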
diff --git a/Modules/_ctypes/libffi/src/arm/gentramp.sh b/Modules/_ctypes/libffi/src/arm/gentramp.sh
new file mode 100755
index 0000000..74f0b86
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/arm/gentramp.sh
@@ -0,0 +1,118 @@
+#!/bin/sh
+
+# -----------------------------------------------------------------------
+# gentramp.sh - Copyright (c) 2010, Plausible Labs Cooperative, Inc.
+#
+# ARM Trampoline Page Generator
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# ``Software''), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+# -----------------------------------------------------------------------
+
+PROGNAME=$0
+
+# Each trampoline is exactly 3 instructions, or 12 bytes. If any of these values change,
+# the entire arm trampoline implementation must be updated to match, too.
+
+# Size of an individual trampoline, in bytes
+TRAMPOLINE_SIZE=12
+
+# Page size, in bytes
+PAGE_SIZE=4096
+
+# Compute the size of the reachable config page; the first 16 bytes of the config page
+# are unreachable due to our maximum pc-relative ldr offset.
+PAGE_AVAIL=`expr $PAGE_SIZE - 16`
+
+# Compute the number of available trampolines.
+TRAMPOLINE_COUNT=`expr $PAGE_AVAIL / $TRAMPOLINE_SIZE`
+
+header () {
+ echo "# GENERATED CODE - DO NOT EDIT"
+ echo "# This file was generated by $PROGNAME"
+ echo ""
+
+ # Write out the license header
+cat << EOF
+# Copyright (c) 2010, Plausible Labs Cooperative, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# ``Software''), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+# -----------------------------------------------------------------------
+
+EOF
+
+ # Write out the trampoline table, aligned to the page boundary
+ echo ".text"
+ echo ".align 12"
+ echo ".globl _ffi_closure_trampoline_table_page"
+ echo "_ffi_closure_trampoline_table_page:"
+}
+
+
+# WARNING - Don't modify the trampoline code size without also updating the relevant libffi code
+trampoline () {
+ cat << END
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+END
+}
+
+main () {
+ # Write out the header
+ header
+
+ # Write out the trampolines
+ local i=0
+ while [ $i -lt ${TRAMPOLINE_COUNT} ]; do
+ trampoline
+ local i=`expr $i + 1`
+ done
+}
+
+main
diff --git a/Modules/_ctypes/libffi/src/arm/sysv.S b/Modules/_ctypes/libffi/src/arm/sysv.S
index 9064318..fb38cd6 100644
--- a/Modules/_ctypes/libffi/src/arm/sysv.S
+++ b/Modules/_ctypes/libffi/src/arm/sysv.S
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------------
- sysv.S - Copyright (c) 1998, 2008 Red Hat, Inc.
+ sysv.S - Copyright (c) 1998, 2008, 2011 Red Hat, Inc.
+ Copyright (c) 2011 Plausible Labs Cooperative, Inc.
ARM Foreign Function Interface
@@ -39,7 +40,11 @@
#else
#define CNAME(x) x
#endif
+#ifdef __APPLE__
+#define ENTRY(x) .globl _##x; _##x:
+#else
#define ENTRY(x) .globl CNAME(x); .type CNAME(x),%function; CNAME(x):
+#endif /* __APPLE__ */
#endif
#ifdef __ELF__
@@ -48,6 +53,12 @@
#define LSYM(x) x
#endif
+/* Use the SOFTFP return value ABI on Mac OS X, as per the iOS ABI
+ Function Call Guide */
+#ifdef __APPLE__
+#define __SOFTFP__
+#endif
+
/* We need a better way of testing for this, but for now, this is all
we can do. */
@ This selects the minimum architecture level required.
@@ -105,21 +116,33 @@
.align 0
.thumb
.thumb_func
+#ifdef __APPLE__
+ ENTRY($0)
+#else
ENTRY(\name)
+#endif
bx pc
nop
.arm
UNWIND .fnstart
/* A hook to tell gdb that we've switched to ARM mode. Also used to call
directly from other local arm routines. */
-_L__\name:
+#ifdef __APPLE__
+_L__$0:
+#else
+_L__\name:
+#endif
.endm
#else
.macro ARM_FUNC_START name
.text
.align 0
.arm
+#ifdef __APPLE__
+ ENTRY($0)
+#else
ENTRY(\name)
+#endif
UNWIND .fnstart
.endm
#endif
@@ -141,13 +164,11 @@ _L__\name:
#endif
.endm
-
@ r0: ffi_prep_args
@ r1: &ecif
@ r2: cif->bytes
@ r3: fig->flags
@ sp+0: ecif.rvalue
- @ sp+4: fn
@ This assumes we are using gas.
ARM_FUNC_START ffi_call_SYSV
@@ -162,24 +183,23 @@ ARM_FUNC_START ffi_call_SYSV
sub sp, fp, r2
@ Place all of the ffi_prep_args in position
- mov ip, r0
mov r0, sp
@ r1 already set
@ Call ffi_prep_args(stack, &ecif)
- call_reg(ip)
+ bl CNAME(ffi_prep_args)
@ move first 4 parameters in registers
ldmia sp, {r0-r3}
@ and adjust stack
- ldr ip, [fp, #8]
- cmp ip, #16
- movhs ip, #16
- add sp, sp, ip
+ sub lr, fp, sp @ cif->bytes == fp - sp
+ ldr ip, [fp] @ load fn() in advance
+ cmp lr, #16
+ movhs lr, #16
+ add sp, sp, lr
@ call (fn) (...)
- ldr ip, [fp, #28]
call_reg(ip)
@ Remove the space we pushed for the args
@@ -224,11 +244,19 @@ ARM_FUNC_START ffi_call_SYSV
#endif
LSYM(Lepilogue):
- RETLDM "r0-r3,fp"
+#if defined (__INTERWORKING__)
+ ldmia sp!, {r0-r3,fp, lr}
+ bx lr
+#else
+ ldmia sp!, {r0-r3,fp, pc}
+#endif
.ffi_call_SYSV_end:
UNWIND .fnend
+#ifdef __ELF__
.size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV)
+#endif
+
/*
unsigned int FFI_HIDDEN
@@ -244,11 +272,11 @@ ARM_FUNC_START ffi_closure_SYSV
stmfd sp!, {ip, lr}
UNWIND .save {r0, lr}
add r2, sp, #8
- .pad #16
+ UNWIND .pad #16
sub sp, sp, #16
str sp, [sp, #8]
add r1, sp, #8
- bl ffi_closure_SYSV_inner
+ bl CNAME(ffi_closure_SYSV_inner)
cmp r0, #FFI_TYPE_INT
beq .Lretint
@@ -300,7 +328,177 @@ ARM_FUNC_START ffi_closure_SYSV
.ffi_closure_SYSV_end:
UNWIND .fnend
+#ifdef __ELF__
.size CNAME(ffi_closure_SYSV),.ffi_closure_SYSV_end-CNAME(ffi_closure_SYSV)
+#endif
+
+
+/* Below are VFP hard-float ABI call and closure implementations.
+ Add VFP FPU directive here. This is only compiled into the library
+ under EABI. */
+#ifdef __ARM_EABI__
+ .fpu vfp
+
+ @ r0: fn
+ @ r1: &ecif
+ @ r2: cif->bytes
+ @ r3: fig->flags
+ @ sp+0: ecif.rvalue
+
+ARM_FUNC_START ffi_call_VFP
+ @ Save registers
+ stmfd sp!, {r0-r3, fp, lr}
+ UNWIND .save {r0-r3, fp, lr}
+ mov fp, sp
+ UNWIND .setfp fp, sp
+
+ @ Make room for all of the new args.
+ sub sp, sp, r2
+
+ @ Make room for loading VFP args
+ sub sp, sp, #64
+
+ @ Place all of the ffi_prep_args in position
+ mov r0, sp
+ @ r1 already set
+ sub r2, fp, #64 @ VFP scratch space
+
+ @ Call ffi_prep_args(stack, &ecif, vfp_space)
+ bl CNAME(ffi_prep_args)
+
+ @ Load VFP register args if needed
+ cmp r0, #0
+ beq LSYM(Lbase_args)
+
+ @ Load only d0 if possible
+ cmp r0, #3
+ sub ip, fp, #64
+ flddle d0, [ip]
+ fldmiadgt ip, {d0-d7}
+
+LSYM(Lbase_args):
+ @ move first 4 parameters in registers
+ ldmia sp, {r0-r3}
+
+ @ and adjust stack
+ sub lr, ip, sp @ cif->bytes == (fp - 64) - sp
+ ldr ip, [fp] @ load fn() in advance
+ cmp lr, #16
+ movhs lr, #16
+ add sp, sp, lr
+
+ @ call (fn) (...)
+ call_reg(ip)
+
+ @ Remove the space we pushed for the args
+ mov sp, fp
+
+ @ Load r2 with the pointer to storage for
+ @ the return value
+ ldr r2, [sp, #24]
+
+ @ Load r3 with the return type code
+ ldr r3, [sp, #12]
+
+ @ If the return value pointer is NULL,
+ @ assume no return value.
+ cmp r2, #0
+ beq LSYM(Lepilogue_vfp)
+
+ cmp r3, #FFI_TYPE_INT
+ streq r0, [r2]
+ beq LSYM(Lepilogue_vfp)
+
+ cmp r3, #FFI_TYPE_SINT64
+ stmeqia r2, {r0, r1}
+ beq LSYM(Lepilogue_vfp)
+
+ cmp r3, #FFI_TYPE_FLOAT
+ fstseq s0, [r2]
+ beq LSYM(Lepilogue_vfp)
+
+ cmp r3, #FFI_TYPE_DOUBLE
+ fstdeq d0, [r2]
+ beq LSYM(Lepilogue_vfp)
+
+ cmp r3, #FFI_TYPE_STRUCT_VFP_FLOAT
+ cmpne r3, #FFI_TYPE_STRUCT_VFP_DOUBLE
+ fstmiadeq r2, {d0-d3}
+
+LSYM(Lepilogue_vfp):
+ RETLDM "r0-r3,fp"
+
+.ffi_call_VFP_end:
+ UNWIND .fnend
+ .size CNAME(ffi_call_VFP),.ffi_call_VFP_end-CNAME(ffi_call_VFP)
+
+
+ARM_FUNC_START ffi_closure_VFP
+ fstmfdd sp!, {d0-d7}
+ @ r0-r3, then d0-d7
+ UNWIND .pad #80
+ add ip, sp, #80
+ stmfd sp!, {ip, lr}
+ UNWIND .save {r0, lr}
+ add r2, sp, #72
+ add r3, sp, #8
+ UNWIND .pad #72
+ sub sp, sp, #72
+ str sp, [sp, #64]
+ add r1, sp, #64
+ bl CNAME(ffi_closure_SYSV_inner)
+
+ cmp r0, #FFI_TYPE_INT
+ beq .Lretint_vfp
+
+ cmp r0, #FFI_TYPE_FLOAT
+ beq .Lretfloat_vfp
+
+ cmp r0, #FFI_TYPE_DOUBLE
+ cmpne r0, #FFI_TYPE_LONGDOUBLE
+ beq .Lretdouble_vfp
+
+ cmp r0, #FFI_TYPE_SINT64
+ beq .Lretlonglong_vfp
+
+ cmp r0, #FFI_TYPE_STRUCT_VFP_FLOAT
+ beq .Lretfloat_struct_vfp
+
+ cmp r0, #FFI_TYPE_STRUCT_VFP_DOUBLE
+ beq .Lretdouble_struct_vfp
+
+.Lclosure_epilogue_vfp:
+ add sp, sp, #72
+ ldmfd sp, {sp, pc}
+
+.Lretfloat_vfp:
+ flds s0, [sp]
+ b .Lclosure_epilogue_vfp
+.Lretdouble_vfp:
+ fldd d0, [sp]
+ b .Lclosure_epilogue_vfp
+.Lretint_vfp:
+ ldr r0, [sp]
+ b .Lclosure_epilogue_vfp
+.Lretlonglong_vfp:
+ ldmia sp, {r0, r1}
+ b .Lclosure_epilogue_vfp
+.Lretfloat_struct_vfp:
+ fldmiad sp, {d0-d1}
+ b .Lclosure_epilogue_vfp
+.Lretdouble_struct_vfp:
+ fldmiad sp, {d0-d3}
+ b .Lclosure_epilogue_vfp
+
+.ffi_closure_VFP_end:
+ UNWIND .fnend
+ .size CNAME(ffi_closure_VFP),.ffi_closure_VFP_end-CNAME(ffi_closure_VFP)
+#endif
+
+ENTRY(ffi_arm_trampoline)
+ stmfd sp!, {r0-r3}
+ ldr r0, [pc]
+ ldr pc, [pc]
#if defined __ELF__ && defined __linux__
.section .note.GNU-stack,"",%progbits
diff --git a/Modules/_ctypes/libffi/src/arm/trampoline.S b/Modules/_ctypes/libffi/src/arm/trampoline.S
new file mode 100644
index 0000000..935e8de
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/arm/trampoline.S
@@ -0,0 +1,4450 @@
+# GENERATED CODE - DO NOT EDIT
+# This file was generated by src/arm/gentramp.sh
+
+# Copyright (c) 2010, Plausible Labs Cooperative, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# Software''), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED AS IS'', WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+# -----------------------------------------------------------------------
+
+.text
+.align 12
+.globl _ffi_closure_trampoline_table_page
+_ffi_closure_trampoline_table_page:
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
+
+ // trampoline
+ // Save to stack
+ stmfd sp!, {r0-r3}
+
+ // Load the context argument from the config page.
+ // This places the first usable config value at _ffi_closure_trampoline_table-4080
+ // This accounts for the above 4-byte stmfd instruction, plus 8 bytes constant when loading from pc.
+ ldr r0, [pc, #-4092]
+
+ // Load the jump address from the config page.
+ ldr pc, [pc, #-4092]
+
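A minimal sketch of the address arithmetic behind the trampolines above, assuming the usual pairing of a writable config page mapped directly below the executable trampoline page. The page addresses are hypothetical and this is illustration only, not code from libffi:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE        4096u
#define TRAMPOLINE_SIZE  12u    /* stmfd + ldr r0 + ldr pc */

int main(void)
{
    uintptr_t config_page = 0x10000;                  /* hypothetical writable page  */
    uintptr_t tramp_page  = config_page + PAGE_SIZE;  /* executable page right above */
    unsigned i;

    for (i = 0; i < 4; i++) {
        uintptr_t tramp = tramp_page + i * TRAMPOLINE_SIZE;
        /* "ldr r0, [pc, #-4092]" sits 4 bytes into the trampoline and pc
           reads 8 bytes ahead, so the context word is at tramp - 4080.    */
        uintptr_t context_slot = tramp + 4 + 8 - 4092;
        /* "ldr pc, [pc, #-4092]" sits 8 bytes in, giving tramp - 4076.    */
        uintptr_t target_slot  = tramp + 8 + 8 - 4092;
        printf("trampoline %u: context @ %#lx, target @ %#lx\n",
               i, (unsigned long)context_slot, (unsigned long)target_slot);
    }
    return 0;
}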
diff --git a/Modules/_ctypes/libffi/src/avr32/ffi.c b/Modules/_ctypes/libffi/src/avr32/ffi.c
index 39fba2b..3d43397 100644
--- a/Modules/_ctypes/libffi/src/avr32/ffi.c
+++ b/Modules/_ctypes/libffi/src/avr32/ffi.c
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------------
- ffi.c - Copyright (c) 2009 Bradley Smith <brad@brad-smith.co.uk>
+ ffi.c - Copyright (c) 2011 Anthony Green
+ Copyright (c) 2009 Bradley Smith <brad@brad-smith.co.uk>
AVR32 Foreign Function Interface
@@ -394,7 +395,8 @@ ffi_status ffi_prep_closure_loc(ffi_closure* closure, ffi_cif* cif,
void (*fun)(ffi_cif*, void*, void**, void*), void *user_data,
void *codeloc)
{
- FFI_ASSERT(cif->abi == FFI_SYSV);
+ if (cif->abi != FFI_SYSV)
+ return FFI_BAD_ABI;
unsigned char *__tramp = (unsigned char*)(&closure->tramp[0]);
unsigned int __fun = (unsigned int)(&ffi_closure_SYSV);
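A caller-side sketch of what the FFI_ASSERT to FFI_BAD_ABI change buys: an unsupported ABI now surfaces as an error code instead of aborting inside the library. It assumes only the standard libffi closure API (ffi_closure_alloc, ffi_prep_closure_loc) and is not taken from this patch:

#include <ffi.h>
#include <stdio.h>

/* Closure handler: read the single int argument and return twice its value. */
static void doubler(ffi_cif *cif, void *ret, void **args, void *user_data)
{
    (void)cif; (void)user_data;
    *(ffi_arg *)ret = *(int *)args[0] * 2;
}

int main(void)
{
    ffi_cif cif;
    ffi_type *argtypes[1] = { &ffi_type_sint };
    void *code = NULL;
    ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
    ffi_status rc;

    if (closure == NULL)
        return 1;
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, argtypes) != FFI_OK)
        return 1;

    /* With this patch, a port that cannot build the closure reports
       FFI_BAD_ABI here rather than tripping an assertion. */
    rc = ffi_prep_closure_loc(closure, &cif, doubler, NULL, code);
    if (rc == FFI_BAD_ABI) {
        fprintf(stderr, "closures not supported for this ABI\n");
    } else if (rc == FFI_OK) {
        int (*fn)(int) = (int (*)(int))code;
        printf("%d\n", fn(7));     /* prints 14 */
    }

    ffi_closure_free(closure);
    return 0;
}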
diff --git a/Modules/_ctypes/libffi/src/avr32/ffitarget.h b/Modules/_ctypes/libffi/src/avr32/ffitarget.h
index 1c799b1..d0c7586 100644
--- a/Modules/_ctypes/libffi/src/avr32/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/avr32/ffitarget.h
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 2009 Bradley Smith <brad@brad-smith.co.uk>
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 2009 Bradley Smith <brad@brad-smith.co.uk>
Target configuration macros for AVR32.
Permission is hereby granted, free of charge, to any person obtaining
@@ -27,6 +28,10 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
#ifndef LIBFFI_ASM
typedef unsigned long ffi_arg;
typedef signed long ffi_sarg;
@@ -34,8 +39,8 @@ typedef signed long ffi_sarg;
typedef enum ffi_abi {
FFI_FIRST_ABI = 0,
FFI_SYSV,
- FFI_DEFAULT_ABI = FFI_SYSV,
- FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
} ffi_abi;
#endif
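Reordering the enum so that FFI_LAST_ABI follows the real ABI members (instead of being pinned to FFI_DEFAULT_ABI + 1) is what lets generic code validate an abi value with a plain range check; the same check appears in the ffi_prep_cif_core hunk further down. A trivial sketch, illustration only:

#include <ffi.h>

static int abi_is_valid(ffi_abi abi)
{
    return abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI;
}

int main(void)
{
    return abi_is_valid(FFI_DEFAULT_ABI) ? 0 : 1;
}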
diff --git a/Modules/_ctypes/libffi/src/bfin/ffi.c b/Modules/_ctypes/libffi/src/bfin/ffi.c
new file mode 100644
index 0000000..0beccc1
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/bfin/ffi.c
@@ -0,0 +1,195 @@
+/* -----------------------------------------------------------------------
+ ffi.c - Copyright (c) 2012 Alexandre K. I. de Mendonca <alexandre.keunecke@gmail.com>
+
+ Blackfin Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+#include <ffi.h>
+#include <ffi_common.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+
+/* Maximum number of GPRs available for argument passing. */
+#define MAX_GPRARGS 3
+
+/*
+ * Return types
+ */
+#define FFIBFIN_RET_VOID 0
+#define FFIBFIN_RET_BYTE 1
+#define FFIBFIN_RET_HALFWORD 2
+#define FFIBFIN_RET_INT64 3
+#define FFIBFIN_RET_INT32 4
+
+/*====================================================================*/
+/*                             PROTOTYPE                              */
+/*====================================================================*/
+void ffi_prep_args(unsigned char *, extended_cif *);
+
+/*====================================================================*/
+/* Externals */
+/* (Assembly) */
+/*====================================================================*/
+
+extern void ffi_call_SYSV(unsigned, extended_cif *, void(*)(unsigned char *, extended_cif *), unsigned, void *, void(*fn)(void));
+
+/*====================================================================*/
+/* Implementation */
+/* */
+/*====================================================================*/
+
+
+/*
+ * This function computes the return-value handling flag (cif->flags) from the return type.
+ */
+
+ffi_status ffi_prep_cif_machdep(ffi_cif *cif)
+{
+ /* --------------------------------------*
+ * Return handling *
+ * --------------------------------------*/
+ switch (cif->rtype->type) {
+ case FFI_TYPE_VOID:
+ cif->flags = FFIBFIN_RET_VOID;
+ break;
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ cif->flags = FFIBFIN_RET_HALFWORD;
+ break;
+ case FFI_TYPE_UINT8:
+ cif->flags = FFIBFIN_RET_BYTE;
+ break;
+ case FFI_TYPE_INT:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_SINT8:
+ cif->flags = FFIBFIN_RET_INT32;
+ break;
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_DOUBLE:
+ cif->flags = FFIBFIN_RET_INT64;
+ break;
+ case FFI_TYPE_STRUCT:
+ if (cif->rtype->size <= 4){
+ cif->flags = FFIBFIN_RET_INT32;
+ }else if (cif->rtype->size == 8){
+ cif->flags = FFIBFIN_RET_INT64;
+ }else{
+ //it will return via a hidden pointer in P0
+ cif->flags = FFIBFIN_RET_VOID;
+ }
+ break;
+ default:
+ FFI_ASSERT(0);
+ break;
+ }
+ return FFI_OK;
+}
+
+/*
+ * This will prepare the arguments and will call the assembly routine
+ * cif = the call interface
+ * fn = the function to be called
+ * rvalue = the return value
+ * avalue = the arguments
+ */
+void ffi_call(ffi_cif *cif, void(*fn)(void), void *rvalue, void **avalue)
+{
+ int ret_type = cif->flags;
+ extended_cif ecif;
+ ecif.cif = cif;
+ ecif.avalue = avalue;
+ ecif.rvalue = rvalue;
+
+ switch (cif->abi) {
+ case FFI_SYSV:
+ ffi_call_SYSV(cif->bytes, &ecif, ffi_prep_args, ret_type, ecif.rvalue, fn);
+ break;
+ default:
+ FFI_ASSERT(0);
+ break;
+ }
+}
+
+
+/*
+* This function prepares the parameters (copies them from the ecif to the stack)
+* to call the function (ffi_prep_args is called by the assembly routine in file
+* sysv.S, which also calls the actual function)
+*/
+void ffi_prep_args(unsigned char *stack, extended_cif *ecif)
+{
+ register unsigned int i = 0;
+ void **p_argv;
+ unsigned char *argp;
+ ffi_type **p_arg;
+ argp = stack;
+ p_argv = ecif->avalue;
+ for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types;
+ (i != 0);
+ i--, p_arg++) {
+ size_t z;
+ z = (*p_arg)->size;
+ if (z < sizeof(int)) {
+ z = sizeof(int);
+ switch ((*p_arg)->type) {
+ case FFI_TYPE_SINT8: {
+ signed char v = *(SINT8 *)(* p_argv);
+ signed int t = v;
+ *(signed int *) argp = t;
+ }
+ break;
+ case FFI_TYPE_UINT8: {
+ unsigned char v = *(UINT8 *)(* p_argv);
+ unsigned int t = v;
+ *(unsigned int *) argp = t;
+ }
+ break;
+ case FFI_TYPE_SINT16:
+ *(signed int *) argp = (signed int) * (SINT16 *)(* p_argv);
+ break;
+ case FFI_TYPE_UINT16:
+ *(unsigned int *) argp = (unsigned int) * (UINT16 *)(* p_argv);
+ break;
+ case FFI_TYPE_STRUCT:
+ memcpy(argp, *p_argv, (*p_arg)->size);
+ break;
+ default:
+ FFI_ASSERT(0);
+ break;
+ }
+ } else if (z == sizeof(int)) {
+ *(unsigned int *) argp = (unsigned int) * (UINT32 *)(* p_argv);
+ } else {
+ memcpy(argp, *p_argv, z);
+ }
+ p_argv++;
+ argp += z;
+ }
+}
+
+
+
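A minimal sketch of how this port is exercised from the caller's side through libffi's public API; it is illustration only, not part of the patch:

#include <ffi.h>
#include <stdio.h>

static int add(int a, int b) { return a + b; }

int main(void)
{
    ffi_cif cif;
    ffi_type *argtypes[2] = { &ffi_type_sint, &ffi_type_sint };
    void *argvalues[2];
    int a = 2, b = 3;
    ffi_arg result;

    argvalues[0] = &a;
    argvalues[1] = &b;

    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, argtypes) != FFI_OK)
        return 1;

    /* On Blackfin this goes through ffi_call_SYSV, which calls ffi_prep_args
       to lay out a and b in the outgoing argument area before the call. */
    ffi_call(&cif, FFI_FN(add), &result, argvalues);
    printf("%d\n", (int)result);   /* prints 5 */
    return 0;
}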
diff --git a/Modules/_ctypes/libffi/src/bfin/ffitarget.h b/Modules/_ctypes/libffi/src/bfin/ffitarget.h
new file mode 100644
index 0000000..2175c01
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/bfin/ffitarget.h
@@ -0,0 +1,43 @@
+/* -----------------------------------------------------------------------
+ ffitarget.h - Copyright (c) 2012 Alexandre K. I. de Mendonca <alexandre.keunecke@gmail.com>
+
+ Blackfin Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_ASM
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+
+typedef enum ffi_abi {
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
+} ffi_abi;
+#endif
+
+#endif
+
diff --git a/Modules/_ctypes/libffi/src/bfin/sysv.S b/Modules/_ctypes/libffi/src/bfin/sysv.S
new file mode 100644
index 0000000..ae7a152
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/bfin/sysv.S
@@ -0,0 +1,177 @@
+/* -----------------------------------------------------------------------
+ sysv.S - Copyright (c) 2012 Alexandre K. I. de Mendonca <alexandre.keunecke@gmail.com>
+
+ Blackfin Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+
+.text
+.align 4
+
+ /*
+	 There is a "feature" in the bfin toolchain: it puts a _ before function names,
+	 which is why the function here is called _ffi_call_SYSV and not ffi_call_SYSV.
+ */
+ .global _ffi_call_SYSV;
+ .type _ffi_call_SYSV, STT_FUNC;
+ .func ffi_call_SYSV
+
+ /*
+ cif->bytes = R0 (fp+8)
+ &ecif = R1 (fp+12)
+ ffi_prep_args = R2 (fp+16)
+ ret_type = stack (fp+20)
+ ecif.rvalue = stack (fp+24)
+ fn = stack (fp+28)
+ got (fp+32)
+ There is room for improvement here (we can use temporary registers
+	 instead of saving the values in memory)
+ REGS:
+ P5 => Stack pointer (function arguments)
+ R5 => cif->bytes
+ R4 => ret->type
+
+ FP-20 = P3
+ FP-16 = SP (parameters area)
+ FP-12 = SP (temp)
+ FP-08 = function return part 1 [R0]
+ FP-04 = function return part 2 [R1]
+ */
+
+_ffi_call_SYSV:
+.prologue:
+ LINK 20;
+ [FP-20] = P3;
+ [FP+8] = R0;
+ [FP+12] = R1;
+ [FP+16] = R2;
+
+.allocate_stack:
+	//allocate cif->bytes bytes on the stack
+ R1 = [FP+8];
+ R0 = SP;
+ R0 = R0 - R1;
+ R1 = 4;
+ R0 = R0 - R1;
+ [FP-12] = SP;
+ SP = R0;
+ [FP-16] = SP;
+
+.call_prep_args:
+ //get the addr of prep_args
+ P0 = [P3 + _ffi_prep_args@FUNCDESC_GOT17M4];
+ P1 = [P0];
+ P3 = [P0+4];
+ R0 = [FP-16];//SP (parameter area)
+ R1 = [FP+12];//ecif
+ call (P1);
+
+.call_user_function:
+	//adjust SP so the user function can access the parameters on the stack
+ SP = [FP-16]; //point to function parameters
+ R0 = [SP];
+ R1 = [SP+4];
+ R2 = [SP+8];
+ //load user function address
+ P0 = FP;
+ P0 +=28;
+ P1 = [P0];
+ P1 = [P1];
+ P3 = [P0+4];
+ /*
+ For functions returning aggregate values (struct) occupying more than 8 bytes,
+ the caller allocates the return value object on the stack and the address
+ of this object is passed to the callee as a hidden argument in register P0.
+ */
+ P0 = [FP+24];
+
+ call (P1);
+ SP = [FP-12];
+.compute_return:
+ P2 = [FP-20];
+ [FP-8] = R0;
+ [FP-4] = R1;
+
+ R0 = [FP+20];
+ R1 = R0 << 2;
+
+ R0 = [P2+.rettable@GOT17M4];
+ R0 = R1 + R0;
+ P2 = R0;
+ R1 = [P2];
+
+ P2 = [FP+-20];
+ R0 = [P2+.rettable@GOT17M4];
+ R0 = R1 + R0;
+ P2 = R0;
+ R0 = [FP-8];
+ R1 = [FP-4];
+ jump (P2);
+
+/*
+#define FFIBFIN_RET_VOID 0
+#define FFIBFIN_RET_BYTE 1
+#define FFIBFIN_RET_HALFWORD 2
+#define FFIBFIN_RET_INT64 3
+#define FFIBFIN_RET_INT32 4
+*/
+.align 4
+.align 4
+.rettable:
+ .dd .epilogue - .rettable
+ .dd .rbyte - .rettable;
+ .dd .rhalfword - .rettable;
+ .dd .rint64 - .rettable;
+ .dd .rint32 - .rettable;
+
+.rbyte:
+ P0 = [FP+24];
+ R0 = R0.B (Z);
+ [P0] = R0;
+ JUMP .epilogue
+.rhalfword:
+ P0 = [FP+24];
+ R0 = R0.L;
+ [P0] = R0;
+ JUMP .epilogue
+.rint64:
+ P0 = [FP+24];// &rvalue
+ [P0] = R0;
+ [P0+4] = R1;
+ JUMP .epilogue
+.rint32:
+ P0 = [FP+24];
+ [P0] = R0;
+.epilogue:
+ R0 = [FP+8];
+ R1 = [FP+12];
+ R2 = [FP+16];
+ P3 = [FP-20];
+ UNLINK;
+ RTS;
+
+.size _ffi_call_SYSV,.-_ffi_call_SYSV;
+.endfunc
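The .rettable above is a table of offsets from the table itself, indexed by the FFIBFIN_RET_* flag and jumped through to pick the right store sequence. A hedged C analogue of that dispatch, with function pointers standing in for the computed jump, illustration only:

#include <stdio.h>

enum { RET_VOID, RET_BYTE, RET_HALFWORD, RET_INT64, RET_INT32, RET_COUNT };

static void ret_void(void)     { puts("no return value"); }
static void ret_byte(void)     { puts("store low byte"); }
static void ret_halfword(void) { puts("store low halfword"); }
static void ret_int64(void)    { puts("store R0:R1 pair"); }
static void ret_int32(void)    { puts("store R0"); }

static void (*const rettable[RET_COUNT])(void) = {
    ret_void, ret_byte, ret_halfword, ret_int64, ret_int32
};

int main(void)
{
    int flag = RET_INT32;   /* what ffi_prep_cif_machdep put in cif->flags */
    rettable[flag]();
    return 0;
}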
diff --git a/Modules/_ctypes/libffi/src/closures.c b/Modules/_ctypes/libffi/src/closures.c
index 0b156e0..6298d6f 100644
--- a/Modules/_ctypes/libffi/src/closures.c
+++ b/Modules/_ctypes/libffi/src/closures.c
@@ -1,6 +1,7 @@
/* -----------------------------------------------------------------------
- closures.c - Copyright (c) 2007 Red Hat, Inc.
- Copyright (C) 2007, 2009 Free Software Foundation, Inc
+ closures.c - Copyright (c) 2007, 2009, 2010 Red Hat, Inc.
+ Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc
+ Copyright (c) 2011 Plausible Labs Cooperative, Inc.
Code to allocate and deallocate memory for closures.
@@ -32,7 +33,7 @@
#include <ffi.h>
#include <ffi_common.h>
-#ifndef FFI_MMAP_EXEC_WRIT
+#if !FFI_MMAP_EXEC_WRIT && !FFI_EXEC_TRAMPOLINE_TABLE
# if __gnu_linux__
/* This macro indicates it may be forbidden to map anonymous memory
with both write and execute permission. Code compiled when this
@@ -44,7 +45,7 @@
# define FFI_MMAP_EXEC_WRIT 1
# define HAVE_MNTENT 1
# endif
-# if defined(X86_WIN32) || defined(X86_WIN64)
+# if defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)
/* Windows systems may have Data Execution Protection (DEP) enabled,
which requires the use of VirtualMalloc/VirtualFree to alloc/free
executable memory. */
@@ -63,7 +64,11 @@
#if FFI_CLOSURES
-# if FFI_MMAP_EXEC_WRIT
+# if FFI_EXEC_TRAMPOLINE_TABLE
+
+// Per-target implementation; it's unclear what can reasonably be shared between two OS/architecture implementations.
+
+# elif FFI_MMAP_EXEC_WRIT /* !FFI_EXEC_TRAMPOLINE_TABLE */
#define USE_LOCKS 1
#define USE_DL_PREFIX 1
@@ -146,7 +151,7 @@ selinux_enabled_check (void)
p = strchr (p + 1, ' ');
if (p == NULL)
break;
- if (strncmp (p + 1, "selinuxfs ", 10) != 0)
+ if (strncmp (p + 1, "selinuxfs ", 10) == 0)
{
free (buf);
fclose (f);
@@ -167,7 +172,26 @@ selinux_enabled_check (void)
#endif /* !FFI_MMAP_EXEC_SELINUX */
-#elif defined (__CYGWIN__)
+/* On PaX-enabled kernels that have MPROTECT enabled we can't use PROT_EXEC. */
+#ifdef FFI_MMAP_EXEC_EMUTRAMP_PAX
+#include <stdlib.h>
+
+static int emutramp_enabled = -1;
+
+static int
+emutramp_enabled_check (void)
+{
+ if (getenv ("FFI_DISABLE_EMUTRAMP") == NULL)
+ return 1;
+ else
+ return 0;
+}
+
+#define is_emutramp_enabled() (emutramp_enabled >= 0 ? emutramp_enabled \
+ : (emutramp_enabled = emutramp_enabled_check ()))
+#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */
+
+#elif defined (__CYGWIN__) || defined(__INTERIX)
#include <sys/mman.h>
@@ -176,6 +200,10 @@ selinux_enabled_check (void)
#endif /* !defined(X86_WIN32) && !defined(X86_WIN64) */
+#ifndef FFI_MMAP_EXEC_EMUTRAMP_PAX
+#define is_emutramp_enabled() 0
+#endif /* FFI_MMAP_EXEC_EMUTRAMP_PAX */
+
/* Declare all functions defined in dlmalloc.c as static. */
static void *dlmalloc(size_t);
static void dlfree(void*);
@@ -193,11 +221,11 @@ static int dlmalloc_trim(size_t) MAYBE_UNUSED;
static size_t dlmalloc_usable_size(void*) MAYBE_UNUSED;
static void dlmalloc_stats(void) MAYBE_UNUSED;
-#if !(defined(X86_WIN32) || defined(X86_WIN64)) || defined (__CYGWIN__)
+#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
/* Use these for mmap and munmap within dlmalloc.c. */
static void *dlmmap(void *, size_t, int, int, int, off_t);
static int dlmunmap(void *, size_t);
-#endif /* !(defined(X86_WIN32) || defined(X86_WIN64)) || defined (__CYGWIN__) */
+#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */
#define mmap dlmmap
#define munmap dlmunmap
@@ -207,7 +235,7 @@ static int dlmunmap(void *, size_t);
#undef mmap
#undef munmap
-#if !(defined(X86_WIN32) || defined(X86_WIN64)) || defined (__CYGWIN__)
+#if !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX)
/* A mutex used to synchronize access to *exec* variables in this file. */
static pthread_mutex_t open_temp_exec_file_mutex = PTHREAD_MUTEX_INITIALIZER;
@@ -294,7 +322,7 @@ open_temp_exec_file_mnt (const char *mounts)
struct mntent mnt;
char buf[MAXPATHLEN * 3];
- if (getmntent_r (last_mntent, &mnt, buf, sizeof (buf)))
+ if (getmntent_r (last_mntent, &mnt, buf, sizeof (buf)) == NULL)
return -1;
if (hasmntopt (&mnt, "ro")
@@ -453,6 +481,12 @@ dlmmap (void *start, size_t length, int prot,
printf ("mapping in %zi\n", length);
#endif
+ if (execfd == -1 && is_emutramp_enabled ())
+ {
+ ptr = mmap (start, length, prot & ~PROT_EXEC, flags, fd, offset);
+ return ptr;
+ }
+
if (execfd == -1 && !is_selinux_enabled ())
{
ptr = mmap (start, length, prot | PROT_EXEC, flags, fd, offset);
@@ -522,7 +556,7 @@ segment_holding_code (mstate m, char* addr)
}
#endif
-#endif /* !(defined(X86_WIN32) || defined(X86_WIN64)) || defined (__CYGWIN__) */
+#endif /* !(defined(X86_WIN32) || defined(X86_WIN64) || defined(__OS2__)) || defined (__CYGWIN__) || defined(__INTERIX) */
/* Allocate a chunk of memory with the given size. Returns a pointer
to the writable address, and sets *CODE to the executable
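The strncmp fix above inverts a test so selinux_enabled_check() actually reports a mounted selinuxfs. A standalone sketch of the same /proc/mounts scan; it mirrors the intent of that routine, not its exact code:

#include <stdio.h>
#include <string.h>

static int selinuxfs_mounted(void)
{
    char line[512];
    FILE *f = fopen("/proc/mounts", "r");
    int found = 0;

    if (f == NULL)
        return 0;
    while (fgets(line, sizeof line, f) != NULL) {
        /* each line is "device mountpoint fstype options ..." */
        char *sp = strchr(line, ' ');
        if (sp == NULL)
            continue;
        sp = strchr(sp + 1, ' ');
        if (sp != NULL && strncmp(sp + 1, "selinuxfs ", 10) == 0) {
            found = 1;
            break;
        }
    }
    fclose(f);
    return found;
}

int main(void)
{
    printf("selinuxfs mounted: %d\n", selinuxfs_mounted());
    return 0;
}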
diff --git a/Modules/_ctypes/libffi/src/cris/ffi.c b/Modules/_ctypes/libffi/src/cris/ffi.c
index e9c3953..aaca5b1 100644
--- a/Modules/_ctypes/libffi/src/cris/ffi.c
+++ b/Modules/_ctypes/libffi/src/cris/ffi.c
@@ -153,21 +153,24 @@ ffi_prep_args (char *stack, extended_cif * ecif)
return (struct_count);
}
-ffi_status
-ffi_prep_cif (ffi_cif * cif,
- ffi_abi abi, unsigned int nargs,
- ffi_type * rtype, ffi_type ** atypes)
+ffi_status FFI_HIDDEN
+ffi_prep_cif_core (ffi_cif * cif,
+ ffi_abi abi, unsigned int isvariadic,
+ unsigned int nfixedargs, unsigned int ntotalargs,
+ ffi_type * rtype, ffi_type ** atypes)
{
unsigned bytes = 0;
unsigned int i;
ffi_type **ptr;
FFI_ASSERT (cif != NULL);
- FFI_ASSERT ((abi > FFI_FIRST_ABI) && (abi <= FFI_DEFAULT_ABI));
+ FFI_ASSERT((!isvariadic) || (nfixedargs >= 1));
+ FFI_ASSERT(nfixedargs <= ntotalargs);
+ FFI_ASSERT (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI);
cif->abi = abi;
cif->arg_types = atypes;
- cif->nargs = nargs;
+ cif->nargs = ntotalargs;
cif->rtype = rtype;
cif->flags = 0;
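The switch from ffi_prep_cif to ffi_prep_cif_core adds the isvariadic/nfixedargs/ntotalargs plumbing that backs libffi's variadic entry point. A minimal sketch of the variadic path, assuming the ffi_prep_cif_var API is available in this libffi version, illustration only:

#include <ffi.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *argtypes[2] = { &ffi_type_pointer, &ffi_type_sint };
    void *argvalues[2];
    const char *fmt = "value: %d\n";
    int value = 7;
    ffi_arg rc;

    argvalues[0] = &fmt;
    argvalues[1] = &value;

    /* 1 fixed argument (the format string), 2 arguments in total. */
    if (ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, &ffi_type_sint,
                         argtypes) != FFI_OK)
        return 1;

    ffi_call(&cif, FFI_FN(printf), &rc, argvalues);
    return 0;
}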
diff --git a/Modules/_ctypes/libffi/src/cris/ffitarget.h b/Modules/_ctypes/libffi/src/cris/ffitarget.h
index 4257f10..b837e97 100644
--- a/Modules/_ctypes/libffi/src/cris/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/cris/ffitarget.h
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 1996-2003 Red Hat, Inc.
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 1996-2003 Red Hat, Inc.
Target configuration macros for CRIS.
Permission is hereby granted, free of charge, to any person obtaining
@@ -27,6 +28,10 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
#ifndef LIBFFI_ASM
typedef unsigned long ffi_arg;
typedef signed long ffi_sarg;
@@ -34,8 +39,8 @@ typedef signed long ffi_sarg;
typedef enum ffi_abi {
FFI_FIRST_ABI = 0,
FFI_SYSV,
- FFI_DEFAULT_ABI = FFI_SYSV,
- FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
} ffi_abi;
#endif
diff --git a/Modules/_ctypes/libffi/src/dlmalloc.c b/Modules/_ctypes/libffi/src/dlmalloc.c
index 582ddc3..2773953 100644
--- a/Modules/_ctypes/libffi/src/dlmalloc.c
+++ b/Modules/_ctypes/libffi/src/dlmalloc.c
@@ -464,6 +464,14 @@ DEFAULT_MMAP_THRESHOLD default: 256K
#define _GNU_SOURCE 1
#endif /* WIN32 */
+#ifdef __OS2__
+#define INCL_DOS
+#include <os2.h>
+#define HAVE_MMAP 1
+#define HAVE_MORECORE 0
+#define LACKS_SYS_MMAN_H
+#endif /* __OS2__ */
+
#if defined(DARWIN) || defined(_DARWIN)
/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
#ifndef HAVE_MORECORE
@@ -619,6 +627,9 @@ DEFAULT_MMAP_THRESHOLD default: 256K
#include "/usr/include/malloc.h"
#else /* HAVE_USR_INCLUDE_MALLOC_H */
+/* HP-UX's stdlib.h redefines mallinfo unless _STRUCT_MALLINFO is defined */
+#define _STRUCT_MALLINFO
+
struct mallinfo {
MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
@@ -1293,7 +1304,7 @@ extern void* sbrk(ptrdiff_t);
#define IS_MMAPPED_BIT (SIZE_T_ONE)
#define USE_MMAP_BIT (SIZE_T_ONE)
-#ifndef WIN32
+#if !defined(WIN32) && !defined (__OS2__)
#define CALL_MUNMAP(a, s) munmap((a), (s))
#define MMAP_PROT (PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
@@ -1316,6 +1327,42 @@ static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
#endif /* MAP_ANONYMOUS */
#define DIRECT_MMAP(s) CALL_MMAP(s)
+
+#elif defined(__OS2__)
+
+/* OS/2 MMAP via DosAllocMem */
+static void* os2mmap(size_t size) {
+ void* ptr;
+ if (DosAllocMem(&ptr, size, OBJ_ANY|PAG_COMMIT|PAG_READ|PAG_WRITE) &&
+ DosAllocMem(&ptr, size, PAG_COMMIT|PAG_READ|PAG_WRITE))
+ return MFAIL;
+ return ptr;
+}
+
+#define os2direct_mmap(n) os2mmap(n)
+
+/* This function supports releasing coalesced segments */
+static int os2munmap(void* ptr, size_t size) {
+ while (size) {
+ ULONG ulSize = size;
+ ULONG ulFlags = 0;
+ if (DosQueryMem(ptr, &ulSize, &ulFlags) != 0)
+ return -1;
+ if ((ulFlags & PAG_BASE) == 0 ||(ulFlags & PAG_COMMIT) == 0 ||
+ ulSize > size)
+ return -1;
+ if (DosFreeMem(ptr) != 0)
+ return -1;
+ ptr = ( void * ) ( ( char * ) ptr + ulSize );
+ size -= ulSize;
+ }
+ return 0;
+}
+
+#define CALL_MMAP(s) os2mmap(s)
+#define CALL_MUNMAP(a, s) os2munmap((a), (s))
+#define DIRECT_MMAP(s) os2direct_mmap(s)
+
#else /* WIN32 */
/* Win32 MMAP via VirtualAlloc */
@@ -1392,7 +1439,7 @@ static int win32munmap(void* ptr, size_t size) {
unique mparams values are initialized only once.
*/
-#ifndef WIN32
+#if !defined(WIN32) && !defined(__OS2__)
/* By default use posix locks */
#include <pthread.h>
#define MLOCK_T pthread_mutex_t
@@ -1406,6 +1453,16 @@ static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
+#elif defined(__OS2__)
+#define MLOCK_T HMTX
+#define INITIAL_LOCK(l) DosCreateMutexSem(0, l, 0, FALSE)
+#define ACQUIRE_LOCK(l) DosRequestMutexSem(*l, SEM_INDEFINITE_WAIT)
+#define RELEASE_LOCK(l) DosReleaseMutexSem(*l)
+#if HAVE_MORECORE
+static MLOCK_T morecore_mutex;
+#endif /* HAVE_MORECORE */
+static MLOCK_T magic_init_mutex;
+
#else /* WIN32 */
/*
Because lock-protected regions have bounded times, and there
@@ -2497,10 +2554,15 @@ static int init_mparams(void) {
}
RELEASE_MAGIC_INIT_LOCK();
-#ifndef WIN32
+#if !defined(WIN32) && !defined(__OS2__)
mparams.page_size = malloc_getpagesize;
mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
DEFAULT_GRANULARITY : mparams.page_size);
+#elif defined (__OS2__)
+  /* if low memory is used, os2munmap() would break
+     if the granularity were anything other than 64k */
+ mparams.page_size = 4096u;
+ mparams.granularity = 65536u;
#else /* WIN32 */
{
SYSTEM_INFO system_info;
@@ -4197,7 +4259,7 @@ void* dlmalloc(size_t bytes) {
void dlfree(void* mem) {
/*
- Consolidate freed chunks with preceeding or succeeding bordering
+ Consolidate freed chunks with preceding or succeeding bordering
free chunks, if they exist, and then place in a bin. Intermixed
with special cases for top, dv, mmapped chunks, and usage errors.
*/
diff --git a/Modules/_ctypes/libffi/src/frv/ffitarget.h b/Modules/_ctypes/libffi/src/frv/ffitarget.h
index 1c319ea..d42540e 100644
--- a/Modules/_ctypes/libffi/src/frv/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/frv/ffitarget.h
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 1996-2004 Red Hat, Inc.
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 1996-2004 Red Hat, Inc.
Target configuration macros for FR-V
Permission is hereby granted, free of charge, to any person obtaining
@@ -27,6 +28,10 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
/* ---- System specific configurations ----------------------------------- */
#ifndef LIBFFI_ASM
@@ -35,13 +40,9 @@ typedef signed long ffi_sarg;
typedef enum ffi_abi {
FFI_FIRST_ABI = 0,
-
-#ifdef FRV
FFI_EABI,
- FFI_DEFAULT_ABI = FFI_EABI,
-#endif
-
- FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_EABI
} ffi_abi;
#endif
diff --git a/Modules/_ctypes/libffi/src/ia64/ffi.c b/Modules/_ctypes/libffi/src/ia64/ffi.c
index 3f8fcc5..9533ef6 100644
--- a/Modules/_ctypes/libffi/src/ia64/ffi.c
+++ b/Modules/_ctypes/libffi/src/ia64/ffi.c
@@ -1,6 +1,7 @@
/* -----------------------------------------------------------------------
- ffi.c - Copyright (c) 1998, 2007, 2008 Red Hat, Inc.
+ ffi.c - Copyright (c) 1998, 2007, 2008, 2012 Red Hat, Inc.
Copyright (c) 2000 Hewlett Packard Company
+ Copyright (c) 2011 Anthony Green
IA64 Foreign Function Interface
@@ -225,7 +226,7 @@ ffi_prep_cif_machdep(ffi_cif *cif)
int flags;
/* Adjust cif->bytes to include space for the bits of the ia64_args frame
- that preceeds the integer register portion. The estimate that the
+ that precedes the integer register portion. The estimate that the
generic bits did for the argument space required is good enough for the
integer component. */
cif->bytes += offsetof(struct ia64_args, gp_regs[0]);
@@ -324,13 +325,17 @@ ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
case FFI_TYPE_FLOAT:
if (gpcount < 8 && fpcount < 8)
stf_spill (&stack->fp_regs[fpcount++], *(float *)avalue[i]);
- stack->gp_regs[gpcount++] = *(UINT32 *)avalue[i];
+ {
+ UINT32 tmp;
+ memcpy (&tmp, avalue[i], sizeof (UINT32));
+ stack->gp_regs[gpcount++] = tmp;
+ }
break;
case FFI_TYPE_DOUBLE:
if (gpcount < 8 && fpcount < 8)
stf_spill (&stack->fp_regs[fpcount++], *(double *)avalue[i]);
- stack->gp_regs[gpcount++] = *(UINT64 *)avalue[i];
+ memcpy (&stack->gp_regs[gpcount++], avalue[i], sizeof (UINT64));
break;
case FFI_TYPE_LONGDOUBLE:
@@ -425,7 +430,8 @@ ffi_prep_closure_loc (ffi_closure* closure,
struct ffi_ia64_trampoline_struct *tramp;
struct ia64_fd *fd;
- FFI_ASSERT (cif->abi == FFI_UNIX);
+ if (cif->abi != FFI_UNIX)
+ return FFI_BAD_ABI;
tramp = (struct ffi_ia64_trampoline_struct *)closure->tramp;
fd = (struct ia64_fd *)(void *)ffi_closure_unix;
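The FFI_TYPE_FLOAT and FFI_TYPE_DOUBLE hunks above switch from dereferencing avalue[i] through an integer pointer to copying its bytes with memcpy, which sidesteps alignment and strict-aliasing hazards. A small, target-independent sketch of the same pattern:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    float f = 1.5f;
    void *avalue = &f;
    uint32_t bits;

    memcpy(&bits, avalue, sizeof bits);   /* well-defined for any alignment */
    printf("0x%08x\n", (unsigned)bits);   /* 0x3fc00000 for 1.5f            */
    return 0;
}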
diff --git a/Modules/_ctypes/libffi/src/ia64/ffitarget.h b/Modules/_ctypes/libffi/src/ia64/ffitarget.h
index d85c049..e68cea6 100644
--- a/Modules/_ctypes/libffi/src/ia64/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/ia64/ffitarget.h
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 1996-2003 Red Hat, Inc.
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 1996-2003 Red Hat, Inc.
Target configuration macros for IA-64.
Permission is hereby granted, free of charge, to any person obtaining
@@ -27,6 +28,10 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
#ifndef LIBFFI_ASM
typedef unsigned long long ffi_arg;
typedef signed long long ffi_sarg;
@@ -34,8 +39,8 @@ typedef signed long long ffi_sarg;
typedef enum ffi_abi {
FFI_FIRST_ABI = 0,
FFI_UNIX, /* Linux and all Unix variants use the same conventions */
- FFI_DEFAULT_ABI = FFI_UNIX,
- FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_UNIX
} ffi_abi;
#endif
diff --git a/Modules/_ctypes/libffi/src/java_raw_api.c b/Modules/_ctypes/libffi/src/java_raw_api.c
index 9c5383e..522c8bf 100644
--- a/Modules/_ctypes/libffi/src/java_raw_api.c
+++ b/Modules/_ctypes/libffi/src/java_raw_api.c
@@ -311,7 +311,7 @@ ffi_java_translate_args (ffi_cif *cif, void *rvalue,
ffi_raw_closure *cl = (ffi_raw_closure*)user_data;
ffi_java_ptrarray_to_raw (cif, avalue, raw);
- (*cl->fun) (cif, rvalue, raw, cl->user_data);
+ (*cl->fun) (cif, rvalue, (ffi_raw*)raw, cl->user_data);
ffi_java_raw_to_rvalue (cif, rvalue);
}
diff --git a/Modules/_ctypes/libffi/src/m32r/ffitarget.h b/Modules/_ctypes/libffi/src/m32r/ffitarget.h
index 6a761f6..6c34801 100644
--- a/Modules/_ctypes/libffi/src/m32r/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/m32r/ffitarget.h
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 2004 Renesas Technology.
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 2004 Renesas Technology.
Target configuration macros for M32R.
Permission is hereby granted, free of charge, to any person obtaining
@@ -26,6 +27,10 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
/* ---- Generic type definitions ----------------------------------------- */
#ifndef LIBFFI_ASM
@@ -36,8 +41,8 @@ typedef enum ffi_abi
{
FFI_FIRST_ABI = 0,
FFI_SYSV,
- FFI_DEFAULT_ABI = FFI_SYSV,
- FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
} ffi_abi;
#endif
diff --git a/Modules/_ctypes/libffi/src/m68k/ffi.c b/Modules/_ctypes/libffi/src/m68k/ffi.c
index 600cf20..0dee938 100644
--- a/Modules/_ctypes/libffi/src/m68k/ffi.c
+++ b/Modules/_ctypes/libffi/src/m68k/ffi.c
@@ -1,7 +1,7 @@
/* -----------------------------------------------------------------------
ffi.c
-
- m68k Foreign Function Interface
+
+ m68k Foreign Function Interface
----------------------------------------------------------------------- */
#include <ffi.h>
@@ -9,8 +9,17 @@
#include <stdlib.h>
#include <unistd.h>
+#ifdef __rtems__
+void rtems_cache_flush_multiple_data_lines( const void *, size_t );
+#else
#include <sys/syscall.h>
+#ifdef __MINT__
+#include <mint/mintbind.h>
+#include <mint/ssystem.h>
+#else
#include <asm/cachectl.h>
+#endif
+#endif
void ffi_call_SYSV (extended_cif *,
unsigned, unsigned,
@@ -35,8 +44,12 @@ ffi_prep_args (void *stack, extended_cif *ecif)
argp = stack;
- if (ecif->cif->rtype->type == FFI_TYPE_STRUCT
- && !ecif->cif->flags)
+ if (
+#ifdef __MINT__
+ (ecif->cif->rtype->type == FFI_TYPE_LONGDOUBLE) ||
+#endif
+ (((ecif->cif->rtype->type == FFI_TYPE_STRUCT)
+ && !ecif->cif->flags)))
struct_value_ptr = ecif->rvalue;
else
struct_value_ptr = NULL;
@@ -47,12 +60,12 @@ ffi_prep_args (void *stack, extended_cif *ecif)
i != 0;
i--, p_arg++)
{
- size_t z;
+ size_t z = (*p_arg)->size;
+ int type = (*p_arg)->type;
- z = (*p_arg)->size;
if (z < sizeof (int))
{
- switch ((*p_arg)->type)
+ switch (type)
{
case FFI_TYPE_SINT8:
*(signed int *) argp = (signed int) *(SINT8 *) *p_argv;
@@ -71,7 +84,14 @@ ffi_prep_args (void *stack, extended_cif *ecif)
break;
case FFI_TYPE_STRUCT:
+#ifdef __MINT__
+ if (z == 1 || z == 2)
+ memcpy (argp + 2, *p_argv, z);
+ else
+ memcpy (argp, *p_argv, z);
+#else
memcpy (argp + sizeof (int) - z, *p_argv, z);
+#endif
break;
default:
@@ -103,6 +123,8 @@ ffi_prep_args (void *stack, extended_cif *ecif)
#define CIF_FLAGS_POINTER 32
#define CIF_FLAGS_STRUCT1 64
#define CIF_FLAGS_STRUCT2 128
+#define CIF_FLAGS_SINT8 256
+#define CIF_FLAGS_SINT16 512
/* Perform machine dependent cif processing */
ffi_status
@@ -116,17 +138,34 @@ ffi_prep_cif_machdep (ffi_cif *cif)
break;
case FFI_TYPE_STRUCT:
+ if (cif->rtype->elements[0]->type == FFI_TYPE_STRUCT &&
+ cif->rtype->elements[1])
+ {
+ cif->flags = 0;
+ break;
+ }
+
switch (cif->rtype->size)
{
case 1:
+#ifdef __MINT__
+ cif->flags = CIF_FLAGS_STRUCT2;
+#else
cif->flags = CIF_FLAGS_STRUCT1;
+#endif
break;
case 2:
cif->flags = CIF_FLAGS_STRUCT2;
break;
+#ifdef __MINT__
+ case 3:
+#endif
case 4:
cif->flags = CIF_FLAGS_INT;
break;
+#ifdef __MINT__
+ case 7:
+#endif
case 8:
cif->flags = CIF_FLAGS_DINT;
break;
@@ -144,9 +183,15 @@ ffi_prep_cif_machdep (ffi_cif *cif)
cif->flags = CIF_FLAGS_DOUBLE;
break;
+#if (FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE)
case FFI_TYPE_LONGDOUBLE:
+#ifdef __MINT__
+ cif->flags = 0;
+#else
cif->flags = CIF_FLAGS_LDOUBLE;
+#endif
break;
+#endif
case FFI_TYPE_POINTER:
cif->flags = CIF_FLAGS_POINTER;
@@ -157,6 +202,14 @@ ffi_prep_cif_machdep (ffi_cif *cif)
cif->flags = CIF_FLAGS_DINT;
break;
+ case FFI_TYPE_SINT16:
+ cif->flags = CIF_FLAGS_SINT16;
+ break;
+
+ case FFI_TYPE_SINT8:
+ cif->flags = CIF_FLAGS_SINT8;
+ break;
+
default:
cif->flags = CIF_FLAGS_INT;
break;
@@ -212,6 +265,26 @@ ffi_prep_incoming_args_SYSV (char *stack, void **avalue, ffi_cif *cif)
size_t z;
z = (*p_arg)->size;
+#ifdef __MINT__
+ if (cif->flags &&
+ cif->rtype->type == FFI_TYPE_STRUCT &&
+ (z == 1 || z == 2))
+ {
+ *p_argv = (void *) (argp + 2);
+
+ z = 4;
+ }
+ else
+ if (cif->flags &&
+ cif->rtype->type == FFI_TYPE_STRUCT &&
+ (z == 3 || z == 4))
+ {
+ *p_argv = (void *) (argp);
+
+ z = 4;
+ }
+ else
+#endif
if (z <= 4)
{
*p_argv = (void *) (argp + 4 - z);
@@ -255,19 +328,31 @@ ffi_prep_closure_loc (ffi_closure* closure,
void *user_data,
void *codeloc)
{
- FFI_ASSERT (cif->abi == FFI_SYSV);
+ if (cif->abi != FFI_SYSV)
+ return FFI_BAD_ABI;
*(unsigned short *)closure->tramp = 0x207c;
*(void **)(closure->tramp + 2) = codeloc;
*(unsigned short *)(closure->tramp + 6) = 0x4ef9;
- if (cif->rtype->type == FFI_TYPE_STRUCT
- && !cif->flags)
+
+ if (
+#ifdef __MINT__
+ (cif->rtype->type == FFI_TYPE_LONGDOUBLE) ||
+#endif
+ (((cif->rtype->type == FFI_TYPE_STRUCT)
+ && !cif->flags)))
*(void **)(closure->tramp + 8) = ffi_closure_struct_SYSV;
else
*(void **)(closure->tramp + 8) = ffi_closure_SYSV;
+#ifdef __rtems__
+ rtems_cache_flush_multiple_data_lines( codeloc, FFI_TRAMPOLINE_SIZE );
+#elif defined(__MINT__)
+ Ssystem(S_FLUSHCACHE, codeloc, FFI_TRAMPOLINE_SIZE);
+#else
syscall(SYS_cacheflush, codeloc, FLUSH_SCOPE_LINE,
FLUSH_CACHE_BOTH, FFI_TRAMPOLINE_SIZE);
+#endif
closure->cif = cif;
closure->user_data = user_data;
@@ -275,4 +360,3 @@ ffi_prep_closure_loc (ffi_closure* closure,
return FFI_OK;
}
-
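The closure trampoline written above consists of two m68k instructions: movea.l #codeloc,%a0 (opcode 0x207c) followed by jmp <entry>.l (opcode 0x4ef9). A hedged sketch of the byte packing done by ffi_prep_closure_loc; byte order comes from the host, so it only matches the real layout when built on the big-endian m68k target itself, and it is illustration only:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack the 12 bytes stored by ffi_prep_closure_loc:
     offset 0: 0x207c, offset 2: codeloc   (movea.l #codeloc,%a0)
     offset 6: 0x4ef9, offset 8: entry     (jmp entry.l)          */
static void build_m68k_trampoline(unsigned char tramp[12],
                                  uint32_t codeloc, uint32_t entry)
{
    uint16_t movea_l_a0 = 0x207c;
    uint16_t jmp_abs_l  = 0x4ef9;

    memcpy(tramp + 0, &movea_l_a0, 2);
    memcpy(tramp + 2, &codeloc, 4);
    memcpy(tramp + 6, &jmp_abs_l, 2);
    memcpy(tramp + 8, &entry, 4);
}

int main(void)
{
    unsigned char tramp[12];
    int i;

    build_m68k_trampoline(tramp, 0x00801000u, 0x00802000u);
    for (i = 0; i < 12; i++)
        printf("%02x ", tramp[i]);
    printf("\n");
    return 0;
}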
diff --git a/Modules/_ctypes/libffi/src/m68k/ffitarget.h b/Modules/_ctypes/libffi/src/m68k/ffitarget.h
index 633717b..e81dde2 100644
--- a/Modules/_ctypes/libffi/src/m68k/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/m68k/ffitarget.h
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 1996-2003 Red Hat, Inc.
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 1996-2003 Red Hat, Inc.
Target configuration macros for Motorola 68K.
Permission is hereby granted, free of charge, to any person obtaining
@@ -27,6 +28,10 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
#ifndef LIBFFI_ASM
typedef unsigned long ffi_arg;
typedef signed long ffi_sarg;
@@ -34,8 +39,8 @@ typedef signed long ffi_sarg;
typedef enum ffi_abi {
FFI_FIRST_ABI = 0,
FFI_SYSV,
- FFI_DEFAULT_ABI = FFI_SYSV,
- FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
} ffi_abi;
#endif
diff --git a/Modules/_ctypes/libffi/src/m68k/sysv.S b/Modules/_ctypes/libffi/src/m68k/sysv.S
index 58822e0..ec2b14f 100644
--- a/Modules/_ctypes/libffi/src/m68k/sysv.S
+++ b/Modules/_ctypes/libffi/src/m68k/sysv.S
@@ -1,8 +1,11 @@
/* -----------------------------------------------------------------------
- sysv.S - Copyright (c) 1998 Andreas Schwab
- Copyright (c) 2008 Red Hat, Inc.
-
- m68k Foreign Function Interface
+
+ sysv.S - Copyright (c) 2012 Alan Hourihane
+ Copyright (c) 1998, 2012 Andreas Schwab
+ Copyright (c) 2008 Red Hat, Inc.
+ Copyright (c) 2012 Thorsten Glaser
+
+ m68k Foreign Function Interface
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -41,13 +44,19 @@
#define CFI_ENDPROC()
#endif
+#ifdef __MINT__
+#define CALLFUNC(funcname) _ ## funcname
+#else
+#define CALLFUNC(funcname) funcname
+#endif
+
.text
- .globl ffi_call_SYSV
- .type ffi_call_SYSV,@function
+ .globl CALLFUNC(ffi_call_SYSV)
+ .type CALLFUNC(ffi_call_SYSV),@function
.align 4
-ffi_call_SYSV:
+CALLFUNC(ffi_call_SYSV):
CFI_STARTPROC()
link %fp,#0
CFI_OFFSET(14,-8)
@@ -62,14 +71,18 @@ ffi_call_SYSV:
move.l 8(%fp),-(%sp)
pea 4(%sp)
#if !defined __PIC__
- jsr ffi_prep_args
+ jsr CALLFUNC(ffi_prep_args)
#else
- bsr.l ffi_prep_args@PLTPC
+ bsr.l CALLFUNC(ffi_prep_args@PLTPC)
#endif
addq.l #8,%sp
| Pass pointer to struct value, if any
+#ifdef __MINT__
+ move.l %d0,%a1
+#else
move.l %a0,%a1
+#endif
| Call the function
move.l 24(%fp),%a0
@@ -85,7 +98,12 @@ ffi_call_SYSV:
move.l 16(%fp),%d2
| If the return value pointer is NULL, assume no return value.
+ | NOTE: On the mc68000, tst on an address register is not supported.
+#if !defined(__mc68020__) && !defined(__mc68030__) && !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcoldfire__)
+ cmp.w #0, %a1
+#else
tst.l %a1
+#endif
jbeq noretval
btst #0,%d2
@@ -103,25 +121,44 @@ retlongint:
retfloat:
btst #2,%d2
jbeq retdouble
+#if defined(__MC68881__) || defined(__HAVE_68881__)
fmove.s %fp0,(%a1)
+#else
+ move.l %d0,(%a1)
+#endif
jbra epilogue
retdouble:
btst #3,%d2
jbeq retlongdouble
+#if defined(__MC68881__) || defined(__HAVE_68881__)
fmove.d %fp0,(%a1)
+#else
+ move.l %d0,(%a1)+
+ move.l %d1,(%a1)
+#endif
jbra epilogue
retlongdouble:
btst #4,%d2
jbeq retpointer
+#if defined(__MC68881__) || defined(__HAVE_68881__)
fmove.x %fp0,(%a1)
+#else
+ move.l %d0,(%a1)+
+ move.l %d1,(%a1)+
+ move.l %d2,(%a1)
+#endif
jbra epilogue
retpointer:
btst #5,%d2
jbeq retstruct1
+#ifdef __MINT__
+ move.l %d0,(%a1)
+#else
move.l %a0,(%a1)
+#endif
jbra epilogue
retstruct1:
@@ -132,8 +169,28 @@ retstruct1:
retstruct2:
btst #7,%d2
- jbeq noretval
+ jbeq retsint8
move.w %d0,(%a1)
+ jbra epilogue
+
+retsint8:
+ btst #8,%d2
+ jbeq retsint16
+ | NOTE: On the mc68000, extb is not supported. 8->16, then 16->32.
+#if !defined(__mc68020__) && !defined(__mc68030__) && !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcoldfire__)
+ ext.w %d0
+ ext.l %d0
+#else
+ extb.l %d0
+#endif
+ move.l %d0,(%a1)
+ jbra epilogue
+
+retsint16:
+ btst #9,%d2
+ jbeq noretval
+ ext.l %d0
+ move.l %d0,(%a1)
noretval:
epilogue:
@@ -141,13 +198,13 @@ epilogue:
unlk %fp
rts
CFI_ENDPROC()
- .size ffi_call_SYSV,.-ffi_call_SYSV
+ .size CALLFUNC(ffi_call_SYSV),.-CALLFUNC(ffi_call_SYSV)
- .globl ffi_closure_SYSV
- .type ffi_closure_SYSV, @function
+ .globl CALLFUNC(ffi_closure_SYSV)
+ .type CALLFUNC(ffi_closure_SYSV), @function
.align 4
-ffi_closure_SYSV:
+CALLFUNC(ffi_closure_SYSV):
CFI_STARTPROC()
link %fp,#-12
CFI_OFFSET(14,-8)
@@ -157,16 +214,18 @@ ffi_closure_SYSV:
pea -12(%fp)
move.l %a0,-(%sp)
#if !defined __PIC__
- jsr ffi_closure_SYSV_inner
+ jsr CALLFUNC(ffi_closure_SYSV_inner)
#else
- bsr.l ffi_closure_SYSV_inner@PLTPC
+ bsr.l CALLFUNC(ffi_closure_SYSV_inner@PLTPC)
#endif
lsr.l #1,%d0
jne 1f
jcc .Lcls_epilogue
+ | CIF_FLAGS_INT
move.l -12(%fp),%d0
.Lcls_epilogue:
+ | no CIF_FLAGS_*
unlk %fp
rts
1:
@@ -174,43 +233,80 @@ ffi_closure_SYSV:
lsr.l #2,%d0
jne 1f
jcs .Lcls_ret_float
+ | CIF_FLAGS_DINT
move.l (%a0)+,%d0
move.l (%a0),%d1
jra .Lcls_epilogue
.Lcls_ret_float:
+#if defined(__MC68881__) || defined(__HAVE_68881__)
fmove.s (%a0),%fp0
+#else
+ move.l (%a0),%d0
+#endif
jra .Lcls_epilogue
1:
lsr.l #2,%d0
jne 1f
jcs .Lcls_ret_ldouble
+ | CIF_FLAGS_DOUBLE
+#if defined(__MC68881__) || defined(__HAVE_68881__)
fmove.d (%a0),%fp0
+#else
+ move.l (%a0)+,%d0
+ move.l (%a0),%d1
+#endif
jra .Lcls_epilogue
.Lcls_ret_ldouble:
+#if defined(__MC68881__) || defined(__HAVE_68881__)
fmove.x (%a0),%fp0
+#else
+ move.l (%a0)+,%d0
+ move.l (%a0)+,%d1
+ move.l (%a0),%d2
+#endif
jra .Lcls_epilogue
1:
lsr.l #2,%d0
- jne .Lcls_ret_struct2
+ jne 1f
jcs .Lcls_ret_struct1
+ | CIF_FLAGS_POINTER
move.l (%a0),%a0
move.l %a0,%d0
jra .Lcls_epilogue
.Lcls_ret_struct1:
move.b (%a0),%d0
jra .Lcls_epilogue
-.Lcls_ret_struct2:
+1:
+ lsr.l #2,%d0
+ jne 1f
+ jcs .Lcls_ret_sint8
+ | CIF_FLAGS_STRUCT2
move.w (%a0),%d0
jra .Lcls_epilogue
+.Lcls_ret_sint8:
+ move.l (%a0),%d0
+ | NOTE: On the mc68000, extb is not supported. 8->16, then 16->32.
+#if !defined(__mc68020__) && !defined(__mc68030__) && !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcoldfire__)
+ ext.w %d0
+ ext.l %d0
+#else
+ extb.l %d0
+#endif
+ jra .Lcls_epilogue
+1:
+ | CIF_FLAGS_SINT16
+ move.l (%a0),%d0
+ ext.l %d0
+ jra .Lcls_epilogue
CFI_ENDPROC()
- .size ffi_closure_SYSV,.-ffi_closure_SYSV
+ .size CALLFUNC(ffi_closure_SYSV),.-CALLFUNC(ffi_closure_SYSV)
- .globl ffi_closure_struct_SYSV
- .type ffi_closure_struct_SYSV, @function
+ .globl CALLFUNC(ffi_closure_struct_SYSV)
+ .type CALLFUNC(ffi_closure_struct_SYSV), @function
.align 4
-ffi_closure_struct_SYSV:
+CALLFUNC(ffi_closure_struct_SYSV):
CFI_STARTPROC()
link %fp,#0
CFI_OFFSET(14,-8)
@@ -220,14 +316,14 @@ ffi_closure_struct_SYSV:
move.l %a1,-(%sp)
move.l %a0,-(%sp)
#if !defined __PIC__
- jsr ffi_closure_SYSV_inner
+ jsr CALLFUNC(ffi_closure_SYSV_inner)
#else
- bsr.l ffi_closure_SYSV_inner@PLTPC
+ bsr.l CALLFUNC(ffi_closure_SYSV_inner@PLTPC)
#endif
unlk %fp
rts
CFI_ENDPROC()
- .size ffi_closure_struct_SYSV,.-ffi_closure_struct_SYSV
+ .size CALLFUNC(ffi_closure_struct_SYSV),.-CALLFUNC(ffi_closure_struct_SYSV)
#if defined __ELF__ && defined __linux__
.section .note.GNU-stack,"",@progbits
diff --git a/Modules/_ctypes/libffi/src/metag/ffi.c b/Modules/_ctypes/libffi/src/metag/ffi.c
new file mode 100644
index 0000000..46b383e
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/metag/ffi.c
@@ -0,0 +1,330 @@
+/* ----------------------------------------------------------------------
+ ffi.c - Copyright (c) 2013 Imagination Technologies
+
+ Meta Foreign Function Interface
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ `Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED `AS IS'', WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL SIMON POSNJAK BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+----------------------------------------------------------------------- */
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+#include <stdlib.h>
+
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+
+/*
+ * ffi_prep_args is called by the assembly routine once stack space has been
+ * allocated for the function's arguments
+ */
+
+unsigned int ffi_prep_args(char *stack, extended_cif *ecif)
+{
+ register unsigned int i;
+ register void **p_argv;
+ register char *argp;
+ register ffi_type **p_arg;
+
+ argp = stack;
+
+ /* Store return value */
+ if ( ecif->cif->flags == FFI_TYPE_STRUCT ) {
+ argp -= 4;
+ *(void **) argp = ecif->rvalue;
+ }
+
+ p_argv = ecif->avalue;
+
+ /* point to next location */
+ for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types; (i != 0); i--, p_arg++, p_argv++)
+ {
+ size_t z;
+
+ /* Move argp to address of argument */
+ z = (*p_arg)->size;
+ argp -= z;
+
+ /* Align if necessary */
+ argp = (char *) ALIGN_DOWN(ALIGN_DOWN(argp, (*p_arg)->alignment), 4);
+
+ if (z < sizeof(int)) {
+ z = sizeof(int);
+ switch ((*p_arg)->type)
+ {
+ case FFI_TYPE_SINT8:
+ *(signed int *) argp = (signed int)*(SINT8 *)(* p_argv);
+ break;
+ case FFI_TYPE_UINT8:
+ *(unsigned int *) argp = (unsigned int)*(UINT8 *)(* p_argv);
+ break;
+ case FFI_TYPE_SINT16:
+ *(signed int *) argp = (signed int)*(SINT16 *)(* p_argv);
+ break;
+ case FFI_TYPE_UINT16:
+		*(unsigned int *) argp = (unsigned int)*(UINT16 *)(* p_argv);
+		break;
+ case FFI_TYPE_STRUCT:
+ memcpy(argp, *p_argv, (*p_arg)->size);
+ break;
+ default:
+ FFI_ASSERT(0);
+ }
+ } else if ( z == sizeof(int)) {
+ *(unsigned int *) argp = (unsigned int)*(UINT32 *)(* p_argv);
+ } else {
+ memcpy(argp, *p_argv, z);
+ }
+ }
+
+ /* return the size of the arguments to be passed in registers,
+ padded to an 8 byte boundary to preserve stack alignment */
+ return ALIGN(MIN(stack - argp, 6*4), 8);
+}
+
+/* Perform machine dependent cif processing */
+ffi_status ffi_prep_cif_machdep(ffi_cif *cif)
+{
+ ffi_type **ptr;
+ unsigned i, bytes = 0;
+
+ for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++) {
+ if ((*ptr)->size == 0)
+ return FFI_BAD_TYPEDEF;
+
+ /* Perform a sanity check on the argument type, do this
+ check after the initialization. */
+ FFI_ASSERT_VALID_TYPE(*ptr);
+
+ /* Add any padding if necessary */
+ if (((*ptr)->alignment - 1) & bytes)
+ bytes = ALIGN(bytes, (*ptr)->alignment);
+
+ bytes += ALIGN((*ptr)->size, 4);
+ }
+
+ /* Ensure arg space is aligned to an 8-byte boundary */
+ bytes = ALIGN(bytes, 8);
+
+ /* Make space for the return structure pointer */
+ if (cif->rtype->type == FFI_TYPE_STRUCT) {
+ bytes += sizeof(void*);
+
+ /* Ensure stack is aligned to an 8-byte boundary */
+ bytes = ALIGN(bytes, 8);
+ }
+
+ cif->bytes = bytes;
+
+ /* Set the return type flag */
+ switch (cif->rtype->type) {
+ case FFI_TYPE_VOID:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_DOUBLE:
+ cif->flags = (unsigned) cif->rtype->type;
+ break;
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_UINT64:
+ cif->flags = (unsigned) FFI_TYPE_SINT64;
+ break;
+ case FFI_TYPE_STRUCT:
+ /* Meta can store return values which are <= 64 bits */
+ if (cif->rtype->size <= 4)
+ /* Returned to D0Re0 as 32-bit value */
+ cif->flags = (unsigned)FFI_TYPE_INT;
+ else if ((cif->rtype->size > 4) && (cif->rtype->size <= 8))
+	/* Returned value is stored in D1Re0|D0Re0 */
+ cif->flags = (unsigned)FFI_TYPE_DOUBLE;
+ else
+ /* value stored in memory */
+ cif->flags = (unsigned)FFI_TYPE_STRUCT;
+ break;
+ default:
+ cif->flags = (unsigned)FFI_TYPE_INT;
+ break;
+ }
+ return FFI_OK;
+}
+
+extern void ffi_call_SYSV(void (*fn)(void), extended_cif *, unsigned, unsigned, double *);
+
+/*
+ * Exported in API. Entry point
+ * cif -> ffi_cif object
+ * fn -> function pointer
+ * rvalue -> pointer to return value
+ * avalue -> vector of void * pointers pointing to memory locations holding the
+ * arguments
+ */
+void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
+{
+ extended_cif ecif;
+
+ int small_struct = (((cif->flags == FFI_TYPE_INT) || (cif->flags == FFI_TYPE_DOUBLE)) && (cif->rtype->type == FFI_TYPE_STRUCT));
+ ecif.cif = cif;
+ ecif.avalue = avalue;
+
+ double temp;
+
+ /*
+ * If the return value is a struct and we don't have a return value address
+ * then we need to make one
+ */
+
+ if ((rvalue == NULL ) && (cif->flags == FFI_TYPE_STRUCT))
+ ecif.rvalue = alloca(cif->rtype->size);
+ else if (small_struct)
+ ecif.rvalue = &temp;
+ else
+ ecif.rvalue = rvalue;
+
+ switch (cif->abi) {
+ case FFI_SYSV:
+ ffi_call_SYSV(fn, &ecif, cif->bytes, cif->flags, ecif.rvalue);
+ break;
+ default:
+ FFI_ASSERT(0);
+ break;
+ }
+
+ if (small_struct)
+ memcpy (rvalue, &temp, cif->rtype->size);
+}
+
+/* private members */
+
+static void ffi_prep_incoming_args_SYSV (char *, void **, void **,
+ ffi_cif*, float *);
+
+void ffi_closure_SYSV (ffi_closure *);
+
+/* Do NOT change that without changing the FFI_TRAMPOLINE_SIZE */
+extern unsigned int ffi_metag_trampoline[10]; /* 10 instructions */
+
+/* end of private members */
+
+/*
+ * __tramp: trampoline memory location
+ * __fun: assembly routine
+ * __ctx: memory location for wrapper
+ *
+ * At this point, tramp[0] == __ctx !
+ */
+void ffi_init_trampoline(unsigned char *__tramp, unsigned int __fun, unsigned int __ctx) {
+ memcpy (__tramp, ffi_metag_trampoline, sizeof(ffi_metag_trampoline));
+ *(unsigned int*) &__tramp[40] = __ctx;
+ *(unsigned int*) &__tramp[44] = __fun;
+ /* This will flush the instruction cache */
+ __builtin_meta2_cachewd(&__tramp[0], 1);
+ __builtin_meta2_cachewd(&__tramp[47], 1);
+}
+
+
+
+/* the cif must already be prepared */
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure *closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*,void*,void**,void*),
+ void *user_data,
+ void *codeloc)
+{
+ void (*closure_func)(ffi_closure*) = NULL;
+
+ if (cif->abi == FFI_SYSV)
+ closure_func = &ffi_closure_SYSV;
+ else
+ return FFI_BAD_ABI;
+
+ ffi_init_trampoline(
+ (unsigned char*)&closure->tramp[0],
+ (unsigned int)closure_func,
+ (unsigned int)codeloc);
+
+ closure->cif = cif;
+ closure->user_data = user_data;
+ closure->fun = fun;
+
+ return FFI_OK;
+}
+
+
+/* This function is jumped to by the trampoline */
+unsigned int ffi_closure_SYSV_inner (closure, respp, args, vfp_args)
+ ffi_closure *closure;
+ void **respp;
+ void *args;
+ void *vfp_args;
+{
+ ffi_cif *cif;
+ void **arg_area;
+
+ cif = closure->cif;
+ arg_area = (void**) alloca (cif->nargs * sizeof (void*));
+
+ /*
+ * This call will initialize ARG_AREA, such that each
+ * element in that array points to the corresponding
+ * value on the stack; and if the function returns
+ * a structure, it will re-set RESP to point to the
+ * structure return address.
+ */
+ ffi_prep_incoming_args_SYSV(args, respp, arg_area, cif, vfp_args);
+
+ (closure->fun) ( cif, *respp, arg_area, closure->user_data);
+
+ return cif->flags;
+}
+
+static void ffi_prep_incoming_args_SYSV(char *stack, void **rvalue,
+ void **avalue, ffi_cif *cif,
+ float *vfp_stack)
+{
+ register unsigned int i;
+ register void **p_argv;
+ register char *argp;
+ register ffi_type **p_arg;
+
+ /* stack points to original arguments */
+ argp = stack;
+
+ /* Store return value */
+ if ( cif->flags == FFI_TYPE_STRUCT ) {
+ argp -= 4;
+ *rvalue = *(void **) argp;
+ }
+
+ p_argv = avalue;
+
+ for (i = cif->nargs, p_arg = cif->arg_types; (i != 0); i--, p_arg++) {
+ size_t z;
+ size_t alignment;
+
+ alignment = (*p_arg)->alignment;
+ if (alignment < 4)
+ alignment = 4;
+ if ((alignment - 1) & (unsigned)argp)
+ argp = (char *) ALIGN(argp, alignment);
+
+ z = (*p_arg)->size;
+ *p_argv = (void*) argp;
+ p_argv++;
+ argp -= z;
+ }
+ return;
+}
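
The new Meta port above plugs into libffi's generic entry point, with the roles of cif, fn, rvalue and avalue documented at ffi_call. A minimal caller-side sketch using the public API; the call to puts and the chosen argument types are illustrative only, not taken from this patch:

#include <stdio.h>
#include <ffi.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *argtypes[1] = { &ffi_type_pointer };
    void *argvalues[1];
    char *msg = "Hello from ffi_call";
    ffi_arg rc;

    /* describe the callee: int puts(const char *) */
    if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, argtypes) != FFI_OK)
        return 1;

    argvalues[0] = &msg;   /* each avalue entry points at the argument's storage */
    ffi_call(&cif, FFI_FN(puts), &rc, argvalues);
    return (int)rc == EOF;
}
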
diff --git a/Modules/_ctypes/libffi/src/metag/ffitarget.h b/Modules/_ctypes/libffi/src/metag/ffitarget.h
new file mode 100644
index 0000000..7b9dbeb
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/metag/ffitarget.h
@@ -0,0 +1,53 @@
+/* -----------------------------------------------------------------*-C-*-
+ ffitarget.h - Copyright (c) 2013 Imagination Technologies Ltd.
+ Target configuration macros for Meta
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+ ----------------------------------------------------------------------- */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+#ifndef LIBFFI_ASM
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+
+typedef enum ffi_abi {
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV,
+ FFI_DEFAULT_ABI = FFI_SYSV,
+ FFI_LAST_ABI = FFI_DEFAULT_ABI + 1,
+} ffi_abi;
+#endif
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+#define FFI_TRAMPOLINE_SIZE 48
+#define FFI_NATIVE_RAW_API 0
+
+#endif
+
diff --git a/Modules/_ctypes/libffi/src/metag/sysv.S b/Modules/_ctypes/libffi/src/metag/sysv.S
new file mode 100644
index 0000000..b4b2a3b
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/metag/sysv.S
@@ -0,0 +1,311 @@
+/* -----------------------------------------------------------------------
+ sysv.S - Copyright (c) 2013 Imagination Technologies Ltd.
+
+ Meta Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+#ifdef HAVE_MACHINE_ASM_H
+#include <machine/asm.h>
+#else
+#ifdef __USER_LABEL_PREFIX__
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+#define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+#else
+#define CNAME(x) x
+#endif
+#define ENTRY(x) .globl CNAME(x); .type CNAME(x), %function; CNAME(x):
+#endif
+
+#ifdef __ELF__
+#define LSYM(x) .x
+#else
+#define LSYM(x) x
+#endif
+
+.macro call_reg x=
+ .text
+ .balign 4
+ mov D1RtP, \x
+ swap D1RtP, PC
+.endm
+
+! Save register arguments
+.macro SAVE_ARGS
+ .text
+ .balign 4
+ setl [A0StP++], D0Ar6, D1Ar5
+ setl [A0StP++], D0Ar4, D1Ar3
+ setl [A0StP++], D0Ar2, D1Ar1
+.endm
+
+! Save return pointer, frame pointer and other regs
+.macro SAVE_REGS regs=
+ .text
+ .balign 4
+ setl [A0StP++], D0FrT, D1RtP
+ ! Needs to be a pair of regs
+ .ifnc "\regs",""
+ setl [A0StP++], \regs
+ .endif
+.endm
+
+! Declare a global function
+.macro METAG_FUNC_START name
+ .text
+ .balign 4
+ ENTRY(\name)
+.endm
+
+! Restore registers from the stack. Reverses the SAVE_REGS operation
+.macro RET_REGS regs=, cond=
+ .ifnc "\regs", ""
+ getl \regs, [--A0StP]
+ .endif
+ getl D0FrT, D1RtP, [--A0StP]
+.endm
+
+! Restore argument registers
+.macro RET_ARGS
+ getl D0Ar2, D1Ar1, [--A0StP]
+ getl D0Ar4, D1Ar3, [--A0StP]
+ getl D0Ar6, D1Ar5, [--A0StP]
+.endm
+
+
+ ! D1Ar1: fn
+ ! D0Ar2: &ecif
+ ! D1Ar3: cif->bytes
+ ! D0Ar4: fig->flags
+ ! D1Ar5: ecif.rvalue
+
+ ! This assumes we are using GNU as
+METAG_FUNC_START ffi_call_SYSV
+ ! Save argument registers
+
+ SAVE_ARGS
+
+ ! new frame
+ mov D0FrT, A0FrP
+ add A0FrP, A0StP, #0
+
+ ! Preserve the old frame pointer
+ SAVE_REGS "D1.5, D0.5"
+
+	! Make room for new args. cif->bytes is the total space for input
+ ! and return arguments
+
+ add A0StP, A0StP, D1Ar3
+
+	! Preserve cif->bytes & fn
+ mov D0.5, D1Ar3
+ mov D1.5, D1Ar1
+
+	! Place the arguments for ffi_prep_args in position
+ mov D1Ar1, A0StP
+
+ ! Call ffi_prep_args(stack, &ecif)
+#ifdef __PIC__
+ callr D1RtP, CNAME(ffi_prep_args@PLT)
+#else
+ callr D1RtP, CNAME(ffi_prep_args)
+#endif
+
+ ! Restore fn pointer
+
+ ! The foreign stack should look like this
+ ! XXXXX XXXXXX <--- stack pointer
+ ! FnArgN rvalue
+ ! FnArgN+2 FnArgN+1
+ ! FnArgN+4 FnArgN+3
+ ! ....
+ !
+
+ ! A0StP now points to the first (or return) argument + 4
+
+ ! Preserve cif->bytes
+ getl D0Ar2, D1Ar1, [--A0StP]
+ getl D0Ar4, D1Ar3, [--A0StP]
+ getl D0Ar6, D1Ar5, [--A0StP]
+
+ ! Place A0StP to the first argument again
+	add A0StP, A0StP, #24 ! That's because we loaded 6 regs x 4 bytes each
+
+ ! A0FrP points to the initial stack without the reserved space for the
+	! cif->bytes, whilst A0StP points to the stack after the space allocation
+
+ ! fn was the first argument of ffi_call_SYSV.
+ ! The stack at this point looks like this:
+ !
+ ! A0StP(on entry to _SYSV) -> Arg6 Arg5 | low
+ ! Arg4 Arg3 |
+ ! Arg2 Arg1 |
+ ! A0FrP ----> D0FrtP D1RtP |
+ ! D1.5 D0.5 |
+ ! A0StP(bf prep_args) -> FnArgn FnArgn-1 |
+ ! FnArgn-2FnArgn-3 |
+ ! ................ | <= cifs->bytes
+ ! FnArg4 FnArg3 |
+ ! A0StP (prv_A0StP+cifs->bytes) FnArg2 FnArg1 | high
+ !
+	! fn was in Arg1 so it's located at A0FrP+#-0xC
+ !
+
+ ! D0Re0 contains the size of arguments stored in registers
+ sub A0StP, A0StP, D0Re0
+
+ ! Arg1 is the function pointer for the foreign call. This has been
+ ! preserved in D1.5
+
+ ! Time to call (fn). Arguments should be like this:
+ ! Arg1-Arg6 are loaded to regs
+ ! The rest of the arguments are stored in stack pointed by A0StP
+
+ call_reg D1.5
+
+ ! Reset stack.
+
+ mov A0StP, A0FrP
+
+	! Load Arg1 with the pointer to storage for the return value
+ ! This was stored in Arg5
+
+ getd D1Ar1, [A0FrP+#-20]
+
+ ! Load D0Ar2 with the return type code. This was stored in Arg4 (flags)
+
+ getd D0Ar2, [A0FrP+#-16]
+
+ ! We are ready to start processing the return value
+ ! D0Re0 (and D1Re0) hold the return value
+
+ ! If the return value is NULL, assume no return value
+	! If the return value pointer is NULL, assume no return value
+ beq LSYM(Lepilogue)
+
+ ! return INT
+ cmp D0Ar2, #FFI_TYPE_INT
+	! Sadly, there is no setd{cc} instruction so we need to work around that
+ bne .INT64
+ setd [D1Ar1], D0Re0
+ b LSYM(Lepilogue)
+
+ ! return INT64
+.INT64:
+ cmp D0Ar2, #FFI_TYPE_SINT64
+ setleq [D1Ar1], D0Re0, D1Re0
+
+ ! return DOUBLE
+ cmp D0Ar2, #FFI_TYPE_DOUBLE
+ setl [D1AR1++], D0Re0, D1Re0
+
+LSYM(Lepilogue):
+ ! At this point, the stack pointer points right after the argument
+ ! saved area. We need to restore 4 regs, therefore we need to move
+ ! 16 bytes ahead.
+ add A0StP, A0StP, #16
+ RET_REGS "D1.5, D0.5"
+ RET_ARGS
+ getd D0Re0, [A0StP]
+ mov A0FrP, D0FrT
+ swap D1RtP, PC
+
+.ffi_call_SYSV_end:
+ .size CNAME(ffi_call_SYSV),.ffi_call_SYSV_end-CNAME(ffi_call_SYSV)
+
+
+/*
+ (called by ffi_metag_trampoline)
+ void ffi_closure_SYSV (ffi_closure*)
+
+ (called by ffi_closure_SYSV)
+ unsigned int FFI_HIDDEN
+ ffi_closure_SYSV_inner (closure,respp, args)
+ ffi_closure *closure;
+ void **respp;
+ void *args;
+*/
+
+METAG_FUNC_START ffi_closure_SYSV
+ ! We assume that D1Ar1 holds the address of the
+ ! ffi_closure struct. We will use that to fetch the
+ ! arguments. The stack pointer points to an empty space
+ ! and it is ready to store more data.
+
+ ! D1Ar1 is ready
+ ! Allocate stack space for return value
+ add A0StP, A0StP, #8
+ ! Store it to D0Ar2
+ sub D0Ar2, A0StP, #8
+
+ sub D1Ar3, A0FrP, #4
+
+ ! D1Ar3 contains the address of the original D1Ar1 argument
+ ! We need to subtract #4 later on
+
+	! Preserve D0Ar2
+ mov D0.5, D0Ar2
+
+#ifdef __PIC__
+ callr D1RtP, CNAME(ffi_closure_SYSV_inner@PLT)
+#else
+ callr D1RtP, CNAME(ffi_closure_SYSV_inner)
+#endif
+
+ ! Check the return value and store it to D0.5
+ cmp D0Re0, #FFI_TYPE_INT
+ beq .Lretint
+ cmp D0Re0, #FFI_TYPE_DOUBLE
+ beq .Lretdouble
+.Lclosure_epilogue:
+ sub A0StP, A0StP, #8
+ RET_REGS "D1.5, D0.5"
+ RET_ARGS
+ swap D1RtP, PC
+
+.Lretint:
+ setd [D0.5], D0Re0
+ b .Lclosure_epilogue
+.Lretdouble:
+ setl [D0.5++], D0Re0, D1Re0
+ b .Lclosure_epilogue
+.ffi_closure_SYSV_end:
+.size CNAME(ffi_closure_SYSV),.ffi_closure_SYSV_end-CNAME(ffi_closure_SYSV)
+
+
+ENTRY(ffi_metag_trampoline)
+ SAVE_ARGS
+ ! New frame
+ mov A0FrP, A0StP
+ SAVE_REGS "D1.5, D0.5"
+ mov D0.5, PC
+ ! Load D1Ar1 the value of ffi_metag_trampoline
+ getd D1Ar1, [D0.5 + #8]
+ ! Jump to ffi_closure_SYSV
+ getd PC, [D0.5 + #12]
diff --git a/Modules/_ctypes/libffi/src/microblaze/ffi.c b/Modules/_ctypes/libffi/src/microblaze/ffi.c
new file mode 100644
index 0000000..5c155c5
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/microblaze/ffi.c
@@ -0,0 +1,321 @@
+/* -----------------------------------------------------------------------
+ ffi.c - Copyright (c) 2012, 2013 Xilinx, Inc
+
+ MicroBlaze Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+extern void ffi_call_SYSV(void (*)(void*, extended_cif*), extended_cif*,
+ unsigned int, unsigned int, unsigned int*, void (*fn)(void),
+ unsigned int, unsigned int);
+
+extern void ffi_closure_SYSV(void);
+
+#define WORD_SIZE sizeof(unsigned int)
+#define ARGS_REGISTER_SIZE (WORD_SIZE * 6)
+#define WORD_ALIGN(x) ALIGN(x, WORD_SIZE)
+
+/* ffi_prep_args is called by the assembly routine once stack space
+ has been allocated for the function's arguments */
+void ffi_prep_args(void* stack, extended_cif* ecif)
+{
+ unsigned int i;
+ ffi_type** p_arg;
+ void** p_argv;
+ void* stack_args_p = stack;
+
+	if (ecif == NULL || ecif->cif == NULL) {
+		return;	/* no description to prepare */
+	}
+
+	p_argv = ecif->avalue;
+
+ if ((ecif->cif->rtype != NULL) &&
+ (ecif->cif->rtype->type == FFI_TYPE_STRUCT))
+ {
+		/* If the return type is a struct, it is referenced through a pointer
+		 * on the stack/in r5. Store the return value pointer in r5.
+ */
+ char* addr = stack_args_p;
+ memcpy(addr, &(ecif->rvalue), WORD_SIZE);
+ stack_args_p += WORD_SIZE;
+ }
+
+ if (ecif->avalue == NULL) {
+ return; /* no arguments to prepare */
+ }
+
+ for (i = 0, p_arg = ecif->cif->arg_types; i < ecif->cif->nargs;
+ i++, p_arg++)
+ {
+ size_t size = (*p_arg)->size;
+ int type = (*p_arg)->type;
+ void* value = p_argv[i];
+ char* addr = stack_args_p;
+ int aligned_size = WORD_ALIGN(size);
+
+ /* force word alignment on the stack */
+ stack_args_p += aligned_size;
+
+ switch (type)
+ {
+ case FFI_TYPE_UINT8:
+ *(unsigned int *)addr = (unsigned int)*(UINT8*)(value);
+ break;
+ case FFI_TYPE_SINT8:
+ *(signed int *)addr = (signed int)*(SINT8*)(value);
+ break;
+ case FFI_TYPE_UINT16:
+ *(unsigned int *)addr = (unsigned int)*(UINT16*)(value);
+ break;
+ case FFI_TYPE_SINT16:
+ *(signed int *)addr = (signed int)*(SINT16*)(value);
+ break;
+ case FFI_TYPE_STRUCT:
+#if __BIG_ENDIAN__
+ /*
+ * MicroBlaze toolchain appears to emit:
+ * bsrli r5, r5, 8 (caller)
+ * ...
+ * <branch to callee>
+ * ...
+ * bslli r5, r5, 8 (callee)
+ *
+ * For structs like "struct a { uint8_t a[3]; };", when passed
+ * by value.
+ *
+ * Structs like "struct b { uint16_t a; };" are also expected
+ * to be packed strangely in registers.
+ *
+ * This appears to be because the microblaze toolchain expects
+	 * "struct b == uint16_t", which is only an issue for big
+	 * endian.
+ *
+	 * The following is a workaround for big endian only: for the
+	 * above-mentioned case, it re-aligns the contents of a
+	 * <= 3-byte struct value.
+ */
+ if (size < WORD_SIZE)
+ {
+ memcpy (addr + (WORD_SIZE - size), value, size);
+ break;
+ }
+#endif
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_DOUBLE:
+ default:
+ memcpy(addr, value, aligned_size);
+ }
+ }
+}
+
+ffi_status ffi_prep_cif_machdep(ffi_cif* cif)
+{
+ /* check ABI */
+ switch (cif->abi)
+ {
+ case FFI_SYSV:
+ break;
+ default:
+ return FFI_BAD_ABI;
+ }
+ return FFI_OK;
+}
+
+void ffi_call(ffi_cif* cif, void (*fn)(void), void* rvalue, void** avalue)
+{
+ extended_cif ecif;
+ ecif.cif = cif;
+ ecif.avalue = avalue;
+
+ /* If the return value is a struct and we don't have a return */
+ /* value address then we need to make one */
+ if ((rvalue == NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) {
+ ecif.rvalue = alloca(cif->rtype->size);
+ } else {
+ ecif.rvalue = rvalue;
+ }
+
+ switch (cif->abi)
+ {
+ case FFI_SYSV:
+ ffi_call_SYSV(ffi_prep_args, &ecif, cif->bytes, cif->flags,
+ ecif.rvalue, fn, cif->rtype->type, cif->rtype->size);
+ break;
+ default:
+ FFI_ASSERT(0);
+ break;
+ }
+}
+
+void ffi_closure_call_SYSV(void* register_args, void* stack_args,
+ ffi_closure* closure, void* rvalue,
+ unsigned int* rtype, unsigned int* rsize)
+{
+ /* prepare arguments for closure call */
+ ffi_cif* cif = closure->cif;
+ ffi_type** arg_types = cif->arg_types;
+
+ /* re-allocate data for the args. This needs to be done in order to keep
+	 * multi-word objects (e.g. structs) in contiguous memory. Callers are not
+ * required to store the value of args in the lower 6 words in the stack
+ * (although they are allocated in the stack).
+ */
+ char* stackclone = alloca(cif->bytes);
+ void** avalue = alloca(cif->nargs * sizeof(void*));
+ void* struct_rvalue = NULL;
+ char* ptr = stackclone;
+ int i;
+
+ /* copy registers into stack clone */
+ int registers_used = cif->bytes;
+ if (registers_used > ARGS_REGISTER_SIZE) {
+ registers_used = ARGS_REGISTER_SIZE;
+ }
+ memcpy(stackclone, register_args, registers_used);
+
+ /* copy stack allocated args into stack clone */
+ if (cif->bytes > ARGS_REGISTER_SIZE) {
+ int stack_used = cif->bytes - ARGS_REGISTER_SIZE;
+ memcpy(stackclone + ARGS_REGISTER_SIZE, stack_args, stack_used);
+ }
+
+ /* preserve struct type return pointer passing */
+ if ((cif->rtype != NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) {
+ struct_rvalue = *((void**)ptr);
+ ptr += WORD_SIZE;
+ }
+
+ /* populate arg pointer list */
+ for (i = 0; i < cif->nargs; i++)
+ {
+ switch (arg_types[i]->type)
+ {
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT8:
+#ifdef __BIG_ENDIAN__
+ avalue[i] = ptr + 3;
+#else
+ avalue[i] = ptr;
+#endif
+ break;
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT16:
+#ifdef __BIG_ENDIAN__
+ avalue[i] = ptr + 2;
+#else
+ avalue[i] = ptr;
+#endif
+ break;
+ case FFI_TYPE_STRUCT:
+#if __BIG_ENDIAN__
+ /*
+ * Work around strange ABI behaviour.
+ * (see info in ffi_prep_args)
+ */
+ if (arg_types[i]->size < WORD_SIZE)
+ {
+ memcpy (ptr, ptr + (WORD_SIZE - arg_types[i]->size), arg_types[i]->size);
+ }
+#endif
+ avalue[i] = (void*)ptr;
+ break;
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_DOUBLE:
+ avalue[i] = ptr;
+ break;
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_FLOAT:
+ default:
+ /* default 4-byte argument */
+ avalue[i] = ptr;
+ break;
+ }
+ ptr += WORD_ALIGN(arg_types[i]->size);
+ }
+
+ /* set the return type info passed back to the wrapper */
+ *rsize = cif->rtype->size;
+ *rtype = cif->rtype->type;
+ if (struct_rvalue != NULL) {
+ closure->fun(cif, struct_rvalue, avalue, closure->user_data);
+ /* copy struct return pointer value into function return value */
+ *((void**)rvalue) = struct_rvalue;
+ } else {
+ closure->fun(cif, rvalue, avalue, closure->user_data);
+ }
+}
+
+ffi_status ffi_prep_closure_loc(
+ ffi_closure* closure, ffi_cif* cif,
+ void (*fun)(ffi_cif*, void*, void**, void*),
+ void* user_data, void* codeloc)
+{
+ unsigned long* tramp = (unsigned long*)&(closure->tramp[0]);
+ unsigned long cls = (unsigned long)codeloc;
+ unsigned long fn = 0;
+ unsigned long fn_closure_call_sysv = (unsigned long)ffi_closure_call_SYSV;
+
+ closure->cif = cif;
+ closure->fun = fun;
+ closure->user_data = user_data;
+
+ switch (cif->abi)
+ {
+ case FFI_SYSV:
+ fn = (unsigned long)ffi_closure_SYSV;
+
+ /* load r11 (temp) with fn */
+ /* imm fn(upper) */
+ tramp[0] = 0xb0000000 | ((fn >> 16) & 0xffff);
+ /* addik r11, r0, fn(lower) */
+ tramp[1] = 0x31600000 | (fn & 0xffff);
+
+ /* load r12 (temp) with cls */
+ /* imm cls(upper) */
+ tramp[2] = 0xb0000000 | ((cls >> 16) & 0xffff);
+ /* addik r12, r0, cls(lower) */
+ tramp[3] = 0x31800000 | (cls & 0xffff);
+
+ /* load r3 (temp) with ffi_closure_call_SYSV */
+ /* imm fn_closure_call_sysv(upper) */
+ tramp[4] = 0xb0000000 | ((fn_closure_call_sysv >> 16) & 0xffff);
+ /* addik r3, r0, fn_closure_call_sysv(lower) */
+ tramp[5] = 0x30600000 | (fn_closure_call_sysv & 0xffff);
+ /* branch/jump to address stored in r11 (fn) */
+ tramp[6] = 0x98085800; /* bra r11 */
+
+ break;
+ default:
+ return FFI_BAD_ABI;
+ }
+ return FFI_OK;
+}
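
ffi_prep_closure_loc above builds the MicroBlaze trampoline by emitting raw imm/addik instruction pairs, each pair carrying one 32-bit address split into 16-bit halves (imm supplies the upper half consumed by the following instruction). A minimal sketch of how one such pair is formed, mirroring tramp[0]/tramp[1] in the hunk; the target address used here is a hypothetical value for illustration:

#include <stdio.h>

int main(void)
{
    unsigned long fn = 0x12345678UL;   /* hypothetical target address */
    /* imm fn(upper): opcode 0xb0000000 carries the upper 16 bits */
    unsigned long w0 = 0xb0000000UL | ((fn >> 16) & 0xffff);
    /* addik r11, r0, fn(lower): opcode 0x31600000 carries the lower 16 bits */
    unsigned long w1 = 0x31600000UL | (fn & 0xffff);
    printf("%08lx %08lx\n", w0, w1);   /* the two trampoline words */
    return 0;
}
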
diff --git a/Modules/_ctypes/libffi/src/microblaze/ffitarget.h b/Modules/_ctypes/libffi/src/microblaze/ffitarget.h
new file mode 100644
index 0000000..c6fa5a4
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/microblaze/ffitarget.h
@@ -0,0 +1,53 @@
+/* -----------------------------------------------------------------------
+ ffitarget.h - Copyright (c) 2012, 2013 Xilinx, Inc
+
+ Target configuration macros for MicroBlaze.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+#ifndef LIBFFI_ASM
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+
+typedef enum ffi_abi {
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
+} ffi_abi;
+#endif
+
+/* Definitions for closures */
+
+#define FFI_CLOSURES 1
+#define FFI_NATIVE_RAW_API 0
+
+#define FFI_TRAMPOLINE_SIZE (4*8)
+
+#endif
diff --git a/Modules/_ctypes/libffi/src/microblaze/sysv.S b/Modules/_ctypes/libffi/src/microblaze/sysv.S
new file mode 100644
index 0000000..7a195a6
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/microblaze/sysv.S
@@ -0,0 +1,302 @@
+/* -----------------------------------------------------------------------
+ sysv.S - Copyright (c) 2012, 2013 Xilinx, Inc
+
+ MicroBlaze Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+
+ /*
+ * arg[0] (r5) = ffi_prep_args,
+ * arg[1] (r6) = &ecif,
+ * arg[2] (r7) = cif->bytes,
+ * arg[3] (r8) = cif->flags,
+ * arg[4] (r9) = ecif.rvalue,
+ * arg[5] (r10) = fn
+ * arg[6] (sp[0]) = cif->rtype->type
+ * arg[7] (sp[4]) = cif->rtype->size
+ */
+ .text
+ .globl ffi_call_SYSV
+ .type ffi_call_SYSV, @function
+ffi_call_SYSV:
+ /* push callee saves */
+ addik r1, r1, -20
+ swi r19, r1, 0 /* Frame Pointer */
+ swi r20, r1, 4 /* PIC register */
+ swi r21, r1, 8 /* PIC register */
+ swi r22, r1, 12 /* save for locals */
+ swi r23, r1, 16 /* save for locals */
+
+ /* save the r5-r10 registers in the stack */
+	addik r1, r1, -24	/* move sp down to store 6x 32-bit words */
+ swi r5, r1, 0
+ swi r6, r1, 4
+ swi r7, r1, 8
+ swi r8, r1, 12
+ swi r9, r1, 16
+ swi r10, r1, 20
+
+ /* save function pointer */
+ addik r3, r5, 0 /* copy ffi_prep_args into r3 */
+ addik r22, r1, 0 /* save sp for unallocated args into r22 (callee-saved) */
+ addik r23, r10, 0 /* save function address into r23 (callee-saved) */
+
+ /* prepare stack with allocation for n (bytes = r7) args */
+ rsub r1, r7, r1 /* subtract bytes from sp */
+
+ /* prep args for ffi_prep_args call */
+ addik r5, r1, 0 /* store stack pointer into arg[0] */
+ /* r6 still holds ecif for arg[1] */
+
+ /* Call ffi_prep_args(stack, &ecif). */
+ addik r1, r1, -4
+ swi r15, r1, 0 /* store the link register in the frame */
+ brald r15, r3
+ nop /* branch has delay slot */
+ lwi r15, r1, 0
+ addik r1, r1, 4 /* restore the link register from the frame */
+ /* returns calling stack pointer location */
+
+ /* prepare args for fn call, prep_args populates them onto the stack */
+ lwi r5, r1, 0 /* arg[0] */
+ lwi r6, r1, 4 /* arg[1] */
+ lwi r7, r1, 8 /* arg[2] */
+ lwi r8, r1, 12 /* arg[3] */
+ lwi r9, r1, 16 /* arg[4] */
+ lwi r10, r1, 20 /* arg[5] */
+
+ /* call (fn) (...). */
+ addik r1, r1, -4
+ swi r15, r1, 0 /* store the link register in the frame */
+ brald r15, r23
+ nop /* branch has delay slot */
+ lwi r15, r1, 0
+ addik r1, r1, 4 /* restore the link register from the frame */
+
+ /* Remove the space we pushed for the args. */
+ addik r1, r22, 0 /* restore old SP */
+
+	/* restore this function's parameters */
+ lwi r5, r1, 0 /* arg[0] */
+ lwi r6, r1, 4 /* arg[1] */
+ lwi r7, r1, 8 /* arg[2] */
+ lwi r8, r1, 12 /* arg[3] */
+ lwi r9, r1, 16 /* arg[4] */
+ lwi r10, r1, 20 /* arg[5] */
+	addik r1, r1, 24	/* move sp back up to de-allocate the 6x 32-bit words */
+
+ /* If the return value pointer is NULL, assume no return value. */
+ beqi r9, ffi_call_SYSV_end
+
+ lwi r22, r1, 48 /* get return type (20 for locals + 28 for arg[6]) */
+ lwi r23, r1, 52 /* get return size (20 for locals + 32 for arg[7]) */
+
+ /* Check if return type is actually a struct, do nothing */
+ rsubi r11, r22, FFI_TYPE_STRUCT
+ beqi r11, ffi_call_SYSV_end
+
+ /* Return 8bit */
+ rsubi r11, r23, 1
+ beqi r11, ffi_call_SYSV_store8
+
+ /* Return 16bit */
+ rsubi r11, r23, 2
+ beqi r11, ffi_call_SYSV_store16
+
+ /* Return 32bit */
+ rsubi r11, r23, 4
+ beqi r11, ffi_call_SYSV_store32
+
+ /* Return 64bit */
+ rsubi r11, r23, 8
+ beqi r11, ffi_call_SYSV_store64
+
+	/* Didn't match anything */
+ bri ffi_call_SYSV_end
+
+ffi_call_SYSV_store64:
+ swi r3, r9, 0 /* store word r3 into return value */
+ swi r4, r9, 4 /* store word r4 into return value */
+ bri ffi_call_SYSV_end
+
+ffi_call_SYSV_store32:
+ swi r3, r9, 0 /* store word r3 into return value */
+ bri ffi_call_SYSV_end
+
+ffi_call_SYSV_store16:
+#ifdef __BIG_ENDIAN__
+ shi r3, r9, 2 /* store half-word r3 into return value */
+#else
+ shi r3, r9, 0 /* store half-word r3 into return value */
+#endif
+ bri ffi_call_SYSV_end
+
+ffi_call_SYSV_store8:
+#ifdef __BIG_ENDIAN__
+ sbi r3, r9, 3 /* store byte r3 into return value */
+#else
+ sbi r3, r9, 0 /* store byte r3 into return value */
+#endif
+ bri ffi_call_SYSV_end
+
+ffi_call_SYSV_end:
+ /* callee restores */
+ lwi r19, r1, 0 /* frame pointer */
+ lwi r20, r1, 4 /* PIC register */
+ lwi r21, r1, 8 /* PIC register */
+ lwi r22, r1, 12
+ lwi r23, r1, 16
+ addik r1, r1, 20
+
+ /* return from sub-routine (with delay slot) */
+ rtsd r15, 8
+ nop
+
+ .size ffi_call_SYSV, . - ffi_call_SYSV
+
+/* ------------------------------------------------------------------------- */
+
+ /*
+ * args passed into this function, are passed down to the callee.
+ * this function is the target of the closure trampoline, as such r12 is
+ * a pointer to the closure object.
+ */
+ .text
+ .globl ffi_closure_SYSV
+ .type ffi_closure_SYSV, @function
+ffi_closure_SYSV:
+ /* push callee saves */
+ addik r11, r1, 28 /* save stack args start location (excluding regs/link) */
+ addik r1, r1, -12
+ swi r19, r1, 0 /* Frame Pointer */
+ swi r20, r1, 4 /* PIC register */
+ swi r21, r1, 8 /* PIC register */
+
+ /* store register args on stack */
+ addik r1, r1, -24
+ swi r5, r1, 0
+ swi r6, r1, 4
+ swi r7, r1, 8
+ swi r8, r1, 12
+ swi r9, r1, 16
+ swi r10, r1, 20
+
+ /* setup args */
+ addik r5, r1, 0 /* register_args */
+ addik r6, r11, 0 /* stack_args */
+ addik r7, r12, 0 /* closure object */
+ addik r1, r1, -8 /* allocate return value */
+ addik r8, r1, 0 /* void* rvalue */
+	addik r1, r1, -8	/* allocate for return type/size values */
+ addik r9, r1, 0 /* void* rtype */
+ addik r10, r1, 4 /* void* rsize */
+
+ /* call the wrap_call function */
+ addik r1, r1, -28 /* allocate args + link reg */
+ swi r15, r1, 0 /* store the link register in the frame */
+ brald r15, r3
+ nop /* branch has delay slot */
+ lwi r15, r1, 0
+ addik r1, r1, 28 /* restore the link register from the frame */
+
+ffi_closure_SYSV_prepare_return:
+ lwi r9, r1, 0 /* rtype */
+ lwi r10, r1, 4 /* rsize */
+ addik r1, r1, 8 /* de-allocate return info values */
+
+ /* Check if return type is actually a struct, store 4 bytes */
+ rsubi r11, r9, FFI_TYPE_STRUCT
+ beqi r11, ffi_closure_SYSV_store32
+
+ /* Return 8bit */
+ rsubi r11, r10, 1
+ beqi r11, ffi_closure_SYSV_store8
+
+ /* Return 16bit */
+ rsubi r11, r10, 2
+ beqi r11, ffi_closure_SYSV_store16
+
+ /* Return 32bit */
+ rsubi r11, r10, 4
+ beqi r11, ffi_closure_SYSV_store32
+
+ /* Return 64bit */
+ rsubi r11, r10, 8
+ beqi r11, ffi_closure_SYSV_store64
+
+	/* Didn't match anything */
+ bri ffi_closure_SYSV_end
+
+ffi_closure_SYSV_store64:
+	lwi r3, r1, 0	/* load the first return value word into r3 */
+	lwi r4, r1, 4	/* load the second return value word into r4 */
+ /* 64 bits == 2 words, no sign extend occurs */
+ bri ffi_closure_SYSV_end
+
+ffi_closure_SYSV_store32:
+	lwi r3, r1, 0	/* load the return value word into r3 */
+ /* 32 bits == 1 word, no sign extend occurs */
+ bri ffi_closure_SYSV_end
+
+ffi_closure_SYSV_store16:
+#ifdef __BIG_ENDIAN__
+	lhui r3, r1, 2	/* load the return half-word into r3 */
+#else
+	lhui r3, r1, 0	/* load the return half-word into r3 */
+#endif
+ rsubi r11, r9, FFI_TYPE_SINT16
+ bnei r11, ffi_closure_SYSV_end
+	sext16 r3, r3	/* fix sign extend of sint16 */
+ bri ffi_closure_SYSV_end
+
+ffi_closure_SYSV_store8:
+#ifdef __BIG_ENDIAN__
+	lbui r3, r1, 3	/* load the return byte into r3 */
+#else
+	lbui r3, r1, 0	/* load the return byte into r3 */
+#endif
+ rsubi r11, r9, FFI_TYPE_SINT8
+ bnei r11, ffi_closure_SYSV_end
+ sext8 r3, r3 /* fix sign extend of sint8 */
+ bri ffi_closure_SYSV_end
+
+ffi_closure_SYSV_end:
+ addik r1, r1, 8 /* de-allocate return value */
+
+ /* de-allocate stored args */
+ addik r1, r1, 24
+
+ /* callee restores */
+ lwi r19, r1, 0 /* frame pointer */
+ lwi r20, r1, 4 /* PIC register */
+ lwi r21, r1, 8 /* PIC register */
+ addik r1, r1, 12
+
+ /* return from sub-routine (with delay slot) */
+ rtsd r15, 8
+ nop
+
+ .size ffi_closure_SYSV, . - ffi_closure_SYSV
diff --git a/Modules/_ctypes/libffi/src/mips/ffi.c b/Modules/_ctypes/libffi/src/mips/ffi.c
index d714cc9..03121e3 100644
--- a/Modules/_ctypes/libffi/src/mips/ffi.c
+++ b/Modules/_ctypes/libffi/src/mips/ffi.c
@@ -1,6 +1,7 @@
/* -----------------------------------------------------------------------
- ffi.c - Copyright (c) 1996, 2007, 2008 Red Hat, Inc.
- Copyright (c) 2008 David Daney
+ ffi.c - Copyright (c) 2011 Anthony Green
+ Copyright (c) 2008 David Daney
+ Copyright (c) 1996, 2007, 2008, 2011 Red Hat, Inc.
MIPS Foreign Function Interface
@@ -37,7 +38,11 @@
#endif
#ifndef USE__BUILTIN___CLEAR_CACHE
-#include <sys/cachectl.h>
+# if defined(__OpenBSD__)
+# include <mips64/sysarch.h>
+# else
+# include <sys/cachectl.h>
+# endif
#endif
#ifdef FFI_DEBUG
@@ -662,10 +667,19 @@ ffi_prep_closure_loc (ffi_closure *closure,
char *clear_location = (char *) codeloc;
#if defined(FFI_MIPS_O32)
- FFI_ASSERT(cif->abi == FFI_O32 || cif->abi == FFI_O32_SOFT_FLOAT);
+ if (cif->abi != FFI_O32 && cif->abi != FFI_O32_SOFT_FLOAT)
+ return FFI_BAD_ABI;
fn = ffi_closure_O32;
-#else /* FFI_MIPS_N32 */
- FFI_ASSERT(cif->abi == FFI_N32 || cif->abi == FFI_N64);
+#else
+#if _MIPS_SIM == _ABIN32
+ if (cif->abi != FFI_N32
+ && cif->abi != FFI_N32_SOFT_FLOAT)
+ return FFI_BAD_ABI;
+#else
+ if (cif->abi != FFI_N64
+ && cif->abi != FFI_N64_SOFT_FLOAT)
+ return FFI_BAD_ABI;
+#endif
fn = ffi_closure_N32;
#endif /* FFI_MIPS_O32 */
diff --git a/Modules/_ctypes/libffi/src/mips/ffitarget.h b/Modules/_ctypes/libffi/src/mips/ffitarget.h
index c5f4e05..717d659 100644
--- a/Modules/_ctypes/libffi/src/mips/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/mips/ffitarget.h
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 1996-2003 Red Hat, Inc.
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 1996-2003 Red Hat, Inc.
Target configuration macros for MIPS.
Permission is hereby granted, free of charge, to any person obtaining
@@ -27,11 +28,23 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
#ifdef linux
# include <asm/sgidefs.h>
-#else
+#elif defined(__rtems__)
+/*
+ * Subprogram calling convention - copied from sgidefs.h
+ */
+#define _MIPS_SIM_ABI32 1
+#define _MIPS_SIM_NABI32 2
+#define _MIPS_SIM_ABI64 3
+#elif !defined(__OpenBSD__)
# include <sgidefs.h>
#endif
+
# ifndef _ABIN32
# define _ABIN32 _MIPS_SIM_NABI32
# endif
@@ -43,7 +56,7 @@
# endif
#if !defined(_MIPS_SIM)
--- something is very wrong --
+# error -- something is very wrong --
#else
# if (_MIPS_SIM==_ABIN32 && defined(_ABIN32)) || (_MIPS_SIM==_ABI64 && defined(_ABI64))
# define FFI_MIPS_N32
@@ -51,7 +64,7 @@
# if (_MIPS_SIM==_ABIO32 && defined(_ABIO32))
# define FFI_MIPS_O32
# else
--- this is an unsupported platform --
+# error -- this is an unsupported platform --
# endif
# endif
#endif
@@ -186,30 +199,29 @@ typedef enum ffi_abi {
FFI_O32_SOFT_FLOAT,
FFI_N32_SOFT_FLOAT,
FFI_N64_SOFT_FLOAT,
+ FFI_LAST_ABI,
#ifdef FFI_MIPS_O32
#ifdef __mips_soft_float
- FFI_DEFAULT_ABI = FFI_O32_SOFT_FLOAT,
+ FFI_DEFAULT_ABI = FFI_O32_SOFT_FLOAT
#else
- FFI_DEFAULT_ABI = FFI_O32,
+ FFI_DEFAULT_ABI = FFI_O32
#endif
#else
# if _MIPS_SIM==_ABI64
# ifdef __mips_soft_float
- FFI_DEFAULT_ABI = FFI_N64_SOFT_FLOAT,
+ FFI_DEFAULT_ABI = FFI_N64_SOFT_FLOAT
# else
- FFI_DEFAULT_ABI = FFI_N64,
+ FFI_DEFAULT_ABI = FFI_N64
# endif
# else
# ifdef __mips_soft_float
- FFI_DEFAULT_ABI = FFI_N32_SOFT_FLOAT,
+ FFI_DEFAULT_ABI = FFI_N32_SOFT_FLOAT
# else
- FFI_DEFAULT_ABI = FFI_N32,
+ FFI_DEFAULT_ABI = FFI_N32
# endif
# endif
#endif
-
- FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
} ffi_abi;
#define FFI_EXTRA_CIF_FIELDS unsigned rstruct_flag
diff --git a/Modules/_ctypes/libffi/src/mips/n32.S b/Modules/_ctypes/libffi/src/mips/n32.S
index ae23094..ff4bbce 100644
--- a/Modules/_ctypes/libffi/src/mips/n32.S
+++ b/Modules/_ctypes/libffi/src/mips/n32.S
@@ -43,6 +43,7 @@
#ifdef __GNUC__
.abicalls
#endif
+ .set mips4
.text
.align 2
.globl ffi_call_N32
diff --git a/Modules/_ctypes/libffi/src/moxie/eabi.S b/Modules/_ctypes/libffi/src/moxie/eabi.S
index 379ea4b..ac7aceb 100644
--- a/Modules/_ctypes/libffi/src/moxie/eabi.S
+++ b/Modules/_ctypes/libffi/src/moxie/eabi.S
@@ -1,7 +1,7 @@
/* -----------------------------------------------------------------------
- eabi.S - Copyright (c) 2004 Anthony Green
+ eabi.S - Copyright (c) 2012, 2013 Anthony Green
- FR-V Assembly glue.
+ Moxie Assembly glue.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -34,95 +34,68 @@
.globl ffi_call_EABI
.type ffi_call_EABI, @function
- # gr8 : ffi_prep_args
- # gr9 : &ecif
- # gr10: cif->bytes
- # gr11: fig->flags
- # gr12: ecif.rvalue
- # gr13: fn
+ # $r0 : ffi_prep_args
+ # $r1 : &ecif
+ # $r2 : cif->bytes
+ # $r3 : fig->flags
+ # $r4 : ecif.rvalue
+ # $r5 : fn
-ffi_call_EABI:
- addi sp, #-80, sp
- sti fp, @(sp, #24)
- addi sp, #24, fp
- movsg lr, gr5
+ffi_call_EABI:
+ push $sp, $r6
+ push $sp, $r7
+ push $sp, $r8
+ dec $sp, 24
- /* Make room for the new arguments. */
- /* subi sp, fp, gr10 */
-
- /* Store return address and incoming args on stack. */
- sti gr5, @(fp, #8)
- sti gr8, @(fp, #-4)
- sti gr9, @(fp, #-8)
- sti gr10, @(fp, #-12)
- sti gr11, @(fp, #-16)
- sti gr12, @(fp, #-20)
- sti gr13, @(fp, #-24)
-
- sub sp, gr10, sp
+ /* Store incoming args on stack. */
+ sto.l 0($sp), $r0 /* ffi_prep_args */
+ sto.l 4($sp), $r1 /* ecif */
+ sto.l 8($sp), $r2 /* bytes */
+ sto.l 12($sp), $r3 /* flags */
+ sto.l 16($sp), $r4 /* &rvalue */
+ sto.l 20($sp), $r5 /* fn */
/* Call ffi_prep_args. */
- ldi @(fp, #-4), gr4
- addi sp, #0, gr8
- ldi @(fp, #-8), gr9
-#ifdef __FRV_FDPIC__
- ldd @(gr4, gr0), gr14
- calll @(gr14, gr0)
-#else
- calll @(gr4, gr0)
-#endif
-
- /* ffi_prep_args returns the new stack pointer. */
- mov gr8, gr4
-
- ldi @(sp, #0), gr8
- ldi @(sp, #4), gr9
- ldi @(sp, #8), gr10
- ldi @(sp, #12), gr11
- ldi @(sp, #16), gr12
- ldi @(sp, #20), gr13
+ mov $r6, $r4 /* Save result buffer */
+ mov $r7, $r5 /* Save the target fn */
+ mov $r8, $r3 /* Save the flags */
+ sub.l $sp, $r2 /* Allocate stack space */
+ mov $r0, $sp /* We can stomp over $r0 */
+ /* $r1 is already set up */
+ jsra ffi_prep_args
- /* Always copy the return value pointer into the hidden
- parameter register. This is only strictly necessary
- when we're returning an aggregate type, but it doesn't
- hurt to do this all the time, and it saves a branch. */
- ldi @(fp, #-20), gr3
-
- /* Use the ffi_prep_args return value for the new sp. */
- mov gr4, sp
+ /* Load register arguments. */
+ ldo.l $r0, 0($sp)
+ ldo.l $r1, 4($sp)
+ ldo.l $r2, 8($sp)
+ ldo.l $r3, 12($sp)
+ ldo.l $r4, 16($sp)
+ ldo.l $r5, 20($sp)
/* Call the target function. */
- ldi @(fp, -24), gr4
-#ifdef __FRV_FDPIC__
- ldd @(gr4, gr0), gr14
- calll @(gr14, gr0)
-#else
- calll @(gr4, gr0)
-#endif
+ jsr $r7
+
+ ldi.l $r7, 0xffffffff
+ cmp $r8, $r7
+ beq retstruct
+
+ ldi.l $r7, 4
+ cmp $r8, $r7
+ bgt ret2reg
- /* Store the result. */
- ldi @(fp, #-16), gr10 /* fig->flags */
- ldi @(fp, #-20), gr4 /* ecif.rvalue */
+ st.l ($r6), $r0
+ jmpa retdone
- /* Is the return value stored in two registers? */
- cmpi gr10, #8, icc0
- bne icc0, 0, .L2
- /* Yes, save them. */
- sti gr8, @(gr4, #0)
- sti gr9, @(gr4, #4)
- bra .L3
-.L2:
- /* Is the return value a structure? */
- cmpi gr10, #-1, icc0
- beq icc0, 0, .L3
- /* No, save a 4 byte return value. */
- sti gr8, @(gr4, #0)
-.L3:
+ret2reg:
+ st.l ($r6), $r0
+ sto.l 4($r6), $r1
- /* Restore the stack, and return. */
- ldi @(fp, 8), gr5
- ld @(fp, gr0), fp
- addi sp,#80,sp
- jmpl @(gr5,gr0)
+retstruct:
+retdone:
+ /* Return. */
+ ldo.l $r6, -4($fp)
+ ldo.l $r7, -8($fp)
+ ldo.l $r8, -12($fp)
+ ret
.size ffi_call_EABI, .-ffi_call_EABI
diff --git a/Modules/_ctypes/libffi/src/moxie/ffi.c b/Modules/_ctypes/libffi/src/moxie/ffi.c
index 54cbbb9..540a042 100644
--- a/Modules/_ctypes/libffi/src/moxie/ffi.c
+++ b/Modules/_ctypes/libffi/src/moxie/ffi.c
@@ -1,5 +1,5 @@
/* -----------------------------------------------------------------------
- ffi.c - Copyright (C) 2009 Anthony Green
+ ffi.c - Copyright (C) 2012, 2013 Anthony Green
Moxie Foreign Function Interface
@@ -43,6 +43,12 @@ void *ffi_prep_args(char *stack, extended_cif *ecif)
p_argv = ecif->avalue;
argp = stack;
+ if (ecif->cif->rtype->type == FFI_TYPE_STRUCT)
+ {
+ *(void **) argp = ecif->rvalue;
+ argp += 4;
+ }
+
for (i = ecif->cif->nargs, p_arg = ecif->cif->arg_types;
(i != 0);
i--, p_arg++)
@@ -56,17 +62,6 @@ void *ffi_prep_args(char *stack, extended_cif *ecif)
z = sizeof(void*);
*(void **) argp = *p_argv;
}
- /* if ((*p_arg)->type == FFI_TYPE_FLOAT)
- {
- if (count > 24)
- {
- // This is going on the stack. Turn it into a double.
- *(double *) argp = (double) *(float*)(* p_argv);
- z = sizeof(double);
- }
- else
- *(void **) argp = *(void **)(* p_argv);
- } */
else if (z < sizeof(int))
{
z = sizeof(int);
@@ -147,8 +142,7 @@ void ffi_call(ffi_cif *cif,
}
else
ecif.rvalue = rvalue;
-
-
+
switch (cif->abi)
{
case FFI_EABI:
@@ -165,19 +159,25 @@ void ffi_closure_eabi (unsigned arg1, unsigned arg2, unsigned arg3,
unsigned arg4, unsigned arg5, unsigned arg6)
{
/* This function is called by a trampoline. The trampoline stows a
- pointer to the ffi_closure object in gr7. We must save this
+ pointer to the ffi_closure object in $r7. We must save this
pointer in a place that will persist while we do our work. */
- register ffi_closure *creg __asm__ ("gr7");
+ register ffi_closure *creg __asm__ ("$r12");
ffi_closure *closure = creg;
/* Arguments that don't fit in registers are found on the stack
at a fixed offset above the current frame pointer. */
- register char *frame_pointer __asm__ ("fp");
- char *stack_args = frame_pointer + 16;
+ register char *frame_pointer __asm__ ("$fp");
+
+ /* Pointer to a struct return value. */
+ void *struct_rvalue = (void *) arg1;
+
+ /* 6 words reserved for register args + 3 words from jsr */
+ char *stack_args = frame_pointer + 9*4;
/* Lay the register arguments down in a continuous chunk of memory. */
unsigned register_args[6] =
{ arg1, arg2, arg3, arg4, arg5, arg6 };
+ char *register_args_ptr = (char *) register_args;
ffi_cif *cif = closure->cif;
ffi_type **arg_types = cif->arg_types;
@@ -185,6 +185,12 @@ void ffi_closure_eabi (unsigned arg1, unsigned arg2, unsigned arg3,
char *ptr = (char *) register_args;
int i;
+ /* preserve struct type return pointer passing */
+ if ((cif->rtype != NULL) && (cif->rtype->type == FFI_TYPE_STRUCT)) {
+ ptr += 4;
+ register_args_ptr = (char *)&register_args[1];
+ }
+
/* Find the address of each argument. */
for (i = 0; i < cif->nargs; i++)
{
@@ -201,6 +207,7 @@ void ffi_closure_eabi (unsigned arg1, unsigned arg2, unsigned arg3,
case FFI_TYPE_SINT32:
case FFI_TYPE_UINT32:
case FFI_TYPE_FLOAT:
+ case FFI_TYPE_POINTER:
avalue[i] = ptr;
break;
case FFI_TYPE_STRUCT:
@@ -216,30 +223,21 @@ void ffi_closure_eabi (unsigned arg1, unsigned arg2, unsigned arg3,
/* If we've handled more arguments than fit in registers,
      start looking at those passed on the stack.  */
- if (ptr == ((char *)register_args + (6*4)))
+ if (ptr == &register_args[6])
ptr = stack_args;
}
/* Invoke the closure. */
- if (cif->rtype->type == FFI_TYPE_STRUCT)
+ if (cif->rtype && (cif->rtype->type == FFI_TYPE_STRUCT))
{
- /* The caller allocates space for the return structure, and
- passes a pointer to this space in gr3. Use this value directly
- as the return value. */
- register void *return_struct_ptr __asm__("gr3");
- (closure->fun) (cif, return_struct_ptr, avalue, closure->user_data);
+ (closure->fun) (cif, struct_rvalue, avalue, closure->user_data);
}
else
{
/* Allocate space for the return value and call the function. */
long long rvalue;
(closure->fun) (cif, &rvalue, avalue, closure->user_data);
-
- /* Functions return 4-byte or smaller results in gr8. 8-byte
- values also use gr9. We fill the both, even for small return
- values, just to avoid a branch. */
- asm ("ldi @(%0, #0), gr8" : : "r" (&rvalue));
- asm ("ldi @(%0, #0), gr9" : : "r" (&((int *) &rvalue)[1]));
+ asm ("mov $r12, %0\n ld.l $r0, ($r12)\n ldo.l $r1, 4($r12)" : : "r" (&rvalue));
}
}
@@ -250,27 +248,25 @@ ffi_prep_closure_loc (ffi_closure* closure,
void *user_data,
void *codeloc)
{
- unsigned int *tramp = (unsigned int *) &closure->tramp[0];
+ unsigned short *tramp = (unsigned short *) &closure->tramp[0];
unsigned long fn = (long) ffi_closure_eabi;
unsigned long cls = (long) codeloc;
- int i;
+
+ if (cif->abi != FFI_EABI)
+ return FFI_BAD_ABI;
fn = (unsigned long) ffi_closure_eabi;
- tramp[0] = 0x8cfc0000 + (fn & 0xffff); /* setlos lo(fn), gr6 */
- tramp[1] = 0x8efc0000 + (cls & 0xffff); /* setlos lo(cls), gr7 */
- tramp[2] = 0x8cf80000 + (fn >> 16); /* sethi hi(fn), gr6 */
- tramp[3] = 0x8ef80000 + (cls >> 16); /* sethi hi(cls), gr7 */
- tramp[4] = 0x80300006; /* jmpl @(gr0, gr6) */
+ tramp[0] = 0x01e0; /* ldi.l $r7, .... */
+ tramp[1] = cls >> 16;
+ tramp[2] = cls & 0xffff;
+ tramp[3] = 0x1a00; /* jmpa .... */
+ tramp[4] = fn >> 16;
+ tramp[5] = fn & 0xffff;
closure->cif = cif;
closure->fun = fun;
closure->user_data = user_data;
- /* Cache flushing. */
- for (i = 0; i < FFI_TRAMPOLINE_SIZE; i++)
- __asm__ volatile ("dcf @(%0,%1)\n\tici @(%2,%1)" :: "r" (tramp), "r" (i),
- "r" (codeloc));
-
return FFI_OK;
}
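
The hunk above gives the Moxie port working closure support: ffi_prep_closure_loc now emits a short trampoline that loads the closure address into $r7 and jumps to ffi_closure_eabi, and an ABI mismatch returns FFI_BAD_ABI instead of asserting. For orientation, here is a minimal, hedged sketch of the portable libffi closure API that this machinery backs; the callback and variable names are invented for illustration and are not part of the patch.

#include <ffi.h>
#include <stdio.h>

/* Closure callback: adds the user_data bias to the single int argument. */
static void add_bias (ffi_cif *cif, void *ret, void **args, void *user_data)
{
  (void) cif;
  *(ffi_arg *) ret = *(int *) args[0] + *(int *) user_data;
}

int main (void)
{
  ffi_cif cif;
  ffi_type *argt[1] = { &ffi_type_sint };
  void *code;
  ffi_closure *closure = ffi_closure_alloc (sizeof (ffi_closure), &code);
  int bias = 5;
  int (*fn) (int);

  if (closure == NULL
      || ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, argt) != FFI_OK
      || ffi_prep_closure_loc (closure, &cif, add_bias, &bias, code) != FFI_OK)
    return 1;                      /* e.g. FFI_BAD_ABI on an unsupported ABI */

  fn = (int (*)(int)) code;        /* call through the generated trampoline */
  printf ("%d\n", fn (37));        /* prints 42 */

  ffi_closure_free (closure);
  return 0;
}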
diff --git a/Modules/_ctypes/libffi/src/moxie/ffitarget.h b/Modules/_ctypes/libffi/src/moxie/ffitarget.h
index f5305d1..623e3ec 100644
--- a/Modules/_ctypes/libffi/src/moxie/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/moxie/ffitarget.h
@@ -1,5 +1,5 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 2009 Anthony Green
+ ffitarget.h - Copyright (c) 2012, 2013 Anthony Green
Target configuration macros for Moxie
Permission is hereby granted, free of charge, to any person obtaining
@@ -35,22 +35,18 @@ typedef signed long ffi_sarg;
typedef enum ffi_abi {
FFI_FIRST_ABI = 0,
-
-#ifdef MOXIE
FFI_EABI,
FFI_DEFAULT_ABI = FFI_EABI,
-#endif
-
FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
} ffi_abi;
#endif
/* ---- Definitions for closures ----------------------------------------- */
-#define FFI_CLOSURES 0
+#define FFI_CLOSURES 1
#define FFI_NATIVE_RAW_API 0
-/* Trampolines are 5 4-byte instructions long. */
-#define FFI_TRAMPOLINE_SIZE (5*4)
+/* Trampolines are 12 bytes long. See ffi_prep_closure_loc. */
+#define FFI_TRAMPOLINE_SIZE (12)
#endif
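
The new FFI_TRAMPOLINE_SIZE reflects the six 16-bit halfwords that ffi_prep_closure_loc now writes. Purely as an illustration, the same layout repacked in standalone C; the helper name is made up and not part of the patch:

#include <string.h>

/* Sketch: "ldi.l $r7, <closure>" followed by "jmpa <ffi_closure_eabi>",
   six 16-bit halfwords = 12 bytes, matching FFI_TRAMPOLINE_SIZE. */
static void pack_moxie_trampoline (unsigned char buf[12],
                                   unsigned long fn, unsigned long cls)
{
  unsigned short tramp[6];
  tramp[0] = 0x01e0;                           /* ldi.l $r7, ...            */
  tramp[1] = (unsigned short) (cls >> 16);     /* closure addr, high half   */
  tramp[2] = (unsigned short) (cls & 0xffff);  /* closure addr, low half    */
  tramp[3] = 0x1a00;                           /* jmpa ...                  */
  tramp[4] = (unsigned short) (fn >> 16);      /* ffi_closure_eabi, high    */
  tramp[5] = (unsigned short) (fn & 0xffff);   /* ffi_closure_eabi, low     */
  memcpy (buf, tramp, sizeof tramp);
}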
diff --git a/Modules/_ctypes/libffi/src/pa/ffi.c b/Modules/_ctypes/libffi/src/pa/ffi.c
index 6d7606f..4ce2bc6 100644
--- a/Modules/_ctypes/libffi/src/pa/ffi.c
+++ b/Modules/_ctypes/libffi/src/pa/ffi.c
@@ -1,9 +1,11 @@
/* -----------------------------------------------------------------------
- ffi.c - (c) 2003-2004 Randolph Chung <tausq@debian.org>
+ ffi.c - (c) 2011 Anthony Green
(c) 2008 Red Hat, Inc.
-
+ (c) 2006 Free Software Foundation, Inc.
+ (c) 2003-2004 Randolph Chung <tausq@debian.org>
+
HPPA Foreign Function Interface
- HP-UX PA ABI support (c) 2006 Free Software Foundation, Inc.
+ HP-UX PA ABI support
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -633,7 +635,8 @@ ffi_prep_closure_loc (ffi_closure* closure,
UINT32 *tmp;
#endif
- FFI_ASSERT (cif->abi == FFI_PA32);
+ if (cif->abi != FFI_PA32)
+ return FFI_BAD_ABI;
/* Make a small trampoline that will branch to our
handler function. Use PC-relative addressing. */
diff --git a/Modules/_ctypes/libffi/src/pa/ffitarget.h b/Modules/_ctypes/libffi/src/pa/ffitarget.h
index 001f891..5e364d3 100644
--- a/Modules/_ctypes/libffi/src/pa/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/pa/ffitarget.h
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 1996-2003 Red Hat, Inc.
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 1996-2003 Red Hat, Inc.
Target configuration macros for hppa.
Permission is hereby granted, free of charge, to any person obtaining
@@ -27,6 +28,10 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
/* ---- System specific configurations ----------------------------------- */
#ifndef LIBFFI_ASM
@@ -38,21 +43,22 @@ typedef enum ffi_abi {
#ifdef PA_LINUX
FFI_PA32,
- FFI_DEFAULT_ABI = FFI_PA32,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_PA32
#endif
#ifdef PA_HPUX
FFI_PA32,
- FFI_DEFAULT_ABI = FFI_PA32,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_PA32
#endif
#ifdef PA64_HPUX
#error "PA64_HPUX FFI is not yet implemented"
FFI_PA64,
- FFI_DEFAULT_ABI = FFI_PA64,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_PA64
#endif
-
- FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
} ffi_abi;
#endif
diff --git a/Modules/_ctypes/libffi/src/powerpc/aix.S b/Modules/_ctypes/libffi/src/powerpc/aix.S
index c6f8764..349e78c 100644
--- a/Modules/_ctypes/libffi/src/powerpc/aix.S
+++ b/Modules/_ctypes/libffi/src/powerpc/aix.S
@@ -1,5 +1,5 @@
/* -----------------------------------------------------------------------
- aix.S - Copyright (c) 2002,2009 Free Software Foundation, Inc.
+ aix.S - Copyright (c) 2002, 2009 Free Software Foundation, Inc.
based on darwin.S by John Hornkvist
PowerPC Assembly glue.
@@ -79,6 +79,8 @@
.set f20,20
.set f21,21
+ .extern .ffi_prep_args
+
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
@@ -125,6 +127,7 @@ ffi_call_AIX:
/* Call ffi_prep_args. */
mr r4, r1
bl .ffi_prep_args
+ nop
/* Now do the call. */
ld r0, 0(r29)
@@ -134,7 +137,7 @@ ffi_call_AIX:
mtcrf 0x40, r31
mtctr r0
/* Load all those argument registers. */
- // We have set up a nice stack frame, just load it into registers.
+ /* We have set up a nice stack frame, just load it into registers. */
ld r3, 40+(1*8)(r1)
ld r4, 40+(2*8)(r1)
ld r5, 40+(3*8)(r1)
@@ -147,7 +150,7 @@ ffi_call_AIX:
L1:
/* Load all the FP registers. */
- bf 6,L2 // 2f + 0x18
+ bf 6,L2 /* 2f + 0x18 */
lfd f1,-32-(13*8)(r28)
lfd f2,-32-(12*8)(r28)
lfd f3,-32-(11*8)(r28)
@@ -226,6 +229,7 @@ L(float_return_value):
/* Call ffi_prep_args. */
mr r4, r1
bl .ffi_prep_args
+ nop
/* Now do the call. */
lwz r0, 0(r29)
@@ -235,7 +239,7 @@ L(float_return_value):
mtcrf 0x40, r31
mtctr r0
/* Load all those argument registers. */
- // We have set up a nice stack frame, just load it into registers.
+ /* We have set up a nice stack frame, just load it into registers. */
lwz r3, 20+(1*4)(r1)
lwz r4, 20+(2*4)(r1)
lwz r5, 20+(3*4)(r1)
@@ -248,7 +252,7 @@ L(float_return_value):
L1:
/* Load all the FP registers. */
- bf 6,L2 // 2f + 0x18
+ bf 6,L2 /* 2f + 0x18 */
lfd f1,-16-(13*8)(r28)
lfd f2,-16-(12*8)(r28)
lfd f3,-16-(11*8)(r28)
@@ -303,7 +307,7 @@ L(float_return_value):
#endif
.long 0
.byte 0,0,0,1,128,4,0,0
-//END(ffi_call_AIX)
+/* END(ffi_call_AIX) */
.csect .text[PR]
.align 2
@@ -321,4 +325,4 @@ ffi_call_DARWIN:
blr
.long 0
.byte 0,0,0,0,0,0,0,0
-//END(ffi_call_DARWIN)
+/* END(ffi_call_DARWIN) */
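
The `nop` inserted after each `bl .ffi_prep_args` follows the usual AIX convention, as far as I can tell: it reserves the slot where the linker can place a TOC-restore instruction when the call is routed through cross-module glue, and the added `.extern .ffi_prep_args` simply declares that external entry point explicitly.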
diff --git a/Modules/_ctypes/libffi/src/powerpc/aix_closure.S b/Modules/_ctypes/libffi/src/powerpc/aix_closure.S
index 5c74448..aabd3c3 100644
--- a/Modules/_ctypes/libffi/src/powerpc/aix_closure.S
+++ b/Modules/_ctypes/libffi/src/powerpc/aix_closure.S
@@ -79,6 +79,8 @@
.set f20,20
.set f21,21
+ .extern .ffi_closure_helper_DARWIN
+
#define LIBFFI_ASM
#define JUMPTARGET(name) name
#define L(x) x
@@ -165,6 +167,7 @@ ffi_closure_ASM:
/* look up the proper starting point in table */
/* by using return type as offset */
+ lhz r3, 10(r3) /* load type from return type */
ld r4, LC..60(2) /* get address of jump table */
sldi r3, r3, 4 /* now multiply return type by 16 */
ld r0, 240+16(r1) /* load return address */
@@ -337,8 +340,9 @@ L..finish:
/* look up the proper starting point in table */
/* by using return type as offset */
+ lhz r3, 6(r3) /* load type from return type */
lwz r4, LC..60(2) /* get address of jump table */
- slwi r3, r3, 4 /* now multiply return type by 4 */
+ slwi r3, r3, 4 /* now multiply return type by 16 */
lwz r0, 176+8(r1) /* load return address */
add r3, r3, r4 /* add contents of table to table address */
mtctr r3
diff --git a/Modules/_ctypes/libffi/src/powerpc/asm.h b/Modules/_ctypes/libffi/src/powerpc/asm.h
index e86e6b0..994f62d 100644
--- a/Modules/_ctypes/libffi/src/powerpc/asm.h
+++ b/Modules/_ctypes/libffi/src/powerpc/asm.h
@@ -42,7 +42,7 @@
/* If compiled for profiling, call `_mcount' at the start of each function. */
#ifdef PROF
-/* The mcount code relies on a the return address being on the stack
+/* The mcount code relies on the return address being on the stack
to locate our caller and so it can restore it; so store one just
for its benefit. */
#ifdef PIC
diff --git a/Modules/_ctypes/libffi/src/powerpc/darwin.S b/Modules/_ctypes/libffi/src/powerpc/darwin.S
index d8a1df5..4f987dc 100644
--- a/Modules/_ctypes/libffi/src/powerpc/darwin.S
+++ b/Modules/_ctypes/libffi/src/powerpc/darwin.S
@@ -1,6 +1,6 @@
/* -----------------------------------------------------------------------
darwin.S - Copyright (c) 2000 John Hornkvist
- Copyright (c) 2004 Free Software Foundation, Inc.
+ Copyright (c) 2004, 2010 Free Software Foundation, Inc.
PowerPC Assembly glue.
@@ -24,51 +24,92 @@
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------- */
+#define LIBFFI_ASM
#if defined(__ppc64__)
#define MODE_CHOICE(x, y) y
#else
#define MODE_CHOICE(x, y) x
#endif
-#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */
+#define machine_choice MODE_CHOICE(ppc7400,ppc64)
-#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */
+; Define some pseudo-opcodes for size-independent load & store of GPRs ...
+#define lgu MODE_CHOICE(lwzu, ldu)
+#define lg MODE_CHOICE(lwz,ld)
+#define sg MODE_CHOICE(stw,std)
+#define sgu MODE_CHOICE(stwu,stdu)
+#define sgux MODE_CHOICE(stwux,stdux)
+
+; ... and the size of GPRs and their storage indicator.
+#define GPR_BYTES MODE_CHOICE(4,8)
+#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */
+#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */
+
+; From the ABI doc: "Mac OS X ABI Function Call Guide" Version 2009-02-04.
+#define LINKAGE_SIZE MODE_CHOICE(24,48)
+#define PARAM_AREA MODE_CHOICE(32,64)
+#define SAVED_LR_OFFSET MODE_CHOICE(8,16) /* save position for lr */
+
+/* If there is any FP stuff we make space for all of the regs. */
+#define SAVED_FPR_COUNT 13
+#define FPR_SIZE 8
+#define RESULT_BYTES 16
+
+/* This should be kept in step with the same value in ffi_darwin.c. */
+#define ASM_NEEDS_REGISTERS 4
+#define SAVE_REGS_SIZE (ASM_NEEDS_REGISTERS * GPR_BYTES)
-#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
+
#define JUMPTARGET(name) name
#define L(x) x
-.text
- .align 2
-.globl _ffi_prep_args
-.text
+ .text
.align 2
-.globl _ffi_call_DARWIN
-.text
+ .globl _ffi_prep_args
+
.align 2
+ .globl _ffi_call_DARWIN
+
+ /* We arrive here with:
+ r3 = ptr to extended cif.
+ r4 = -bytes.
+ r5 = cif flags.
+ r6 = ptr to return value.
+ r7 = fn pointer (user func).
+ r8 = fn pointer (ffi_prep_args).
+ r9 = ffi_type* for the ret val. */
+
_ffi_call_DARWIN:
-LFB0:
+Lstartcode:
mr r12,r8 /* We only need r12 until the call,
- so it doesn't have to be saved. */
+ so it does not have to be saved. */
LFB1:
/* Save the old stack pointer as AP. */
mr r8,r1
LCFI0:
+
+ /* Save the retval type in parents frame. */
+ sg r9,(LINKAGE_SIZE+6*GPR_BYTES)(r8)
+
/* Allocate the stack space we need. */
- stwux r1,r1,r4
+ sgux r1,r1,r4
/* Save registers we use. */
mflr r9
+ sg r9,SAVED_LR_OFFSET(r8)
+
+ sg r28,-(4 * GPR_BYTES)(r8)
+ sg r29,-(3 * GPR_BYTES)(r8)
+ sg r30,-(2 * GPR_BYTES)(r8)
+ sg r31,-( GPR_BYTES)(r8)
- stw r28,-16(r8)
- stw r29,-12(r8)
- stw r30,-8(r8)
- stw r31,-4(r8)
+#if !defined(POWERPC_DARWIN)
+ /* The TOC slot is reserved in the Darwin ABI and r2 is volatile. */
+ sg r2,(5 * GPR_BYTES)(r1)
+#endif
- stw r9,8(r8)
- stw r2,20(r1)
LCFI1:
/* Save arguments over call. */
@@ -77,14 +118,17 @@ LCFI1:
mr r29,r7 /* function address, */
mr r28,r8 /* our AP. */
LCFI2:
- /* Call ffi_prep_args. */
+ /* Call ffi_prep_args. r3 = extended cif, r4 = stack ptr copy. */
mr r4,r1
li r9,0
mtctr r12 /* r12 holds address of _ffi_prep_args. */
bctrl
- lwz r2,20(r1)
+#if !defined(POWERPC_DARWIN)
+ /* The TOC slot is reserved in the Darwin ABI and r2 is volatile. */
+ lg r2,(5 * GPR_BYTES)(r1)
+#endif
/* Now do the call.
Set up cr1 with bits 4-7 of the flags. */
mtcrf 0x40,r31
@@ -92,71 +136,130 @@ LCFI2:
mtctr r29
/* Load all those argument registers.
We have set up a nice stack frame, just load it into registers. */
- lwz r3,20+(1*4)(r1)
- lwz r4,20+(2*4)(r1)
- lwz r5,20+(3*4)(r1)
- lwz r6,20+(4*4)(r1)
+ lg r3, (LINKAGE_SIZE )(r1)
+ lg r4, (LINKAGE_SIZE + GPR_BYTES)(r1)
+ lg r5, (LINKAGE_SIZE + 2 * GPR_BYTES)(r1)
+ lg r6, (LINKAGE_SIZE + 3 * GPR_BYTES)(r1)
nop
- lwz r7,20+(5*4)(r1)
- lwz r8,20+(6*4)(r1)
- lwz r9,20+(7*4)(r1)
- lwz r10,20+(8*4)(r1)
+ lg r7, (LINKAGE_SIZE + 4 * GPR_BYTES)(r1)
+ lg r8, (LINKAGE_SIZE + 5 * GPR_BYTES)(r1)
+ lg r9, (LINKAGE_SIZE + 6 * GPR_BYTES)(r1)
+ lg r10,(LINKAGE_SIZE + 7 * GPR_BYTES)(r1)
L1:
- /* Load all the FP registers. */
+ /* ... Load all the FP registers. */
bf 6,L2 /* No floats to load. */
- lfd f1,-16-(13*8)(r28)
- lfd f2,-16-(12*8)(r28)
- lfd f3,-16-(11*8)(r28)
- lfd f4,-16-(10*8)(r28)
+ lfd f1, -SAVE_REGS_SIZE-(13*FPR_SIZE)(r28)
+ lfd f2, -SAVE_REGS_SIZE-(12*FPR_SIZE)(r28)
+ lfd f3, -SAVE_REGS_SIZE-(11*FPR_SIZE)(r28)
+ lfd f4, -SAVE_REGS_SIZE-(10*FPR_SIZE)(r28)
nop
- lfd f5,-16-(9*8)(r28)
- lfd f6,-16-(8*8)(r28)
- lfd f7,-16-(7*8)(r28)
- lfd f8,-16-(6*8)(r28)
+ lfd f5, -SAVE_REGS_SIZE-( 9*FPR_SIZE)(r28)
+ lfd f6, -SAVE_REGS_SIZE-( 8*FPR_SIZE)(r28)
+ lfd f7, -SAVE_REGS_SIZE-( 7*FPR_SIZE)(r28)
+ lfd f8, -SAVE_REGS_SIZE-( 6*FPR_SIZE)(r28)
nop
- lfd f9,-16-(5*8)(r28)
- lfd f10,-16-(4*8)(r28)
- lfd f11,-16-(3*8)(r28)
- lfd f12,-16-(2*8)(r28)
+ lfd f9, -SAVE_REGS_SIZE-( 5*FPR_SIZE)(r28)
+ lfd f10,-SAVE_REGS_SIZE-( 4*FPR_SIZE)(r28)
+ lfd f11,-SAVE_REGS_SIZE-( 3*FPR_SIZE)(r28)
+ lfd f12,-SAVE_REGS_SIZE-( 2*FPR_SIZE)(r28)
nop
- lfd f13,-16-(1*8)(r28)
+ lfd f13,-SAVE_REGS_SIZE-( 1*FPR_SIZE)(r28)
L2:
mr r12,r29 /* Put the target address in r12 as specified. */
mtctr r12
nop
nop
+
/* Make the call. */
bctrl
/* Now, deal with the return value. */
- mtcrf 0x01,r31
- bt 30,L(done_return_value)
- bt 29,L(fp_return_value)
- stw r3,0(r30)
- bf 28,L(done_return_value)
- stw r4,4(r30)
+ /* m64 structure returns can occupy the same set of registers as
+ would be used to pass such a structure as arg0 - so take care
+ not to step on any possibly hot regs. */
- /* Fall through. */
+ /* Get the flags.. */
+ mtcrf 0x03,r31 ; we need c6 & cr7 now.
+ ; FLAG_RETURNS_NOTHING also covers struct ret-by-ref.
+ bt 30,L(done_return_value) ; FLAG_RETURNS_NOTHING
+ bf 27,L(scalar_return_value) ; not FLAG_RETURNS_STRUCT
+
+ /* OK, so we have a struct. */
+#if defined(__ppc64__)
+ bt 31,L(maybe_return_128) ; FLAG_RETURNS_128BITS, special case
-L(done_return_value):
- /* Restore the registers we used and return. */
- lwz r9,8(r28)
- lwz r31,-4(r28)
- mtlr r9
- lwz r30,-8(r28)
- lwz r29,-12(r28)
- lwz r28,-16(r28)
- lwz r1,0(r1)
- blr
+ /* OK, we have to map the return back to a mem struct.
+ We are about to trample the parents param area, so recover the
+ return type. r29 is free, since the call is done. */
+ lg r29,(LINKAGE_SIZE + 6 * GPR_BYTES)(r28)
+
+ sg r3, (LINKAGE_SIZE )(r28)
+ sg r4, (LINKAGE_SIZE + GPR_BYTES)(r28)
+ sg r5, (LINKAGE_SIZE + 2 * GPR_BYTES)(r28)
+ sg r6, (LINKAGE_SIZE + 3 * GPR_BYTES)(r28)
+ nop
+ sg r7, (LINKAGE_SIZE + 4 * GPR_BYTES)(r28)
+ sg r8, (LINKAGE_SIZE + 5 * GPR_BYTES)(r28)
+ sg r9, (LINKAGE_SIZE + 6 * GPR_BYTES)(r28)
+ sg r10,(LINKAGE_SIZE + 7 * GPR_BYTES)(r28)
+ /* OK, so do the block move - we trust that memcpy will not trample
+ the fprs... */
+ mr r3,r30 ; dest
+ addi r4,r28,LINKAGE_SIZE ; source
+ /* The size is a size_t, should be long. */
+ lg r5,0(r29)
+ /* Figure out small structs */
+ cmpi 0,r5,4
+ bgt L3 ; 1, 2 and 4 bytes have special rules.
+ cmpi 0,r5,3
+ beq L3 ; not 3
+ addi r4,r4,8
+ subf r4,r5,r4
+L3:
+ bl _memcpy
+
+ /* ... do we need the FP registers? - recover the flags.. */
+ mtcrf 0x03,r31 ; we need c6 & cr7 now.
+ bf 29,L(done_return_value) /* No floats in the struct. */
+ stfd f1, -SAVE_REGS_SIZE-(13*FPR_SIZE)(r28)
+ stfd f2, -SAVE_REGS_SIZE-(12*FPR_SIZE)(r28)
+ stfd f3, -SAVE_REGS_SIZE-(11*FPR_SIZE)(r28)
+ stfd f4, -SAVE_REGS_SIZE-(10*FPR_SIZE)(r28)
+ nop
+ stfd f5, -SAVE_REGS_SIZE-( 9*FPR_SIZE)(r28)
+ stfd f6, -SAVE_REGS_SIZE-( 8*FPR_SIZE)(r28)
+ stfd f7, -SAVE_REGS_SIZE-( 7*FPR_SIZE)(r28)
+ stfd f8, -SAVE_REGS_SIZE-( 6*FPR_SIZE)(r28)
+ nop
+ stfd f9, -SAVE_REGS_SIZE-( 5*FPR_SIZE)(r28)
+ stfd f10,-SAVE_REGS_SIZE-( 4*FPR_SIZE)(r28)
+ stfd f11,-SAVE_REGS_SIZE-( 3*FPR_SIZE)(r28)
+ stfd f12,-SAVE_REGS_SIZE-( 2*FPR_SIZE)(r28)
+ nop
+ stfd f13,-SAVE_REGS_SIZE-( 1*FPR_SIZE)(r28)
+
+ mr r3,r29 ; ffi_type *
+ mr r4,r30 ; dest
+ addi r5,r28,-SAVE_REGS_SIZE-(13*FPR_SIZE) ; fprs
+ xor r6,r6,r6
+ sg r6,(LINKAGE_SIZE + 7 * GPR_BYTES)(r28)
+ addi r6,r28,(LINKAGE_SIZE + 7 * GPR_BYTES) ; point to a zeroed counter.
+ bl _darwin64_struct_floats_to_mem
+
+ b L(done_return_value)
+#else
+ stw r3,0(r30) ; m32 the only struct return in reg is 4 bytes.
+#endif
+ b L(done_return_value)
L(fp_return_value):
/* Do we have long double to store? */
- bf 31,L(fd_return_value)
+ bf 31,L(fd_return_value) ; FLAG_RETURNS_128BITS
stfd f1,0(r30)
- stfd f2,8(r30)
+ stfd f2,FPR_SIZE(r30)
b L(done_return_value)
L(fd_return_value):
@@ -170,21 +273,57 @@ L(float_return_value):
stfs f1,0(r30)
b L(done_return_value)
+L(scalar_return_value):
+ bt 29,L(fp_return_value) ; FLAG_RETURNS_FP
+ ; ffi_arg is defined as unsigned long.
+ sg r3,0(r30) ; Save the reg.
+ bf 28,L(done_return_value) ; not FLAG_RETURNS_64BITS
+
+#if defined(__ppc64__)
+L(maybe_return_128):
+ std r3,0(r30)
+ bf 31,L(done_return_value) ; not FLAG_RETURNS_128BITS
+ std r4,8(r30)
+#else
+ stw r4,4(r30)
+#endif
+
+ /* Fall through. */
+ /* We want this at the end to simplify eh epilog computation. */
+
+L(done_return_value):
+ /* Restore the registers we used and return. */
+ lg r29,SAVED_LR_OFFSET(r28)
+ ; epilog
+ lg r31,-(1 * GPR_BYTES)(r28)
+ mtlr r29
+ lg r30,-(2 * GPR_BYTES)(r28)
+ lg r29,-(3 * GPR_BYTES)(r28)
+ lg r28,-(4 * GPR_BYTES)(r28)
+ lg r1,0(r1)
+ blr
LFE1:
+ .align 1
/* END(_ffi_call_DARWIN) */
/* Provide a null definition of _ffi_call_AIX. */
-.text
- .align 2
-.globl _ffi_call_AIX
-.text
+ .text
+ .globl _ffi_call_AIX
.align 2
_ffi_call_AIX:
blr
/* END(_ffi_call_AIX) */
-.data
-.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms
+/* EH stuff. */
+
+#define EH_DATA_ALIGN_FACT MODE_CHOICE(0x7c,0x78)
+
+ .static_data
+ .align LOG2_GPR_BYTES
+LLFB0$non_lazy_ptr:
+ .g_long Lstartcode
+
+ .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EH_frame1:
.set L$set$0,LECIE1-LSCIE1
.long L$set$0 ; Length of Common Information Entry
@@ -193,16 +332,17 @@ LSCIE1:
.byte 0x1 ; CIE Version
.ascii "zR\0" ; CIE Augmentation
.byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor
- .byte 0x7c ; sleb128 -4; CIE Data Alignment Factor
+ .byte EH_DATA_ALIGN_FACT ; sleb128 -4; CIE Data Alignment Factor
.byte 0x41 ; CIE RA Column
.byte 0x1 ; uleb128 0x1; Augmentation size
- .byte 0x90 ; FDE Encoding (indirect pcrel)
+ .byte 0x10 ; FDE Encoding (indirect pcrel)
.byte 0xc ; DW_CFA_def_cfa
.byte 0x1 ; uleb128 0x1
.byte 0x0 ; uleb128 0x0
.align LOG2_GPR_BYTES
LECIE1:
-.globl _ffi_call_DARWIN.eh
+
+ .globl _ffi_call_DARWIN.eh
_ffi_call_DARWIN.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1
@@ -210,11 +350,11 @@ LSFDE1:
LASFDE1:
.long LASFDE1-EH_frame1 ; FDE CIE offset
.g_long LLFB0$non_lazy_ptr-. ; FDE initial location
- .set L$set$3,LFE1-LFB0
+ .set L$set$3,LFE1-Lstartcode
.g_long L$set$3 ; FDE address range
.byte 0x0 ; uleb128 0x0; Augmentation size
.byte 0x4 ; DW_CFA_advance_loc4
- .set L$set$4,LCFI0-LFB1
+ .set L$set$4,LCFI0-Lstartcode
.long L$set$4
.byte 0xd ; DW_CFA_def_cfa_register
.byte 0x08 ; uleb128 0x08
@@ -239,7 +379,5 @@ LASFDE1:
.byte 0x1c ; uleb128 0x1c
.align LOG2_GPR_BYTES
LEFDE1:
-.data
- .align LOG2_GPR_BYTES
-LLFB0$non_lazy_ptr:
- .g_long LFB0
+ .align 1
+
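
darwin.S (and darwin_closure.S below) now builds one source for both ppc and ppc64 through the MODE_CHOICE pseudo-opcodes. A tiny illustration of the idiom, restating the macros introduced in the diff:

#if defined(__ppc64__)
#define MODE_CHOICE(x, y) y            /* m64: pick the second operand */
#else
#define MODE_CHOICE(x, y) x            /* m32: pick the first operand  */
#endif

#define GPR_BYTES MODE_CHOICE(4, 8)    /* width of a general register  */
#define lg  MODE_CHOICE(lwz, ld)       /* size-independent GPR load    */
#define sg  MODE_CHOICE(stw, std)      /* size-independent GPR store   */

/* So "lg r3,(LINKAGE_SIZE)(r1)" assembles as "lwz r3,24(r1)" for m32
   and "ld r3,48(r1)" for m64, letting every offset scale with GPR_BYTES. */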
diff --git a/Modules/_ctypes/libffi/src/powerpc/darwin_closure.S b/Modules/_ctypes/libffi/src/powerpc/darwin_closure.S
index 7959838..3f6790f 100644
--- a/Modules/_ctypes/libffi/src/powerpc/darwin_closure.S
+++ b/Modules/_ctypes/libffi/src/powerpc/darwin_closure.S
@@ -1,6 +1,7 @@
/* -----------------------------------------------------------------------
- darwin_closure.S - Copyright (c) 2002, 2003, 2004, Free Software Foundation,
- Inc. based on ppc_closure.S
+ darwin_closure.S - Copyright (c) 2002, 2003, 2004, 2010,
+ Free Software Foundation, Inc.
+ based on ppc_closure.S
PowerPC Assembly glue.
@@ -33,91 +34,177 @@
#define MODE_CHOICE(x, y) x
#endif
-#define lgu MODE_CHOICE(lwzu, ldu)
-
-#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */
+#define machine_choice MODE_CHOICE(ppc7400,ppc64)
+
+; Define some pseudo-opcodes for size-independent load & store of GPRs ...
+#define lgu MODE_CHOICE(lwzu, ldu)
+#define lg MODE_CHOICE(lwz,ld)
+#define sg MODE_CHOICE(stw,std)
+#define sgu MODE_CHOICE(stwu,stdu)
+
+; ... and the size of GPRs and their storage indicator.
+#define GPR_BYTES MODE_CHOICE(4,8)
+#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */
+#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */
+
+; From the ABI doc: "Mac OS X ABI Function Call Guide" Version 2009-02-04.
+#define LINKAGE_SIZE MODE_CHOICE(24,48)
+#define PARAM_AREA MODE_CHOICE(32,64)
+
+#define SAVED_CR_OFFSET MODE_CHOICE(4,8) /* save position for CR */
+#define SAVED_LR_OFFSET MODE_CHOICE(8,16) /* save position for lr */
+
+/* WARNING: if ffi_type is changed... here be monsters.
+ Offsets of items within the result type. */
+#define FFI_TYPE_TYPE MODE_CHOICE(6,10)
+#define FFI_TYPE_ELEM MODE_CHOICE(8,16)
+
+#define SAVED_FPR_COUNT 13
+#define FPR_SIZE 8
+/* biggest m64 struct ret is 8 GPRs + 13 FPRs = 168 bytes, rounded up to 16 bytes = 176. */
+#define RESULT_BYTES MODE_CHOICE(16,176)
+
+; The whole stack frame **MUST** be 16byte-aligned.
+#define SAVE_SIZE (((LINKAGE_SIZE+PARAM_AREA+SAVED_FPR_COUNT*FPR_SIZE+RESULT_BYTES)+15) & -16LL)
+#define PAD_SIZE (SAVE_SIZE-(LINKAGE_SIZE+PARAM_AREA+SAVED_FPR_COUNT*FPR_SIZE+RESULT_BYTES))
+
+#define PARENT_PARM_BASE (SAVE_SIZE+LINKAGE_SIZE)
+#define FP_SAVE_BASE (LINKAGE_SIZE+PARAM_AREA)
+
+#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1050
+; We no longer need the pic symbol stub for Darwin >= 9.
+#define BLCLS_HELP _ffi_closure_helper_DARWIN
+#define STRUCT_RETVALUE_P _darwin64_struct_ret_by_value_p
+#define PASS_STR_FLOATS _darwin64_pass_struct_floats
+#undef WANT_STUB
+#else
+#define BLCLS_HELP L_ffi_closure_helper_DARWIN$stub
+#define STRUCT_RETVALUE_P L_darwin64_struct_ret_by_value_p$stub
+#define PASS_STR_FLOATS L_darwin64_pass_struct_floats$stub
+#define WANT_STUB
+#endif
-#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */
+/* m32/m64
+
+ The stack layout looks like this:
+
+ | Additional params... | | Higher address
+ ~ ~ ~
+ | Parameters (at least 8*4/8=32/64) | | NUM_GPR_ARG_REGISTERS
+ |--------------------------------------------| |
+ | TOC=R2 (AIX) Reserved (Darwin) 4/8 | |
+ |--------------------------------------------| |
+ | Reserved 2*4/8 | |
+ |--------------------------------------------| |
+ | Space for callee`s LR 4/8 | |
+ |--------------------------------------------| |
+ | Saved CR [low word for m64] 4/8 | |
+ |--------------------------------------------| |
+ | Current backchain pointer 4/8 |-/ Parent`s frame.
+ |--------------------------------------------| <+ <<< on entry to
+ | Result Bytes 16/176 | |
+ |--------------------------------------------| |
+ ~ padding to 16-byte alignment ~ ~
+ |--------------------------------------------| |
+ | NUM_FPR_ARG_REGISTERS slots | |
+ | here fp13 .. fp1 13*8 | |
+ |--------------------------------------------| |
+ | R3..R10 8*4/8=32/64 | | NUM_GPR_ARG_REGISTERS
+ |--------------------------------------------| |
+ | TOC=R2 (AIX) Reserved (Darwin) 4/8 | |
+ |--------------------------------------------| | stack |
+ | Reserved [compiler,binder] 2*4/8 | | grows |
+ |--------------------------------------------| | down V
+ | Space for callees LR 4/8 | |
+ |--------------------------------------------| | lower addresses
+ | Saved CR [low word for m64] 4/8 | |
+ |--------------------------------------------| | stack pointer here
+ | Current backchain pointer 4/8 |-/ during
+ |--------------------------------------------| <<< call.
+
+*/
.file "darwin_closure.S"
-.text
- .align LOG2_GPR_BYTES
-.globl _ffi_closure_ASM
-.text
+ .machine machine_choice
+
+ .text
+ .globl _ffi_closure_ASM
.align LOG2_GPR_BYTES
_ffi_closure_ASM:
LFB1:
- mflr r0 /* extract return address */
- stw r0,8(r1) /* save the return address */
+Lstartcode:
+ mflr r0 /* extract return address */
+ sg r0,SAVED_LR_OFFSET(r1) /* save the return address */
LCFI0:
- /* 24 Bytes (Linkage Area)
- 32 Bytes (outgoing parameter area, always reserved)
- 104 Bytes (13*8 from FPR)
- 16 Bytes (result)
- 176 Bytes */
-
- stwu r1,-176(r1) /* skip over caller save area
- keep stack aligned to 16. */
+ sgu r1,-SAVE_SIZE(r1) /* skip over caller save area
+ keep stack aligned to 16. */
LCFI1:
/* We want to build up an area for the parameters passed
in registers. (both floating point and integer) */
- /* We store gpr 3 to gpr 10 (aligned to 4)
- in the parents outgoing area. */
- stw r3,200(r1)
- stw r4,204(r1)
- stw r5,208(r1)
- stw r6,212(r1)
- stw r7,216(r1)
- stw r8,220(r1)
- stw r9,224(r1)
- stw r10,228(r1)
-
- /* We save fpr 1 to fpr 13. (aligned to 8) */
- stfd f1,56(r1)
- stfd f2,64(r1)
- stfd f3,72(r1)
- stfd f4,80(r1)
- stfd f5,88(r1)
- stfd f6,96(r1)
- stfd f7,104(r1)
- stfd f8,112(r1)
- stfd f9,120(r1)
- stfd f10,128(r1)
- stfd f11,136(r1)
- stfd f12,144(r1)
- stfd f13,152(r1)
+ /* Put gpr 3 to gpr 10 in the parents outgoing area...
+ ... the remainder of any params that overflowed the regs will
+ follow here. */
+ sg r3, (PARENT_PARM_BASE )(r1)
+ sg r4, (PARENT_PARM_BASE + GPR_BYTES )(r1)
+ sg r5, (PARENT_PARM_BASE + GPR_BYTES * 2)(r1)
+ sg r6, (PARENT_PARM_BASE + GPR_BYTES * 3)(r1)
+ sg r7, (PARENT_PARM_BASE + GPR_BYTES * 4)(r1)
+ sg r8, (PARENT_PARM_BASE + GPR_BYTES * 5)(r1)
+ sg r9, (PARENT_PARM_BASE + GPR_BYTES * 6)(r1)
+ sg r10,(PARENT_PARM_BASE + GPR_BYTES * 7)(r1)
+
+  /* We save fpr 1 to fpr 13 in our own save frame. */
+ stfd f1, (FP_SAVE_BASE )(r1)
+ stfd f2, (FP_SAVE_BASE + FPR_SIZE )(r1)
+ stfd f3, (FP_SAVE_BASE + FPR_SIZE * 2 )(r1)
+ stfd f4, (FP_SAVE_BASE + FPR_SIZE * 3 )(r1)
+ stfd f5, (FP_SAVE_BASE + FPR_SIZE * 4 )(r1)
+ stfd f6, (FP_SAVE_BASE + FPR_SIZE * 5 )(r1)
+ stfd f7, (FP_SAVE_BASE + FPR_SIZE * 6 )(r1)
+ stfd f8, (FP_SAVE_BASE + FPR_SIZE * 7 )(r1)
+ stfd f9, (FP_SAVE_BASE + FPR_SIZE * 8 )(r1)
+ stfd f10,(FP_SAVE_BASE + FPR_SIZE * 9 )(r1)
+ stfd f11,(FP_SAVE_BASE + FPR_SIZE * 10)(r1)
+ stfd f12,(FP_SAVE_BASE + FPR_SIZE * 11)(r1)
+ stfd f13,(FP_SAVE_BASE + FPR_SIZE * 12)(r1)
/* Set up registers for the routine that actually does the work
get the context pointer from the trampoline. */
- mr r3,r11
+ mr r3,r11
/* Now load up the pointer to the result storage. */
- addi r4,r1,160
+ addi r4,r1,(SAVE_SIZE-RESULT_BYTES)
/* Now load up the pointer to the saved gpr registers. */
- addi r5,r1,200
+ addi r5,r1,PARENT_PARM_BASE
/* Now load up the pointer to the saved fpr registers. */
- addi r6,r1,56
+ addi r6,r1,FP_SAVE_BASE
/* Make the call. */
- bl Lffi_closure_helper_DARWIN$stub
+ bl BLCLS_HELP
+
+ /* r3 contains the rtype pointer... save it since we will need
+ it later. */
+ sg r3,LINKAGE_SIZE(r1) ; ffi_type * result_type
+ lg r0,0(r3) ; size => r0
+ lhz r3,FFI_TYPE_TYPE(r3) ; type => r3
- /* Now r3 contains the return type
- so use it to look up in a table
+  /* The helper will have intercepted structure returns and inserted
+ the caller`s destination address for structs returned by ref. */
+
+ /* r3 contains the return type so use it to look up in a table
so we know how to deal with each type. */
- /* Look up the proper starting point in table
- by using return type as offset. */
- addi r5,r1,160 /* Get pointer to results area. */
- bl Lget_ret_type0_addr /* Get pointer to Lret_type0 into LR. */
- mflr r4 /* Move to r4. */
- slwi r3,r3,4 /* Now multiply return type by 16. */
- add r3,r3,r4 /* Add contents of table to table address. */
- mtctr r3
- bctr /* Jump to it. */
+ addi r5,r1,(SAVE_SIZE-RESULT_BYTES) /* Otherwise, our return is here. */
+ bl Lget_ret_type0_addr /* Get pointer to Lret_type0 into LR. */
+ mflr r4 /* Move to r4. */
+ slwi r3,r3,4 /* Now multiply return type by 16. */
+ add r3,r3,r4 /* Add contents of table to table address. */
+ mtctr r3
+ bctr /* Jump to it. */
LFE1:
/* Each of the ret_typeX code fragments has to be exactly 16 bytes long
(4 instructions). For cache effectiveness we align to a 16 byte boundary
@@ -140,7 +227,7 @@ Lret_type0:
/* case FFI_TYPE_INT */
Lret_type1:
- lwz r3,0(r5)
+ lg r3,0(r5)
b Lfinish
nop
nop
@@ -168,85 +255,224 @@ Lret_type4:
/* case FFI_TYPE_UINT8 */
Lret_type5:
+#if defined(__ppc64__)
+ lbz r3,7(r5)
+#else
lbz r3,3(r5)
+#endif
b Lfinish
nop
nop
/* case FFI_TYPE_SINT8 */
Lret_type6:
+#if defined(__ppc64__)
+ lbz r3,7(r5)
+#else
lbz r3,3(r5)
+#endif
extsb r3,r3
b Lfinish
nop
/* case FFI_TYPE_UINT16 */
Lret_type7:
+#if defined(__ppc64__)
+ lhz r3,6(r5)
+#else
lhz r3,2(r5)
+#endif
b Lfinish
nop
nop
/* case FFI_TYPE_SINT16 */
Lret_type8:
+#if defined(__ppc64__)
+ lha r3,6(r5)
+#else
lha r3,2(r5)
+#endif
b Lfinish
nop
nop
/* case FFI_TYPE_UINT32 */
Lret_type9:
+#if defined(__ppc64__)
+ lwz r3,4(r5)
+#else
lwz r3,0(r5)
+#endif
b Lfinish
nop
nop
/* case FFI_TYPE_SINT32 */
Lret_type10:
+#if defined(__ppc64__)
+ lwz r3,4(r5)
+#else
lwz r3,0(r5)
+#endif
b Lfinish
nop
nop
/* case FFI_TYPE_UINT64 */
Lret_type11:
+#if defined(__ppc64__)
+ lg r3,0(r5)
+ b Lfinish
+ nop
+#else
lwz r3,0(r5)
lwz r4,4(r5)
b Lfinish
+#endif
nop
/* case FFI_TYPE_SINT64 */
Lret_type12:
+#if defined(__ppc64__)
+ lg r3,0(r5)
+ b Lfinish
+ nop
+#else
lwz r3,0(r5)
lwz r4,4(r5)
b Lfinish
+#endif
nop
/* case FFI_TYPE_STRUCT */
Lret_type13:
+#if defined(__ppc64__)
+ lg r3,0(r5) ; we need at least this...
+ cmpi 0,r0,4
+ bgt Lstructend ; not a special small case
+ b Lsmallstruct ; see if we need more.
+#else
+ cmpi 0,r0,4
+ bgt Lfinish ; not by value
+ lg r3,0(r5)
b Lfinish
- nop
- nop
- nop
-
+#endif
/* case FFI_TYPE_POINTER */
Lret_type14:
- lwz r3,0(r5)
+ lg r3,0(r5)
b Lfinish
nop
nop
+#if defined(__ppc64__)
+Lsmallstruct:
+ beq Lfour ; continuation of Lret13.
+ cmpi 0,r0,3
+ beq Lfinish ; don`t adjust this - can`t be any floats here...
+ srdi r3,r3,48
+ cmpi 0,r0,2
+ beq Lfinish ; .. or here ..
+ srdi r3,r3,8
+ b Lfinish ; .. or here.
+
+Lfour:
+ lg r6,LINKAGE_SIZE(r1) ; get the result type
+ lg r6,FFI_TYPE_ELEM(r6) ; elements array pointer
+ lg r6,0(r6) ; first element
+ lhz r0,FFI_TYPE_TYPE(r6) ; OK go the type
+ cmpi 0,r0,2 ; FFI_TYPE_FLOAT
+ bne Lfourint
+ lfs f1,0(r5) ; just one float in the struct.
+ b Lfinish
+
+Lfourint:
+ srdi r3,r3,32 ; four bytes.
+ b Lfinish
+
+Lstructend:
+ lg r3,LINKAGE_SIZE(r1) ; get the result type
+ bl STRUCT_RETVALUE_P
+ cmpi 0,r3,0
+ beq Lfinish ; nope.
+ /* Recover a pointer to the results. */
+ addi r11,r1,(SAVE_SIZE-RESULT_BYTES)
+ lg r3,0(r11) ; we need at least this...
+ lg r4,8(r11)
+ cmpi 0,r0,16
+ beq Lfinish ; special case 16 bytes we don't consider floats.
+
+ /* OK, frustratingly, the process of saving the struct to mem might have
+ messed with the FPRs, so we have to re-load them :(.
+ We`ll use our FPRs space again - calling:
+ void darwin64_pass_struct_floats (ffi_type *s, char *src,
+ unsigned *nfpr, double **fprs)
+ We`ll temporarily pinch the first two slots of the param area for local
+ vars used by the routine. */
+ xor r6,r6,r6
+ addi r5,r1,PARENT_PARM_BASE ; some space
+ sg r6,0(r5) ; *nfpr zeroed.
+ addi r6,r5,8 ; **fprs
+ addi r3,r1,FP_SAVE_BASE ; pointer to FPRs space
+ sg r3,0(r6)
+ mr r4,r11 ; the struct is here...
+ lg r3,LINKAGE_SIZE(r1) ; ffi_type * result_type.
+ bl PASS_STR_FLOATS ; get struct floats into FPR save space.
+ /* See if we used any floats */
+ lwz r0,(SAVE_SIZE-RESULT_BYTES)(r1)
+ cmpi 0,r0,0
+ beq Lstructints ; nope.
+ /* OK load `em up... */
+ lfd f1, (FP_SAVE_BASE )(r1)
+ lfd f2, (FP_SAVE_BASE + FPR_SIZE )(r1)
+ lfd f3, (FP_SAVE_BASE + FPR_SIZE * 2 )(r1)
+ lfd f4, (FP_SAVE_BASE + FPR_SIZE * 3 )(r1)
+ lfd f5, (FP_SAVE_BASE + FPR_SIZE * 4 )(r1)
+ lfd f6, (FP_SAVE_BASE + FPR_SIZE * 5 )(r1)
+ lfd f7, (FP_SAVE_BASE + FPR_SIZE * 6 )(r1)
+ lfd f8, (FP_SAVE_BASE + FPR_SIZE * 7 )(r1)
+ lfd f9, (FP_SAVE_BASE + FPR_SIZE * 8 )(r1)
+ lfd f10,(FP_SAVE_BASE + FPR_SIZE * 9 )(r1)
+ lfd f11,(FP_SAVE_BASE + FPR_SIZE * 10)(r1)
+ lfd f12,(FP_SAVE_BASE + FPR_SIZE * 11)(r1)
+ lfd f13,(FP_SAVE_BASE + FPR_SIZE * 12)(r1)
+
+ /* point back at our saved struct. */
+Lstructints:
+ addi r11,r1,(SAVE_SIZE-RESULT_BYTES)
+ lg r3,0(r11) ; we end up picking the
+ lg r4,8(r11) ; first two again.
+ lg r5,16(r11)
+ lg r6,24(r11)
+ lg r7,32(r11)
+ lg r8,40(r11)
+ lg r9,48(r11)
+ lg r10,56(r11)
+#endif
+
/* case done */
Lfinish:
- addi r1,r1,176 /* Restore stack pointer. */
- lwz r0,8(r1) /* Get return address. */
- mtlr r0 /* Reset link register. */
+ addi r1,r1,SAVE_SIZE /* Restore stack pointer. */
+ lg r0,SAVED_LR_OFFSET(r1) /* Get return address. */
+ mtlr r0 /* Reset link register. */
blr
-
+Lendcode:
+ .align 1
+
/* END(ffi_closure_ASM) */
-.data
-.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
+/* EH frame stuff. */
+#define EH_DATA_ALIGN_FACT MODE_CHOICE(0x7c,0x78)
+/* 176, 400 */
+#define EH_FRAME_OFFSETA MODE_CHOICE(176,0x90)
+#define EH_FRAME_OFFSETB MODE_CHOICE(1,3)
+
+ .static_data
+ .align LOG2_GPR_BYTES
+LLFB1$non_lazy_ptr:
+ .g_long Lstartcode
+
+ .section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EH_frame1:
.set L$set$0,LECIE1-LSCIE1
.long L$set$0 ; Length of Common Information Entry
@@ -255,16 +481,16 @@ LSCIE1:
.byte 0x1 ; CIE Version
.ascii "zR\0" ; CIE Augmentation
.byte 0x1 ; uleb128 0x1; CIE Code Alignment Factor
- .byte 0x7c ; sleb128 -4; CIE Data Alignment Factor
+ .byte EH_DATA_ALIGN_FACT ; sleb128 -4; CIE Data Alignment Factor
.byte 0x41 ; CIE RA Column
.byte 0x1 ; uleb128 0x1; Augmentation size
- .byte 0x90 ; FDE Encoding (indirect pcrel)
+ .byte 0x10 ; FDE Encoding (indirect pcrel)
.byte 0xc ; DW_CFA_def_cfa
.byte 0x1 ; uleb128 0x1
.byte 0x0 ; uleb128 0x0
.align LOG2_GPR_BYTES
LECIE1:
-.globl _ffi_closure_ASM.eh
+ .globl _ffi_closure_ASM.eh
_ffi_closure_ASM.eh:
LSFDE1:
.set L$set$1,LEFDE1-LASFDE1
@@ -273,45 +499,78 @@ LSFDE1:
LASFDE1:
.long LASFDE1-EH_frame1 ; FDE CIE offset
.g_long LLFB1$non_lazy_ptr-. ; FDE initial location
- .set L$set$3,LFE1-LFB1
+ .set L$set$3,LFE1-Lstartcode
.g_long L$set$3 ; FDE address range
.byte 0x0 ; uleb128 0x0; Augmentation size
.byte 0x4 ; DW_CFA_advance_loc4
.set L$set$3,LCFI1-LCFI0
.long L$set$3
.byte 0xe ; DW_CFA_def_cfa_offset
- .byte 176,1 ; uleb128 176
+ .byte EH_FRAME_OFFSETA,EH_FRAME_OFFSETB ; uleb128 176,1/190,3
.byte 0x4 ; DW_CFA_advance_loc4
- .set L$set$4,LCFI0-LFB1
+ .set L$set$4,LCFI0-Lstartcode
.long L$set$4
.byte 0x11 ; DW_CFA_offset_extended_sf
.byte 0x41 ; uleb128 0x41
.byte 0x7e ; sleb128 -2
.align LOG2_GPR_BYTES
LEFDE1:
-.data
- .align LOG2_GPR_BYTES
-LDFCM0:
-.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
- .align LOG2_GPR_BYTES
-Lffi_closure_helper_DARWIN$stub:
-#if 1
+ .align 1
+
+#ifdef WANT_STUB
+ .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
+ .align 5
+L_ffi_closure_helper_DARWIN$stub:
.indirect_symbol _ffi_closure_helper_DARWIN
- mflr r0
- bcl 20,31,LO$ffi_closure_helper_DARWIN
-LO$ffi_closure_helper_DARWIN:
- mflr r11
- addis r11,r11,ha16(L_ffi_closure_helper_DARWIN$lazy_ptr - LO$ffi_closure_helper_DARWIN)
- mtlr r0
- lgu r12,lo16(L_ffi_closure_helper_DARWIN$lazy_ptr - LO$ffi_closure_helper_DARWIN)(r11)
- mtctr r12
+ mflr r0
+ bcl 20,31,"L00000000001$spb"
+"L00000000001$spb":
+ mflr r11
+ addis r11,r11,ha16(L_ffi_closure_helper_DARWIN$lazy_ptr-"L00000000001$spb")
+ mtlr r0
+ lwzu r12,lo16(L_ffi_closure_helper_DARWIN$lazy_ptr-"L00000000001$spb")(r11)
+ mtctr r12
bctr
-.lazy_symbol_pointer
+ .lazy_symbol_pointer
L_ffi_closure_helper_DARWIN$lazy_ptr:
.indirect_symbol _ffi_closure_helper_DARWIN
- .g_long dyld_stub_binding_helper
+ .g_long dyld_stub_binding_helper
+
+#if defined(__ppc64__)
+ .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
+ .align 5
+L_darwin64_struct_ret_by_value_p$stub:
+ .indirect_symbol _darwin64_struct_ret_by_value_p
+ mflr r0
+ bcl 20,31,"L00000000002$spb"
+"L00000000002$spb":
+ mflr r11
+ addis r11,r11,ha16(L_darwin64_struct_ret_by_value_p$lazy_ptr-"L00000000002$spb")
+ mtlr r0
+ lwzu r12,lo16(L_darwin64_struct_ret_by_value_p$lazy_ptr-"L00000000002$spb")(r11)
+ mtctr r12
+ bctr
+ .lazy_symbol_pointer
+L_darwin64_struct_ret_by_value_p$lazy_ptr:
+ .indirect_symbol _darwin64_struct_ret_by_value_p
+ .g_long dyld_stub_binding_helper
+
+ .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
+ .align 5
+L_darwin64_pass_struct_floats$stub:
+ .indirect_symbol _darwin64_pass_struct_floats
+ mflr r0
+ bcl 20,31,"L00000000003$spb"
+"L00000000003$spb":
+ mflr r11
+ addis r11,r11,ha16(L_darwin64_pass_struct_floats$lazy_ptr-"L00000000003$spb")
+ mtlr r0
+ lwzu r12,lo16(L_darwin64_pass_struct_floats$lazy_ptr-"L00000000003$spb")(r11)
+ mtctr r12
+ bctr
+ .lazy_symbol_pointer
+L_darwin64_pass_struct_floats$lazy_ptr:
+ .indirect_symbol _darwin64_pass_struct_floats
+ .g_long dyld_stub_binding_helper
+# endif
#endif
-.data
- .align LOG2_GPR_BYTES
-LLFB1$non_lazy_ptr:
- .g_long LFB1
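
The hard-coded FFI_TYPE_TYPE / FFI_TYPE_ELEM offsets near the top of darwin_closure.S ("here be monsters") have to track the C layout of ffi_type. A quick, hedged sanity check of those numbers, not part of the patch:

#include <ffi.h>
#include <stddef.h>
#include <assert.h>

/* ffi_type is { size_t size; unsigned short alignment, type; ffi_type **elements; },
   so the assembly constants must equal these offsetof values. */
int main (void)
{
#if defined(__ppc64__)
  assert (offsetof (ffi_type, type) == 10);      /* FFI_TYPE_TYPE, m64 */
  assert (offsetof (ffi_type, elements) == 16);  /* FFI_TYPE_ELEM, m64 */
#else
  assert (offsetof (ffi_type, type) == 6);       /* FFI_TYPE_TYPE, m32 */
  assert (offsetof (ffi_type, elements) == 8);   /* FFI_TYPE_ELEM, m32 */
#endif
  return 0;
}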
diff --git a/Modules/_ctypes/libffi/src/powerpc/ffi.c b/Modules/_ctypes/libffi/src/powerpc/ffi.c
index 75784a9..54f2731 100644
--- a/Modules/_ctypes/libffi/src/powerpc/ffi.c
+++ b/Modules/_ctypes/libffi/src/powerpc/ffi.c
@@ -1,7 +1,9 @@
/* -----------------------------------------------------------------------
- ffi.c - Copyright (c) 1998 Geoffrey Keating
- Copyright (C) 2007, 2008 Free Software Foundation, Inc
- Copyright (C) 2008 Red Hat, Inc
+ ffi.c - Copyright (C) 2011 Anthony Green
+ Copyright (C) 2011 Kyle Moffett
+ Copyright (C) 2008 Red Hat, Inc
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc
+ Copyright (c) 1998 Geoffrey Keating
PowerPC Foreign Function Interface
@@ -39,32 +41,33 @@ enum {
/* The assembly depends on these exact flags. */
FLAG_RETURNS_SMST = 1 << (31-31), /* Used for FFI_SYSV small structs. */
FLAG_RETURNS_NOTHING = 1 << (31-30), /* These go in cr7 */
+#ifndef __NO_FPRS__
FLAG_RETURNS_FP = 1 << (31-29),
+#endif
FLAG_RETURNS_64BITS = 1 << (31-28),
FLAG_RETURNS_128BITS = 1 << (31-27), /* cr6 */
+
FLAG_SYSV_SMST_R4 = 1 << (31-26), /* use r4 for FFI_SYSV 8 byte
structs. */
FLAG_SYSV_SMST_R3 = 1 << (31-25), /* use r3 for FFI_SYSV 4 byte
structs. */
- /* Bits (31-24) through (31-19) store shift value for SMST */
FLAG_ARG_NEEDS_COPY = 1 << (31- 7),
+#ifndef __NO_FPRS__
FLAG_FP_ARGUMENTS = 1 << (31- 6), /* cr1.eq; specified by ABI */
+#endif
FLAG_4_GPR_ARGUMENTS = 1 << (31- 5),
FLAG_RETVAL_REFERENCE = 1 << (31- 4)
};
/* About the SYSV ABI. */
-unsigned int NUM_GPR_ARG_REGISTERS = 8;
+#define ASM_NEEDS_REGISTERS 4
+#define NUM_GPR_ARG_REGISTERS 8
#ifndef __NO_FPRS__
-unsigned int NUM_FPR_ARG_REGISTERS = 8;
-#else
-unsigned int NUM_FPR_ARG_REGISTERS = 0;
+# define NUM_FPR_ARG_REGISTERS 8
#endif
-enum { ASM_NEEDS_REGISTERS = 4 };
-
/* ffi_prep_args_SYSV is called by the assembly routine once stack space
has been allocated for the function's arguments.
@@ -113,10 +116,12 @@ ffi_prep_args_SYSV (extended_cif *ecif, unsigned *const stack)
valp gpr_base;
int intarg_count;
+#ifndef __NO_FPRS__
/* 'fpr_base' points at the space for fpr1, and grows upwards as
we use FPR registers. */
valp fpr_base;
int fparg_count;
+#endif
/* 'copy_space' grows down as we put structures in it. It should
stay 16-byte aligned. */
@@ -125,9 +130,8 @@ ffi_prep_args_SYSV (extended_cif *ecif, unsigned *const stack)
/* 'next_arg' grows up as we put parameters in it. */
valp next_arg;
- int i, ii MAYBE_UNUSED;
+ int i;
ffi_type **ptr;
- double double_tmp;
union {
void **v;
char **c;
@@ -143,21 +147,23 @@ ffi_prep_args_SYSV (extended_cif *ecif, unsigned *const stack)
size_t struct_copy_size;
unsigned gprvalue;
- if (ecif->cif->abi == FFI_LINUX_SOFT_FLOAT)
- NUM_FPR_ARG_REGISTERS = 0;
-
stacktop.c = (char *) stack + bytes;
gpr_base.u = stacktop.u - ASM_NEEDS_REGISTERS - NUM_GPR_ARG_REGISTERS;
intarg_count = 0;
+#ifndef __NO_FPRS__
+ double double_tmp;
fpr_base.d = gpr_base.d - NUM_FPR_ARG_REGISTERS;
fparg_count = 0;
copy_space.c = ((flags & FLAG_FP_ARGUMENTS) ? fpr_base.c : gpr_base.c);
+#else
+ copy_space.c = gpr_base.c;
+#endif
next_arg.u = stack + 2;
/* Check that everything starts aligned properly. */
- FFI_ASSERT (((unsigned) (char *) stack & 0xF) == 0);
- FFI_ASSERT (((unsigned) copy_space.c & 0xF) == 0);
- FFI_ASSERT (((unsigned) stacktop.c & 0xF) == 0);
+ FFI_ASSERT (((unsigned long) (char *) stack & 0xF) == 0);
+ FFI_ASSERT (((unsigned long) copy_space.c & 0xF) == 0);
+ FFI_ASSERT (((unsigned long) stacktop.c & 0xF) == 0);
FFI_ASSERT ((bytes & 0xF) == 0);
FFI_ASSERT (copy_space.c >= next_arg.c);
@@ -174,12 +180,28 @@ ffi_prep_args_SYSV (extended_cif *ecif, unsigned *const stack)
i > 0;
i--, ptr++, p_argv.v++)
{
- switch ((*ptr)->type)
- {
+ unsigned short typenum = (*ptr)->type;
+
+ /* We may need to handle some values depending on ABI */
+ if (ecif->cif->abi == FFI_LINUX_SOFT_FLOAT) {
+ if (typenum == FFI_TYPE_FLOAT)
+ typenum = FFI_TYPE_UINT32;
+ if (typenum == FFI_TYPE_DOUBLE)
+ typenum = FFI_TYPE_UINT64;
+ if (typenum == FFI_TYPE_LONGDOUBLE)
+ typenum = FFI_TYPE_UINT128;
+ } else if (ecif->cif->abi != FFI_LINUX) {
+#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
+ if (typenum == FFI_TYPE_LONGDOUBLE)
+ typenum = FFI_TYPE_STRUCT;
+#endif
+ }
+
+ /* Now test the translated value */
+ switch (typenum) {
+#ifndef __NO_FPRS__
case FFI_TYPE_FLOAT:
/* With FFI_LINUX_SOFT_FLOAT floats are handled like UINT32. */
- if (ecif->cif->abi == FFI_LINUX_SOFT_FLOAT)
- goto soft_float_prep;
double_tmp = **p_argv.f;
if (fparg_count >= NUM_FPR_ARG_REGISTERS)
{
@@ -195,8 +217,6 @@ ffi_prep_args_SYSV (extended_cif *ecif, unsigned *const stack)
case FFI_TYPE_DOUBLE:
/* With FFI_LINUX_SOFT_FLOAT doubles are handled like UINT64. */
- if (ecif->cif->abi == FFI_LINUX_SOFT_FLOAT)
- goto soft_double_prep;
double_tmp = **p_argv.d;
if (fparg_count >= NUM_FPR_ARG_REGISTERS)
@@ -218,43 +238,6 @@ ffi_prep_args_SYSV (extended_cif *ecif, unsigned *const stack)
#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
case FFI_TYPE_LONGDOUBLE:
- if ((ecif->cif->abi != FFI_LINUX)
- && (ecif->cif->abi != FFI_LINUX_SOFT_FLOAT))
- goto do_struct;
- /* The soft float ABI for long doubles works like this,
- a long double is passed in four consecutive gprs if available.
- A maximum of 2 long doubles can be passed in gprs.
- If we do not have 4 gprs left, the long double is passed on the
- stack, 4-byte aligned. */
- if (ecif->cif->abi == FFI_LINUX_SOFT_FLOAT)
- {
- unsigned int int_tmp = (*p_argv.ui)[0];
- if (intarg_count >= NUM_GPR_ARG_REGISTERS - 3)
- {
- if (intarg_count < NUM_GPR_ARG_REGISTERS)
- intarg_count += NUM_GPR_ARG_REGISTERS - intarg_count;
- *next_arg.u = int_tmp;
- next_arg.u++;
- for (ii = 1; ii < 4; ii++)
- {
- int_tmp = (*p_argv.ui)[ii];
- *next_arg.u = int_tmp;
- next_arg.u++;
- }
- }
- else
- {
- *gpr_base.u++ = int_tmp;
- for (ii = 1; ii < 4; ii++)
- {
- int_tmp = (*p_argv.ui)[ii];
- *gpr_base.u++ = int_tmp;
- }
- }
- intarg_count +=4;
- }
- else
- {
double_tmp = (*p_argv.d)[0];
if (fparg_count >= NUM_FPR_ARG_REGISTERS - 1)
@@ -280,13 +263,40 @@ ffi_prep_args_SYSV (extended_cif *ecif, unsigned *const stack)
fparg_count += 2;
FFI_ASSERT (flags & FLAG_FP_ARGUMENTS);
- }
break;
#endif
+#endif /* have FPRs */
+
+ /*
+   * The soft float ABI for long doubles works like this: a long double
+ * is passed in four consecutive GPRs if available. A maximum of 2
+ * long doubles can be passed in gprs. If we do not have 4 GPRs
+ * left, the long double is passed on the stack, 4-byte aligned.
+ */
+ case FFI_TYPE_UINT128: {
+ unsigned int int_tmp = (*p_argv.ui)[0];
+ unsigned int ii;
+ if (intarg_count >= NUM_GPR_ARG_REGISTERS - 3) {
+ if (intarg_count < NUM_GPR_ARG_REGISTERS)
+ intarg_count += NUM_GPR_ARG_REGISTERS - intarg_count;
+ *(next_arg.u++) = int_tmp;
+ for (ii = 1; ii < 4; ii++) {
+ int_tmp = (*p_argv.ui)[ii];
+ *(next_arg.u++) = int_tmp;
+ }
+ } else {
+ *(gpr_base.u++) = int_tmp;
+ for (ii = 1; ii < 4; ii++) {
+ int_tmp = (*p_argv.ui)[ii];
+ *(gpr_base.u++) = int_tmp;
+ }
+ }
+ intarg_count += 4;
+ break;
+ }
case FFI_TYPE_UINT64:
case FFI_TYPE_SINT64:
- soft_double_prep:
if (intarg_count == NUM_GPR_ARG_REGISTERS-1)
intarg_count++;
if (intarg_count >= NUM_GPR_ARG_REGISTERS)
@@ -319,9 +329,6 @@ ffi_prep_args_SYSV (extended_cif *ecif, unsigned *const stack)
break;
case FFI_TYPE_STRUCT:
-#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
- do_struct:
-#endif
struct_copy_size = ((*ptr)->size + 15) & ~0xF;
copy_space.c -= struct_copy_size;
memcpy (copy_space.c, *p_argv.c, (*ptr)->size);
@@ -349,7 +356,6 @@ ffi_prep_args_SYSV (extended_cif *ecif, unsigned *const stack)
case FFI_TYPE_UINT32:
case FFI_TYPE_SINT32:
case FFI_TYPE_POINTER:
- soft_float_prep:
gprvalue = **p_argv.ui;
@@ -366,8 +372,16 @@ ffi_prep_args_SYSV (extended_cif *ecif, unsigned *const stack)
/* Check that we didn't overrun the stack... */
FFI_ASSERT (copy_space.c >= next_arg.c);
FFI_ASSERT (gpr_base.u <= stacktop.u - ASM_NEEDS_REGISTERS);
+ /* The assert below is testing that the number of integer arguments agrees
+ with the number found in ffi_prep_cif_machdep(). However, intarg_count
+     is incremented whenever we place an FP arg on the stack, so account for
+ that before our assert test. */
+#ifndef __NO_FPRS__
+ if (fparg_count > NUM_FPR_ARG_REGISTERS)
+ intarg_count -= fparg_count - NUM_FPR_ARG_REGISTERS;
FFI_ASSERT (fpr_base.u
<= stacktop.u - ASM_NEEDS_REGISTERS - NUM_GPR_ARG_REGISTERS);
+#endif
FFI_ASSERT (flags & FLAG_4_GPR_ARGUMENTS || intarg_count <= 4);
}
@@ -604,9 +618,6 @@ ffi_prep_cif_machdep (ffi_cif *cif)
unsigned type = cif->rtype->type;
unsigned size = cif->rtype->size;
- if (cif->abi == FFI_LINUX_SOFT_FLOAT)
- NUM_FPR_ARG_REGISTERS = 0;
-
if (cif->abi != FFI_LINUX64)
{
/* All the machine-independent calculation of cif->bytes will be wrong.
@@ -646,13 +657,26 @@ ffi_prep_cif_machdep (ffi_cif *cif)
- Single/double FP values in fpr1, long double in fpr1,fpr2.
     - soft-float float/doubles are treated as UINT32/UINT64 respectively.
- soft-float long doubles are returned in gpr3-gpr6. */
+ /* First translate for softfloat/nonlinux */
+ if (cif->abi == FFI_LINUX_SOFT_FLOAT) {
+ if (type == FFI_TYPE_FLOAT)
+ type = FFI_TYPE_UINT32;
+ if (type == FFI_TYPE_DOUBLE)
+ type = FFI_TYPE_UINT64;
+ if (type == FFI_TYPE_LONGDOUBLE)
+ type = FFI_TYPE_UINT128;
+ } else if (cif->abi != FFI_LINUX && cif->abi != FFI_LINUX64) {
+#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
+ if (type == FFI_TYPE_LONGDOUBLE)
+ type = FFI_TYPE_STRUCT;
+#endif
+ }
+
switch (type)
{
+#ifndef __NO_FPRS__
#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
case FFI_TYPE_LONGDOUBLE:
- if (cif->abi != FFI_LINUX && cif->abi != FFI_LINUX64
- && cif->abi != FFI_LINUX_SOFT_FLOAT)
- goto byref;
flags |= FLAG_RETURNS_128BITS;
/* Fall through. */
#endif
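
The same soft-float/non-Linux type translation now appears three times (ffi_prep_args_SYSV, ffi_prep_cif_machdep and ffi_closure_helper_SYSV). Factored out here purely as a readability sketch; the helper name is invented, and FFI_TYPE_UINT128 is the internal marker value the patch relies on:

static unsigned short
translate_softfloat_type (ffi_abi abi, unsigned short typenum)
{
  if (abi == FFI_LINUX_SOFT_FLOAT)
    {
      /* No FPRs: FP types degrade to integer types of the same width.  */
      if (typenum == FFI_TYPE_FLOAT)      return FFI_TYPE_UINT32;
      if (typenum == FFI_TYPE_DOUBLE)     return FFI_TYPE_UINT64;
      if (typenum == FFI_TYPE_LONGDOUBLE) return FFI_TYPE_UINT128;
    }
#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
  else if (abi != FFI_LINUX && abi != FFI_LINUX64)
    {
      /* Older ABIs pass/return the 128-bit long double like a struct.  */
      if (typenum == FFI_TYPE_LONGDOUBLE)
        return FFI_TYPE_STRUCT;
    }
#endif
  return typenum;
}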
@@ -660,11 +684,13 @@ ffi_prep_cif_machdep (ffi_cif *cif)
flags |= FLAG_RETURNS_64BITS;
/* Fall through. */
case FFI_TYPE_FLOAT:
- /* With FFI_LINUX_SOFT_FLOAT no fp registers are used. */
- if (cif->abi != FFI_LINUX_SOFT_FLOAT)
- flags |= FLAG_RETURNS_FP;
+ flags |= FLAG_RETURNS_FP;
break;
+#endif
+ case FFI_TYPE_UINT128:
+ flags |= FLAG_RETURNS_128BITS;
+ /* Fall through. */
case FFI_TYPE_UINT64:
case FFI_TYPE_SINT64:
flags |= FLAG_RETURNS_64BITS;
@@ -699,9 +725,7 @@ ffi_prep_cif_machdep (ffi_cif *cif)
}
}
}
-#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
- byref:
-#endif
+
intarg_count++;
flags |= FLAG_RETVAL_REFERENCE;
/* Fall through. */
@@ -722,39 +746,36 @@ ffi_prep_cif_machdep (ffi_cif *cif)
Stuff on the stack needs to keep proper alignment. */
for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++)
{
- switch ((*ptr)->type)
- {
+ unsigned short typenum = (*ptr)->type;
+
+ /* We may need to handle some values depending on ABI */
+ if (cif->abi == FFI_LINUX_SOFT_FLOAT) {
+ if (typenum == FFI_TYPE_FLOAT)
+ typenum = FFI_TYPE_UINT32;
+ if (typenum == FFI_TYPE_DOUBLE)
+ typenum = FFI_TYPE_UINT64;
+ if (typenum == FFI_TYPE_LONGDOUBLE)
+ typenum = FFI_TYPE_UINT128;
+ } else if (cif->abi != FFI_LINUX && cif->abi != FFI_LINUX64) {
+#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
+ if (typenum == FFI_TYPE_LONGDOUBLE)
+ typenum = FFI_TYPE_STRUCT;
+#endif
+ }
+
+ switch (typenum) {
+#ifndef __NO_FPRS__
case FFI_TYPE_FLOAT:
- /* With FFI_LINUX_SOFT_FLOAT floats are handled like UINT32. */
- if (cif->abi == FFI_LINUX_SOFT_FLOAT)
- goto soft_float_cif;
fparg_count++;
/* floating singles are not 8-aligned on stack */
break;
#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
case FFI_TYPE_LONGDOUBLE:
- if (cif->abi != FFI_LINUX && cif->abi != FFI_LINUX_SOFT_FLOAT)
- goto do_struct;
- if (cif->abi == FFI_LINUX_SOFT_FLOAT)
- {
- if (intarg_count >= NUM_GPR_ARG_REGISTERS - 3
- || intarg_count < NUM_GPR_ARG_REGISTERS)
- /* A long double in FFI_LINUX_SOFT_FLOAT can use only
- a set of four consecutive gprs. If we have not enough,
- we have to adjust the intarg_count value. */
- intarg_count += NUM_GPR_ARG_REGISTERS - intarg_count;
- intarg_count += 4;
- break;
- }
- else
- fparg_count++;
+ fparg_count++;
/* Fall thru */
#endif
case FFI_TYPE_DOUBLE:
- /* With FFI_LINUX_SOFT_FLOAT doubles are handled like UINT64. */
- if (cif->abi == FFI_LINUX_SOFT_FLOAT)
- goto soft_double_cif;
fparg_count++;
/* If this FP arg is going on the stack, it must be
8-byte-aligned. */
@@ -763,10 +784,21 @@ ffi_prep_cif_machdep (ffi_cif *cif)
&& intarg_count % 2 != 0)
intarg_count++;
break;
+#endif
+ case FFI_TYPE_UINT128:
+ /*
+ * A long double in FFI_LINUX_SOFT_FLOAT can use only a set
+ * of four consecutive gprs. If we do not have enough, we
+ * have to adjust the intarg_count value.
+ */
+ if (intarg_count >= NUM_GPR_ARG_REGISTERS - 3
+ && intarg_count < NUM_GPR_ARG_REGISTERS)
+ intarg_count = NUM_GPR_ARG_REGISTERS;
+ intarg_count += 4;
+ break;
case FFI_TYPE_UINT64:
case FFI_TYPE_SINT64:
- soft_double_cif:
/* 'long long' arguments are passed as two words, but
either both words must fit in registers or both go
on the stack. If they go on the stack, they must
@@ -783,9 +815,6 @@ ffi_prep_cif_machdep (ffi_cif *cif)
break;
case FFI_TYPE_STRUCT:
-#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
- do_struct:
-#endif
/* We must allocate space for a copy of these to enforce
pass-by-value. Pad the space up to a multiple of 16
bytes (the maximum alignment required for anything under
@@ -793,12 +822,20 @@ ffi_prep_cif_machdep (ffi_cif *cif)
struct_copy_size += ((*ptr)->size + 15) & ~0xF;
/* Fall through (allocate space for the pointer). */
- default:
- soft_float_cif:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
/* Everything else is passed as a 4-byte word in a GPR, either
the object itself or a pointer to it. */
intarg_count++;
break;
+ default:
+ FFI_ASSERT (0);
}
}
else
@@ -827,16 +864,29 @@ ffi_prep_cif_machdep (ffi_cif *cif)
intarg_count += ((*ptr)->size + 7) / 8;
break;
- default:
+ case FFI_TYPE_POINTER:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT16:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT8:
/* Everything else is passed as a 8-byte word in a GPR, either
the object itself or a pointer to it. */
intarg_count++;
break;
+ default:
+ FFI_ASSERT (0);
}
}
+#ifndef __NO_FPRS__
if (fparg_count != 0)
flags |= FLAG_FP_ARGUMENTS;
+#endif
if (intarg_count > 4)
flags |= FLAG_4_GPR_ARGUMENTS;
if (struct_copy_size != 0)
@@ -844,21 +894,27 @@ ffi_prep_cif_machdep (ffi_cif *cif)
if (cif->abi != FFI_LINUX64)
{
+#ifndef __NO_FPRS__
/* Space for the FPR registers, if needed. */
if (fparg_count != 0)
bytes += NUM_FPR_ARG_REGISTERS * sizeof (double);
+#endif
/* Stack space. */
if (intarg_count > NUM_GPR_ARG_REGISTERS)
bytes += (intarg_count - NUM_GPR_ARG_REGISTERS) * sizeof (int);
+#ifndef __NO_FPRS__
if (fparg_count > NUM_FPR_ARG_REGISTERS)
bytes += (fparg_count - NUM_FPR_ARG_REGISTERS) * sizeof (double);
+#endif
}
else
{
+#ifndef __NO_FPRS__
/* Space for the FPR registers, if needed. */
if (fparg_count != 0)
bytes += NUM_FPR_ARG_REGISTERS64 * sizeof (double);
+#endif
/* Stack space. */
if (intarg_count > NUM_GPR_ARG_REGISTERS64)
@@ -886,28 +942,41 @@ extern void FFI_HIDDEN ffi_call_LINUX64(extended_cif *, unsigned long,
void
ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
{
+ /*
+   * The final SYSV ABI says that structures smaller than or equal to 8 bytes
+ * are returned in r3/r4. The FFI_GCC_SYSV ABI instead returns them
+ * in memory.
+ *
+ * Just to keep things simple for the assembly code, we will always
+ * bounce-buffer struct return values less than or equal to 8 bytes.
+ * This allows the ASM to handle SYSV small structures by directly
+ * writing r3 and r4 to memory without worrying about struct size.
+ */
+ unsigned int smst_buffer[2];
extended_cif ecif;
+ unsigned int rsize = 0;
ecif.cif = cif;
ecif.avalue = avalue;
- /* If the return value is a struct and we don't have a return */
- /* value address then we need to make one */
-
- if ((rvalue == NULL) && (cif->rtype->type == FFI_TYPE_STRUCT))
- {
- ecif.rvalue = alloca(cif->rtype->size);
- }
- else
- ecif.rvalue = rvalue;
-
+ /* Ensure that we have a valid struct return value */
+ ecif.rvalue = rvalue;
+ if (cif->rtype->type == FFI_TYPE_STRUCT) {
+ rsize = cif->rtype->size;
+ if (rsize <= 8)
+ ecif.rvalue = smst_buffer;
+ else if (!rvalue)
+ ecif.rvalue = alloca(rsize);
+ }
switch (cif->abi)
{
#ifndef POWERPC64
+# ifndef __NO_FPRS__
case FFI_SYSV:
case FFI_GCC_SYSV:
case FFI_LINUX:
+# endif
case FFI_LINUX_SOFT_FLOAT:
ffi_call_SYSV (&ecif, -cif->bytes, cif->flags, ecif.rvalue, fn);
break;
@@ -920,6 +989,10 @@ ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
FFI_ASSERT (0);
break;
}
+
+ /* Check for a bounce-buffered return value */
+ if (rvalue && ecif.rvalue == smst_buffer)
+ memcpy(rvalue, smst_buffer, rsize);
}
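
Caller-side, the bounce-buffering is invisible: a small aggregate return lands in the caller's own struct after the final memcpy. A minimal, hedged usage sketch; the struct, function and variable names are invented for illustration:

#include <ffi.h>

typedef struct { short a, b; } pair;      /* 4-byte struct, returned in r3 on SYSV */

static pair make_pair (int x)
{
  pair p = { (short) x, (short) (x + 1) };
  return p;
}

int call_make_pair (void)
{
  ffi_cif cif;
  ffi_type *fields[3] = { &ffi_type_sshort, &ffi_type_sshort, NULL };
  ffi_type pair_type = { 0, 0, FFI_TYPE_STRUCT, fields };
  ffi_type *argt[1] = { &ffi_type_sint };
  int x = 7;
  void *argv[1] = { &x };
  pair result;

  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 1, &pair_type, argt) != FFI_OK)
    return -1;
  /* ffi_call bounce-buffers the r3/r4 contents and copies them into 'result'. */
  ffi_call (&cif, FFI_FN (make_pair), &result, argv);
  return result.a + result.b;              /* 7 + 8 = 15 */
}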
@@ -949,14 +1022,19 @@ ffi_prep_closure_loc (ffi_closure *closure,
#ifdef POWERPC64
void **tramp = (void **) &closure->tramp[0];
- FFI_ASSERT (cif->abi == FFI_LINUX64);
+ if (cif->abi != FFI_LINUX64)
+ return FFI_BAD_ABI;
/* Copy function address and TOC from ffi_closure_LINUX64. */
memcpy (tramp, (char *) ffi_closure_LINUX64, 16);
tramp[2] = codeloc;
#else
unsigned int *tramp;
- FFI_ASSERT (cif->abi == FFI_GCC_SYSV || cif->abi == FFI_SYSV);
+ if (! (cif->abi == FFI_GCC_SYSV
+ || cif->abi == FFI_SYSV
+ || cif->abi == FFI_LINUX
+ || cif->abi == FFI_LINUX_SOFT_FLOAT))
+ return FFI_BAD_ABI;
tramp = (unsigned int *) &closure->tramp[0];
tramp[0] = 0x7c0802a6; /* mflr r0 */
@@ -1011,32 +1089,38 @@ ffi_closure_helper_SYSV (ffi_closure *closure, void *rvalue,
void ** avalue;
ffi_type ** arg_types;
long i, avn;
- long nf; /* number of floating registers already used */
- long ng; /* number of general registers already used */
- ffi_cif * cif;
- double temp;
- unsigned size;
+#ifndef __NO_FPRS__
+ long nf = 0; /* number of floating registers already used */
+#endif
+ long ng = 0; /* number of general registers already used */
+
+ ffi_cif *cif = closure->cif;
+ unsigned size = cif->rtype->size;
+ unsigned short rtypenum = cif->rtype->type;
- cif = closure->cif;
avalue = alloca (cif->nargs * sizeof (void *));
- size = cif->rtype->size;
- nf = 0;
- ng = 0;
+ /* First translate for softfloat/nonlinux */
+ if (cif->abi == FFI_LINUX_SOFT_FLOAT) {
+ if (rtypenum == FFI_TYPE_FLOAT)
+ rtypenum = FFI_TYPE_UINT32;
+ if (rtypenum == FFI_TYPE_DOUBLE)
+ rtypenum = FFI_TYPE_UINT64;
+ if (rtypenum == FFI_TYPE_LONGDOUBLE)
+ rtypenum = FFI_TYPE_UINT128;
+ } else if (cif->abi != FFI_LINUX && cif->abi != FFI_LINUX64) {
+#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
+ if (rtypenum == FFI_TYPE_LONGDOUBLE)
+ rtypenum = FFI_TYPE_STRUCT;
+#endif
+ }
+
/* Copy the caller's structure return value address so that the closure
returns the data directly to the caller.
For FFI_SYSV the result is passed in r3/r4 if the struct size is less
or equal 8 bytes. */
-
- if ((cif->rtype->type == FFI_TYPE_STRUCT
- && !((cif->abi == FFI_SYSV) && (size <= 8)))
-#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
- || (cif->rtype->type == FFI_TYPE_LONGDOUBLE
- && cif->abi != FFI_LINUX && cif->abi != FFI_LINUX_SOFT_FLOAT)
-#endif
- )
- {
+ if (rtypenum == FFI_TYPE_STRUCT && ((cif->abi != FFI_SYSV) || (size > 8))) {
rvalue = (void *) *pgr;
ng++;
pgr++;
@@ -1047,10 +1131,109 @@ ffi_closure_helper_SYSV (ffi_closure *closure, void *rvalue,
arg_types = cif->arg_types;
/* Grab the addresses of the arguments from the stack frame. */
- while (i < avn)
- {
- switch (arg_types[i]->type)
- {
+ while (i < avn) {
+ unsigned short typenum = arg_types[i]->type;
+
+ /* We may need to handle some values depending on ABI */
+ if (cif->abi == FFI_LINUX_SOFT_FLOAT) {
+ if (typenum == FFI_TYPE_FLOAT)
+ typenum = FFI_TYPE_UINT32;
+ if (typenum == FFI_TYPE_DOUBLE)
+ typenum = FFI_TYPE_UINT64;
+ if (typenum == FFI_TYPE_LONGDOUBLE)
+ typenum = FFI_TYPE_UINT128;
+ } else if (cif->abi != FFI_LINUX && cif->abi != FFI_LINUX64) {
+#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
+ if (typenum == FFI_TYPE_LONGDOUBLE)
+ typenum = FFI_TYPE_STRUCT;
+#endif
+ }
+
+ switch (typenum) {
+#ifndef __NO_FPRS__
+ case FFI_TYPE_FLOAT:
+ /* unfortunately float values are stored as doubles
+ * in the ffi_closure_SYSV code (since we don't check
+ * the type in that routine).
+ */
+
+ /* there are 8 64bit floating point registers */
+
+ if (nf < 8)
+ {
+ double temp = pfr->d;
+ pfr->f = (float) temp;
+ avalue[i] = pfr;
+ nf++;
+ pfr++;
+ }
+ else
+ {
+ /* FIXME? here we are really changing the values
+ * stored in the original calling routines outgoing
+	   * stored in the original calling routine's outgoing
+ * naughty thing to do but...
+ */
+ avalue[i] = pst;
+ pst += 1;
+ }
+ break;
+
+ case FFI_TYPE_DOUBLE:
+ /* On the outgoing stack all values are aligned to 8 */
+ /* there are 8 64bit floating point registers */
+
+ if (nf < 8)
+ {
+ avalue[i] = pfr;
+ nf++;
+ pfr++;
+ }
+ else
+ {
+ if (((long) pst) & 4)
+ pst++;
+ avalue[i] = pst;
+ pst += 2;
+ }
+ break;
+
+#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
+ case FFI_TYPE_LONGDOUBLE:
+ if (nf < 7)
+ {
+ avalue[i] = pfr;
+ pfr += 2;
+ nf += 2;
+ }
+ else
+ {
+ if (((long) pst) & 4)
+ pst++;
+ avalue[i] = pst;
+ pst += 4;
+ nf = 8;
+ }
+ break;
+#endif
+#endif /* have FPRS */
+
+ case FFI_TYPE_UINT128:
+ /*
+      * Test whether, for the whole long double, 4 GPRs are still available;
+      * otherwise the value ends up on the stack.
+ */
+ if (ng < 5) {
+ avalue[i] = pgr;
+ pgr += 4;
+ ng += 4;
+ } else {
+ avalue[i] = pst;
+ pst += 4;
+ ng = 8+4;
+ }
+ break;
+
case FFI_TYPE_SINT8:
case FFI_TYPE_UINT8:
/* there are 8 gpr registers used to pass values */
@@ -1086,7 +1269,6 @@ ffi_closure_helper_SYSV (ffi_closure *closure, void *rvalue,
case FFI_TYPE_SINT32:
case FFI_TYPE_UINT32:
case FFI_TYPE_POINTER:
- soft_float_closure:
/* there are 8 gpr registers used to pass values */
if (ng < 8)
{
@@ -1102,9 +1284,6 @@ ffi_closure_helper_SYSV (ffi_closure *closure, void *rvalue,
break;
case FFI_TYPE_STRUCT:
-#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
- do_struct:
-#endif
/* Structs are passed by reference. The address will appear in a
gpr if it is one of the first 8 arguments. */
if (ng < 8)
@@ -1122,7 +1301,6 @@ ffi_closure_helper_SYSV (ffi_closure *closure, void *rvalue,
case FFI_TYPE_SINT64:
case FFI_TYPE_UINT64:
- soft_double_closure:
/* passing long long ints are complex, they must
* be passed in suitable register pairs such as
* (r3,r4) or (r5,r6) or (r6,r7), or (r7,r8) or (r9,r10)
@@ -1154,99 +1332,8 @@ ffi_closure_helper_SYSV (ffi_closure *closure, void *rvalue,
}
break;
- case FFI_TYPE_FLOAT:
- /* With FFI_LINUX_SOFT_FLOAT floats are handled like UINT32. */
- if (cif->abi == FFI_LINUX_SOFT_FLOAT)
- goto soft_float_closure;
- /* unfortunately float values are stored as doubles
- * in the ffi_closure_SYSV code (since we don't check
- * the type in that routine).
- */
-
- /* there are 8 64bit floating point registers */
-
- if (nf < 8)
- {
- temp = pfr->d;
- pfr->f = (float) temp;
- avalue[i] = pfr;
- nf++;
- pfr++;
- }
- else
- {
- /* FIXME? here we are really changing the values
- * stored in the original calling routines outgoing
- * parameter stack. This is probably a really
- * naughty thing to do but...
- */
- avalue[i] = pst;
- pst += 1;
- }
- break;
-
- case FFI_TYPE_DOUBLE:
- /* With FFI_LINUX_SOFT_FLOAT doubles are handled like UINT64. */
- if (cif->abi == FFI_LINUX_SOFT_FLOAT)
- goto soft_double_closure;
- /* On the outgoing stack all values are aligned to 8 */
- /* there are 8 64bit floating point registers */
-
- if (nf < 8)
- {
- avalue[i] = pfr;
- nf++;
- pfr++;
- }
- else
- {
- if (((long) pst) & 4)
- pst++;
- avalue[i] = pst;
- pst += 2;
- }
- break;
-
-#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
- case FFI_TYPE_LONGDOUBLE:
- if (cif->abi != FFI_LINUX && cif->abi != FFI_LINUX_SOFT_FLOAT)
- goto do_struct;
- if (cif->abi == FFI_LINUX_SOFT_FLOAT)
- { /* Test if for the whole long double, 4 gprs are available.
- otherwise the stuff ends up on the stack. */
- if (ng < 5)
- {
- avalue[i] = pgr;
- pgr += 4;
- ng += 4;
- }
- else
- {
- avalue[i] = pst;
- pst += 4;
- ng = 8;
- }
- break;
- }
- if (nf < 7)
- {
- avalue[i] = pfr;
- pfr += 2;
- nf += 2;
- }
- else
- {
- if (((long) pst) & 4)
- pst++;
- avalue[i] = pst;
- pst += 4;
- nf = 8;
- }
- break;
-#endif
-
default:
- FFI_ASSERT (0);
+ FFI_ASSERT (0);
}
i++;
@@ -1263,39 +1350,9 @@ ffi_closure_helper_SYSV (ffi_closure *closure, void *rvalue,
already used and we never have a struct with size zero. That is the reason
for the subtraction of 1. See the comment in ffitarget.h about ordering.
*/
- if (cif->abi == FFI_SYSV && cif->rtype->type == FFI_TYPE_STRUCT
- && size <= 8)
+ if (cif->abi == FFI_SYSV && rtypenum == FFI_TYPE_STRUCT && size <= 8)
return (FFI_SYSV_TYPE_SMALL_STRUCT - 1) + size;
-#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
- else if (cif->rtype->type == FFI_TYPE_LONGDOUBLE
- && cif->abi != FFI_LINUX && cif->abi != FFI_LINUX_SOFT_FLOAT)
- return FFI_TYPE_STRUCT;
-#endif
- /* With FFI_LINUX_SOFT_FLOAT floats and doubles are handled like UINT32
- respectivley UINT64. */
- if (cif->abi == FFI_LINUX_SOFT_FLOAT)
- {
- switch (cif->rtype->type)
- {
- case FFI_TYPE_FLOAT:
- return FFI_TYPE_UINT32;
- break;
- case FFI_TYPE_DOUBLE:
- return FFI_TYPE_UINT64;
- break;
-#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
- case FFI_TYPE_LONGDOUBLE:
- return FFI_TYPE_UINT128;
- break;
-#endif
- default:
- return cif->rtype->type;
- }
- }
- else
- {
- return cif->rtype->type;
- }
+ return rtypenum;
}
int FFI_HIDDEN ffi_closure_helper_LINUX64 (ffi_closure *, void *,
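
The soft-float translation applied above (once for the return type and once per argument) amounts to a single width-preserving mapping; a hedged restatement, assuming the FFI_TYPE_* constants from ffi.h and the PowerPC-specific FFI_TYPE_UINT128 from ffitarget.h:

#include <ffi.h>

static unsigned short
softfloat_typenum (unsigned short t)
{
  switch (t)
    {
    case FFI_TYPE_FLOAT:       return FFI_TYPE_UINT32;
    case FFI_TYPE_DOUBLE:      return FFI_TYPE_UINT64;
#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
    case FFI_TYPE_LONGDOUBLE:  return FFI_TYPE_UINT128;
#endif
    default:                   return t;
    }
}
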
diff --git a/Modules/_ctypes/libffi/src/powerpc/ffi_darwin.c b/Modules/_ctypes/libffi/src/powerpc/ffi_darwin.c
index d84f1c3..1d1d48c 100644
--- a/Modules/_ctypes/libffi/src/powerpc/ffi_darwin.c
+++ b/Modules/_ctypes/libffi/src/powerpc/ffi_darwin.c
@@ -3,7 +3,7 @@
Copyright (C) 1998 Geoffrey Keating
Copyright (C) 2001 John Hornkvist
- Copyright (C) 2002, 2006, 2007, 2009 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2006, 2007, 2009, 2010 Free Software Foundation, Inc.
FFI support for Darwin and AIX.
@@ -35,11 +35,17 @@
extern void ffi_closure_ASM (void);
enum {
- /* The assembly depends on these exact flags. */
- FLAG_RETURNS_NOTHING = 1 << (31-30), /* These go in cr7 */
- FLAG_RETURNS_FP = 1 << (31-29),
- FLAG_RETURNS_64BITS = 1 << (31-28),
- FLAG_RETURNS_128BITS = 1 << (31-31),
+ /* The assembly depends on these exact flags.
+ For Darwin64 (when FLAG_RETURNS_STRUCT is set):
+ FLAG_RETURNS_FP indicates that the structure embeds FP data.
+ FLAG_RETURNS_128BITS signals a special struct size that is not
+ expanded for float content. */
+ FLAG_RETURNS_128BITS = 1 << (31-31), /* These go in cr7 */
+ FLAG_RETURNS_NOTHING = 1 << (31-30),
+ FLAG_RETURNS_FP = 1 << (31-29),
+ FLAG_RETURNS_64BITS = 1 << (31-28),
+
+ FLAG_RETURNS_STRUCT = 1 << (31-27), /* This goes in cr6 */
FLAG_ARG_NEEDS_COPY = 1 << (31- 7),
FLAG_FP_ARGUMENTS = 1 << (31- 6), /* cr1.eq; specified by ABI */
@@ -50,43 +56,61 @@ enum {
/* About the DARWIN ABI. */
enum {
NUM_GPR_ARG_REGISTERS = 8,
- NUM_FPR_ARG_REGISTERS = 13
+ NUM_FPR_ARG_REGISTERS = 13,
+ LINKAGE_AREA_GPRS = 6
};
-enum { ASM_NEEDS_REGISTERS = 4 };
+
+enum { ASM_NEEDS_REGISTERS = 4 }; /* r28-r31 */
/* ffi_prep_args is called by the assembly routine once stack space
has been allocated for the function's arguments.
+
+ m32/m64
The stack layout we want looks like this:
| Return address from ffi_call_DARWIN | higher addresses
|--------------------------------------------|
- | Previous backchain pointer 4 | stack pointer here
+ | Previous backchain pointer 4/8 | stack pointer here
|--------------------------------------------|<+ <<< on entry to
- | Saved r28-r31 4*4 | | ffi_call_DARWIN
+ | ASM_NEEDS_REGISTERS=r28-r31 4*(4/8) | | ffi_call_DARWIN
|--------------------------------------------| |
- | Parameters (at least 8*4=32) | |
+ | When we have any FP activity... the | |
+ | FPRs occupy NUM_FPR_ARG_REGISTERS slots | |
+ | here fp13 .. fp1 from high to low addr. | |
+ ~ ~ ~
+ | Parameters (at least 8*4/8=32/64) | | NUM_GPR_ARG_REGISTERS
|--------------------------------------------| |
- | Space for GPR2 4 | |
+ | TOC=R2 (AIX) Reserved (Darwin) 4/8 | |
|--------------------------------------------| | stack |
- | Reserved 2*4 | | grows |
+ | Reserved 2*4/8 | | grows |
|--------------------------------------------| | down V
- | Space for callee's LR 4 | |
+ | Space for callee's LR 4/8 | |
|--------------------------------------------| | lower addresses
- | Saved CR 4 | |
+ | Saved CR [low word for m64] 4/8 | |
|--------------------------------------------| | stack pointer here
- | Current backchain pointer 4 |-/ during
+ | Current backchain pointer 4/8 |-/ during
|--------------------------------------------| <<< ffi_call_DARWIN
*/
+#if defined(POWERPC_DARWIN64)
+static void
+darwin64_pass_struct_by_value
+ (ffi_type *, char *, unsigned, unsigned *, double **, unsigned long **);
+#endif
+
+/* This depends on GPR_SIZE = sizeof (unsigned long) */
+
void
ffi_prep_args (extended_cif *ecif, unsigned long *const stack)
{
const unsigned bytes = ecif->cif->bytes;
const unsigned flags = ecif->cif->flags;
const unsigned nargs = ecif->cif->nargs;
+#if !defined(POWERPC_DARWIN64)
const ffi_abi abi = ecif->cif->abi;
+#endif
/* 'stacktop' points at the previous backchain pointer. */
unsigned long *const stacktop = stack + (bytes / sizeof(unsigned long));
@@ -94,18 +118,19 @@ ffi_prep_args (extended_cif *ecif, unsigned long *const stack)
/* 'fpr_base' points at the space for fpr1, and grows upwards as
we use FPR registers. */
double *fpr_base = (double *) (stacktop - ASM_NEEDS_REGISTERS) - NUM_FPR_ARG_REGISTERS;
- int fparg_count = 0;
-
+ int gp_count = 0, fparg_count = 0;
/* 'next_arg' grows up as we put parameters in it. */
- unsigned long *next_arg = stack + 6; /* 6 reserved positions. */
+ unsigned long *next_arg = stack + LINKAGE_AREA_GPRS; /* 6 reserved positions. */
int i;
double double_tmp;
void **p_argv = ecif->avalue;
unsigned long gprvalue;
ffi_type** ptr = ecif->cif->arg_types;
+#if !defined(POWERPC_DARWIN64)
char *dest_cpy;
+#endif
unsigned size_al = 0;
/* Check that everything starts aligned properly. */
@@ -130,25 +155,30 @@ ffi_prep_args (extended_cif *ecif, unsigned long *const stack)
the size of the floating-point parameter are skipped. */
case FFI_TYPE_FLOAT:
double_tmp = *(float *) *p_argv;
- if (fparg_count >= NUM_FPR_ARG_REGISTERS)
- *(double *)next_arg = double_tmp;
- else
+ if (fparg_count < NUM_FPR_ARG_REGISTERS)
*fpr_base++ = double_tmp;
+#if defined(POWERPC_DARWIN)
+ *(float *)next_arg = *(float *) *p_argv;
+#else
+ *(double *)next_arg = double_tmp;
+#endif
next_arg++;
+ gp_count++;
fparg_count++;
FFI_ASSERT(flags & FLAG_FP_ARGUMENTS);
break;
case FFI_TYPE_DOUBLE:
double_tmp = *(double *) *p_argv;
- if (fparg_count >= NUM_FPR_ARG_REGISTERS)
- *(double *)next_arg = double_tmp;
- else
+ if (fparg_count < NUM_FPR_ARG_REGISTERS)
*fpr_base++ = double_tmp;
+ *(double *)next_arg = double_tmp;
#ifdef POWERPC64
next_arg++;
+ gp_count++;
#else
next_arg += 2;
+ gp_count += 2;
#endif
fparg_count++;
FFI_ASSERT(flags & FLAG_FP_ARGUMENTS);
@@ -157,30 +187,41 @@ ffi_prep_args (extended_cif *ecif, unsigned long *const stack)
#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
case FFI_TYPE_LONGDOUBLE:
-#ifdef POWERPC64
+# if defined(POWERPC64) && !defined(POWERPC_DARWIN64)
+ /* ??? This will exceed the regs count when the value starts at fp13
+ and it will not put the extra bit on the stack. */
if (fparg_count < NUM_FPR_ARG_REGISTERS)
*(long double *) fpr_base++ = *(long double *) *p_argv;
else
*(long double *) next_arg = *(long double *) *p_argv;
next_arg += 2;
fparg_count += 2;
-#else
+# else
double_tmp = ((double *) *p_argv)[0];
if (fparg_count < NUM_FPR_ARG_REGISTERS)
*fpr_base++ = double_tmp;
- else
- *(double *) next_arg = double_tmp;
+ *(double *) next_arg = double_tmp;
+# if defined(POWERPC_DARWIN64)
+ next_arg++;
+ gp_count++;
+# else
next_arg += 2;
+ gp_count += 2;
+# endif
fparg_count++;
-
double_tmp = ((double *) *p_argv)[1];
if (fparg_count < NUM_FPR_ARG_REGISTERS)
*fpr_base++ = double_tmp;
- else
- *(double *) next_arg = double_tmp;
+ *(double *) next_arg = double_tmp;
+# if defined(POWERPC_DARWIN64)
+ next_arg++;
+ gp_count++;
+# else
next_arg += 2;
+ gp_count += 2;
+# endif
fparg_count++;
-#endif
+# endif
FFI_ASSERT(flags & FLAG_FP_ARGUMENTS);
break;
#endif
@@ -192,6 +233,7 @@ ffi_prep_args (extended_cif *ecif, unsigned long *const stack)
#else
*(long long *) next_arg = *(long long *) *p_argv;
next_arg += 2;
+ gp_count += 2;
#endif
break;
case FFI_TYPE_POINTER:
@@ -211,32 +253,35 @@ ffi_prep_args (extended_cif *ecif, unsigned long *const stack)
goto putgpr;
case FFI_TYPE_STRUCT:
-#ifdef POWERPC64
- dest_cpy = (char *) next_arg;
size_al = (*ptr)->size;
- if ((*ptr)->elements[0]->type == 3)
+#if defined(POWERPC_DARWIN64)
+ next_arg = (unsigned long *)ALIGN((char *)next_arg, (*ptr)->alignment);
+ darwin64_pass_struct_by_value (*ptr, (char *) *p_argv,
+ (unsigned) size_al,
+ (unsigned int *) &fparg_count,
+ &fpr_base, &next_arg);
+#else
+ dest_cpy = (char *) next_arg;
+
+ /* If the first member of the struct is a double, then include enough
+ padding in the struct size to align it to double-word. */
+ if ((*ptr)->elements[0]->type == FFI_TYPE_DOUBLE)
size_al = ALIGN((*ptr)->size, 8);
- if (size_al < 3 && abi == FFI_DARWIN)
- dest_cpy += 4 - size_al;
+# if defined(POWERPC64)
+ FFI_ASSERT (abi != FFI_DARWIN);
memcpy ((char *) dest_cpy, (char *) *p_argv, size_al);
next_arg += (size_al + 7) / 8;
-#else
- dest_cpy = (char *) next_arg;
-
+# else
/* Structures that match the basic modes (QI 1 byte, HI 2 bytes,
SI 4 bytes) are aligned as if they were those modes.
Structures with 3 byte in size are padded upwards. */
- size_al = (*ptr)->size;
- /* If the first member of the struct is a double, then align
- the struct to double-word. */
- if ((*ptr)->elements[0]->type == FFI_TYPE_DOUBLE)
- size_al = ALIGN((*ptr)->size, 8);
if (size_al < 3 && abi == FFI_DARWIN)
dest_cpy += 4 - size_al;
memcpy((char *) dest_cpy, (char *) *p_argv, size_al);
next_arg += (size_al + 3) / 4;
+# endif
#endif
break;
@@ -249,6 +294,7 @@ ffi_prep_args (extended_cif *ecif, unsigned long *const stack)
gprvalue = *(unsigned int *) *p_argv;
putgpr:
*next_arg++ = gprvalue;
+ gp_count++;
break;
default:
break;
@@ -256,14 +302,275 @@ ffi_prep_args (extended_cif *ecif, unsigned long *const stack)
}
/* Check that we didn't overrun the stack... */
- //FFI_ASSERT(gpr_base <= stacktop - ASM_NEEDS_REGISTERS);
- //FFI_ASSERT((unsigned *)fpr_base
- // <= stacktop - ASM_NEEDS_REGISTERS - NUM_GPR_ARG_REGISTERS);
- //FFI_ASSERT(flags & FLAG_4_GPR_ARGUMENTS || intarg_count <= 4);
+ /* FFI_ASSERT(gpr_base <= stacktop - ASM_NEEDS_REGISTERS);
+ FFI_ASSERT((unsigned *)fpr_base
+ <= stacktop - ASM_NEEDS_REGISTERS - NUM_GPR_ARG_REGISTERS);
+ FFI_ASSERT(flags & FLAG_4_GPR_ARGUMENTS || intarg_count <= 4); */
}
+#if defined(POWERPC_DARWIN64)
+
+/* See if we can put some of the struct into fprs.
+ This should not be called for structures of size 16 bytes, since these are not
+ broken out this way. */
+static void
+darwin64_scan_struct_for_floats (ffi_type *s, unsigned *nfpr)
+{
+ int i;
+
+  FFI_ASSERT (s->type == FFI_TYPE_STRUCT);
+
+ for (i = 0; s->elements[i] != NULL; i++)
+ {
+ ffi_type *p = s->elements[i];
+ switch (p->type)
+ {
+ case FFI_TYPE_STRUCT:
+ darwin64_scan_struct_for_floats (p, nfpr);
+ break;
+ case FFI_TYPE_LONGDOUBLE:
+ (*nfpr) += 2;
+ break;
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_FLOAT:
+ (*nfpr) += 1;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int
+darwin64_struct_size_exceeds_gprs_p (ffi_type *s, char *src, unsigned *nfpr)
+{
+ unsigned struct_offset=0, i;
+
+ for (i = 0; s->elements[i] != NULL; i++)
+ {
+ char *item_base;
+ ffi_type *p = s->elements[i];
+ /* Find the start of this item (0 for the first one). */
+ if (i > 0)
+ struct_offset = ALIGN(struct_offset, p->alignment);
+
+ item_base = src + struct_offset;
+
+ switch (p->type)
+ {
+ case FFI_TYPE_STRUCT:
+ if (darwin64_struct_size_exceeds_gprs_p (p, item_base, nfpr))
+ return 1;
+ break;
+ case FFI_TYPE_LONGDOUBLE:
+ if (*nfpr >= NUM_FPR_ARG_REGISTERS)
+ return 1;
+ (*nfpr) += 1;
+ item_base += 8;
+ /* FALL THROUGH */
+ case FFI_TYPE_DOUBLE:
+ if (*nfpr >= NUM_FPR_ARG_REGISTERS)
+ return 1;
+ (*nfpr) += 1;
+ break;
+ case FFI_TYPE_FLOAT:
+ if (*nfpr >= NUM_FPR_ARG_REGISTERS)
+ return 1;
+ (*nfpr) += 1;
+ break;
+ default:
+ /* If we try and place any item, that is non-float, once we've
+ exceeded the 8 GPR mark, then we can't fit the struct. */
+ if ((unsigned long)item_base >= 8*8)
+ return 1;
+ break;
+ }
+ /* now count the size of what we just used. */
+ struct_offset += p->size;
+ }
+ return 0;
+}
+
+/* Can this struct be returned by value? */
+int
+darwin64_struct_ret_by_value_p (ffi_type *s)
+{
+ unsigned nfp = 0;
+
+ FFI_ASSERT (s && s->type == FFI_TYPE_STRUCT);
+
+  /* The largest structure we can return is 8 longs + 13 doubles. */
+ if (s->size > 168)
+ return 0;
+
+ /* We can't pass more than 13 floats. */
+ darwin64_scan_struct_for_floats (s, &nfp);
+ if (nfp > 13)
+ return 0;
+
+ /* If there are not too many floats, and the struct is
+ small enough to accommodate in the GPRs, then it must be OK. */
+ if (s->size <= 64)
+ return 1;
+
+ /* Well, we have to look harder. */
+ nfp = 0;
+ if (darwin64_struct_size_exceeds_gprs_p (s, NULL, &nfp))
+ return 0;
+
+ return 1;
+}
+
+void
+darwin64_pass_struct_floats (ffi_type *s, char *src,
+ unsigned *nfpr, double **fprs)
+{
+ int i;
+ double *fpr_base = *fprs;
+ unsigned struct_offset = 0;
+
+ /* We don't assume anything about the alignment of the source. */
+ for (i = 0; s->elements[i] != NULL; i++)
+ {
+ char *item_base;
+ ffi_type *p = s->elements[i];
+ /* Find the start of this item (0 for the first one). */
+ if (i > 0)
+ struct_offset = ALIGN(struct_offset, p->alignment);
+ item_base = src + struct_offset;
+
+ switch (p->type)
+ {
+ case FFI_TYPE_STRUCT:
+ darwin64_pass_struct_floats (p, item_base, nfpr,
+ &fpr_base);
+ break;
+ case FFI_TYPE_LONGDOUBLE:
+ if (*nfpr < NUM_FPR_ARG_REGISTERS)
+ *fpr_base++ = *(double *)item_base;
+ (*nfpr) += 1;
+ item_base += 8;
+ /* FALL THROUGH */
+ case FFI_TYPE_DOUBLE:
+ if (*nfpr < NUM_FPR_ARG_REGISTERS)
+ *fpr_base++ = *(double *)item_base;
+ (*nfpr) += 1;
+ break;
+ case FFI_TYPE_FLOAT:
+ if (*nfpr < NUM_FPR_ARG_REGISTERS)
+ *fpr_base++ = (double) *(float *)item_base;
+ (*nfpr) += 1;
+ break;
+ default:
+ break;
+ }
+ /* now count the size of what we just used. */
+ struct_offset += p->size;
+ }
+ /* Update the scores. */
+ *fprs = fpr_base;
+}
+
+/* Darwin64 special rules.
+ Break out a struct into params and float registers. */
+static void
+darwin64_pass_struct_by_value (ffi_type *s, char *src, unsigned size,
+ unsigned *nfpr, double **fprs, unsigned long **arg)
+{
+ unsigned long *next_arg = *arg;
+ char *dest_cpy = (char *)next_arg;
+
+  FFI_ASSERT (s->type == FFI_TYPE_STRUCT);
+
+ if (!size)
+ return;
+
+ /* First... special cases. */
+ if (size < 3
+ || (size == 4
+ && s->elements[0]
+ && s->elements[0]->type != FFI_TYPE_FLOAT))
+ {
+ /* Must be at least one GPR, padding is unspecified in value,
+ let's make it zero. */
+ *next_arg = 0UL;
+ dest_cpy += 8 - size;
+ memcpy ((char *) dest_cpy, src, size);
+ next_arg++;
+ }
+ else if (size == 16)
+ {
+ memcpy ((char *) dest_cpy, src, size);
+ next_arg += 2;
+ }
+ else
+ {
+ /* now the general case, we consider embedded floats. */
+ memcpy ((char *) dest_cpy, src, size);
+ darwin64_pass_struct_floats (s, src, nfpr, fprs);
+ next_arg += (size+7)/8;
+ }
+
+ *arg = next_arg;
+}
+
+double *
+darwin64_struct_floats_to_mem (ffi_type *s, char *dest, double *fprs, unsigned *nf)
+{
+ int i;
+ unsigned struct_offset = 0;
+
+ /* We don't assume anything about the alignment of the source. */
+ for (i = 0; s->elements[i] != NULL; i++)
+ {
+ char *item_base;
+ ffi_type *p = s->elements[i];
+ /* Find the start of this item (0 for the first one). */
+ if (i > 0)
+ struct_offset = ALIGN(struct_offset, p->alignment);
+ item_base = dest + struct_offset;
+
+ switch (p->type)
+ {
+ case FFI_TYPE_STRUCT:
+ fprs = darwin64_struct_floats_to_mem (p, item_base, fprs, nf);
+ break;
+ case FFI_TYPE_LONGDOUBLE:
+ if (*nf < NUM_FPR_ARG_REGISTERS)
+ {
+ *(double *)item_base = *fprs++ ;
+ (*nf) += 1;
+ }
+ item_base += 8;
+ /* FALL THROUGH */
+ case FFI_TYPE_DOUBLE:
+ if (*nf < NUM_FPR_ARG_REGISTERS)
+ {
+ *(double *)item_base = *fprs++ ;
+ (*nf) += 1;
+ }
+ break;
+ case FFI_TYPE_FLOAT:
+ if (*nf < NUM_FPR_ARG_REGISTERS)
+ {
+ *(float *)item_base = (float) *fprs++ ;
+ (*nf) += 1;
+ }
+ break;
+ default:
+ break;
+ }
+ /* now count the size of what we just used. */
+ struct_offset += p->size;
+ }
+ return fprs;
+}
+
+#endif
+
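
A hedged summary of the Darwin64 return-by-value limits that the helpers above implement (8 GPRs plus 13 FPRs of 8 bytes each gives the 168-byte ceiling); the predicate below is only an approximation of darwin64_struct_ret_by_value_p and omits the per-member walk needed for structs over 64 bytes:

#include <ffi.h>

static int
darwin64_fits_return_regs (const ffi_type *s, unsigned fp_members)
{
  /* 8 GPRs of 8 bytes plus 13 FPRs of 8 bytes = 168 bytes at most. */
  if (s->size > 168 || fp_members > 13)
    return 0;
  /* Up to 64 bytes always fits the GPR image; larger structs additionally
     need the darwin64_struct_size_exceeds_gprs_p() walk shown above. */
  return s->size <= 64;
}
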
/* Adjust the size of S to be correct for Darwin.
- On Darwin, the first field of a structure has natural alignment. */
+ On Darwin m32, the first field of a structure has natural alignment.
+ On Darwin m64, all fields have natural alignment. */
static void
darwin_adjust_aggregate_sizes (ffi_type *s)
@@ -280,22 +587,29 @@ darwin_adjust_aggregate_sizes (ffi_type *s)
int align;
p = s->elements[i];
- darwin_adjust_aggregate_sizes (p);
- if (i == 0
- && (p->type == FFI_TYPE_UINT64
- || p->type == FFI_TYPE_SINT64
- || p->type == FFI_TYPE_DOUBLE
- || p->alignment == 8))
- align = 8;
+ if (p->type == FFI_TYPE_STRUCT)
+ darwin_adjust_aggregate_sizes (p);
+#if defined(POWERPC_DARWIN64)
+ /* Natural alignment for all items. */
+ align = p->alignment;
+#else
+      /* Natural alignment for the first item... */
+ if (i == 0)
+ align = p->alignment;
else if (p->alignment == 16 || p->alignment < 4)
+ /* .. subsequent items with vector or align < 4 have natural align. */
align = p->alignment;
else
+ /* .. or align is 4. */
align = 4;
+#endif
+ /* Pad, if necessary, before adding the current item. */
s->size = ALIGN(s->size, align) + p->size;
}
s->size = ALIGN(s->size, s->alignment);
+ /* This should not be necessary on m64, but harmless. */
if (s->elements[0]->type == FFI_TYPE_UINT64
|| s->elements[0]->type == FFI_TYPE_SINT64
|| s->elements[0]->type == FFI_TYPE_DOUBLE
@@ -344,10 +658,10 @@ ffi_status
ffi_prep_cif_machdep (ffi_cif *cif)
{
/* All this is for the DARWIN ABI. */
- int i;
+ unsigned i;
ffi_type **ptr;
unsigned bytes;
- int fparg_count = 0, intarg_count = 0;
+ unsigned fparg_count = 0, intarg_count = 0;
unsigned flags = 0;
unsigned size_al = 0;
@@ -372,16 +686,25 @@ ffi_prep_cif_machdep (ffi_cif *cif)
/* Space for the frame pointer, callee's LR, CR, etc, and for
the asm's temp regs. */
- bytes = (6 + ASM_NEEDS_REGISTERS) * sizeof(long);
+ bytes = (LINKAGE_AREA_GPRS + ASM_NEEDS_REGISTERS) * sizeof(unsigned long);
- /* Return value handling. The rules are as follows:
+ /* Return value handling.
+ The rules m32 are as follows:
- 32-bit (or less) integer values are returned in gpr3;
- - Structures of size <= 4 bytes also returned in gpr3;
- - 64-bit integer values and structures between 5 and 8 bytes are returned
- in gpr3 and gpr4;
+ - structures of size <= 4 bytes also returned in gpr3;
+ - 64-bit integer values [??? and structures between 5 and 8 bytes] are
+ returned in gpr3 and gpr4;
- Single/double FP values are returned in fpr1;
- Long double FP (if not equivalent to double) values are returned in
fpr1 and fpr2;
+ m64:
+ - 64-bit or smaller integral values are returned in GPR3
+ - Single/double FP values are returned in fpr1;
+ - Long double FP values are returned in fpr1 and fpr2;
+ m64 Structures:
+ - If the structure could be accommodated in registers were it to be the
+ first argument to a routine, then it is returned in those registers.
+ m32/m64 structures otherwise:
- Larger structures values are allocated space and a pointer is passed
as the first argument. */
switch (cif->rtype->type)
@@ -410,9 +733,42 @@ ffi_prep_cif_machdep (ffi_cif *cif)
break;
case FFI_TYPE_STRUCT:
+#if defined(POWERPC_DARWIN64)
+ {
+ /* Can we fit the struct into regs? */
+ if (darwin64_struct_ret_by_value_p (cif->rtype))
+ {
+ unsigned nfpr = 0;
+ flags |= FLAG_RETURNS_STRUCT;
+ if (cif->rtype->size != 16)
+ darwin64_scan_struct_for_floats (cif->rtype, &nfpr) ;
+ else
+ flags |= FLAG_RETURNS_128BITS;
+ /* Will be 0 for 16byte struct. */
+ if (nfpr)
+ flags |= FLAG_RETURNS_FP;
+ }
+ else /* By ref. */
+ {
+ flags |= FLAG_RETVAL_REFERENCE;
+ flags |= FLAG_RETURNS_NOTHING;
+ intarg_count++;
+ }
+ }
+#elif defined(DARWIN_PPC)
+ if (cif->rtype->size <= 4)
+ flags |= FLAG_RETURNS_STRUCT;
+ else /* else by reference. */
+ {
+ flags |= FLAG_RETVAL_REFERENCE;
+ flags |= FLAG_RETURNS_NOTHING;
+ intarg_count++;
+ }
+#else /* assume we pass by ref. */
flags |= FLAG_RETVAL_REFERENCE;
flags |= FLAG_RETURNS_NOTHING;
intarg_count++;
+#endif
break;
case FFI_TYPE_VOID:
flags |= FLAG_RETURNS_NOTHING;
@@ -425,57 +781,83 @@ ffi_prep_cif_machdep (ffi_cif *cif)
/* The first NUM_GPR_ARG_REGISTERS words of integer arguments, and the
first NUM_FPR_ARG_REGISTERS fp arguments, go in registers; the rest
- goes on the stack. Structures are passed as a pointer to a copy of
- the structure. Stuff on the stack needs to keep proper alignment. */
+ goes on the stack.
+ ??? Structures are passed as a pointer to a copy of the structure.
+ Stuff on the stack needs to keep proper alignment.
+ For m64 the count is effectively of half-GPRs. */
for (ptr = cif->arg_types, i = cif->nargs; i > 0; i--, ptr++)
{
+ unsigned align_words;
switch ((*ptr)->type)
{
case FFI_TYPE_FLOAT:
case FFI_TYPE_DOUBLE:
fparg_count++;
+#if !defined(POWERPC_DARWIN64)
/* If this FP arg is going on the stack, it must be
8-byte-aligned. */
if (fparg_count > NUM_FPR_ARG_REGISTERS
- && intarg_count%2 != 0)
+ && (intarg_count & 0x01) != 0)
intarg_count++;
+#endif
break;
#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
-
case FFI_TYPE_LONGDOUBLE:
fparg_count += 2;
/* If this FP arg is going on the stack, it must be
- 8-byte-aligned. */
- if (fparg_count > NUM_FPR_ARG_REGISTERS
- && intarg_count%2 != 0)
- intarg_count++;
- intarg_count +=2;
+ 16-byte-aligned. */
+ if (fparg_count >= NUM_FPR_ARG_REGISTERS)
+#if defined (POWERPC64)
+ intarg_count = ALIGN(intarg_count, 2);
+#else
+ intarg_count = ALIGN(intarg_count, 4);
+#endif
break;
#endif
case FFI_TYPE_UINT64:
case FFI_TYPE_SINT64:
+#if defined(POWERPC64)
+ intarg_count++;
+#else
/* 'long long' arguments are passed as two words, but
either both words must fit in registers or both go
on the stack. If they go on the stack, they must
be 8-byte-aligned. */
if (intarg_count == NUM_GPR_ARG_REGISTERS-1
- || (intarg_count >= NUM_GPR_ARG_REGISTERS && intarg_count%2 != 0))
+ || (intarg_count >= NUM_GPR_ARG_REGISTERS
+ && (intarg_count & 0x01) != 0))
intarg_count++;
intarg_count += 2;
+#endif
break;
case FFI_TYPE_STRUCT:
size_al = (*ptr)->size;
+#if defined(POWERPC_DARWIN64)
+ align_words = (*ptr)->alignment >> 3;
+ if (align_words)
+ intarg_count = ALIGN(intarg_count, align_words);
+ /* Base size of the struct. */
+ intarg_count += (size_al + 7) / 8;
+ /* If 16 bytes then don't worry about floats. */
+ if (size_al != 16)
+ /* Scan through for floats to be placed in regs. */
+ darwin64_scan_struct_for_floats (*ptr, &fparg_count) ;
+#else
+ align_words = (*ptr)->alignment >> 2;
+ if (align_words)
+ intarg_count = ALIGN(intarg_count, align_words);
/* If the first member of the struct is a double, then align
- the struct to double-word. */
+ the struct to double-word.
if ((*ptr)->elements[0]->type == FFI_TYPE_DOUBLE)
- size_al = ALIGN((*ptr)->size, 8);
-#ifdef POWERPC64
+ size_al = ALIGN((*ptr)->size, 8); */
+# ifdef POWERPC64
intarg_count += (size_al + 7) / 8;
-#else
+# else
intarg_count += (size_al + 3) / 4;
+# endif
#endif
break;
@@ -490,9 +872,18 @@ ffi_prep_cif_machdep (ffi_cif *cif)
if (fparg_count != 0)
flags |= FLAG_FP_ARGUMENTS;
+#if defined(POWERPC_DARWIN64)
+ /* Space to image the FPR registers, if needed - which includes when they might be
+ used in a struct return. */
+ if (fparg_count != 0
+ || ((flags & FLAG_RETURNS_STRUCT)
+ && (flags & FLAG_RETURNS_FP)))
+ bytes += NUM_FPR_ARG_REGISTERS * sizeof(double);
+#else
/* Space for the FPR registers, if needed. */
if (fparg_count != 0)
bytes += NUM_FPR_ARG_REGISTERS * sizeof(double);
+#endif
/* Stack space. */
#ifdef POWERPC64
@@ -506,7 +897,7 @@ ffi_prep_cif_machdep (ffi_cif *cif)
bytes += NUM_GPR_ARG_REGISTERS * sizeof(long);
/* The stack space allocated needs to be a multiple of 16 bytes. */
- bytes = (bytes + 15) & ~0xF;
+ bytes = ALIGN(bytes, 16) ;
cif->flags = flags;
cif->bytes = bytes;
@@ -516,8 +907,9 @@ ffi_prep_cif_machdep (ffi_cif *cif)
extern void ffi_call_AIX(extended_cif *, long, unsigned, unsigned *,
void (*fn)(void), void (*fn2)(void));
+
extern void ffi_call_DARWIN(extended_cif *, long, unsigned, unsigned *,
- void (*fn)(void), void (*fn2)(void));
+ void (*fn)(void), void (*fn2)(void), ffi_type*);
void
ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
@@ -542,11 +934,11 @@ ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
{
case FFI_AIX:
ffi_call_AIX(&ecif, -(long)cif->bytes, cif->flags, ecif.rvalue, fn,
- ffi_prep_args);
+ FFI_FN(ffi_prep_args));
break;
case FFI_DARWIN:
ffi_call_DARWIN(&ecif, -(long)cif->bytes, cif->flags, ecif.rvalue, fn,
- ffi_prep_args);
+ FFI_FN(ffi_prep_args), cif->rtype);
break;
default:
FFI_ASSERT(0);
@@ -566,58 +958,48 @@ typedef struct aix_fd_struct {
} aix_fd;
/* here I'd like to add the stack frame layout we use in darwin_closure.S
- and aix_clsoure.S
-
- SP previous -> +---------------------------------------+ <--- child frame
- | back chain to caller 4 |
- +---------------------------------------+ 4
- | saved CR 4 |
- +---------------------------------------+ 8
- | saved LR 4 |
- +---------------------------------------+ 12
- | reserved for compilers 4 |
- +---------------------------------------+ 16
- | reserved for binders 4 |
- +---------------------------------------+ 20
- | saved TOC pointer 4 |
- +---------------------------------------+ 24
- | always reserved 8*4=32 (previous GPRs)|
- | according to the linkage convention |
- | from AIX |
- +---------------------------------------+ 56
- | our FPR area 13*8=104 |
- | f1 |
- | . |
- | f13 |
- +---------------------------------------+ 160
- | result area 8 |
- +---------------------------------------+ 168
- | alignement to the next multiple of 16 |
-SP current --> +---------------------------------------+ 176 <- parent frame
- | back chain to caller 4 |
- +---------------------------------------+ 180
- | saved CR 4 |
- +---------------------------------------+ 184
- | saved LR 4 |
- +---------------------------------------+ 188
- | reserved for compilers 4 |
- +---------------------------------------+ 192
- | reserved for binders 4 |
- +---------------------------------------+ 196
- | saved TOC pointer 4 |
- +---------------------------------------+ 200
- | always reserved 8*4=32 we store our |
- | GPRs here |
- | r3 |
- | . |
- | r10 |
- +---------------------------------------+ 232
- | overflow part |
- +---------------------------------------+ xxx
- | ???? |
- +---------------------------------------+ xxx
+ and aix_closure.S
+
+ m32/m64
+
+ The stack layout looks like this:
+
+ | Additional params... | | Higher address
+ ~ ~ ~
+ | Parameters (at least 8*4/8=32/64) | | NUM_GPR_ARG_REGISTERS
+ |--------------------------------------------| |
+ | TOC=R2 (AIX) Reserved (Darwin) 4/8 | |
+ |--------------------------------------------| |
+ | Reserved 2*4/8 | |
+ |--------------------------------------------| |
+ | Space for callee's LR 4/8 | |
+ |--------------------------------------------| |
+ | Saved CR [low word for m64] 4/8 | |
+ |--------------------------------------------| |
+ | Current backchain pointer 4/8 |-/ Parent's frame.
+ |--------------------------------------------| <+ <<< on entry to ffi_closure_ASM
+ | Result Bytes 16 | |
+ |--------------------------------------------| |
+ ~ padding to 16-byte alignment ~ ~
+ |--------------------------------------------| |
+ | NUM_FPR_ARG_REGISTERS slots | |
+ | here fp13 .. fp1 13*8 | |
+ |--------------------------------------------| |
+ | R3..R10 8*4/8=32/64 | | NUM_GPR_ARG_REGISTERS
+ |--------------------------------------------| |
+ | TOC=R2 (AIX) Reserved (Darwin) 4/8 | |
+ |--------------------------------------------| | stack |
+ | Reserved [compiler,binder] 2*4/8 | | grows |
+ |--------------------------------------------| | down V
+ | Space for callee's LR 4/8 | |
+ |--------------------------------------------| | lower addresses
+ | Saved CR [low word for m64] 4/8 | |
+ |--------------------------------------------| | stack pointer here
+ | Current backchain pointer 4/8 |-/ during
+ |--------------------------------------------| <<< ffi_closure_ASM.
*/
+
ffi_status
ffi_prep_closure_loc (ffi_closure* closure,
ffi_cif* cif,
@@ -631,30 +1013,44 @@ ffi_prep_closure_loc (ffi_closure* closure,
switch (cif->abi)
{
- case FFI_DARWIN:
-
- FFI_ASSERT (cif->abi == FFI_DARWIN);
-
- tramp = (unsigned int *) &closure->tramp[0];
- tramp[0] = 0x7c0802a6; /* mflr r0 */
- tramp[1] = 0x429f000d; /* bcl- 20,4*cr7+so,0x10 */
- tramp[4] = 0x7d6802a6; /* mflr r11 */
- tramp[5] = 0x818b0000; /* lwz r12,0(r11) function address */
- tramp[6] = 0x7c0803a6; /* mtlr r0 */
- tramp[7] = 0x7d8903a6; /* mtctr r12 */
- tramp[8] = 0x816b0004; /* lwz r11,4(r11) static chain */
- tramp[9] = 0x4e800420; /* bctr */
- tramp[2] = (unsigned long) ffi_closure_ASM; /* function */
- tramp[3] = (unsigned long) codeloc; /* context */
-
- closure->cif = cif;
- closure->fun = fun;
- closure->user_data = user_data;
+ case FFI_DARWIN:
+
+ FFI_ASSERT (cif->abi == FFI_DARWIN);
+
+ tramp = (unsigned int *) &closure->tramp[0];
+#if defined(POWERPC_DARWIN64)
+ tramp[0] = 0x7c0802a6; /* mflr r0 */
+ tramp[1] = 0x429f0015; /* bcl- 20,4*cr7+so, +0x18 (L1) */
+ /* We put the addresses here. */
+ tramp[6] = 0x7d6802a6; /*L1: mflr r11 */
+ tramp[7] = 0xe98b0000; /* ld r12,0(r11) function address */
+ tramp[8] = 0x7c0803a6; /* mtlr r0 */
+ tramp[9] = 0x7d8903a6; /* mtctr r12 */
+ tramp[10] = 0xe96b0008; /* lwz r11,8(r11) static chain */
+ tramp[11] = 0x4e800420; /* bctr */
+
+ *((unsigned long *)&tramp[2]) = (unsigned long) ffi_closure_ASM; /* function */
+ *((unsigned long *)&tramp[4]) = (unsigned long) codeloc; /* context */
+#else
+ tramp[0] = 0x7c0802a6; /* mflr r0 */
+ tramp[1] = 0x429f000d; /* bcl- 20,4*cr7+so,0x10 */
+ tramp[4] = 0x7d6802a6; /* mflr r11 */
+ tramp[5] = 0x818b0000; /* lwz r12,0(r11) function address */
+ tramp[6] = 0x7c0803a6; /* mtlr r0 */
+ tramp[7] = 0x7d8903a6; /* mtctr r12 */
+ tramp[8] = 0x816b0004; /* lwz r11,4(r11) static chain */
+ tramp[9] = 0x4e800420; /* bctr */
+ tramp[2] = (unsigned long) ffi_closure_ASM; /* function */
+ tramp[3] = (unsigned long) codeloc; /* context */
+#endif
+ closure->cif = cif;
+ closure->fun = fun;
+ closure->user_data = user_data;
- /* Flush the icache. Only necessary on Darwin. */
- flush_range(codeloc, FFI_TRAMPOLINE_SIZE);
+ /* Flush the icache. Only necessary on Darwin. */
+ flush_range(codeloc, FFI_TRAMPOLINE_SIZE);
- break;
+ break;
case FFI_AIX:
@@ -669,10 +1065,10 @@ ffi_prep_closure_loc (ffi_closure* closure,
closure->cif = cif;
closure->fun = fun;
closure->user_data = user_data;
+ break;
default:
-
- FFI_ASSERT(0);
+ return FFI_BAD_ABI;
break;
}
return FFI_OK;
@@ -708,7 +1104,7 @@ typedef union
double d;
} ffi_dblfl;
-int
+ffi_type *
ffi_closure_helper_DARWIN (ffi_closure *, void *,
unsigned long *, ffi_dblfl *);
@@ -719,7 +1115,7 @@ ffi_closure_helper_DARWIN (ffi_closure *, void *,
up space for a return value, ffi_closure_ASM invokes the
following helper function to do most of the work. */
-int
+ffi_type *
ffi_closure_helper_DARWIN (ffi_closure *closure, void *rvalue,
unsigned long *pgr, ffi_dblfl *pfr)
{
@@ -741,16 +1137,32 @@ ffi_closure_helper_DARWIN (ffi_closure *closure, void *rvalue,
ffi_cif * cif;
ffi_dblfl * end_pfr = pfr + NUM_FPR_ARG_REGISTERS;
unsigned size_al;
+#if defined(POWERPC_DARWIN64)
+ unsigned fpsused = 0;
+#endif
cif = closure->cif;
avalue = alloca (cif->nargs * sizeof(void *));
- /* Copy the caller's structure return value address so that the closure
- returns the data directly to the caller. */
if (cif->rtype->type == FFI_TYPE_STRUCT)
{
+#if defined(POWERPC_DARWIN64)
+ if (!darwin64_struct_ret_by_value_p (cif->rtype))
+ {
+ /* Won't fit into the regs - return by ref. */
+ rvalue = (void *) *pgr;
+ pgr++;
+ }
+#elif defined(DARWIN_PPC)
+ if (cif->rtype->size > 4)
+ {
+ rvalue = (void *) *pgr;
+ pgr++;
+ }
+#else /* assume we return by ref. */
rvalue = (void *) *pgr;
pgr++;
+#endif
}
i = 0;
@@ -764,7 +1176,7 @@ ffi_closure_helper_DARWIN (ffi_closure *closure, void *rvalue,
{
case FFI_TYPE_SINT8:
case FFI_TYPE_UINT8:
-#ifdef POWERPC64
+#if defined(POWERPC64)
avalue[i] = (char *) pgr + 7;
#else
avalue[i] = (char *) pgr + 3;
@@ -774,7 +1186,7 @@ ffi_closure_helper_DARWIN (ffi_closure *closure, void *rvalue,
case FFI_TYPE_SINT16:
case FFI_TYPE_UINT16:
-#ifdef POWERPC64
+#if defined(POWERPC64)
avalue[i] = (char *) pgr + 6;
#else
avalue[i] = (char *) pgr + 2;
@@ -784,7 +1196,7 @@ ffi_closure_helper_DARWIN (ffi_closure *closure, void *rvalue,
case FFI_TYPE_SINT32:
case FFI_TYPE_UINT32:
-#ifdef POWERPC64
+#if defined(POWERPC64)
avalue[i] = (char *) pgr + 4;
#else
case FFI_TYPE_POINTER:
@@ -794,34 +1206,53 @@ ffi_closure_helper_DARWIN (ffi_closure *closure, void *rvalue,
break;
case FFI_TYPE_STRUCT:
-#ifdef POWERPC64
size_al = arg_types[i]->size;
- if (arg_types[i]->elements[0]->type == FFI_TYPE_DOUBLE)
- size_al = ALIGN (arg_types[i]->size, 8);
- if (size_al < 3 && cif->abi == FFI_DARWIN)
- avalue[i] = (void *) pgr + 8 - size_al;
- else
- avalue[i] = (void *) pgr;
+#if defined(POWERPC_DARWIN64)
+ pgr = (unsigned long *)ALIGN((char *)pgr, arg_types[i]->alignment);
+ if (size_al < 3 || size_al == 4)
+ {
+ avalue[i] = ((char *)pgr)+8-size_al;
+ if (arg_types[i]->elements[0]->type == FFI_TYPE_FLOAT
+ && fpsused < NUM_FPR_ARG_REGISTERS)
+ {
+ *(float *)pgr = (float) *(double *)pfr;
+ pfr++;
+ fpsused++;
+ }
+ }
+ else
+ {
+ if (size_al != 16)
+ pfr = (ffi_dblfl *)
+ darwin64_struct_floats_to_mem (arg_types[i], (char *)pgr,
+ (double *)pfr, &fpsused);
+ avalue[i] = pgr;
+ }
pgr += (size_al + 7) / 8;
#else
- /* Structures that match the basic modes (QI 1 byte, HI 2 bytes,
- SI 4 bytes) are aligned as if they were those modes. */
- size_al = arg_types[i]->size;
/* If the first member of the struct is a double, then align
the struct to double-word. */
if (arg_types[i]->elements[0]->type == FFI_TYPE_DOUBLE)
size_al = ALIGN(arg_types[i]->size, 8);
+# if defined(POWERPC64)
+ FFI_ASSERT (cif->abi != FFI_DARWIN);
+ avalue[i] = pgr;
+ pgr += (size_al + 7) / 8;
+# else
+ /* Structures that match the basic modes (QI 1 byte, HI 2 bytes,
+ SI 4 bytes) are aligned as if they were those modes. */
if (size_al < 3 && cif->abi == FFI_DARWIN)
- avalue[i] = (void*) pgr + 4 - size_al;
+ avalue[i] = (char*) pgr + 4 - size_al;
else
- avalue[i] = (void*) pgr;
+ avalue[i] = pgr;
pgr += (size_al + 3) / 4;
+# endif
#endif
break;
case FFI_TYPE_SINT64:
case FFI_TYPE_UINT64:
-#ifdef POWERPC64
+#if defined(POWERPC64)
case FFI_TYPE_POINTER:
avalue[i] = pgr;
pgr++;
@@ -924,5 +1355,5 @@ ffi_closure_helper_DARWIN (ffi_closure *closure, void *rvalue,
(closure->fun) (cif, rvalue, avalue, closure->user_data);
/* Tell ffi_closure_ASM to perform return type promotions. */
- return cif->rtype->type;
+ return cif->rtype;
}
diff --git a/Modules/_ctypes/libffi/src/powerpc/ffitarget.h b/Modules/_ctypes/libffi/src/powerpc/ffitarget.h
index b4f992e..3c9db49 100644
--- a/Modules/_ctypes/libffi/src/powerpc/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/powerpc/ffitarget.h
@@ -1,6 +1,8 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 1996-2003 Red Hat, Inc.
- Copyright (C) 2007, 2008 Free Software Foundation, Inc
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (C) 2007, 2008, 2010 Free Software Foundation, Inc
+ Copyright (c) 1996-2003 Red Hat, Inc.
+
Target configuration macros for PowerPC.
Permission is hereby granted, free of charge, to any person obtaining
@@ -28,15 +30,28 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
/* ---- System specific configurations ----------------------------------- */
#if defined (POWERPC) && defined (__powerpc64__) /* linux64 */
+#ifndef POWERPC64
#define POWERPC64
-#elif defined (POWERPC_DARWIN) && defined (__ppc64__) /* Darwin */
+#endif
+#elif defined (POWERPC_DARWIN) && defined (__ppc64__) /* Darwin64 */
+#ifndef POWERPC64
#define POWERPC64
+#endif
+#ifndef POWERPC_DARWIN64
+#define POWERPC_DARWIN64
+#endif
#elif defined (POWERPC_AIX) && defined (__64BIT__) /* AIX64 */
+#ifndef POWERPC64
#define POWERPC64
#endif
+#endif
#ifndef LIBFFI_ASM
typedef unsigned long ffi_arg;
@@ -51,18 +66,14 @@ typedef enum ffi_abi {
FFI_LINUX64,
FFI_LINUX,
FFI_LINUX_SOFT_FLOAT,
-# ifdef POWERPC64
+# if defined(POWERPC64)
FFI_DEFAULT_ABI = FFI_LINUX64,
-# else
-# if (!defined(__NO_FPRS__) && (__LDBL_MANT_DIG__ == 106))
- FFI_DEFAULT_ABI = FFI_LINUX,
-# else
-# ifdef __NO_FPRS__
+# elif defined(__NO_FPRS__)
FFI_DEFAULT_ABI = FFI_LINUX_SOFT_FLOAT,
-# else
+# elif (__LDBL_MANT_DIG__ == 106)
+ FFI_DEFAULT_ABI = FFI_LINUX,
+# else
FFI_DEFAULT_ABI = FFI_GCC_SYSV,
-# endif
-# endif
# endif
#endif
@@ -108,9 +119,13 @@ typedef enum ffi_abi {
#define FFI_SYSV_TYPE_SMALL_STRUCT (FFI_TYPE_LAST + 2)
#if defined(POWERPC64) || defined(POWERPC_AIX)
-#define FFI_TRAMPOLINE_SIZE 24
+# if defined(POWERPC_DARWIN64)
+# define FFI_TRAMPOLINE_SIZE 48
+# else
+# define FFI_TRAMPOLINE_SIZE 24
+# endif
#else /* POWERPC || POWERPC_AIX */
-#define FFI_TRAMPOLINE_SIZE 40
+# define FFI_TRAMPOLINE_SIZE 40
#endif
#ifndef LIBFFI_ASM
diff --git a/Modules/_ctypes/libffi/src/powerpc/linux64.S b/Modules/_ctypes/libffi/src/powerpc/linux64.S
index 57b56cb..f28da81 100644
--- a/Modules/_ctypes/libffi/src/powerpc/linux64.S
+++ b/Modules/_ctypes/libffi/src/powerpc/linux64.S
@@ -30,16 +30,25 @@
#include <ffi.h>
#ifdef __powerpc64__
- .hidden ffi_call_LINUX64, .ffi_call_LINUX64
- .globl ffi_call_LINUX64, .ffi_call_LINUX64
+ .hidden ffi_call_LINUX64
+ .globl ffi_call_LINUX64
.section ".opd","aw"
.align 3
ffi_call_LINUX64:
+#ifdef _CALL_LINUX
+ .quad .L.ffi_call_LINUX64,.TOC.@tocbase,0
+ .type ffi_call_LINUX64,@function
+ .text
+.L.ffi_call_LINUX64:
+#else
+ .hidden .ffi_call_LINUX64
+ .globl .ffi_call_LINUX64
.quad .ffi_call_LINUX64,.TOC.@tocbase,0
.size ffi_call_LINUX64,24
.type .ffi_call_LINUX64,@function
.text
.ffi_call_LINUX64:
+#endif
.LFB1:
mflr %r0
std %r28, -32(%r1)
@@ -58,7 +67,11 @@ ffi_call_LINUX64:
/* Call ffi_prep_args64. */
mr %r4, %r1
+#ifdef _CALL_LINUX
+ bl ffi_prep_args64
+#else
bl .ffi_prep_args64
+#endif
ld %r0, 0(%r29)
ld %r2, 8(%r29)
@@ -137,7 +150,11 @@ ffi_call_LINUX64:
.LFE1:
.long 0
.byte 0,12,0,1,128,4,0,0
+#ifdef _CALL_LINUX
+ .size ffi_call_LINUX64,.-.L.ffi_call_LINUX64
+#else
.size .ffi_call_LINUX64,.-.ffi_call_LINUX64
+#endif
.section .eh_frame,EH_FRAME_FLAGS,@progbits
.Lframe1:
diff --git a/Modules/_ctypes/libffi/src/powerpc/linux64_closure.S b/Modules/_ctypes/libffi/src/powerpc/linux64_closure.S
index f7aa2c9..b1e1219 100644
--- a/Modules/_ctypes/libffi/src/powerpc/linux64_closure.S
+++ b/Modules/_ctypes/libffi/src/powerpc/linux64_closure.S
@@ -32,16 +32,24 @@
#ifdef __powerpc64__
FFI_HIDDEN (ffi_closure_LINUX64)
- FFI_HIDDEN (.ffi_closure_LINUX64)
- .globl ffi_closure_LINUX64, .ffi_closure_LINUX64
+ .globl ffi_closure_LINUX64
.section ".opd","aw"
.align 3
ffi_closure_LINUX64:
+#ifdef _CALL_LINUX
+ .quad .L.ffi_closure_LINUX64,.TOC.@tocbase,0
+ .type ffi_closure_LINUX64,@function
+ .text
+.L.ffi_closure_LINUX64:
+#else
+ FFI_HIDDEN (.ffi_closure_LINUX64)
+ .globl .ffi_closure_LINUX64
.quad .ffi_closure_LINUX64,.TOC.@tocbase,0
.size ffi_closure_LINUX64,24
.type .ffi_closure_LINUX64,@function
.text
.ffi_closure_LINUX64:
+#endif
.LFB1:
# save general regs into parm save area
std %r3, 48(%r1)
@@ -91,7 +99,11 @@ ffi_closure_LINUX64:
addi %r6, %r1, 128
# make the call
+#ifdef _CALL_LINUX
+ bl ffi_closure_helper_LINUX64
+#else
bl .ffi_closure_helper_LINUX64
+#endif
.Lret:
# now r3 contains the return type
@@ -194,7 +206,11 @@ ffi_closure_LINUX64:
.LFE1:
.long 0
.byte 0,12,0,1,128,0,0,0
+#ifdef _CALL_LINUX
+ .size ffi_closure_LINUX64,.-.L.ffi_closure_LINUX64
+#else
.size .ffi_closure_LINUX64,.-.ffi_closure_LINUX64
+#endif
.section .eh_frame,EH_FRAME_FLAGS,@progbits
.Lframe1:
diff --git a/Modules/_ctypes/libffi/src/powerpc/ppc_closure.S b/Modules/_ctypes/libffi/src/powerpc/ppc_closure.S
index 56f7d1a..41fb885 100644
--- a/Modules/_ctypes/libffi/src/powerpc/ppc_closure.S
+++ b/Modules/_ctypes/libffi/src/powerpc/ppc_closure.S
@@ -122,22 +122,41 @@ ENTRY(ffi_closure_SYSV)
blr
# case FFI_TYPE_FLOAT
+#ifndef __NO_FPRS__
lfs %f1,112+0(%r1)
mtlr %r0
addi %r1,%r1,144
+#else
+ nop
+ nop
+ nop
+#endif
blr
# case FFI_TYPE_DOUBLE
+#ifndef __NO_FPRS__
lfd %f1,112+0(%r1)
mtlr %r0
addi %r1,%r1,144
+#else
+ nop
+ nop
+ nop
+#endif
blr
# case FFI_TYPE_LONGDOUBLE
+#ifndef __NO_FPRS__
lfd %f1,112+0(%r1)
lfd %f2,112+8(%r1)
mtlr %r0
b .Lfinish
+#else
+ nop
+ nop
+ nop
+ blr
+#endif
# case FFI_TYPE_UINT8
lbz %r3,112+3(%r1)
diff --git a/Modules/_ctypes/libffi/src/powerpc/sysv.S b/Modules/_ctypes/libffi/src/powerpc/sysv.S
index 96ea22b..5ee3a19 100644
--- a/Modules/_ctypes/libffi/src/powerpc/sysv.S
+++ b/Modules/_ctypes/libffi/src/powerpc/sysv.S
@@ -83,6 +83,7 @@ ENTRY(ffi_call_SYSV)
nop
1:
+#ifndef __NO_FPRS__
/* Load all the FP registers. */
bf- 6,2f
lfd %f1,-16-(8*4)-(8*8)(%r28)
@@ -94,6 +95,7 @@ ENTRY(ffi_call_SYSV)
lfd %f6,-16-(8*4)-(3*8)(%r28)
lfd %f7,-16-(8*4)-(2*8)(%r28)
lfd %f8,-16-(8*4)-(1*8)(%r28)
+#endif
2:
/* Make the call. */
@@ -103,7 +105,9 @@ ENTRY(ffi_call_SYSV)
mtcrf 0x01,%r31 /* cr7 */
bt- 31,L(small_struct_return_value)
bt- 30,L(done_return_value)
+#ifndef __NO_FPRS__
bt- 29,L(fp_return_value)
+#endif
stw %r3,0(%r30)
bf+ 28,L(done_return_value)
stw %r4,4(%r30)
@@ -124,6 +128,7 @@ L(done_return_value):
lwz %r1,0(%r1)
blr
+#ifndef __NO_FPRS__
L(fp_return_value):
bf 28,L(float_return_value)
stfd %f1,0(%r30)
@@ -134,6 +139,7 @@ L(fp_return_value):
L(float_return_value):
stfs %f1,0(%r30)
b L(done_return_value)
+#endif
L(small_struct_return_value):
extrwi %r6,%r31,2,19 /* number of bytes padding = shift/8 */
diff --git a/Modules/_ctypes/libffi/src/prep_cif.c b/Modules/_ctypes/libffi/src/prep_cif.c
index c1c3b9a..e8ec5cf 100644
--- a/Modules/_ctypes/libffi/src/prep_cif.c
+++ b/Modules/_ctypes/libffi/src/prep_cif.c
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------------
- prep_cif.c - Copyright (c) 1996, 1998, 2007 Red Hat, Inc.
+ prep_cif.c - Copyright (c) 2011, 2012 Anthony Green
+ Copyright (c) 1996, 1998, 2007 Red Hat, Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -37,17 +38,21 @@ static ffi_status initialize_aggregate(ffi_type *arg)
{
ffi_type **ptr;
- FFI_ASSERT(arg != NULL);
+ if (UNLIKELY(arg == NULL || arg->elements == NULL))
+ return FFI_BAD_TYPEDEF;
- FFI_ASSERT(arg->elements != NULL);
- FFI_ASSERT(arg->size == 0);
- FFI_ASSERT(arg->alignment == 0);
+ arg->size = 0;
+ arg->alignment = 0;
ptr = &(arg->elements[0]);
+ if (UNLIKELY(ptr == 0))
+ return FFI_BAD_TYPEDEF;
+
while ((*ptr) != NULL)
{
- if (((*ptr)->size == 0) && (initialize_aggregate((*ptr)) != FFI_OK))
+ if (UNLIKELY(((*ptr)->size == 0)
+ && (initialize_aggregate((*ptr)) != FFI_OK)))
return FFI_BAD_TYPEDEF;
/* Perform a sanity check on the argument type */
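
With initialize_aggregate reporting FFI_BAD_TYPEDEF rather than asserting, a malformed aggregate can be detected from the ffi_prep_cif return status. A minimal sketch, assuming only the public libffi API; the zero-sized struct with no elements is hypothetical:

#include <ffi.h>

int
detect_bad_typedef (void)
{
  ffi_type broken = { 0, 0, FFI_TYPE_STRUCT, NULL };   /* size 0, no elements */
  ffi_cif cif;

  return ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 0, &broken, NULL) == FFI_BAD_TYPEDEF;
}
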
@@ -85,19 +90,38 @@ static ffi_status initialize_aggregate(ffi_type *arg)
/* Perform machine independent ffi_cif preparation, then call
machine dependent routine. */
-ffi_status ffi_prep_cif(ffi_cif *cif, ffi_abi abi, unsigned int nargs,
- ffi_type *rtype, ffi_type **atypes)
+/* For non variadic functions isvariadic should be 0 and
+ nfixedargs==ntotalargs.
+
+ For variadic calls, isvariadic should be 1 and nfixedargs
+ and ntotalargs set as appropriate. nfixedargs must always be >=1 */
+
+
+ffi_status FFI_HIDDEN ffi_prep_cif_core(ffi_cif *cif, ffi_abi abi,
+ unsigned int isvariadic,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs,
+ ffi_type *rtype, ffi_type **atypes)
{
unsigned bytes = 0;
unsigned int i;
ffi_type **ptr;
FFI_ASSERT(cif != NULL);
- FFI_ASSERT((abi > FFI_FIRST_ABI) && (abi <= FFI_DEFAULT_ABI));
+ FFI_ASSERT((!isvariadic) || (nfixedargs >= 1));
+ FFI_ASSERT(nfixedargs <= ntotalargs);
+
+#ifndef X86_WIN32
+ if (! (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI))
+ return FFI_BAD_ABI;
+#else
+ if (! (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI || abi == FFI_THISCALL))
+ return FFI_BAD_ABI;
+#endif
cif->abi = abi;
cif->arg_types = atypes;
- cif->nargs = nargs;
+ cif->nargs = ntotalargs;
cif->rtype = rtype;
cif->flags = 0;
@@ -110,12 +134,19 @@ ffi_status ffi_prep_cif(ffi_cif *cif, ffi_abi abi, unsigned int nargs,
FFI_ASSERT_VALID_TYPE(cif->rtype);
/* x86, x86-64 and s390 stack space allocation is handled in prep_machdep. */
-#if !defined M68K && !defined __i386__ && !defined __x86_64__ && !defined S390 && !defined PA
+#if !defined M68K && !defined X86_ANY && !defined S390 && !defined PA
/* Make space for the return structure pointer */
if (cif->rtype->type == FFI_TYPE_STRUCT
#ifdef SPARC
&& (cif->abi != FFI_V9 || cif->rtype->size > 32)
#endif
+#ifdef TILE
+ && (cif->rtype->size > 10 * FFI_SIZEOF_ARG)
+#endif
+#ifdef XTENSA
+ && (cif->rtype->size > 16)
+#endif
+
)
bytes = STACK_ARG_SIZE(sizeof(void*));
#endif
@@ -131,7 +162,7 @@ ffi_status ffi_prep_cif(ffi_cif *cif, ffi_abi abi, unsigned int nargs,
check after the initialization. */
FFI_ASSERT_VALID_TYPE(*ptr);
-#if !defined __i386__ && !defined __x86_64__ && !defined S390 && !defined PA
+#if !defined X86_ANY && !defined S390 && !defined PA
#ifdef SPARC
if (((*ptr)->type == FFI_TYPE_STRUCT
&& ((*ptr)->size > 16 || cif->abi != FFI_V9))
@@ -145,6 +176,20 @@ ffi_status ffi_prep_cif(ffi_cif *cif, ffi_abi abi, unsigned int nargs,
if (((*ptr)->alignment - 1) & bytes)
bytes = ALIGN(bytes, (*ptr)->alignment);
+#ifdef TILE
+ if (bytes < 10 * FFI_SIZEOF_ARG &&
+ bytes + STACK_ARG_SIZE((*ptr)->size) > 10 * FFI_SIZEOF_ARG)
+ {
+ /* An argument is never split between the 10 parameter
+ registers and the stack. */
+ bytes = 10 * FFI_SIZEOF_ARG;
+ }
+#endif
+#ifdef XTENSA
+ if (bytes <= 6*4 && bytes + STACK_ARG_SIZE((*ptr)->size) > 6*4)
+ bytes = 6*4;
+#endif
+
bytes += STACK_ARG_SIZE((*ptr)->size);
}
#endif
@@ -153,10 +198,31 @@ ffi_status ffi_prep_cif(ffi_cif *cif, ffi_abi abi, unsigned int nargs,
cif->bytes = bytes;
/* Perform machine dependent cif processing */
+#ifdef FFI_TARGET_SPECIFIC_VARIADIC
+ if (isvariadic)
+ return ffi_prep_cif_machdep_var(cif, nfixedargs, ntotalargs);
+#endif
+
return ffi_prep_cif_machdep(cif);
}
#endif /* not __CRIS__ */
+ffi_status ffi_prep_cif(ffi_cif *cif, ffi_abi abi, unsigned int nargs,
+ ffi_type *rtype, ffi_type **atypes)
+{
+ return ffi_prep_cif_core(cif, abi, 0, nargs, nargs, rtype, atypes);
+}
+
+ffi_status ffi_prep_cif_var(ffi_cif *cif,
+ ffi_abi abi,
+ unsigned int nfixedargs,
+ unsigned int ntotalargs,
+ ffi_type *rtype,
+ ffi_type **atypes)
+{
+ return ffi_prep_cif_core(cif, abi, 1, nfixedargs, ntotalargs, rtype, atypes);
+}
+
#if FFI_CLOSURES
ffi_status
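
The new ffi_prep_cif_var entry point separates fixed from variadic arguments. A minimal sketch of a variadic call through the public API, here printf with one fixed argument (the format string) and two arguments in total:

#include <ffi.h>
#include <stdio.h>

int
call_printf_variadically (void)
{
  ffi_cif cif;
  ffi_type *arg_types[] = { &ffi_type_pointer, &ffi_type_sint };
  const char *fmt = "value: %d\n";
  int value = 42;
  void *arg_values[] = { &fmt, &value };
  ffi_arg rc;

  /* One fixed argument (the format string), two arguments in total. */
  if (ffi_prep_cif_var (&cif, FFI_DEFAULT_ABI, 1, 2,
                        &ffi_type_sint, arg_types) != FFI_OK)
    return -1;

  ffi_call (&cif, FFI_FN (printf), &rc, arg_values);
  return (int) rc;
}
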
diff --git a/Modules/_ctypes/libffi/src/s390/ffi.c b/Modules/_ctypes/libffi/src/s390/ffi.c
index ca2675b..8adb5bc 100644
--- a/Modules/_ctypes/libffi/src/s390/ffi.c
+++ b/Modules/_ctypes/libffi/src/s390/ffi.c
@@ -750,7 +750,8 @@ ffi_prep_closure_loc (ffi_closure *closure,
void *user_data,
void *codeloc)
{
- FFI_ASSERT (cif->abi == FFI_SYSV);
+ if (cif->abi != FFI_SYSV)
+ return FFI_BAD_ABI;
#ifndef __s390x__
*(short *)&closure->tramp [0] = 0x0d10; /* basr %r1,0 */
diff --git a/Modules/_ctypes/libffi/src/s390/ffitarget.h b/Modules/_ctypes/libffi/src/s390/ffitarget.h
index 78f3c65..97fa5c4 100644
--- a/Modules/_ctypes/libffi/src/s390/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/s390/ffitarget.h
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 1996-2003 Red Hat, Inc.
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 1996-2003 Red Hat, Inc.
Target configuration macros for S390.
Permission is hereby granted, free of charge, to any person obtaining
@@ -27,9 +28,15 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
#if defined (__s390x__)
+#ifndef S390X
#define S390X
#endif
+#endif
/* ---- System specific configurations ----------------------------------- */
@@ -40,8 +47,8 @@ typedef signed long ffi_sarg;
typedef enum ffi_abi {
FFI_FIRST_ABI = 0,
FFI_SYSV,
- FFI_DEFAULT_ABI = FFI_SYSV,
- FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
} ffi_abi;
#endif
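
Reordering the enum so that FFI_LAST_ABI is an exclusive upper bound and FFI_DEFAULT_ABI is defined separately lets the generic code validate an ABI with a plain range test; a hedged restatement of the check ffi_prep_cif_core now performs:

#include <ffi.h>

static int
abi_is_valid (ffi_abi abi)
{
  return abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI;
}
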
diff --git a/Modules/_ctypes/libffi/src/sh/ffi.c b/Modules/_ctypes/libffi/src/sh/ffi.c
index 69bd025..3515b91 100644
--- a/Modules/_ctypes/libffi/src/sh/ffi.c
+++ b/Modules/_ctypes/libffi/src/sh/ffi.c
@@ -1,5 +1,5 @@
/* -----------------------------------------------------------------------
- ffi.c - Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 Kaz Kojima
+ ffi.c - Copyright (c) 2002-2008, 2012 Kaz Kojima
Copyright (c) 2008 Red Hat, Inc.
SuperH Foreign Function Interface
@@ -463,7 +463,8 @@ ffi_prep_closure_loc (ffi_closure* closure,
unsigned int *tramp;
unsigned int insn;
- FFI_ASSERT (cif->abi == FFI_GCC_SYSV);
+ if (cif->abi != FFI_SYSV)
+ return FFI_BAD_ABI;
tramp = (unsigned int *) &closure->tramp[0];
/* Set T bit if the function returns a struct pointed with R2. */
diff --git a/Modules/_ctypes/libffi/src/sh/ffitarget.h b/Modules/_ctypes/libffi/src/sh/ffitarget.h
index 218ae3d..a36bf42 100644
--- a/Modules/_ctypes/libffi/src/sh/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/sh/ffitarget.h
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 1996-2003 Red Hat, Inc.
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 1996-2003 Red Hat, Inc.
Target configuration macros for SuperH.
Permission is hereby granted, free of charge, to any person obtaining
@@ -27,6 +28,10 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
/* ---- Generic type definitions ----------------------------------------- */
#ifndef LIBFFI_ASM
@@ -36,8 +41,8 @@ typedef signed long ffi_sarg;
typedef enum ffi_abi {
FFI_FIRST_ABI = 0,
FFI_SYSV,
- FFI_DEFAULT_ABI = FFI_SYSV,
- FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
} ffi_abi;
#endif
diff --git a/Modules/_ctypes/libffi/src/sh64/ffi.c b/Modules/_ctypes/libffi/src/sh64/ffi.c
index 8fbc05c..123b87a 100644
--- a/Modules/_ctypes/libffi/src/sh64/ffi.c
+++ b/Modules/_ctypes/libffi/src/sh64/ffi.c
@@ -1,5 +1,5 @@
/* -----------------------------------------------------------------------
- ffi.c - Copyright (c) 2003, 2004, 2006, 2007 Kaz Kojima
+ ffi.c - Copyright (c) 2003, 2004, 2006, 2007, 2012 Kaz Kojima
Copyright (c) 2008 Anthony Green
SuperH SHmedia Foreign Function Interface
@@ -302,7 +302,8 @@ ffi_prep_closure_loc (ffi_closure *closure,
{
unsigned int *tramp;
- FFI_ASSERT (cif->abi == FFI_GCC_SYSV);
+ if (cif->abi != FFI_SYSV)
+ return FFI_BAD_ABI;
tramp = (unsigned int *) &closure->tramp[0];
/* Since ffi_closure is an aligned object, the ffi trampoline is
diff --git a/Modules/_ctypes/libffi/src/sh64/ffitarget.h b/Modules/_ctypes/libffi/src/sh64/ffitarget.h
index 4e922fc..08a6fe9 100644
--- a/Modules/_ctypes/libffi/src/sh64/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/sh64/ffitarget.h
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 1996-2003 Red Hat, Inc.
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 1996-2003 Red Hat, Inc.
Target configuration macros for SuperH - SHmedia.
Permission is hereby granted, free of charge, to any person obtaining
@@ -27,6 +28,10 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
/* ---- Generic type definitions ----------------------------------------- */
#ifndef LIBFFI_ASM
@@ -36,8 +41,8 @@ typedef signed long ffi_sarg;
typedef enum ffi_abi {
FFI_FIRST_ABI = 0,
FFI_SYSV,
- FFI_DEFAULT_ABI = FFI_SYSV,
- FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
} ffi_abi;
#define FFI_EXTRA_CIF_FIELDS long long flags2
diff --git a/Modules/_ctypes/libffi/src/sparc/ffi.c b/Modules/_ctypes/libffi/src/sparc/ffi.c
index 1d01f59..9f0fded 100644
--- a/Modules/_ctypes/libffi/src/sparc/ffi.c
+++ b/Modules/_ctypes/libffi/src/sparc/ffi.c
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------------
- ffi.c - Copyright (c) 1996, 2003, 2004, 2007, 2008 Red Hat, Inc.
+ ffi.c - Copyright (c) 2011, 2013 Anthony Green
+ Copyright (c) 1996, 2003-2004, 2007-2008 Red Hat, Inc.
SPARC Foreign Function Interface
@@ -375,6 +376,10 @@ extern int ffi_call_v8(void *, extended_cif *, unsigned,
unsigned, unsigned *, void (*fn)(void));
#endif
+#ifndef __GNUC__
+void ffi_flush_icache (void *, size_t);
+#endif
+
void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
{
extended_cif ecif;
@@ -406,8 +411,54 @@ void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
/* We don't yet support calling 32bit code from 64bit */
FFI_ASSERT(0);
#else
- ffi_call_v8(ffi_prep_args_v8, &ecif, cif->bytes,
- cif->flags, rvalue, fn);
+ if (rvalue && (cif->rtype->type == FFI_TYPE_STRUCT
+#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
+ || cif->flags == FFI_TYPE_LONGDOUBLE
+#endif
+ ))
+ {
+ /* For v8, we need an "unimp" with size of returning struct */
+ /* behind "call", so we alloc some executable space for it. */
+ /* l7 is used, we need to make sure v8.S doesn't use %l7. */
+ unsigned int *call_struct = NULL;
+ ffi_closure_alloc(32, (void **)&call_struct);
+ if (call_struct)
+ {
+ unsigned long f = (unsigned long)fn;
+ call_struct[0] = 0xae10001f; /* mov %i7, %l7 */
+ call_struct[1] = 0xbe10000f; /* mov %o7, %i7 */
+ call_struct[2] = 0x03000000 | f >> 10; /* sethi %hi(fn), %g1 */
+ call_struct[3] = 0x9fc06000 | (f & 0x3ff); /* jmp %g1+%lo(fn), %o7 */
+ call_struct[4] = 0x01000000; /* nop */
+ if (cif->rtype->size < 0x7f)
+ call_struct[5] = cif->rtype->size; /* unimp */
+ else
+ call_struct[5] = 0x01000000; /* nop */
+ call_struct[6] = 0x81c7e008; /* ret */
+ call_struct[7] = 0xbe100017; /* mov %l7, %i7 */
+#ifdef __GNUC__
+ asm volatile ("iflush %0; iflush %0+8; iflush %0+16; iflush %0+24" : :
+ "r" (call_struct) : "memory");
+ /* SPARC v8 requires 5 instructions for flush to be visible */
+ asm volatile ("nop; nop; nop; nop; nop");
+#else
+ ffi_flush_icache (call_struct, 32);
+#endif
+ ffi_call_v8(ffi_prep_args_v8, &ecif, cif->bytes,
+ cif->flags, rvalue, call_struct);
+ ffi_closure_free(call_struct);
+ }
+ else
+ {
+ ffi_call_v8(ffi_prep_args_v8, &ecif, cif->bytes,
+ cif->flags, rvalue, fn);
+ }
+ }
+ else
+ {
+ ffi_call_v8(ffi_prep_args_v8, &ecif, cif->bytes,
+ cif->flags, rvalue, fn);
+ }
#endif
break;
case FFI_V9:
@@ -425,7 +476,6 @@ void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
FFI_ASSERT(0);
break;
}
-
}
@@ -447,7 +497,8 @@ ffi_prep_closure_loc (ffi_closure* closure,
#ifdef SPARC64
/* Trampoline address is equal to the closure address. We take advantage
of that to reduce the trampoline size by 8 bytes. */
- FFI_ASSERT (cif->abi == FFI_V9);
+ if (cif->abi != FFI_V9)
+ return FFI_BAD_ABI;
fn = (unsigned long) ffi_closure_v9;
tramp[0] = 0x83414000; /* rd %pc, %g1 */
tramp[1] = 0xca586010; /* ldx [%g1+16], %g5 */
@@ -456,7 +507,8 @@ ffi_prep_closure_loc (ffi_closure* closure,
*((unsigned long *) &tramp[4]) = fn;
#else
unsigned long ctx = (unsigned long) codeloc;
- FFI_ASSERT (cif->abi == FFI_V8);
+ if (cif->abi != FFI_V8)
+ return FFI_BAD_ABI;
fn = (unsigned long) ffi_closure_v8;
tramp[0] = 0x03000000 | fn >> 10; /* sethi %hi(fn), %g1 */
tramp[1] = 0x05000000 | ctx >> 10; /* sethi %hi(ctx), %g2 */
@@ -468,13 +520,17 @@ ffi_prep_closure_loc (ffi_closure* closure,
closure->fun = fun;
closure->user_data = user_data;
- /* Flush the Icache. FIXME: alignment isn't certain, assume 8 bytes */
+ /* Flush the Icache. closure is 8 bytes aligned. */
+#ifdef __GNUC__
#ifdef SPARC64
- asm volatile ("flush %0" : : "r" (closure) : "memory");
- asm volatile ("flush %0" : : "r" (((char *) closure) + 8) : "memory");
+ asm volatile ("flush %0; flush %0+8" : : "r" (closure) : "memory");
+#else
+ asm volatile ("iflush %0; iflush %0+8" : : "r" (closure) : "memory");
+ /* SPARC v8 requires 5 instructions for flush to be visible */
+ asm volatile ("nop; nop; nop; nop; nop");
+#endif
#else
- asm volatile ("iflush %0" : : "r" (closure) : "memory");
- asm volatile ("iflush %0" : : "r" (((char *) closure) + 8) : "memory");
+ ffi_flush_icache (closure, 16);
#endif
return FFI_OK;
diff --git a/Modules/_ctypes/libffi/src/sparc/ffitarget.h b/Modules/_ctypes/libffi/src/sparc/ffitarget.h
index 1a1a1ac..d89f787 100644
--- a/Modules/_ctypes/libffi/src/sparc/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/sparc/ffitarget.h
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 1996-2003 Red Hat, Inc.
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 1996-2003 Red Hat, Inc.
Target configuration macros for SPARC.
Permission is hereby granted, free of charge, to any person obtaining
@@ -27,11 +28,17 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
/* ---- System specific configurations ----------------------------------- */
#if defined(__arch64__) || defined(__sparcv9)
+#ifndef SPARC64
#define SPARC64
#endif
+#endif
#ifndef LIBFFI_ASM
typedef unsigned long ffi_arg;
@@ -42,12 +49,12 @@ typedef enum ffi_abi {
FFI_V8,
FFI_V8PLUS,
FFI_V9,
+ FFI_LAST_ABI,
#ifdef SPARC64
- FFI_DEFAULT_ABI = FFI_V9,
+ FFI_DEFAULT_ABI = FFI_V9
#else
- FFI_DEFAULT_ABI = FFI_V8,
+ FFI_DEFAULT_ABI = FFI_V8
#endif
- FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
} ffi_abi;
#endif
diff --git a/Modules/_ctypes/libffi/src/sparc/v8.S b/Modules/_ctypes/libffi/src/sparc/v8.S
index bef1b68..6bf7ac0 100644
--- a/Modules/_ctypes/libffi/src/sparc/v8.S
+++ b/Modules/_ctypes/libffi/src/sparc/v8.S
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------------
- v8.S - Copyright (c) 1996, 1997, 2003, 2004, 2008 Red Hat, Inc.
+ v8.S - Copyright (c) 2013 The Written Word, Inc.
+ Copyright (c) 1996, 1997, 2003, 2004, 2008 Red Hat, Inc.
SPARC Foreign Function Interface
@@ -31,11 +32,39 @@
#define STACKFRAME 96 /* Minimum stack framesize for SPARC */
#define ARGS (64+4) /* Offset of register area in frame */
-.text
+#ifndef __GNUC__
+ .text
+ .align 8
+.globl ffi_flush_icache
+.globl _ffi_flush_icache
+
+ffi_flush_icache:
+_ffi_flush_icache:
+ add %o0, %o1, %o2
+#ifdef SPARC64
+1: flush %o0
+#else
+1: iflush %o0
+#endif
+ add %o0, 8, %o0
+ cmp %o0, %o2
+ blt 1b
+ nop
+ nop
+ nop
+ nop
+ nop
+ retl
+ nop
+.ffi_flush_icache_end:
+ .size ffi_flush_icache,.ffi_flush_icache_end-ffi_flush_icache
+#endif
+
+ .text
.align 8
.globl ffi_call_v8
.globl _ffi_call_v8
-
+
ffi_call_v8:
_ffi_call_v8:
.LLFB1:
diff --git a/Modules/_ctypes/libffi/src/sparc/v9.S b/Modules/_ctypes/libffi/src/sparc/v9.S
index 489ff02..bf31a2b 100644
--- a/Modules/_ctypes/libffi/src/sparc/v9.S
+++ b/Modules/_ctypes/libffi/src/sparc/v9.S
@@ -32,7 +32,7 @@
/* Only compile this in for 64bit builds, because otherwise the object file
    will have improper architecture due to the instructions used. */
-#define STACKFRAME 128 /* Minimum stack framesize for SPARC */
+#define STACKFRAME 176 /* Minimum stack framesize for SPARC 64-bit */
#define STACK_BIAS 2047
#define ARGS (128) /* Offset of register area in frame */
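The bump from 128 to 176 bytes matches the SPARC V9 ABI frame layout as I read it: a minimum frame needs 16 eight-byte slots for the register window save area (16 * 8 = 128) plus 6 eight-byte slots where the callee may home its register arguments (6 * 8 = 48), giving 128 + 48 = 176; the old value of 128 covered only the window save area.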
diff --git a/Modules/_ctypes/libffi/src/tile/ffi.c b/Modules/_ctypes/libffi/src/tile/ffi.c
new file mode 100644
index 0000000..3a94469
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/tile/ffi.c
@@ -0,0 +1,355 @@
+/* -----------------------------------------------------------------------
+ ffi.c - Copyright (c) 2012 Tilera Corp.
+
+ TILE Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#include <ffi.h>
+#include <ffi_common.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <arch/abi.h>
+#include <arch/icache.h>
+#include <arch/opcode.h>
+
+
+/* The first 10 registers are used to pass arguments and return values. */
+#define NUM_ARG_REGS 10
+
+/* Performs a raw function call with the given NUM_ARG_REGS register arguments
+ and the specified additional stack arguments (if any). */
+extern void ffi_call_tile(ffi_sarg reg_args[NUM_ARG_REGS],
+ const ffi_sarg *stack_args,
+ size_t stack_args_bytes,
+ void (*fnaddr)(void))
+ FFI_HIDDEN;
+
+/* This handles the raw call from the closure stub, cleaning up the
+ parameters and delegating to ffi_closure_tile_inner. */
+extern void ffi_closure_tile(void) FFI_HIDDEN;
+
+
+ffi_status
+ffi_prep_cif_machdep(ffi_cif *cif)
+{
+ /* We always allocate room for all registers. Even if we don't
+ use them as parameters, they get returned in the same array
+ as struct return values so we need to make room. */
+ if (cif->bytes < NUM_ARG_REGS * FFI_SIZEOF_ARG)
+ cif->bytes = NUM_ARG_REGS * FFI_SIZEOF_ARG;
+
+ if (cif->rtype->size > NUM_ARG_REGS * FFI_SIZEOF_ARG)
+ cif->flags = FFI_TYPE_STRUCT;
+ else
+ cif->flags = FFI_TYPE_INT;
+
+ /* Nothing to do. */
+ return FFI_OK;
+}
+
+
+static long
+assign_to_ffi_arg(ffi_sarg *out, void *in, const ffi_type *type,
+ int write_to_reg)
+{
+ switch (type->type)
+ {
+ case FFI_TYPE_SINT8:
+ *out = *(SINT8 *)in;
+ return 1;
+
+ case FFI_TYPE_UINT8:
+ *out = *(UINT8 *)in;
+ return 1;
+
+ case FFI_TYPE_SINT16:
+ *out = *(SINT16 *)in;
+ return 1;
+
+ case FFI_TYPE_UINT16:
+ *out = *(UINT16 *)in;
+ return 1;
+
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_UINT32:
+#ifndef __LP64__
+ case FFI_TYPE_POINTER:
+#endif
+ /* Note that even unsigned 32-bit quantities are sign extended
+ on tilegx when stored in a register. */
+ *out = *(SINT32 *)in;
+ return 1;
+
+ case FFI_TYPE_FLOAT:
+#ifdef __tilegx__
+ if (write_to_reg)
+ {
+ /* Properly sign extend the value. */
+ union { float f; SINT32 s32; } val;
+ val.f = *(float *)in;
+ *out = val.s32;
+ }
+ else
+#endif
+ {
+ *(float *)out = *(float *)in;
+ }
+ return 1;
+
+ case FFI_TYPE_SINT64:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_DOUBLE:
+#ifdef __LP64__
+ case FFI_TYPE_POINTER:
+#endif
+ *(UINT64 *)out = *(UINT64 *)in;
+ return sizeof(UINT64) / FFI_SIZEOF_ARG;
+
+ case FFI_TYPE_STRUCT:
+ memcpy(out, in, type->size);
+ return (type->size + FFI_SIZEOF_ARG - 1) / FFI_SIZEOF_ARG;
+
+ case FFI_TYPE_VOID:
+ /* Must be a return type. Nothing to do. */
+ return 0;
+
+ default:
+ FFI_ASSERT(0);
+ return -1;
+ }
+}
+
+
+void
+ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
+{
+ ffi_sarg * const arg_mem = alloca(cif->bytes);
+ ffi_sarg * const reg_args = arg_mem;
+ ffi_sarg * const stack_args = &reg_args[NUM_ARG_REGS];
+ ffi_sarg *argp = arg_mem;
+ ffi_type ** const arg_types = cif->arg_types;
+ const long num_args = cif->nargs;
+ long i;
+
+ if (cif->flags == FFI_TYPE_STRUCT)
+ {
+ /* Pass a hidden pointer to the return value. We make sure there
+ is scratch space for the callee to store the return value even if
+ our caller doesn't care about it. */
+ *argp++ = (intptr_t)(rvalue ? rvalue : alloca(cif->rtype->size));
+
+ /* No more work needed to return anything. */
+ rvalue = NULL;
+ }
+
+ for (i = 0; i < num_args; i++)
+ {
+ ffi_type *type = arg_types[i];
+ void * const arg_in = avalue[i];
+ ptrdiff_t arg_word = argp - arg_mem;
+
+#ifndef __tilegx__
+ /* Doubleword-aligned values are always in an even-number register
+ pair, or doubleword-aligned stack slot if out of registers. */
+ long align = arg_word & (type->alignment > FFI_SIZEOF_ARG);
+ argp += align;
+ arg_word += align;
+#endif
+
+ if (type->type == FFI_TYPE_STRUCT)
+ {
+ const size_t arg_size_in_words =
+ (type->size + FFI_SIZEOF_ARG - 1) / FFI_SIZEOF_ARG;
+
+ if (arg_word < NUM_ARG_REGS &&
+ arg_word + arg_size_in_words > NUM_ARG_REGS)
+ {
+ /* Args are not allowed to span registers and the stack. */
+ argp = stack_args;
+ }
+
+ memcpy(argp, arg_in, type->size);
+ argp += arg_size_in_words;
+ }
+ else
+ {
+ argp += assign_to_ffi_arg(argp, arg_in, arg_types[i], 1);
+ }
+ }
+
+ /* Actually do the call. */
+ ffi_call_tile(reg_args, stack_args,
+ cif->bytes - (NUM_ARG_REGS * FFI_SIZEOF_ARG), fn);
+
+ if (rvalue != NULL)
+ assign_to_ffi_arg(rvalue, reg_args, cif->rtype, 0);
+}
+
+
+/* Template code for closure. */
+extern const UINT64 ffi_template_tramp_tile[] FFI_HIDDEN;
+
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure *closure,
+ ffi_cif *cif,
+ void (*fun)(ffi_cif*, void*, void**, void*),
+ void *user_data,
+ void *codeloc)
+{
+#ifdef __tilegx__
+ /* TILE-Gx */
+ SINT64 c;
+ SINT64 h;
+ int s;
+ UINT64 *out;
+
+ if (cif->abi != FFI_UNIX)
+ return FFI_BAD_ABI;
+
+ out = (UINT64 *)closure->tramp;
+
+ c = (intptr_t)closure;
+ h = (intptr_t)ffi_closure_tile;
+ s = 0;
+
+ /* Find the smallest shift count that doesn't lose information
+ (i.e. no need to explicitly insert high bits of the address that
+ are just the sign extension of the low bits). */
+ while ((c >> s) != (SINT16)(c >> s) || (h >> s) != (SINT16)(h >> s))
+ s += 16;
+
+#define OPS(a, b, shift) \
+ (create_Imm16_X0((a) >> (shift)) | create_Imm16_X1((b) >> (shift)))
+
+ /* Emit the moveli. */
+ *out++ = ffi_template_tramp_tile[0] | OPS(c, h, s);
+ for (s -= 16; s >= 0; s -= 16)
+ *out++ = ffi_template_tramp_tile[1] | OPS(c, h, s);
+
+#undef OPS
+
+ *out++ = ffi_template_tramp_tile[2];
+
+#else
+ /* TILEPro */
+ UINT64 *out;
+ intptr_t delta;
+
+ if (cif->abi != FFI_UNIX)
+ return FFI_BAD_ABI;
+
+ out = (UINT64 *)closure->tramp;
+ delta = (intptr_t)ffi_closure_tile - (intptr_t)codeloc;
+
+ *out++ = ffi_template_tramp_tile[0] | create_JOffLong_X1(delta >> 3);
+#endif
+
+ closure->cif = cif;
+ closure->fun = fun;
+ closure->user_data = user_data;
+
+ invalidate_icache(closure->tramp, (char *)out - closure->tramp,
+ getpagesize());
+
+ return FFI_OK;
+}
+
+
+/* This is called by the assembly wrapper for closures. This does
+ all of the work. On entry reg_args[0] holds the values the registers
+ had when the closure was invoked. On return reg_args[1] holds the register
+ values to be returned to the caller (many of which may be garbage). */
+void FFI_HIDDEN
+ffi_closure_tile_inner(ffi_closure *closure,
+ ffi_sarg reg_args[2][NUM_ARG_REGS],
+ ffi_sarg *stack_args)
+{
+ ffi_cif * const cif = closure->cif;
+ void ** const avalue = alloca(cif->nargs * sizeof(void *));
+ void *rvalue;
+ ffi_type ** const arg_types = cif->arg_types;
+ ffi_sarg * const reg_args_in = reg_args[0];
+ ffi_sarg * const reg_args_out = reg_args[1];
+ ffi_sarg * argp;
+ long i, arg_word, nargs = cif->nargs;
+ /* Use a union to guarantee proper alignment for double. */
+ union { ffi_sarg arg[NUM_ARG_REGS]; double d; UINT64 u64; } closure_ret;
+
+ /* Start out reading register arguments. */
+ argp = reg_args_in;
+
+  /* Copy the caller's structure return address so that the closure
+     returns the data directly to the caller. */
+ if (cif->flags == FFI_TYPE_STRUCT)
+ {
+ /* Return by reference via hidden pointer. */
+ rvalue = (void *)(intptr_t)*argp++;
+ arg_word = 1;
+ }
+ else
+ {
+ /* Return the value in registers. */
+ rvalue = &closure_ret;
+ arg_word = 0;
+ }
+
+ /* Grab the addresses of the arguments. */
+ for (i = 0; i < nargs; i++)
+ {
+ ffi_type * const type = arg_types[i];
+ const size_t arg_size_in_words =
+ (type->size + FFI_SIZEOF_ARG - 1) / FFI_SIZEOF_ARG;
+
+#ifndef __tilegx__
+ /* Doubleword-aligned values are always in an even-number register
+ pair, or doubleword-aligned stack slot if out of registers. */
+ long align = arg_word & (type->alignment > FFI_SIZEOF_ARG);
+ argp += align;
+ arg_word += align;
+#endif
+
+ if (arg_word == NUM_ARG_REGS ||
+ (arg_word < NUM_ARG_REGS &&
+ arg_word + arg_size_in_words > NUM_ARG_REGS))
+ {
+ /* Switch to reading arguments from the stack. */
+ argp = stack_args;
+ arg_word = NUM_ARG_REGS;
+ }
+
+ avalue[i] = argp;
+ argp += arg_size_in_words;
+ arg_word += arg_size_in_words;
+ }
+
+ /* Invoke the closure. */
+ closure->fun(cif, rvalue, avalue, closure->user_data);
+
+ if (cif->flags != FFI_TYPE_STRUCT)
+ {
+ /* Canonicalize for register representation. */
+ assign_to_ffi_arg(reg_args_out, &closure_ret, cif->rtype, 1);
+ }
+}
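The closure half of this new port (ffi_prep_closure_loc, ffi_closure_tile_inner and the trampoline template they reference) is driven through the generic libffi closure API. A minimal, port-independent sketch of that flow, given for orientation only and assuming a hosted build with closure support enabled:

    #include <ffi.h>
    #include <stdio.h>

    /* Invoked (via closure->fun) by the port's inner closure routine with
       the already decoded argument pointers. */
    static void doubler(ffi_cif *cif, void *ret, void **args, void *userdata)
    {
      (void) cif; (void) userdata;
      *(ffi_arg *) ret = *(int *) args[0] * 2;
    }

    int main(void)
    {
      ffi_cif cif;
      ffi_type *argt[1] = { &ffi_type_sint };
      void *code;
      ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);
      int (*fn)(int);

      if (closure == NULL
          || ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1, &ffi_type_sint, argt) != FFI_OK
          || ffi_prep_closure_loc(closure, &cif, doubler, NULL, code) != FFI_OK)
        return 1;

      fn = (int (*)(int)) code;   /* jumps through the generated trampoline */
      printf("%d\n", fn(21));     /* 42 */
      ffi_closure_free(closure);
      return 0;
    }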
diff --git a/Modules/_ctypes/libffi/src/tile/ffitarget.h b/Modules/_ctypes/libffi/src/tile/ffitarget.h
new file mode 100644
index 0000000..679fb5d
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/tile/ffitarget.h
@@ -0,0 +1,65 @@
+/* -----------------------------------------------------------------*-C-*-
+ ffitarget.h - Copyright (c) 2012 Tilera Corp.
+ Target configuration macros for TILE.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+#ifndef LIBFFI_ASM
+
+#include <arch/abi.h>
+
+typedef uint_reg_t ffi_arg;
+typedef int_reg_t ffi_sarg;
+
+typedef enum ffi_abi {
+ FFI_FIRST_ABI = 0,
+ FFI_UNIX,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_UNIX
+} ffi_abi;
+#endif
+
+/* ---- Definitions for closures ----------------------------------------- */
+#define FFI_CLOSURES 1
+
+#ifdef __tilegx__
+/* We always pass 8-byte values, even in -m32 mode. */
+# define FFI_SIZEOF_ARG 8
+# ifdef __LP64__
+# define FFI_TRAMPOLINE_SIZE (8 * 5) /* 5 bundles */
+# else
+# define FFI_TRAMPOLINE_SIZE (8 * 3) /* 3 bundles */
+# endif
+#else
+# define FFI_SIZEOF_ARG 4
+# define FFI_TRAMPOLINE_SIZE 8 /* 1 bundle */
+#endif
+#define FFI_NATIVE_RAW_API 0
+
+#endif
diff --git a/Modules/_ctypes/libffi/src/tile/tile.S b/Modules/_ctypes/libffi/src/tile/tile.S
new file mode 100644
index 0000000..a186e1f
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/tile/tile.S
@@ -0,0 +1,360 @@
+/* -----------------------------------------------------------------------
+ tile.S - Copyright (c) 2011 Tilera Corp.
+
+ Tilera TILEPro and TILE-Gx Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+
+/* Number of bytes in a register. */
+#define REG_SIZE FFI_SIZEOF_ARG
+
+/* Number of bytes in stack linkage area for backtracing.
+
+ A note about the ABI: on entry to a procedure, sp points to a stack
+ slot where it must spill the return address if it's not a leaf.
+ REG_SIZE bytes beyond that is a slot owned by the caller which
+ contains the sp value that the caller had when it was originally
+ entered (i.e. the caller's frame pointer). */
+#define LINKAGE_SIZE (2 * REG_SIZE)
+
+/* The first 10 registers are used to pass arguments and return values. */
+#define NUM_ARG_REGS 10
+
+#ifdef __tilegx__
+#define SW st
+#define LW ld
+#define BGZT bgtzt
+#else
+#define SW sw
+#define LW lw
+#define BGZT bgzt
+#endif
+
+
+/* void ffi_call_tile (int_reg_t reg_args[NUM_ARG_REGS],
+ const int_reg_t *stack_args,
+ unsigned long stack_args_bytes,
+ void (*fnaddr)(void));
+
+   On entry, REG_ARGS contains the outgoing register values,
+   and STACK_ARGS contains STACK_ARG_BYTES of additional values
+ to be passed on the stack. If STACK_ARG_BYTES is zero, then
+ STACK_ARGS is ignored.
+
+ When the invoked function returns, the values of r0-r9 are
+ blindly stored back into REG_ARGS for the caller to examine. */
+
+ .section .text.ffi_call_tile, "ax", @progbits
+ .align 8
+ .globl ffi_call_tile
+ FFI_HIDDEN(ffi_call_tile)
+ffi_call_tile:
+
+/* Incoming arguments. */
+#define REG_ARGS r0
+#define INCOMING_STACK_ARGS r1
+#define STACK_ARG_BYTES r2
+#define ORIG_FNADDR r3
+
+/* Temporary values. */
+#define FRAME_SIZE r10
+#define TMP r11
+#define TMP2 r12
+#define OUTGOING_STACK_ARGS r13
+#define REG_ADDR_PTR r14
+#define RETURN_REG_ADDR r15
+#define FNADDR r16
+
+ .cfi_startproc
+ {
+ /* Save return address. */
+ SW sp, lr
+ .cfi_offset lr, 0
+ /* Prepare to spill incoming r52. */
+ addi TMP, sp, -REG_SIZE
+ /* Increase frame size to have room to spill r52 and REG_ARGS.
+ The +7 is to round up mod 8. */
+ addi FRAME_SIZE, STACK_ARG_BYTES, \
+ REG_SIZE + REG_SIZE + LINKAGE_SIZE + 7
+ }
+ {
+ /* Round stack frame size to a multiple of 8 to satisfy ABI. */
+ andi FRAME_SIZE, FRAME_SIZE, -8
+ /* Compute where to spill REG_ARGS value. */
+ addi TMP2, sp, -(REG_SIZE * 2)
+ }
+ {
+ /* Spill incoming r52. */
+ SW TMP, r52
+ .cfi_offset r52, -REG_SIZE
+ /* Set up our frame pointer. */
+ move r52, sp
+ .cfi_def_cfa_register r52
+ /* Push stack frame. */
+ sub sp, sp, FRAME_SIZE
+ }
+ {
+ /* Prepare to set up stack linkage. */
+ addi TMP, sp, REG_SIZE
+ /* Prepare to memcpy stack args. */
+ addi OUTGOING_STACK_ARGS, sp, LINKAGE_SIZE
+ /* Save REG_ARGS which we will need after we call the subroutine. */
+ SW TMP2, REG_ARGS
+ }
+ {
+ /* Set up linkage info to hold incoming stack pointer. */
+ SW TMP, r52
+ }
+ {
+ /* Skip stack args memcpy if we don't have any stack args (common). */
+ blezt STACK_ARG_BYTES, .Ldone_stack_args_memcpy
+ }
+
+.Lmemcpy_stack_args:
+ {
+ /* Load incoming argument from stack_args. */
+ LW TMP, INCOMING_STACK_ARGS
+ addi INCOMING_STACK_ARGS, INCOMING_STACK_ARGS, REG_SIZE
+ }
+ {
+ /* Store stack argument into outgoing stack argument area. */
+ SW OUTGOING_STACK_ARGS, TMP
+ addi OUTGOING_STACK_ARGS, OUTGOING_STACK_ARGS, REG_SIZE
+ addi STACK_ARG_BYTES, STACK_ARG_BYTES, -REG_SIZE
+ }
+ {
+ BGZT STACK_ARG_BYTES, .Lmemcpy_stack_args
+ }
+.Ldone_stack_args_memcpy:
+
+ {
+ /* Copy aside ORIG_FNADDR so we can overwrite its register. */
+ move FNADDR, ORIG_FNADDR
+ /* Prepare to load argument registers. */
+ addi REG_ADDR_PTR, r0, REG_SIZE
+ /* Load outgoing r0. */
+ LW r0, r0
+ }
+
+ /* Load up argument registers from the REG_ARGS array. */
+#define LOAD_REG(REG, PTR) \
+ { \
+ LW REG, PTR ; \
+ addi PTR, PTR, REG_SIZE \
+ }
+
+ LOAD_REG(r1, REG_ADDR_PTR)
+ LOAD_REG(r2, REG_ADDR_PTR)
+ LOAD_REG(r3, REG_ADDR_PTR)
+ LOAD_REG(r4, REG_ADDR_PTR)
+ LOAD_REG(r5, REG_ADDR_PTR)
+ LOAD_REG(r6, REG_ADDR_PTR)
+ LOAD_REG(r7, REG_ADDR_PTR)
+ LOAD_REG(r8, REG_ADDR_PTR)
+ LOAD_REG(r9, REG_ADDR_PTR)
+
+ {
+ /* Call the subroutine. */
+ jalr FNADDR
+ }
+
+ {
+ /* Restore original lr. */
+ LW lr, r52
+ /* Prepare to recover ARGS, which we spilled earlier. */
+ addi TMP, r52, -(2 * REG_SIZE)
+ }
+ {
+ /* Restore ARGS, so we can fill it in with the return regs r0-r9. */
+ LW RETURN_REG_ADDR, TMP
+ /* Prepare to restore original r52. */
+ addi TMP, r52, -REG_SIZE
+ }
+
+ {
+ /* Pop stack frame. */
+ move sp, r52
+ /* Restore original r52. */
+ LW r52, TMP
+ }
+
+#define STORE_REG(REG, PTR) \
+ { \
+ SW PTR, REG ; \
+ addi PTR, PTR, REG_SIZE \
+ }
+
+ /* Return all register values by reference. */
+ STORE_REG(r0, RETURN_REG_ADDR)
+ STORE_REG(r1, RETURN_REG_ADDR)
+ STORE_REG(r2, RETURN_REG_ADDR)
+ STORE_REG(r3, RETURN_REG_ADDR)
+ STORE_REG(r4, RETURN_REG_ADDR)
+ STORE_REG(r5, RETURN_REG_ADDR)
+ STORE_REG(r6, RETURN_REG_ADDR)
+ STORE_REG(r7, RETURN_REG_ADDR)
+ STORE_REG(r8, RETURN_REG_ADDR)
+ STORE_REG(r9, RETURN_REG_ADDR)
+
+ {
+ jrp lr
+ }
+
+ .cfi_endproc
+ .size ffi_call_tile, .-ffi_call_tile
+
+/* ffi_closure_tile(...)
+
+ On entry, lr points to the closure plus 8 bytes, and r10
+ contains the actual return address.
+
+ This function simply dumps all register parameters into a stack array
+ and passes the closure, the registers array, and the stack arguments
+ to C code that does all of the actual closure processing. */
+
+ .section .text.ffi_closure_tile, "ax", @progbits
+ .align 8
+ .globl ffi_closure_tile
+ FFI_HIDDEN(ffi_closure_tile)
+
+ .cfi_startproc
+/* Room to spill all NUM_ARG_REGS incoming registers, plus frame linkage. */
+#define CLOSURE_FRAME_SIZE (((NUM_ARG_REGS * REG_SIZE * 2 + LINKAGE_SIZE) + 7) & -8)
+ffi_closure_tile:
+ {
+#ifdef __tilegx__
+ st sp, lr
+ .cfi_offset lr, 0
+#else
+ /* Save return address (in r10 due to closure stub wrapper). */
+ SW sp, r10
+ .cfi_return_column r10
+ .cfi_offset r10, 0
+#endif
+ /* Compute address for stack frame linkage. */
+ addli r10, sp, -(CLOSURE_FRAME_SIZE - REG_SIZE)
+ }
+ {
+ /* Save incoming stack pointer in linkage area. */
+ SW r10, sp
+ .cfi_offset sp, -(CLOSURE_FRAME_SIZE - REG_SIZE)
+ /* Push a new stack frame. */
+ addli sp, sp, -CLOSURE_FRAME_SIZE
+ .cfi_adjust_cfa_offset CLOSURE_FRAME_SIZE
+ }
+
+ {
+ /* Create pointer to where to start spilling registers. */
+ addi r10, sp, LINKAGE_SIZE
+ }
+
+ /* Spill all the incoming registers. */
+ STORE_REG(r0, r10)
+ STORE_REG(r1, r10)
+ STORE_REG(r2, r10)
+ STORE_REG(r3, r10)
+ STORE_REG(r4, r10)
+ STORE_REG(r5, r10)
+ STORE_REG(r6, r10)
+ STORE_REG(r7, r10)
+ STORE_REG(r8, r10)
+ {
+ /* Save r9. */
+ SW r10, r9
+#ifdef __tilegx__
+ /* Pointer to closure is passed in r11. */
+ move r0, r11
+#else
+ /* Compute pointer to the closure object. Because the closure
+ starts with a "jal ffi_closure_tile", we can just take the
+ value of lr (a phony return address pointing into the closure)
+ and subtract 8. */
+ addi r0, lr, -8
+#endif
+ /* Compute a pointer to the register arguments we just spilled. */
+ addi r1, sp, LINKAGE_SIZE
+ }
+ {
+ /* Compute a pointer to the extra stack arguments (if any). */
+ addli r2, sp, CLOSURE_FRAME_SIZE + LINKAGE_SIZE
+ /* Call C code to deal with all of the grotty details. */
+ jal ffi_closure_tile_inner
+ }
+ {
+ addli r10, sp, CLOSURE_FRAME_SIZE
+ }
+ {
+ /* Restore the return address. */
+ LW lr, r10
+ /* Compute pointer to registers array. */
+ addli r10, sp, LINKAGE_SIZE + (NUM_ARG_REGS * REG_SIZE)
+ }
+ /* Return all the register values, which C code may have set. */
+ LOAD_REG(r0, r10)
+ LOAD_REG(r1, r10)
+ LOAD_REG(r2, r10)
+ LOAD_REG(r3, r10)
+ LOAD_REG(r4, r10)
+ LOAD_REG(r5, r10)
+ LOAD_REG(r6, r10)
+ LOAD_REG(r7, r10)
+ LOAD_REG(r8, r10)
+ LOAD_REG(r9, r10)
+ {
+ /* Pop the frame. */
+ addli sp, sp, CLOSURE_FRAME_SIZE
+ jrp lr
+ }
+
+ .cfi_endproc
+ .size ffi_closure_tile, . - ffi_closure_tile
+
+
+/* What follows are code template instructions that get copied to the
+ closure trampoline by ffi_prep_closure_loc. The zeroed operands
+ get replaced by their proper values at runtime. */
+
+ .section .text.ffi_template_tramp_tile, "ax", @progbits
+ .align 8
+ .globl ffi_template_tramp_tile
+ FFI_HIDDEN(ffi_template_tramp_tile)
+ffi_template_tramp_tile:
+#ifdef __tilegx__
+ {
+ moveli r11, 0 /* backpatched to address of containing closure. */
+ moveli r10, 0 /* backpatched to ffi_closure_tile. */
+ }
+ /* Note: the following bundle gets generated multiple times
+ depending on the pointer value (esp. useful for -m32 mode). */
+ { shl16insli r11, r11, 0 ; shl16insli r10, r10, 0 }
+ { info 2+8 /* for backtracer: -> pc in lr, frame size 0 */ ; jr r10 }
+#else
+ /* 'jal .' yields a PC-relative offset of zero so we can OR in the
+ right offset at runtime. */
+ { move r10, lr ; jal . /* ffi_closure_tile */ }
+#endif
+
+ .size ffi_template_tramp_tile, . - ffi_template_tramp_tile
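The "generated multiple times" note above ties back to the shift-count loop in ffi_prep_closure_loc for TILE-Gx: the trampoline gets one moveli bundle plus one shl16insli bundle per additional 16-bit chunk needed to rebuild the closure and handler addresses (whichever of the two needs more chunks). A small standalone sketch of that arithmetic; imm16_count is a made-up helper for illustration, not a libffi function:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the loop in ffi_prep_closure_loc: find the smallest shift s
       (multiple of 16) such that every bit of v above s is just sign
       extension, i.e. v can be rebuilt from (s/16 + 1) 16-bit immediates. */
    static int imm16_count(int64_t v)
    {
      int s = 0;
      while ((v >> s) != (int16_t) (v >> s))
        s += 16;
      return s / 16 + 1;
    }

    int main(void)
    {
      printf("%d\n", imm16_count(0x1234));           /* 1: moveli alone suffices */
      printf("%d\n", imm16_count(0x12345678));       /* 2: moveli + one shl16insli */
      printf("%d\n", imm16_count(0x123456789abcLL)); /* 3 */
      return 0;
    }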
diff --git a/Modules/_ctypes/libffi/src/x86/ffi.c b/Modules/_ctypes/libffi/src/x86/ffi.c
index 8049653..0600414 100644
--- a/Modules/_ctypes/libffi/src/x86/ffi.c
+++ b/Modules/_ctypes/libffi/src/x86/ffi.c
@@ -3,7 +3,7 @@
Copyright (c) 2002 Ranjit Mathew
Copyright (c) 2002 Bo Thorsen
Copyright (c) 2002 Roger Sayle
- Copyright (C) 2008 Free Software Foundation, Inc.
+ Copyright (C) 2008, 2010 Free Software Foundation, Inc.
x86 Foreign Function Interface
@@ -48,10 +48,18 @@ void ffi_prep_args(char *stack, extended_cif *ecif)
register void **p_argv;
register char *argp;
register ffi_type **p_arg;
+#ifdef X86_WIN32
+ size_t p_stack_args[2];
+ void *p_stack_data[2];
+ char *argp2 = stack;
+ int stack_args_count = 0;
+ int cabi = ecif->cif->abi;
+#endif
argp = stack;
- if (ecif->cif->flags == FFI_TYPE_STRUCT
+ if ((ecif->cif->flags == FFI_TYPE_STRUCT
+ || ecif->cif->flags == FFI_TYPE_MS_STRUCT)
#ifdef X86_WIN64
&& (ecif->cif->rtype->size != 1 && ecif->cif->rtype->size != 2
&& ecif->cif->rtype->size != 4 && ecif->cif->rtype->size != 8)
@@ -59,6 +67,16 @@ void ffi_prep_args(char *stack, extended_cif *ecif)
)
{
*(void **) argp = ecif->rvalue;
+#ifdef X86_WIN32
+ /* For fastcall/thiscall this is first register-passed
+ argument. */
+ if (cabi == FFI_THISCALL || cabi == FFI_FASTCALL)
+ {
+ p_stack_args[stack_args_count] = sizeof (void*);
+ p_stack_data[stack_args_count] = argp;
+ ++stack_args_count;
+ }
+#endif
argp += sizeof(void*);
}
@@ -134,6 +152,24 @@ void ffi_prep_args(char *stack, extended_cif *ecif)
{
memcpy(argp, *p_argv, z);
}
+
+#ifdef X86_WIN32
+      /* For the thiscall/fastcall conventions, register-passed arguments
+         are the first two non-floating-point arguments with a size
+         smaller than or equal to sizeof (void*). */
+ if ((cabi == FFI_THISCALL && stack_args_count < 1)
+ || (cabi == FFI_FASTCALL && stack_args_count < 2))
+ {
+ if (z <= 4
+ && ((*p_arg)->type != FFI_TYPE_FLOAT
+ && (*p_arg)->type != FFI_TYPE_STRUCT))
+ {
+ p_stack_args[stack_args_count] = z;
+ p_stack_data[stack_args_count] = argp;
+ ++stack_args_count;
+ }
+ }
+#endif
p_argv++;
#ifdef X86_WIN64
argp += (z + sizeof(void*) - 1) & ~(sizeof(void*) - 1);
@@ -141,7 +177,45 @@ void ffi_prep_args(char *stack, extended_cif *ecif)
argp += z;
#endif
}
-
+
+#ifdef X86_WIN32
+ /* We need to move the register-passed arguments for thiscall/fastcall
+ on top of stack, so that those can be moved to registers ecx/edx by
+ call-handler. */
+ if (stack_args_count > 0)
+ {
+ size_t zz = (p_stack_args[0] + 3) & ~3;
+ char *h;
+
+ /* Move first argument to top-stack position. */
+ if (p_stack_data[0] != argp2)
+ {
+ h = alloca (zz + 1);
+ memcpy (h, p_stack_data[0], zz);
+ memmove (argp2 + zz, argp2,
+ (size_t) ((char *) p_stack_data[0] - (char*)argp2));
+ memcpy (argp2, h, zz);
+ }
+
+ argp2 += zz;
+ --stack_args_count;
+ if (zz > 4)
+ stack_args_count = 0;
+
+ /* If we have a second argument, then move it on top
+ after the first one. */
+ if (stack_args_count > 0 && p_stack_data[1] != argp2)
+ {
+ zz = p_stack_args[1];
+ zz = (zz + 3) & ~3;
+ h = alloca (zz + 1);
+ memcpy (h, p_stack_data[1], zz);
+ memmove (argp2 + zz, argp2, (size_t) ((char*) p_stack_data[1] - (char*)argp2));
+ memcpy (argp2, h, zz);
+ }
+ }
+#endif
return;
}
@@ -155,12 +229,10 @@ ffi_status ffi_prep_cif_machdep(ffi_cif *cif)
switch (cif->rtype->type)
{
case FFI_TYPE_VOID:
-#if defined(X86) || defined (X86_WIN32) || defined(X86_FREEBSD) || defined(X86_DARWIN) || defined(X86_WIN64)
case FFI_TYPE_UINT8:
case FFI_TYPE_UINT16:
case FFI_TYPE_SINT8:
case FFI_TYPE_SINT16:
-#endif
#ifdef X86_WIN64
case FFI_TYPE_UINT32:
case FFI_TYPE_SINT32:
@@ -208,8 +280,13 @@ ffi_status ffi_prep_cif_machdep(ffi_cif *cif)
else
#endif
{
- cif->flags = FFI_TYPE_STRUCT;
- // allocate space for return value pointer
+#ifdef X86_WIN32
+ if (cif->abi == FFI_MS_CDECL)
+ cif->flags = FFI_TYPE_MS_STRUCT;
+ else
+#endif
+ cif->flags = FFI_TYPE_STRUCT;
+ /* allocate space for return value pointer */
cif->bytes += ALIGN(sizeof(void*), FFI_SIZEOF_ARG);
}
break;
@@ -234,13 +311,11 @@ ffi_status ffi_prep_cif_machdep(ffi_cif *cif)
}
#ifdef X86_WIN64
- // ensure space for storing four registers
+ /* ensure space for storing four registers */
cif->bytes += 4 * sizeof(ffi_arg);
#endif
-#ifdef X86_DARWIN
cif->bytes = (cif->bytes + 15) & ~0xF;
-#endif
return FFI_OK;
}
@@ -252,7 +327,7 @@ ffi_call_win64(void (*)(char *, extended_cif *), extended_cif *,
#elif defined(X86_WIN32)
extern void
ffi_call_win32(void (*)(char *, extended_cif *), extended_cif *,
- unsigned, unsigned, unsigned *, void (*fn)(void));
+ unsigned, unsigned, unsigned, unsigned *, void (*fn)(void));
#else
extern void ffi_call_SYSV(void (*)(char *, extended_cif *), extended_cif *,
unsigned, unsigned, unsigned *, void (*fn)(void));
@@ -278,7 +353,8 @@ void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
}
#else
if (rvalue == NULL
- && cif->flags == FFI_TYPE_STRUCT)
+ && (cif->flags == FFI_TYPE_STRUCT
+ || cif->flags == FFI_TYPE_MS_STRUCT))
{
ecif.rvalue = alloca(cif->rtype->size);
}
@@ -291,33 +367,44 @@ void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
{
#ifdef X86_WIN64
case FFI_WIN64:
- {
- // Make copies of all struct arguments
- // NOTE: not sure if responsibility should be here or in caller
- unsigned int i;
- for (i=0; i < cif->nargs;i++) {
- size_t size = cif->arg_types[i]->size;
- if ((cif->arg_types[i]->type == FFI_TYPE_STRUCT
- && (size != 1 && size != 2 && size != 4 && size != 8))
-#if FFI_TYPE_LONGDOUBLE != FFI_TYPE_DOUBLE
- || cif->arg_types[i]->type == FFI_TYPE_LONGDOUBLE
-#endif
- )
- {
- void *local = alloca(size);
- memcpy(local, avalue[i], size);
- avalue[i] = local;
- }
- }
- ffi_call_win64(ffi_prep_args, &ecif, cif->bytes,
- cif->flags, ecif.rvalue, fn);
- }
+ ffi_call_win64(ffi_prep_args, &ecif, cif->bytes,
+ cif->flags, ecif.rvalue, fn);
break;
#elif defined(X86_WIN32)
case FFI_SYSV:
case FFI_STDCALL:
- ffi_call_win32(ffi_prep_args, &ecif, cif->bytes, cif->flags,
- ecif.rvalue, fn);
+ case FFI_MS_CDECL:
+ ffi_call_win32(ffi_prep_args, &ecif, cif->abi, cif->bytes, cif->flags,
+ ecif.rvalue, fn);
+ break;
+ case FFI_THISCALL:
+ case FFI_FASTCALL:
+ {
+ unsigned int abi = cif->abi;
+ unsigned int i, passed_regs = 0;
+
+ if (cif->flags == FFI_TYPE_STRUCT)
+ ++passed_regs;
+
+ for (i=0; i < cif->nargs && passed_regs < 2;i++)
+ {
+ size_t sz;
+
+ if (cif->arg_types[i]->type == FFI_TYPE_FLOAT
+ || cif->arg_types[i]->type == FFI_TYPE_STRUCT)
+ continue;
+ sz = (cif->arg_types[i]->size + 3) & ~3;
+ if (sz == 0 || sz > 4)
+ continue;
+ ++passed_regs;
+ }
+ if (passed_regs < 2 && abi == FFI_FASTCALL)
+ abi = FFI_THISCALL;
+ if (passed_regs < 1 && abi == FFI_THISCALL)
+ abi = FFI_STDCALL;
+ ffi_call_win32(ffi_prep_args, &ecif, abi, cif->bytes, cif->flags,
+ ecif.rvalue, fn);
+ }
break;
#else
case FFI_SYSV:
@@ -335,7 +422,7 @@ void ffi_call(ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
/** private members **/
/* The following __attribute__((regparm(1))) decorations will have no effect
- on MSVC - standard cdecl convention applies. */
+ on MSVC or SUNPRO_C -- standard conventions apply. */
static void ffi_prep_incoming_args_SYSV (char *stack, void **ret,
void** args, ffi_cif* cif);
void FFI_HIDDEN ffi_closure_SYSV (ffi_closure *)
@@ -345,8 +432,12 @@ unsigned int FFI_HIDDEN ffi_closure_SYSV_inner (ffi_closure *, void **, void *)
void FFI_HIDDEN ffi_closure_raw_SYSV (ffi_raw_closure *)
__attribute__ ((regparm(1)));
#ifdef X86_WIN32
+void FFI_HIDDEN ffi_closure_raw_THISCALL (ffi_raw_closure *)
+ __attribute__ ((regparm(1)));
void FFI_HIDDEN ffi_closure_STDCALL (ffi_closure *)
__attribute__ ((regparm(1)));
+void FFI_HIDDEN ffi_closure_THISCALL (ffi_closure *)
+ __attribute__ ((regparm(1)));
#endif
#ifdef X86_WIN64
void FFI_HIDDEN ffi_closure_win64 (ffi_closure *);
@@ -428,7 +519,8 @@ ffi_prep_incoming_args_SYSV(char *stack, void **rvalue, void **avalue,
argp += sizeof(void *);
}
#else
- if ( cif->flags == FFI_TYPE_STRUCT ) {
+ if ( cif->flags == FFI_TYPE_STRUCT
+ || cif->flags == FFI_TYPE_MS_STRUCT ) {
*rvalue = *(void **) argp;
argp += sizeof(void *);
}
@@ -506,6 +598,33 @@ ffi_prep_incoming_args_SYSV(char *stack, void **rvalue, void **avalue,
*(unsigned int*) &__tramp[6] = __dis; /* jmp __fun */ \
}
+#define FFI_INIT_TRAMPOLINE_THISCALL(TRAMP,FUN,CTX,SIZE) \
+{ unsigned char *__tramp = (unsigned char*)(TRAMP); \
+ unsigned int __fun = (unsigned int)(FUN); \
+ unsigned int __ctx = (unsigned int)(CTX); \
+ unsigned int __dis = __fun - (__ctx + 49); \
+ unsigned short __size = (unsigned short)(SIZE); \
+ *(unsigned int *) &__tramp[0] = 0x8324048b; /* mov (%esp), %eax */ \
+ *(unsigned int *) &__tramp[4] = 0x4c890cec; /* sub $12, %esp */ \
+ *(unsigned int *) &__tramp[8] = 0x04890424; /* mov %ecx, 4(%esp) */ \
+ *(unsigned char*) &__tramp[12] = 0x24; /* mov %eax, (%esp) */ \
+ *(unsigned char*) &__tramp[13] = 0xb8; \
+ *(unsigned int *) &__tramp[14] = __size; /* mov __size, %eax */ \
+ *(unsigned int *) &__tramp[18] = 0x08244c8d; /* lea 8(%esp), %ecx */ \
+ *(unsigned int *) &__tramp[22] = 0x4802e8c1; /* shr $2, %eax ; dec %eax */ \
+ *(unsigned short*) &__tramp[26] = 0x0b74; /* jz 1f */ \
+ *(unsigned int *) &__tramp[28] = 0x8908518b; /* 2b: mov 8(%ecx), %edx */ \
+ *(unsigned int *) &__tramp[32] = 0x04c18311; /* mov %edx, (%ecx) ; add $4, %ecx */ \
+ *(unsigned char*) &__tramp[36] = 0x48; /* dec %eax */ \
+ *(unsigned short*) &__tramp[37] = 0xf575; /* jnz 2b ; 1f: */ \
+ *(unsigned char*) &__tramp[39] = 0xb8; \
+ *(unsigned int*) &__tramp[40] = __ctx; /* movl __ctx, %eax */ \
+ *(unsigned char *) &__tramp[44] = 0xe8; \
+ *(unsigned int*) &__tramp[45] = __dis; /* call __fun */ \
+ *(unsigned char*) &__tramp[49] = 0xc2; /* ret */ \
+ *(unsigned short*) &__tramp[50] = (__size + 8); /* ret (__size + 8) */ \
+ }
+
#define FFI_INIT_TRAMPOLINE_STDCALL(TRAMP,FUN,CTX,SIZE) \
{ unsigned char *__tramp = (unsigned char*)(TRAMP); \
unsigned int __fun = (unsigned int)(FUN); \
@@ -548,12 +667,25 @@ ffi_prep_closure_loc (ffi_closure* closure,
(void*)codeloc);
}
#ifdef X86_WIN32
+ else if (cif->abi == FFI_THISCALL)
+ {
+ FFI_INIT_TRAMPOLINE_THISCALL (&closure->tramp[0],
+ &ffi_closure_THISCALL,
+ (void*)codeloc,
+ cif->bytes);
+ }
else if (cif->abi == FFI_STDCALL)
{
FFI_INIT_TRAMPOLINE_STDCALL (&closure->tramp[0],
&ffi_closure_STDCALL,
(void*)codeloc, cif->bytes);
}
+ else if (cif->abi == FFI_MS_CDECL)
+ {
+ FFI_INIT_TRAMPOLINE (&closure->tramp[0],
+ &ffi_closure_SYSV,
+ (void*)codeloc);
+ }
#endif /* X86_WIN32 */
#endif /* !X86_WIN64 */
else
@@ -582,6 +714,9 @@ ffi_prep_raw_closure_loc (ffi_raw_closure* closure,
int i;
if (cif->abi != FFI_SYSV) {
+#ifdef X86_WIN32
+ if (cif->abi != FFI_THISCALL)
+#endif
return FFI_BAD_ABI;
}
@@ -596,10 +731,20 @@ ffi_prep_raw_closure_loc (ffi_raw_closure* closure,
FFI_ASSERT (cif->arg_types[i]->type != FFI_TYPE_LONGDOUBLE);
}
-
+#ifdef X86_WIN32
+ if (cif->abi == FFI_SYSV)
+ {
+#endif
FFI_INIT_TRAMPOLINE (&closure->tramp[0], &ffi_closure_raw_SYSV,
codeloc);
-
+#ifdef X86_WIN32
+ }
+ else if (cif->abi == FFI_THISCALL)
+ {
+ FFI_INIT_TRAMPOLINE_THISCALL (&closure->tramp[0], &ffi_closure_raw_THISCALL,
+ codeloc, cif->bytes);
+ }
+#endif
closure->cif = cif;
closure->user_data = user_data;
closure->fun = fun;
@@ -630,8 +775,9 @@ ffi_raw_call(ffi_cif *cif, void (*fn)(void), void *rvalue, ffi_raw *fake_avalue)
/* If the return value is a struct and we don't have a return */
/* value address then we need to make one */
- if ((rvalue == NULL) &&
- (cif->rtype->type == FFI_TYPE_STRUCT))
+ if (rvalue == NULL
+ && (cif->flags == FFI_TYPE_STRUCT
+ || cif->flags == FFI_TYPE_MS_STRUCT))
{
ecif.rvalue = alloca(cif->rtype->size);
}
@@ -644,8 +790,38 @@ ffi_raw_call(ffi_cif *cif, void (*fn)(void), void *rvalue, ffi_raw *fake_avalue)
#ifdef X86_WIN32
case FFI_SYSV:
case FFI_STDCALL:
- ffi_call_win32(ffi_prep_args_raw, &ecif, cif->bytes, cif->flags,
- ecif.rvalue, fn);
+ case FFI_MS_CDECL:
+ ffi_call_win32(ffi_prep_args_raw, &ecif, cif->abi, cif->bytes, cif->flags,
+ ecif.rvalue, fn);
+ break;
+ case FFI_THISCALL:
+ case FFI_FASTCALL:
+ {
+ unsigned int abi = cif->abi;
+ unsigned int i, passed_regs = 0;
+
+ if (cif->flags == FFI_TYPE_STRUCT)
+ ++passed_regs;
+
+ for (i=0; i < cif->nargs && passed_regs < 2;i++)
+ {
+ size_t sz;
+
+ if (cif->arg_types[i]->type == FFI_TYPE_FLOAT
+ || cif->arg_types[i]->type == FFI_TYPE_STRUCT)
+ continue;
+ sz = (cif->arg_types[i]->size + 3) & ~3;
+ if (sz == 0 || sz > 4)
+ continue;
+ ++passed_regs;
+ }
+ if (passed_regs < 2 && abi == FFI_FASTCALL)
+ cif->abi = abi = FFI_THISCALL;
+ if (passed_regs < 1 && abi == FFI_THISCALL)
+ cif->abi = abi = FFI_STDCALL;
+ ffi_call_win32(ffi_prep_args_raw, &ecif, abi, cif->bytes, cif->flags,
+ ecif.rvalue, fn);
+ }
break;
#else
case FFI_SYSV:
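For orientation on the FFI_THISCALL/FFI_FASTCALL branch above: fastcall passes the first two register-eligible arguments (integral, at most 4 bytes) in ecx/edx, thiscall passes only the first in ecx, so when fewer eligible arguments exist the ABI is demoted so that the register-load stub added in win32.S further below pops exactly as many words as ffi_prep_args hoisted to the top of the stack. A toy model of the counting logic, not libffi code and not Windows-specific:

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative only: how many of the leading arguments can ride in the
       fastcall/thiscall register slots (ecx/edx). */
    struct arg { size_t size; int is_float_or_struct; };

    static const char *effective_abi(int returns_struct,
                                     const struct arg *args, int nargs)
    {
      int i, passed_regs = 0;
      if (returns_struct)
        ++passed_regs;                 /* hidden return pointer uses a slot */
      for (i = 0; i < nargs && passed_regs < 2; i++)
        {
          size_t sz = (args[i].size + 3) & ~(size_t) 3;
          if (args[i].is_float_or_struct || sz == 0 || sz > 4)
            continue;
          ++passed_regs;
        }
      if (passed_regs >= 2) return "FFI_FASTCALL";
      if (passed_regs == 1) return "FFI_THISCALL";
      return "FFI_STDCALL";
    }

    int main(void)
    {
      struct arg one_int[1]  = { { 4, 0 } };
      struct arg two_dbls[2] = { { 8, 1 }, { 8, 1 } };
      printf("%s\n", effective_abi(0, one_int, 1));   /* FFI_THISCALL */
      printf("%s\n", effective_abi(0, two_dbls, 2));  /* FFI_STDCALL  */
      return 0;
    }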
diff --git a/Modules/_ctypes/libffi/src/x86/ffi64.c b/Modules/_ctypes/libffi/src/x86/ffi64.c
index 07a2627..2014af2 100644
--- a/Modules/_ctypes/libffi/src/x86/ffi64.c
+++ b/Modules/_ctypes/libffi/src/x86/ffi64.c
@@ -1,7 +1,9 @@
/* -----------------------------------------------------------------------
- ffi64.c - Copyright (c) 2002, 2007 Bo Thorsen <bo@suse.de>
- Copyright (c) 2008 Red Hat, Inc.
-
+ ffi64.c - Copyright (c) 2013 The Written Word, Inc.
+ Copyright (c) 2011 Anthony Green
+ Copyright (c) 2008, 2010 Red Hat, Inc.
+ Copyright (c) 2002, 2007 Bo Thorsen <bo@suse.de>
+
x86-64 Foreign Function Interface
Permission is hereby granted, free of charge, to any person obtaining
@@ -36,11 +38,29 @@
#define MAX_GPR_REGS 6
#define MAX_SSE_REGS 8
+#if defined(__INTEL_COMPILER)
+#define UINT128 __m128
+#else
+#if defined(__SUNPRO_C)
+#include <sunmedia_types.h>
+#define UINT128 __m128i
+#else
+#define UINT128 __int128_t
+#endif
+#endif
+
+union big_int_union
+{
+ UINT32 i32;
+ UINT64 i64;
+ UINT128 i128;
+};
+
struct register_args
{
/* Registers for argument passing. */
UINT64 gpr[MAX_GPR_REGS];
- __int128_t sse[MAX_SSE_REGS];
+ union big_int_union sse[MAX_SSE_REGS];
};
extern void ffi_call_unix64 (void *args, unsigned long bytes, unsigned flags,
@@ -378,7 +398,7 @@ ffi_prep_cif_machdep (ffi_cif *cif)
if (align < 8)
align = 8;
- bytes = ALIGN(bytes, align);
+ bytes = ALIGN (bytes, align);
bytes += cif->arg_types[i]->size;
}
else
@@ -390,7 +410,7 @@ ffi_prep_cif_machdep (ffi_cif *cif)
if (ssecount)
flags |= 1 << 11;
cif->flags = flags;
- cif->bytes = bytes;
+ cif->bytes = ALIGN (bytes, 8);
return FFI_OK;
}
@@ -426,7 +446,7 @@ ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
/* If the return value is passed in memory, add the pointer as the
first integer argument. */
if (ret_in_memory)
- reg_args->gpr[gprcount++] = (long) rvalue;
+ reg_args->gpr[gprcount++] = (unsigned long) rvalue;
avn = cif->nargs;
arg_types = cif->arg_types;
@@ -464,16 +484,33 @@ ffi_call (ffi_cif *cif, void (*fn)(void), void *rvalue, void **avalue)
{
case X86_64_INTEGER_CLASS:
case X86_64_INTEGERSI_CLASS:
- reg_args->gpr[gprcount] = 0;
- memcpy (&reg_args->gpr[gprcount], a, size < 8 ? size : 8);
+ /* Sign-extend integer arguments passed in general
+ purpose registers, to cope with the fact that
+ LLVM incorrectly assumes that this will be done
+ (the x86-64 PS ABI does not specify this). */
+ switch (arg_types[i]->type)
+ {
+ case FFI_TYPE_SINT8:
+ *(SINT64 *)&reg_args->gpr[gprcount] = (SINT64) *((SINT8 *) a);
+ break;
+ case FFI_TYPE_SINT16:
+ *(SINT64 *)&reg_args->gpr[gprcount] = (SINT64) *((SINT16 *) a);
+ break;
+ case FFI_TYPE_SINT32:
+ *(SINT64 *)&reg_args->gpr[gprcount] = (SINT64) *((SINT32 *) a);
+ break;
+ default:
+ reg_args->gpr[gprcount] = 0;
+ memcpy (&reg_args->gpr[gprcount], a, size < 8 ? size : 8);
+ }
gprcount++;
break;
case X86_64_SSE_CLASS:
case X86_64_SSEDF_CLASS:
- reg_args->sse[ssecount++] = *(UINT64 *) a;
+ reg_args->sse[ssecount++].i64 = *(UINT64 *) a;
break;
case X86_64_SSESF_CLASS:
- reg_args->sse[ssecount++] = *(UINT32 *) a;
+ reg_args->sse[ssecount++].i32 = *(UINT32 *) a;
break;
default:
abort();
@@ -498,12 +535,21 @@ ffi_prep_closure_loc (ffi_closure* closure,
{
volatile unsigned short *tramp;
+ /* Sanity check on the cif ABI. */
+ {
+ int abi = cif->abi;
+ if (UNLIKELY (! (abi > FFI_FIRST_ABI && abi < FFI_LAST_ABI)))
+ return FFI_BAD_ABI;
+ }
+
tramp = (volatile unsigned short *) &closure->tramp[0];
tramp[0] = 0xbb49; /* mov <code>, %r11 */
- *(void * volatile *) &tramp[1] = ffi_closure_unix64;
+ *((unsigned long long * volatile) &tramp[1])
+ = (unsigned long) ffi_closure_unix64;
tramp[5] = 0xba49; /* mov <data>, %r10 */
- *(void * volatile *) &tramp[6] = codeloc;
+ *((unsigned long long * volatile) &tramp[6])
+ = (unsigned long) codeloc;
/* Set the carry bit iff the function uses any sse registers.
This is clc or stc, together with the first byte of the jmp. */
@@ -542,7 +588,7 @@ ffi_closure_unix64_inner(ffi_closure *closure, void *rvalue,
{
/* The return value goes in memory. Arrange for the closure
return value to go directly back to the original caller. */
- rvalue = (void *) reg_args->gpr[gprcount++];
+ rvalue = (void *) (unsigned long) reg_args->gpr[gprcount++];
/* We don't have to do anything in asm for the return. */
ret = FFI_TYPE_VOID;
}
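The INTEGER_CLASS change above is easiest to see with a concrete value: the old memcpy left the upper bits of the GPR image zero, while callees generated by clang/LLVM expect small signed arguments to arrive sign-extended to 64 bits. A standalone sketch of the difference, illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
      int8_t a = -5;
      uint64_t gpr;

      /* Old behaviour: zero the slot, then copy only the low byte. */
      gpr = (uint8_t) a;
      printf("%#llx\n", (unsigned long long) gpr);  /* 0xfb, high bits zero */

      /* Patched behaviour for FFI_TYPE_SINT8: full sign extension, which is
         what an LLVM-compiled callee expects to find in the register. */
      gpr = (uint64_t) (int64_t) a;
      printf("%#llx\n", (unsigned long long) gpr);  /* 0xfffffffffffffffb */
      return 0;
    }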
diff --git a/Modules/_ctypes/libffi/src/x86/ffitarget.h b/Modules/_ctypes/libffi/src/x86/ffitarget.h
index 89a8983..46f294c 100644
--- a/Modules/_ctypes/libffi/src/x86/ffitarget.h
+++ b/Modules/_ctypes/libffi/src/x86/ffitarget.h
@@ -1,6 +1,7 @@
/* -----------------------------------------------------------------*-C-*-
- ffitarget.h - Copyright (c) 1996-2003, 2010 Red Hat, Inc.
- Copyright (C) 2008 Free Software Foundation, Inc.
+ ffitarget.h - Copyright (c) 2012 Anthony Green
+ Copyright (c) 1996-2003, 2010 Red Hat, Inc.
+ Copyright (C) 2008 Free Software Foundation, Inc.
Target configuration macros for x86 and x86-64.
@@ -29,8 +30,15 @@
#ifndef LIBFFI_TARGET_H
#define LIBFFI_TARGET_H
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
/* ---- System specific configurations ----------------------------------- */
+/* For code common to all platforms on x86 and x86_64. */
+#define X86_ANY
+
#if defined (X86_64) && defined (__i386__)
#undef X86_64
#define X86
@@ -38,7 +46,7 @@
#ifdef X86_WIN64
#define FFI_SIZEOF_ARG 8
-#define USE_BUILTIN_FFS 0 // not yet implemented in mingw-64
+#define USE_BUILTIN_FFS 0 /* not yet implemented in mingw-64 */
#endif
/* ---- Generic type definitions ----------------------------------------- */
@@ -53,9 +61,16 @@ typedef unsigned long long ffi_arg;
typedef long long ffi_sarg;
#endif
#else
+#if defined __x86_64__ && defined __ILP32__
+#define FFI_SIZEOF_ARG 8
+#define FFI_SIZEOF_JAVA_RAW 4
+typedef unsigned long long ffi_arg;
+typedef long long ffi_sarg;
+#else
typedef unsigned long ffi_arg;
typedef signed long ffi_sarg;
#endif
+#endif
typedef enum ffi_abi {
FFI_FIRST_ABI = 0,
@@ -64,28 +79,32 @@ typedef enum ffi_abi {
#ifdef X86_WIN32
FFI_SYSV,
FFI_STDCALL,
- /* TODO: Add fastcall support for the sake of completeness */
- FFI_DEFAULT_ABI = FFI_SYSV,
+ FFI_THISCALL,
+ FFI_FASTCALL,
+ FFI_MS_CDECL,
+ FFI_LAST_ABI,
+#ifdef _MSC_VER
+ FFI_DEFAULT_ABI = FFI_MS_CDECL
+#else
+ FFI_DEFAULT_ABI = FFI_SYSV
#endif
-#ifdef X86_WIN64
+#elif defined(X86_WIN64)
FFI_WIN64,
- FFI_DEFAULT_ABI = FFI_WIN64,
-#else
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_WIN64
+#else
/* ---- Intel x86 and AMD x86-64 - */
-#if !defined(X86_WIN32) && (defined(__i386__) || defined(__x86_64__) || defined(__i386) || defined(__amd64))
FFI_SYSV,
FFI_UNIX64, /* Unix variants all use the same ABI for x86-64 */
+ FFI_LAST_ABI,
#if defined(__i386__) || defined(__i386)
- FFI_DEFAULT_ABI = FFI_SYSV,
+ FFI_DEFAULT_ABI = FFI_SYSV
#else
- FFI_DEFAULT_ABI = FFI_UNIX64,
+ FFI_DEFAULT_ABI = FFI_UNIX64
#endif
#endif
-#endif /* X86_WIN64 */
-
- FFI_LAST_ABI = FFI_DEFAULT_ABI + 1
} ffi_abi;
#endif
@@ -95,13 +114,14 @@ typedef enum ffi_abi {
#define FFI_TYPE_SMALL_STRUCT_1B (FFI_TYPE_LAST + 1)
#define FFI_TYPE_SMALL_STRUCT_2B (FFI_TYPE_LAST + 2)
#define FFI_TYPE_SMALL_STRUCT_4B (FFI_TYPE_LAST + 3)
+#define FFI_TYPE_MS_STRUCT (FFI_TYPE_LAST + 4)
#if defined (X86_64) || (defined (__x86_64__) && defined (X86_DARWIN))
#define FFI_TRAMPOLINE_SIZE 24
#define FFI_NATIVE_RAW_API 0
#else
#ifdef X86_WIN32
-#define FFI_TRAMPOLINE_SIZE 13
+#define FFI_TRAMPOLINE_SIZE 52
#else
#ifdef X86_WIN64
#define FFI_TRAMPOLINE_SIZE 29
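The jump from 13 to 52 bytes follows from the FFI_INIT_TRAMPOLINE_THISCALL macro added earlier in this patch: its last store is a 2-byte value at offset 50 (*(unsigned short*) &__tramp[50]), so the thiscall stub occupies 50 + 2 = 52 bytes, and every Win32 closure now reserves room for the largest trampoline variant.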
diff --git a/Modules/_ctypes/libffi/src/x86/sysv.S b/Modules/_ctypes/libffi/src/x86/sysv.S
index f4b6c1e..3bd5477 100644
--- a/Modules/_ctypes/libffi/src/x86/sysv.S
+++ b/Modules/_ctypes/libffi/src/x86/sysv.S
@@ -1,5 +1,6 @@
/* -----------------------------------------------------------------------
- sysv.S - Copyright (c) 1996, 1998, 2001-2003, 2005, 2008 Red Hat, Inc.
+ sysv.S - Copyright (c) 2013 The Written Word, Inc.
+ - Copyright (c) 1996,1998,2001-2003,2005,2008,2010 Red Hat, Inc.
X86 Foreign Function Interface
@@ -48,6 +49,9 @@ ffi_call_SYSV:
movl 16(%ebp),%ecx
subl %ecx,%esp
+	/* Align the stack pointer to a 16-byte boundary */
+ andl $0xfffffff0, %esp
+
movl %esp,%eax
/* Place all of the ffi_prep_args in position */
@@ -178,9 +182,19 @@ ffi_closure_SYSV:
leal -24(%ebp), %edx
movl %edx, -12(%ebp) /* resp */
leal 8(%ebp), %edx
+#ifdef __SUNPRO_C
+ /* The SUNPRO compiler doesn't support GCC's regparm function
+ attribute, so we have to pass all three arguments to
+ ffi_closure_SYSV_inner on the stack. */
+ movl %edx, 8(%esp) /* args = __builtin_dwarf_cfa () */
+ leal -12(%ebp), %edx
+ movl %edx, 4(%esp) /* &resp */
+ movl %eax, (%esp) /* closure */
+#else
movl %edx, 4(%esp) /* args = __builtin_dwarf_cfa () */
leal -12(%ebp), %edx
movl %edx, (%esp) /* &resp */
+#endif
#if defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE || !defined __PIC__
call ffi_closure_SYSV_inner
#else
@@ -325,23 +339,55 @@ ffi_closure_raw_SYSV:
.size ffi_closure_raw_SYSV, .-ffi_closure_raw_SYSV
#endif
+#if defined __GNUC__
+/* Only emit DWARF unwind info when building with the GNU toolchain. */
+
+#if defined __PIC__
+# if defined __sun__ && defined __svr4__
+/* 32-bit Solaris 2/x86 uses datarel encoding for PIC. GNU ld before 2.22
+ doesn't correctly sort .eh_frame_hdr with mixed encodings, so match this. */
+# define FDE_ENCODING 0x30 /* datarel */
+# define FDE_ENCODE(X) X@GOTOFF
+# else
+# define FDE_ENCODING 0x1b /* pcrel sdata4 */
+# if defined HAVE_AS_X86_PCREL
+# define FDE_ENCODE(X) X-.
+# else
+# define FDE_ENCODE(X) X@rel
+# endif
+# endif
+#else
+# define FDE_ENCODING 0 /* absolute */
+# define FDE_ENCODE(X) X
+#endif
+
.section .eh_frame,EH_FRAME_FLAGS,@progbits
.Lframe1:
.long .LECIE1-.LSCIE1 /* Length of Common Information Entry */
.LSCIE1:
.long 0x0 /* CIE Identifier Tag */
.byte 0x1 /* CIE Version */
+#ifdef HAVE_AS_ASCII_PSEUDO_OP
#ifdef __PIC__
.ascii "zR\0" /* CIE Augmentation */
#else
.ascii "\0" /* CIE Augmentation */
#endif
+#elif defined HAVE_AS_STRING_PSEUDO_OP
+#ifdef __PIC__
+ .string "zR" /* CIE Augmentation */
+#else
+ .string "" /* CIE Augmentation */
+#endif
+#else
+#error missing .ascii/.string
+#endif
.byte 0x1 /* .uleb128 0x1; CIE Code Alignment Factor */
.byte 0x7c /* .sleb128 -4; CIE Data Alignment Factor */
.byte 0x8 /* CIE RA Column */
#ifdef __PIC__
.byte 0x1 /* .uleb128 0x1; Augmentation size */
- .byte 0x1b /* FDE Encoding (pcrel sdata4) */
+ .byte FDE_ENCODING
#endif
.byte 0xc /* DW_CFA_def_cfa */
.byte 0x4 /* .uleb128 0x4 */
@@ -354,14 +400,8 @@ ffi_closure_raw_SYSV:
.long .LEFDE1-.LASFDE1 /* FDE Length */
.LASFDE1:
.long .LASFDE1-.Lframe1 /* FDE CIE offset */
-#if defined __PIC__ && defined HAVE_AS_X86_PCREL
- .long .LFB1-. /* FDE initial location */
-#elif defined __PIC__
- .long .LFB1@rel
-#else
- .long .LFB1
-#endif
- .long .LFE1-.LFB1 /* FDE address range */
+ .long FDE_ENCODE(.LFB1) /* FDE initial location */
+ .long .LFE1-.LFB1 /* FDE address range */
#ifdef __PIC__
.byte 0x0 /* .uleb128 0x0; Augmentation size */
#endif
@@ -381,14 +421,8 @@ ffi_closure_raw_SYSV:
.long .LEFDE2-.LASFDE2 /* FDE Length */
.LASFDE2:
.long .LASFDE2-.Lframe1 /* FDE CIE offset */
-#if defined __PIC__ && defined HAVE_AS_X86_PCREL
- .long .LFB2-. /* FDE initial location */
-#elif defined __PIC__
- .long .LFB2@rel
-#else
- .long .LFB2
-#endif
- .long .LFE2-.LFB2 /* FDE address range */
+ .long FDE_ENCODE(.LFB2) /* FDE initial location */
+ .long .LFE2-.LFB2 /* FDE address range */
#ifdef __PIC__
.byte 0x0 /* .uleb128 0x0; Augmentation size */
#endif
@@ -417,14 +451,8 @@ ffi_closure_raw_SYSV:
.long .LEFDE3-.LASFDE3 /* FDE Length */
.LASFDE3:
.long .LASFDE3-.Lframe1 /* FDE CIE offset */
-#if defined __PIC__ && defined HAVE_AS_X86_PCREL
- .long .LFB3-. /* FDE initial location */
-#elif defined __PIC__
- .long .LFB3@rel
-#else
- .long .LFB3
-#endif
- .long .LFE3-.LFB3 /* FDE address range */
+ .long FDE_ENCODE(.LFB3) /* FDE initial location */
+ .long .LFE3-.LFB3 /* FDE address range */
#ifdef __PIC__
.byte 0x0 /* .uleb128 0x0; Augmentation size */
#endif
@@ -446,6 +474,7 @@ ffi_closure_raw_SYSV:
.LEFDE3:
#endif
+#endif
#endif /* ifndef __x86_64__ */
diff --git a/Modules/_ctypes/libffi/src/x86/unix64.S b/Modules/_ctypes/libffi/src/x86/unix64.S
index fe3f4fd..dcd6bc7 100644
--- a/Modules/_ctypes/libffi/src/x86/unix64.S
+++ b/Modules/_ctypes/libffi/src/x86/unix64.S
@@ -1,6 +1,7 @@
/* -----------------------------------------------------------------------
- unix64.S - Copyright (c) 2002 Bo Thorsen <bo@suse.de>
- Copyright (c) 2008 Red Hat, Inc
+ unix64.S - Copyright (c) 2013 The Written Word, Inc.
+ - Copyright (c) 2008 Red Hat, Inc
+ - Copyright (c) 2002 Bo Thorsen <bo@suse.de>
x86-64 Foreign Function Interface
@@ -324,7 +325,14 @@ ffi_closure_unix64:
.LUW9:
.size ffi_closure_unix64,.-ffi_closure_unix64
+#ifdef __GNUC__
+/* Only emit DWARF unwind info when building with the GNU toolchain. */
+
+#ifdef HAVE_AS_X86_64_UNWIND_SECTION_TYPE
+ .section .eh_frame,"a",@unwind
+#else
.section .eh_frame,"a",@progbits
+#endif
.Lframe1:
.long .LECIE1-.LSCIE1 /* CIE Length */
.LSCIE1:
@@ -415,6 +423,8 @@ ffi_closure_unix64:
.align 8
.LEFDE3:
+#endif /* __GNUC__ */
+
#endif /* __x86_64__ */
#if defined __ELF__ && defined __linux__
diff --git a/Modules/_ctypes/libffi/src/x86/win32.S b/Modules/_ctypes/libffi/src/x86/win32.S
index ac1ed6f..24b7bbd 100644
--- a/Modules/_ctypes/libffi/src/x86/win32.S
+++ b/Modules/_ctypes/libffi/src/x86/win32.S
@@ -45,6 +45,7 @@ _TEXT SEGMENT
ffi_call_win32 PROC NEAR,
ffi_prep_args : NEAR PTR DWORD,
ecif : NEAR PTR DWORD,
+ cif_abi : DWORD,
cif_bytes : DWORD,
cif_flags : DWORD,
rvalue : NEAR PTR DWORD,
@@ -64,6 +65,19 @@ ffi_call_win32 PROC NEAR,
;; Return stack to previous state and call the function
add esp, 8
+ ;; Handle thiscall and fastcall
+ cmp cif_abi, 3 ;; FFI_THISCALL
+ jz do_thiscall
+ cmp cif_abi, 4 ;; FFI_FASTCALL
+ jnz do_stdcall
+ mov ecx, DWORD PTR [esp]
+ mov edx, DWORD PTR [esp+4]
+ add esp, 8
+ jmp do_stdcall
+do_thiscall:
+ mov ecx, DWORD PTR [esp]
+ add esp, 4
+do_stdcall:
call fn
;; cdecl: we restore esp in the epilogue, so there's no need to
@@ -94,31 +108,37 @@ ca_jumpdata:
dd offset ca_retfloat ;; FFI_TYPE_FLOAT
dd offset ca_retdouble ;; FFI_TYPE_DOUBLE
dd offset ca_retlongdouble ;; FFI_TYPE_LONGDOUBLE
- dd offset ca_retint8 ;; FFI_TYPE_UINT8
- dd offset ca_retint8 ;; FFI_TYPE_SINT8
- dd offset ca_retint16 ;; FFI_TYPE_UINT16
- dd offset ca_retint16 ;; FFI_TYPE_SINT16
+ dd offset ca_retuint8 ;; FFI_TYPE_UINT8
+ dd offset ca_retsint8 ;; FFI_TYPE_SINT8
+ dd offset ca_retuint16 ;; FFI_TYPE_UINT16
+ dd offset ca_retsint16 ;; FFI_TYPE_SINT16
dd offset ca_retint ;; FFI_TYPE_UINT32
dd offset ca_retint ;; FFI_TYPE_SINT32
dd offset ca_retint64 ;; FFI_TYPE_UINT64
dd offset ca_retint64 ;; FFI_TYPE_SINT64
dd offset ca_epilogue ;; FFI_TYPE_STRUCT
dd offset ca_retint ;; FFI_TYPE_POINTER
- dd offset ca_retint8 ;; FFI_TYPE_SMALL_STRUCT_1B
- dd offset ca_retint16 ;; FFI_TYPE_SMALL_STRUCT_2B
+ dd offset ca_retstruct1b ;; FFI_TYPE_SMALL_STRUCT_1B
+ dd offset ca_retstruct2b ;; FFI_TYPE_SMALL_STRUCT_2B
dd offset ca_retint ;; FFI_TYPE_SMALL_STRUCT_4B
+ dd offset ca_epilogue ;; FFI_TYPE_MS_STRUCT
-ca_retint8:
- ;; Load %ecx with the pointer to storage for the return value
- mov ecx, rvalue
- mov [ecx + 0], al
- jmp ca_epilogue
+ /* Sign/zero extend as appropriate. */
+ca_retuint8:
+ movzx eax, al
+ jmp ca_retint
-ca_retint16:
- ;; Load %ecx with the pointer to storage for the return value
- mov ecx, rvalue
- mov [ecx + 0], ax
- jmp ca_epilogue
+ca_retsint8:
+ movsx eax, al
+ jmp ca_retint
+
+ca_retuint16:
+ movzx eax, ax
+ jmp ca_retint
+
+ca_retsint16:
+ movsx eax, ax
+ jmp ca_retint
ca_retint:
;; Load %ecx with the pointer to storage for the return value
@@ -151,11 +171,31 @@ ca_retlongdouble:
fstp TBYTE PTR [ecx]
jmp ca_epilogue
+ca_retstruct1b:
+ ;; Load %ecx with the pointer to storage for the return value
+ mov ecx, rvalue
+ mov [ecx + 0], al
+ jmp ca_epilogue
+
+ca_retstruct2b:
+ ;; Load %ecx with the pointer to storage for the return value
+ mov ecx, rvalue
+ mov [ecx + 0], ax
+ jmp ca_epilogue
+
ca_epilogue:
;; Epilogue code is autogenerated.
ret
ffi_call_win32 ENDP
+ffi_closure_THISCALL PROC NEAR FORCEFRAME
+ sub esp, 40
+ lea edx, [ebp -24]
+ mov [ebp - 12], edx /* resp */
+ lea edx, [ebp + 12] /* account for stub return address on stack */
+ jmp stub
+ffi_closure_THISCALL ENDP
+
ffi_closure_SYSV PROC NEAR FORCEFRAME
;; the ffi_closure ctx is passed in eax by the trampoline.
@@ -163,6 +203,7 @@ ffi_closure_SYSV PROC NEAR FORCEFRAME
lea edx, [ebp - 24]
mov [ebp - 12], edx ;; resp
lea edx, [ebp + 8]
+stub::
mov [esp + 8], edx ;; args
lea edx, [ebp - 12]
mov [esp + 4], edx ;; &resp
@@ -179,26 +220,35 @@ cs_jumpdata:
dd offset cs_retfloat ;; FFI_TYPE_FLOAT
dd offset cs_retdouble ;; FFI_TYPE_DOUBLE
dd offset cs_retlongdouble ;; FFI_TYPE_LONGDOUBLE
- dd offset cs_retint8 ;; FFI_TYPE_UINT8
- dd offset cs_retint8 ;; FFI_TYPE_SINT8
- dd offset cs_retint16 ;; FFI_TYPE_UINT16
- dd offset cs_retint16 ;; FFI_TYPE_SINT16
+ dd offset cs_retuint8 ;; FFI_TYPE_UINT8
+ dd offset cs_retsint8 ;; FFI_TYPE_SINT8
+ dd offset cs_retuint16 ;; FFI_TYPE_UINT16
+ dd offset cs_retsint16 ;; FFI_TYPE_SINT16
dd offset cs_retint ;; FFI_TYPE_UINT32
dd offset cs_retint ;; FFI_TYPE_SINT32
dd offset cs_retint64 ;; FFI_TYPE_UINT64
dd offset cs_retint64 ;; FFI_TYPE_SINT64
dd offset cs_retstruct ;; FFI_TYPE_STRUCT
dd offset cs_retint ;; FFI_TYPE_POINTER
- dd offset cs_retint8 ;; FFI_TYPE_SMALL_STRUCT_1B
- dd offset cs_retint16 ;; FFI_TYPE_SMALL_STRUCT_2B
+ dd offset cs_retsint8 ;; FFI_TYPE_SMALL_STRUCT_1B
+ dd offset cs_retsint16 ;; FFI_TYPE_SMALL_STRUCT_2B
dd offset cs_retint ;; FFI_TYPE_SMALL_STRUCT_4B
+ dd offset cs_retmsstruct ;; FFI_TYPE_MS_STRUCT
+
+cs_retuint8:
+ movzx eax, BYTE PTR [ecx]
+ jmp cs_epilogue
-cs_retint8:
- mov al, [ecx]
+cs_retsint8:
+ movsx eax, BYTE PTR [ecx]
jmp cs_epilogue
-cs_retint16:
- mov ax, [ecx]
+cs_retuint16:
+ movzx eax, WORD PTR [ecx]
+ jmp cs_epilogue
+
+cs_retsint16:
+ movsx eax, WORD PTR [ecx]
jmp cs_epilogue
cs_retint:
@@ -227,6 +277,12 @@ cs_retstruct:
;; Epilogue code is autogenerated.
ret 4
+cs_retmsstruct:
+ ;; Caller expects us to return a pointer to the real return value.
+ mov eax, ecx
+     ;; Caller doesn't expect us to pop the hidden struct-return pointer argument.
+ jmp cs_epilogue
+
cs_epilogue:
;; Epilogue code is autogenerated.
ret
@@ -239,7 +295,16 @@ ffi_closure_SYSV ENDP
#define RAW_CLOSURE_USER_DATA_OFFSET (RAW_CLOSURE_FUN_OFFSET + 4)
#define CIF_FLAGS_OFFSET 20
-ffi_closure_raw_SYSV PROC NEAR USES esi
+ffi_closure_raw_THISCALL PROC NEAR USES esi FORCEFRAME
+ sub esp, 36
+ mov esi, [eax + RAW_CLOSURE_CIF_OFFSET] ;; closure->cif
+ mov edx, [eax + RAW_CLOSURE_USER_DATA_OFFSET] ;; closure->user_data
+ mov [esp + 12], edx
+ lea edx, [ebp + 12]
+ jmp stubraw
+ffi_closure_raw_THISCALL ENDP
+
+ffi_closure_raw_SYSV PROC NEAR USES esi FORCEFRAME
;; the ffi_closure ctx is passed in eax by the trampoline.
sub esp, 40
@@ -247,6 +312,7 @@ ffi_closure_raw_SYSV PROC NEAR USES esi
mov edx, [eax + RAW_CLOSURE_USER_DATA_OFFSET] ;; closure->user_data
mov [esp + 12], edx ;; user_data
lea edx, [ebp + 8]
+stubraw::
mov [esp + 8], edx ;; raw_args
lea edx, [ebp - 24]
mov [esp + 4], edx ;; &res
@@ -264,26 +330,35 @@ cr_jumpdata:
dd offset cr_retfloat ;; FFI_TYPE_FLOAT
dd offset cr_retdouble ;; FFI_TYPE_DOUBLE
dd offset cr_retlongdouble ;; FFI_TYPE_LONGDOUBLE
- dd offset cr_retint8 ;; FFI_TYPE_UINT8
- dd offset cr_retint8 ;; FFI_TYPE_SINT8
- dd offset cr_retint16 ;; FFI_TYPE_UINT16
- dd offset cr_retint16 ;; FFI_TYPE_SINT16
+ dd offset cr_retuint8 ;; FFI_TYPE_UINT8
+ dd offset cr_retsint8 ;; FFI_TYPE_SINT8
+ dd offset cr_retuint16 ;; FFI_TYPE_UINT16
+ dd offset cr_retsint16 ;; FFI_TYPE_SINT16
dd offset cr_retint ;; FFI_TYPE_UINT32
dd offset cr_retint ;; FFI_TYPE_SINT32
dd offset cr_retint64 ;; FFI_TYPE_UINT64
dd offset cr_retint64 ;; FFI_TYPE_SINT64
dd offset cr_epilogue ;; FFI_TYPE_STRUCT
dd offset cr_retint ;; FFI_TYPE_POINTER
- dd offset cr_retint8 ;; FFI_TYPE_SMALL_STRUCT_1B
- dd offset cr_retint16 ;; FFI_TYPE_SMALL_STRUCT_2B
+ dd offset cr_retsint8 ;; FFI_TYPE_SMALL_STRUCT_1B
+ dd offset cr_retsint16 ;; FFI_TYPE_SMALL_STRUCT_2B
dd offset cr_retint ;; FFI_TYPE_SMALL_STRUCT_4B
+ dd offset cr_epilogue ;; FFI_TYPE_MS_STRUCT
+
+cr_retuint8:
+ movzx eax, BYTE PTR [ecx]
+ jmp cr_epilogue
+
+cr_retsint8:
+ movsx eax, BYTE PTR [ecx]
+ jmp cr_epilogue
-cr_retint8:
- mov al, [ecx]
+cr_retuint16:
+ movzx eax, WORD PTR [ecx]
jmp cr_epilogue
-cr_retint16:
- mov ax, [ecx]
+cr_retsint16:
+ movsx eax, WORD PTR [ecx]
jmp cr_epilogue
cr_retint:
@@ -337,26 +412,34 @@ cd_jumpdata:
dd offset cd_retfloat ;; FFI_TYPE_FLOAT
dd offset cd_retdouble ;; FFI_TYPE_DOUBLE
dd offset cd_retlongdouble ;; FFI_TYPE_LONGDOUBLE
- dd offset cd_retint8 ;; FFI_TYPE_UINT8
- dd offset cd_retint8 ;; FFI_TYPE_SINT8
- dd offset cd_retint16 ;; FFI_TYPE_UINT16
- dd offset cd_retint16 ;; FFI_TYPE_SINT16
+ dd offset cd_retuint8 ;; FFI_TYPE_UINT8
+ dd offset cd_retsint8 ;; FFI_TYPE_SINT8
+ dd offset cd_retuint16 ;; FFI_TYPE_UINT16
+ dd offset cd_retsint16 ;; FFI_TYPE_SINT16
dd offset cd_retint ;; FFI_TYPE_UINT32
dd offset cd_retint ;; FFI_TYPE_SINT32
dd offset cd_retint64 ;; FFI_TYPE_UINT64
dd offset cd_retint64 ;; FFI_TYPE_SINT64
dd offset cd_epilogue ;; FFI_TYPE_STRUCT
dd offset cd_retint ;; FFI_TYPE_POINTER
- dd offset cd_retint8 ;; FFI_TYPE_SMALL_STRUCT_1B
- dd offset cd_retint16 ;; FFI_TYPE_SMALL_STRUCT_2B
+ dd offset cd_retsint8 ;; FFI_TYPE_SMALL_STRUCT_1B
+ dd offset cd_retsint16 ;; FFI_TYPE_SMALL_STRUCT_2B
dd offset cd_retint ;; FFI_TYPE_SMALL_STRUCT_4B
-cd_retint8:
- mov al, [ecx]
+cd_retuint8:
+ movzx eax, BYTE PTR [ecx]
+ jmp cd_epilogue
+
+cd_retsint8:
+ movsx eax, BYTE PTR [ecx]
+ jmp cd_epilogue
+
+cd_retuint16:
+ movzx eax, WORD PTR [ecx]
jmp cd_epilogue
-cd_retint16:
- mov ax, [ecx]
+cd_retsint16:
+ movsx eax, WORD PTR [ecx]
jmp cd_epilogue
cd_retint:
@@ -395,7 +478,9 @@ END
# This assumes we are using gas.
.balign 16
.globl _ffi_call_win32
+#ifndef __OS2__
.def _ffi_call_win32; .scl 2; .type 32; .endef
+#endif
_ffi_call_win32:
.LFB1:
pushl %ebp
@@ -403,7 +488,7 @@ _ffi_call_win32:
movl %esp,%ebp
.LCFI1:
# Make room for all of the new args.
- movl 16(%ebp),%ecx
+ movl 20(%ebp),%ecx
subl %ecx,%esp
movl %esp,%eax
@@ -415,19 +500,34 @@ _ffi_call_win32:
# Return stack to previous state and call the function
addl $8,%esp
-
+
+ # Handle fastcall and thiscall
+ cmpl $3, 16(%ebp) # FFI_THISCALL
+ jz .do_thiscall
+ cmpl $4, 16(%ebp) # FFI_FASTCALL
+ jnz .do_fncall
+ movl (%esp), %ecx
+ movl 4(%esp), %edx
+ addl $8, %esp
+ jmp .do_fncall
+.do_thiscall:
+ movl (%esp), %ecx
+ addl $4, %esp
+
+.do_fncall:
+
# FIXME: Align the stack to a 128-bit boundary to avoid
# potential performance hits.
- call *28(%ebp)
+ call *32(%ebp)
# stdcall functions pop arguments off the stack themselves
# Load %ecx with the return type code
- movl 20(%ebp),%ecx
+ movl 24(%ebp),%ecx
# If the return value pointer is NULL, assume no return value.
- cmpl $0,24(%ebp)
+ cmpl $0,28(%ebp)
jne 0f
# Even if there is no space for the return value, we are
@@ -460,6 +560,7 @@ _ffi_call_win32:
.long .Lretstruct1b /* FFI_TYPE_SMALL_STRUCT_1B */
.long .Lretstruct2b /* FFI_TYPE_SMALL_STRUCT_2B */
.long .Lretstruct4b /* FFI_TYPE_SMALL_STRUCT_4B */
+ .long .Lretstruct /* FFI_TYPE_MS_STRUCT */
1:
add %ecx, %ecx
add %ecx, %ecx
@@ -486,50 +587,50 @@ _ffi_call_win32:
.Lretint:
# Load %ecx with the pointer to storage for the return value
- movl 24(%ebp),%ecx
+ movl 28(%ebp),%ecx
movl %eax,0(%ecx)
jmp .Lepilogue
.Lretfloat:
# Load %ecx with the pointer to storage for the return value
- movl 24(%ebp),%ecx
+ movl 28(%ebp),%ecx
fstps (%ecx)
jmp .Lepilogue
.Lretdouble:
# Load %ecx with the pointer to storage for the return value
- movl 24(%ebp),%ecx
+ movl 28(%ebp),%ecx
fstpl (%ecx)
jmp .Lepilogue
.Lretlongdouble:
# Load %ecx with the pointer to storage for the return value
- movl 24(%ebp),%ecx
+ movl 28(%ebp),%ecx
fstpt (%ecx)
jmp .Lepilogue
.Lretint64:
# Load %ecx with the pointer to storage for the return value
- movl 24(%ebp),%ecx
+ movl 28(%ebp),%ecx
movl %eax,0(%ecx)
movl %edx,4(%ecx)
jmp .Lepilogue
.Lretstruct1b:
# Load %ecx with the pointer to storage for the return value
- movl 24(%ebp),%ecx
+ movl 28(%ebp),%ecx
movb %al,0(%ecx)
jmp .Lepilogue
.Lretstruct2b:
# Load %ecx with the pointer to storage for the return value
- movl 24(%ebp),%ecx
+ movl 28(%ebp),%ecx
movw %ax,0(%ecx)
jmp .Lepilogue
.Lretstruct4b:
# Load %ecx with the pointer to storage for the return value
- movl 24(%ebp),%ecx
+ movl 28(%ebp),%ecx
movl %eax,0(%ecx)
jmp .Lepilogue
@@ -542,12 +643,27 @@ _ffi_call_win32:
popl %ebp
ret
.ffi_call_win32_end:
+ .balign 16
+ .globl _ffi_closure_THISCALL
+#ifndef __OS2__
+ .def _ffi_closure_THISCALL; .scl 2; .type 32; .endef
+#endif
+_ffi_closure_THISCALL:
+ pushl %ebp
+ movl %esp, %ebp
+ subl $40, %esp
+ leal -24(%ebp), %edx
+ movl %edx, -12(%ebp) /* resp */
+ leal 12(%ebp), %edx /* account for stub return address on stack */
+ jmp .stub
.LFE1:
# This assumes we are using gas.
.balign 16
.globl _ffi_closure_SYSV
+#ifndef __OS2__
.def _ffi_closure_SYSV; .scl 2; .type 32; .endef
+#endif
_ffi_closure_SYSV:
.LFB3:
pushl %ebp
@@ -558,6 +674,7 @@ _ffi_closure_SYSV:
leal -24(%ebp), %edx
movl %edx, -12(%ebp) /* resp */
leal 8(%ebp), %edx
+.stub:
movl %edx, 4(%esp) /* args = __builtin_dwarf_cfa () */
leal -12(%ebp), %edx
movl %edx, (%esp) /* &resp */
@@ -586,6 +703,7 @@ _ffi_closure_SYSV:
.long .Lcls_retstruct1 /* FFI_TYPE_SMALL_STRUCT_1B */
.long .Lcls_retstruct2 /* FFI_TYPE_SMALL_STRUCT_2B */
.long .Lcls_retstruct4 /* FFI_TYPE_SMALL_STRUCT_4B */
+ .long .Lcls_retmsstruct /* FFI_TYPE_MS_STRUCT */
1:
add %eax, %eax
@@ -650,6 +768,12 @@ _ffi_closure_SYSV:
popl %ebp
ret $0x4
+.Lcls_retmsstruct:
+ # Caller expects us to return a pointer to the real return value.
+ mov %ecx, %eax
+	# Caller doesn't expect us to pop the hidden struct-return pointer argument.
+ jmp .Lcls_epilogue
+
.Lcls_noretval:
.Lcls_epilogue:
movl %ebp, %esp
@@ -664,11 +788,27 @@ _ffi_closure_SYSV:
#define RAW_CLOSURE_FUN_OFFSET (RAW_CLOSURE_CIF_OFFSET + 4)
#define RAW_CLOSURE_USER_DATA_OFFSET (RAW_CLOSURE_FUN_OFFSET + 4)
#define CIF_FLAGS_OFFSET 20
-
+ .balign 16
+ .globl _ffi_closure_raw_THISCALL
+#ifndef __OS2__
+ .def _ffi_closure_raw_THISCALL; .scl 2; .type 32; .endef
+#endif
+_ffi_closure_raw_THISCALL:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %esi
+ subl $36, %esp
+ movl RAW_CLOSURE_CIF_OFFSET(%eax), %esi /* closure->cif */
+ movl RAW_CLOSURE_USER_DATA_OFFSET(%eax), %edx /* closure->user_data */
+ movl %edx, 12(%esp) /* user_data */
+ leal 12(%ebp), %edx /* __builtin_dwarf_cfa () */
+ jmp .stubraw
# This assumes we are using gas.
.balign 16
.globl _ffi_closure_raw_SYSV
+#ifndef __OS2__
.def _ffi_closure_raw_SYSV; .scl 2; .type 32; .endef
+#endif
_ffi_closure_raw_SYSV:
.LFB4:
pushl %ebp
@@ -682,6 +822,7 @@ _ffi_closure_raw_SYSV:
movl RAW_CLOSURE_USER_DATA_OFFSET(%eax), %edx /* closure->user_data */
movl %edx, 12(%esp) /* user_data */
leal 8(%ebp), %edx /* __builtin_dwarf_cfa () */
+.stubraw:
movl %edx, 8(%esp) /* raw_args */
leal -24(%ebp), %edx
movl %edx, 4(%esp) /* &res */
@@ -710,6 +851,7 @@ _ffi_closure_raw_SYSV:
.long .Lrcls_retstruct1 /* FFI_TYPE_SMALL_STRUCT_1B */
.long .Lrcls_retstruct2 /* FFI_TYPE_SMALL_STRUCT_2B */
.long .Lrcls_retstruct4 /* FFI_TYPE_SMALL_STRUCT_4B */
+ .long .Lrcls_retstruct /* FFI_TYPE_MS_STRUCT */
1:
add %eax, %eax
add %eax, %eax
@@ -784,7 +926,9 @@ _ffi_closure_raw_SYSV:
# This assumes we are using gas.
.balign 16
.globl _ffi_closure_STDCALL
+#ifndef __OS2__
.def _ffi_closure_STDCALL; .scl 2; .type 32; .endef
+#endif
_ffi_closure_STDCALL:
.LFB5:
pushl %ebp
@@ -890,7 +1034,9 @@ _ffi_closure_STDCALL:
.ffi_closure_STDCALL_end:
.LFE5:
+#ifndef __OS2__
.section .eh_frame,"w"
+#endif
.Lframe1:
.LSCIE1:
.long .LECIE1-.LASCIE1 /* Length of Common Information Entry */
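The new cif_abi parameter (and the matching 4-byte shift of every later stack offset in the GAS variant) lets ffi_call_win32 handle the Microsoft thiscall and fastcall conventions: just before the call, the first one (thiscall) or two (fastcall) dword arguments are popped off the prepared argument block into ecx and edx, which is where those conventions expect their leading arguments. The compared constants 3 and 4 mirror FFI_THISCALL and FFI_FASTCALL in the X86_WIN32 ffi_abi enumeration. A hedged C sketch of the same dispatch, with the enum values assumed from the cmp checks above:

    /* Illustrative sketch only; the assembly performs this before "call fn". */
    enum { FFI_THISCALL = 3, FFI_FASTCALL = 4 };

    struct regs { unsigned ecx, edx; };

    static unsigned *load_register_args(int abi, unsigned *sp, struct regs *r)
    {
        if (abi == FFI_THISCALL) {          /* `this` pointer goes in ECX */
            r->ecx = *sp++;
        } else if (abi == FFI_FASTCALL) {   /* first two dword args in ECX/EDX */
            r->ecx = *sp++;
            r->edx = *sp++;
        }
        return sp;                          /* remaining args stay on the stack */
    }

For stdcall and cdecl the block is left untouched and everything is passed on the stack, exactly as before this change.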
diff --git a/Modules/_ctypes/libffi/src/x86/win64.S b/Modules/_ctypes/libffi/src/x86/win64.S
index 6e91818..fcdb270 100644
--- a/Modules/_ctypes/libffi/src/x86/win64.S
+++ b/Modules/_ctypes/libffi/src/x86/win64.S
@@ -232,10 +232,18 @@ ret_void$:
ffi_call_win64 ENDP
_TEXT ENDS
END
-#else
+
+#else
+
+#ifdef SYMBOL_UNDERSCORE
+#define SYMBOL_NAME(name) _##name
+#else
+#define SYMBOL_NAME(name) name
+#endif
+
.text
-.extern _ffi_closure_win64_inner
+.extern SYMBOL_NAME(ffi_closure_win64_inner)
# ffi_closure_win64 will be called with these registers set:
# rax points to 'closure'
@@ -246,8 +254,8 @@ END
# call ffi_closure_win64_inner for the actual work, then return the result.
#
.balign 16
- .globl _ffi_closure_win64
-_ffi_closure_win64:
+ .globl SYMBOL_NAME(ffi_closure_win64)
+SYMBOL_NAME(ffi_closure_win64):
# copy register arguments onto stack
test $1,%r11
jne .Lfirst_is_float
@@ -287,7 +295,7 @@ _ffi_closure_win64:
mov %rax, %rcx # context is first parameter
mov %rsp, %rdx # stack is second parameter
add $48, %rdx # point to start of arguments
- mov $_ffi_closure_win64_inner, %rax
+ mov $SYMBOL_NAME(ffi_closure_win64_inner), %rax
callq *%rax # call the real closure function
add $40, %rsp
movq %rax, %xmm0 # If the closure returned a float,
@@ -296,8 +304,8 @@ _ffi_closure_win64:
.ffi_closure_win64_end:
.balign 16
- .globl _ffi_call_win64
-_ffi_call_win64:
+ .globl SYMBOL_NAME(ffi_call_win64)
+SYMBOL_NAME(ffi_call_win64):
# copy registers onto stack
mov %r9,32(%rsp)
mov %r8,24(%rsp)
diff --git a/Modules/_ctypes/libffi/src/xtensa/ffi.c b/Modules/_ctypes/libffi/src/xtensa/ffi.c
new file mode 100644
index 0000000..fd94daf
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/xtensa/ffi.c
@@ -0,0 +1,298 @@
+/* -----------------------------------------------------------------------
+ ffi.c - Copyright (c) 2013 Tensilica, Inc.
+
+ XTENSA Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#include <ffi.h>
+#include <ffi_common.h>
+
+/*
+ |----------------------------------------|
+ | |
+ on entry to ffi_call ----> |----------------------------------------|
+ | caller stack frame for registers a0-a3 |
+ |----------------------------------------|
+ | |
+ | additional arguments |
+ entry of the function ---> |----------------------------------------|
+ | copy of function arguments a2-a7 |
+ | - - - - - - - - - - - - - |
+ | |
+
+ The area below the entry line becomes the new stack frame for the function.
+
+*/
+
+
+#define FFI_TYPE_STRUCT_REGS FFI_TYPE_LAST
+
+
+extern void ffi_call_SYSV(void *rvalue, unsigned rsize, unsigned flags,
+ void(*fn)(void), unsigned nbytes, extended_cif*);
+extern void ffi_closure_SYSV(void) FFI_HIDDEN;
+
+ffi_status ffi_prep_cif_machdep(ffi_cif *cif)
+{
+ switch(cif->rtype->type) {
+ case FFI_TYPE_SINT8:
+ case FFI_TYPE_UINT8:
+ case FFI_TYPE_SINT16:
+ case FFI_TYPE_UINT16:
+ cif->flags = cif->rtype->type;
+ break;
+ case FFI_TYPE_VOID:
+ case FFI_TYPE_FLOAT:
+ cif->flags = FFI_TYPE_UINT32;
+ break;
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ cif->flags = FFI_TYPE_UINT64; // cif->rtype->type;
+ break;
+ case FFI_TYPE_STRUCT:
+ cif->flags = FFI_TYPE_STRUCT; //_REGS;
+ /* Up to 16 bytes are returned in registers */
+ if (cif->rtype->size > 4 * 4) {
+ /* returned structure is referenced by a register; use 8 bytes
+ (including 4 bytes for potential additional alignment) */
+ cif->flags = FFI_TYPE_STRUCT;
+ cif->bytes += 8;
+ }
+ break;
+
+ default:
+ cif->flags = FFI_TYPE_UINT32;
+ break;
+ }
+
+ /* Round the stack up to a full 4 register frame, just in case
+ (we use this size in movsp). This way, it's also a multiple of
+ 8 bytes for 64-bit arguments. */
+ cif->bytes = ALIGN(cif->bytes, 16);
+
+ return FFI_OK;
+}
+
+void ffi_prep_args(extended_cif *ecif, unsigned char* stack)
+{
+ unsigned int i;
+ unsigned long *addr;
+ ffi_type **ptr;
+
+ union {
+ void **v;
+ char **c;
+ signed char **sc;
+ unsigned char **uc;
+ signed short **ss;
+ unsigned short **us;
+ unsigned int **i;
+ long long **ll;
+ float **f;
+ double **d;
+ } p_argv;
+
+ /* Verify that everything is aligned up properly */
+ FFI_ASSERT (((unsigned long) stack & 0x7) == 0);
+
+ p_argv.v = ecif->avalue;
+ addr = (unsigned long*)stack;
+
+ /* structures with a size greater than 16 bytes are passed in memory */
+ if (ecif->cif->rtype->type == FFI_TYPE_STRUCT && ecif->cif->rtype->size > 16)
+ {
+ *addr++ = (unsigned long)ecif->rvalue;
+ }
+
+ for (i = ecif->cif->nargs, ptr = ecif->cif->arg_types;
+ i > 0;
+ i--, ptr++, p_argv.v++)
+ {
+ switch ((*ptr)->type)
+ {
+ case FFI_TYPE_SINT8:
+ *addr++ = **p_argv.sc;
+ break;
+ case FFI_TYPE_UINT8:
+ *addr++ = **p_argv.uc;
+ break;
+ case FFI_TYPE_SINT16:
+ *addr++ = **p_argv.ss;
+ break;
+ case FFI_TYPE_UINT16:
+ *addr++ = **p_argv.us;
+ break;
+ case FFI_TYPE_FLOAT:
+ case FFI_TYPE_INT:
+ case FFI_TYPE_UINT32:
+ case FFI_TYPE_SINT32:
+ case FFI_TYPE_POINTER:
+ *addr++ = **p_argv.i;
+ break;
+ case FFI_TYPE_DOUBLE:
+ case FFI_TYPE_UINT64:
+ case FFI_TYPE_SINT64:
+ if (((unsigned long)addr & 4) != 0)
+ addr++;
+ *(unsigned long long*)addr = **p_argv.ll;
+ addr += sizeof(unsigned long long) / sizeof (addr);
+ break;
+
+ case FFI_TYPE_STRUCT:
+ {
+ unsigned long offs;
+ unsigned long size;
+
+ if (((unsigned long)addr & 4) != 0 && (*ptr)->alignment > 4)
+ addr++;
+
+ offs = (unsigned long) addr - (unsigned long) stack;
+ size = (*ptr)->size;
+
+	  /* The entire structure must fit in the argument registers, or it is passed on the stack */
+ if (offs < FFI_REGISTER_NARGS * 4
+ && offs + size > FFI_REGISTER_NARGS * 4)
+ addr = (unsigned long*) (stack + FFI_REGISTER_NARGS * 4);
+
+ memcpy((char*) addr, *p_argv.c, size);
+ addr += (size + 3) / 4;
+ break;
+ }
+
+ default:
+ FFI_ASSERT(0);
+ }
+ }
+}
+
+
+void ffi_call(ffi_cif* cif, void(*fn)(void), void *rvalue, void **avalue)
+{
+ extended_cif ecif;
+ unsigned long rsize = cif->rtype->size;
+ int flags = cif->flags;
+ void *alloc = NULL;
+
+ ecif.cif = cif;
+ ecif.avalue = avalue;
+
+ /* Note that for structures that are returned in registers (size <= 16 bytes)
+ we allocate a temporary buffer and use memcpy to copy it to the final
+ destination. The reason is that the target address might be misaligned or
+ the length not a multiple of 4 bytes. Handling all those cases would be
+ very complex. */
+
+ if (flags == FFI_TYPE_STRUCT && (rsize <= 16 || rvalue == NULL))
+ {
+ alloc = alloca(ALIGN(rsize, 4));
+ ecif.rvalue = alloc;
+ }
+ else
+ {
+ ecif.rvalue = rvalue;
+ }
+
+ if (cif->abi != FFI_SYSV)
+ FFI_ASSERT(0);
+
+ ffi_call_SYSV (ecif.rvalue, rsize, cif->flags, fn, cif->bytes, &ecif);
+
+ if (alloc != NULL && rvalue != NULL)
+ memcpy(rvalue, alloc, rsize);
+}
+
+extern void ffi_trampoline();
+extern void ffi_cacheflush(void* start, void* end);
+
+ffi_status
+ffi_prep_closure_loc (ffi_closure* closure,
+ ffi_cif* cif,
+ void (*fun)(ffi_cif*, void*, void**, void*),
+ void *user_data,
+ void *codeloc)
+{
+  /* copy the trampoline to the stack and patch the 'ffi_closure_SYSV' pointer */
+ memcpy(closure->tramp, ffi_trampoline, FFI_TRAMPOLINE_SIZE);
+ *(unsigned int*)(&closure->tramp[8]) = (unsigned int)ffi_closure_SYSV;
+
+ // Do we have this function?
+  // __builtin___clear_cache(closure->tramp, closure->tramp + FFI_TRAMPOLINE_SIZE)
+ ffi_cacheflush(closure->tramp, closure->tramp + FFI_TRAMPOLINE_SIZE);
+
+ closure->cif = cif;
+ closure->fun = fun;
+ closure->user_data = user_data;
+ return FFI_OK;
+}
+
+
+long FFI_HIDDEN
+ffi_closure_SYSV_inner(ffi_closure *closure, void **values, void *rvalue)
+{
+ ffi_cif *cif;
+ ffi_type **arg_types;
+ void **avalue;
+ int i, areg;
+
+ cif = closure->cif;
+ if (cif->abi != FFI_SYSV)
+ return FFI_BAD_ABI;
+
+ areg = 0;
+
+ int rtype = cif->rtype->type;
+ if (rtype == FFI_TYPE_STRUCT && cif->rtype->size > 4 * 4)
+ {
+ rvalue = *values;
+ areg++;
+ }
+
+ cif = closure->cif;
+ arg_types = cif->arg_types;
+ avalue = alloca(cif->nargs * sizeof(void *));
+
+ for (i = 0; i < cif->nargs; i++)
+ {
+ if (arg_types[i]->alignment == 8 && (areg & 1) != 0)
+ areg++;
+
+      // skip past the 16-byte (4-register) base frame created by the 'entry a1, ...' instruction
+ if (areg == FFI_REGISTER_NARGS)
+ areg += 4;
+
+ if (arg_types[i]->type == FFI_TYPE_STRUCT)
+ {
+ int numregs = ((arg_types[i]->size + 3) & ~3) / 4;
+ if (areg < FFI_REGISTER_NARGS && areg + numregs > FFI_REGISTER_NARGS)
+ areg = FFI_REGISTER_NARGS + 4;
+ }
+
+ avalue[i] = &values[areg];
+ areg += (arg_types[i]->size + 3) / 4;
+ }
+
+ (closure->fun)(cif, rvalue, avalue, closure->user_data);
+
+ return rtype;
+}
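The new xtensa port fills in the architecture-specific entry points (ffi_prep_cif_machdep, ffi_prep_args, ffi_call, ffi_prep_closure_loc, ffi_closure_SYSV_inner); callers still drive it through the portable libffi API. A minimal sketch of that standard usage, not specific to xtensa, using only the documented public interface:

    /* Minimal libffi call sketch (generic public API). */
    #include <ffi.h>
    #include <stdio.h>

    static int add(int a, int b) { return a + b; }

    int main(void)
    {
        ffi_cif cif;
        ffi_type *args[2] = { &ffi_type_sint, &ffi_type_sint };
        int a = 2, b = 3;
        void *values[2] = { &a, &b };
        ffi_arg result;

        if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2,
                         &ffi_type_sint, args) == FFI_OK) {
            ffi_call(&cif, FFI_FN(add), &result, values);
            printf("%d\n", (int)result);   /* prints 5 */
        }
        return 0;
    }

ffi_prep_cif ends up calling the ffi_prep_cif_machdep added above, which decides the return-value flags and rounds cif->bytes to a full 4-register frame before ffi_call_SYSV is invoked.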
diff --git a/Modules/_ctypes/libffi/src/xtensa/ffitarget.h b/Modules/_ctypes/libffi/src/xtensa/ffitarget.h
new file mode 100644
index 0000000..0ba728b
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/xtensa/ffitarget.h
@@ -0,0 +1,53 @@
+/* -----------------------------------------------------------------*-C-*-
+ ffitarget.h - Copyright (c) 2013 Tensilica, Inc.
+ Target configuration macros for XTENSA.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#ifndef LIBFFI_TARGET_H
+#define LIBFFI_TARGET_H
+
+#ifndef LIBFFI_H
+#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead."
+#endif
+
+#ifndef LIBFFI_ASM
+typedef unsigned long ffi_arg;
+typedef signed long ffi_sarg;
+
+typedef enum ffi_abi {
+ FFI_FIRST_ABI = 0,
+ FFI_SYSV,
+ FFI_LAST_ABI,
+ FFI_DEFAULT_ABI = FFI_SYSV
+} ffi_abi;
+#endif
+
+#define FFI_REGISTER_NARGS 6
+
+/* ---- Definitions for closures ----------------------------------------- */
+
+#define FFI_CLOSURES 1
+#define FFI_NATIVE_RAW_API 0
+#define FFI_TRAMPOLINE_SIZE 24
+
+#endif
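With FFI_CLOSURES set to 1 and a 24-byte trampoline reserved, the port also supports the standard closure API; the trampoline bytes are what ffi_prep_closure_loc in the ffi.c above copies and patches with the address of ffi_closure_SYSV. A hedged sketch of the generic closure usage this enables, again nothing xtensa-specific:

    /* Generic libffi closure sketch; ffi_closure_alloc/ffi_closure_free are
       the portable allocation helpers declared in <ffi.h>. */
    #include <ffi.h>
    #include <stdio.h>

    static void handler(ffi_cif *cif, void *ret, void **args, void *user_data)
    {
        (void)cif; (void)user_data;
        *(ffi_arg *)ret = *(int *)args[0] * 2;   /* double the argument */
    }

    int main(void)
    {
        ffi_cif cif;
        ffi_type *argt[1] = { &ffi_type_sint };
        void *code;
        ffi_closure *closure = ffi_closure_alloc(sizeof(ffi_closure), &code);

        if (closure
            && ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
                            &ffi_type_sint, argt) == FFI_OK
            && ffi_prep_closure_loc(closure, &cif, handler, NULL, code) == FFI_OK) {
            int (*fn)(int) = (int (*)(int))code;
            printf("%d\n", fn(21));              /* prints 42 */
        }
        if (closure)
            ffi_closure_free(closure);
        return 0;
    }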
diff --git a/Modules/_ctypes/libffi/src/xtensa/sysv.S b/Modules/_ctypes/libffi/src/xtensa/sysv.S
new file mode 100644
index 0000000..64e6a09
--- /dev/null
+++ b/Modules/_ctypes/libffi/src/xtensa/sysv.S
@@ -0,0 +1,253 @@
+/* -----------------------------------------------------------------------
+ sysv.S - Copyright (c) 2013 Tensilica, Inc.
+
+ XTENSA Foreign Function Interface
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ ``Software''), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ ----------------------------------------------------------------------- */
+
+#define LIBFFI_ASM
+#include <fficonfig.h>
+#include <ffi.h>
+
+#define ENTRY(name) .text; .globl name; .type name,@function; .align 4; name:
+#define END(name) .size name , . - name
+
+/* Assert that the table below is in sync with ffi.h. */
+
+#if FFI_TYPE_UINT8 != 5 \
+ || FFI_TYPE_SINT8 != 6 \
+ || FFI_TYPE_UINT16 != 7 \
+ || FFI_TYPE_SINT16 != 8 \
+ || FFI_TYPE_UINT32 != 9 \
+ || FFI_TYPE_SINT32 != 10 \
+ || FFI_TYPE_UINT64 != 11
+#error "xtensa/sysv.S out of sync with ffi.h"
+#endif
+
+
+/* ffi_call_SYSV (rvalue, rbytes, flags, (*fnaddr)(), bytes, ecif)
+ void *rvalue; a2
+ unsigned long rbytes; a3
+ unsigned flags; a4
+ void (*fnaddr)(); a5
+ unsigned long bytes; a6
+ extended_cif* ecif) a7
+*/
+
+ENTRY(ffi_call_SYSV)
+
+ entry a1, 32 # 32 byte frame for using call8 below
+
+ mov a10, a7 # a10(->arg0): ecif
+ sub a11, a1, a6 # a11(->arg1): stack pointer
+ mov a7, a1 # fp
+ movsp a1, a11 # set new sp = old_sp - bytes
+
+ movi a8, ffi_prep_args
+ callx8 a8 # ffi_prep_args(ecif, stack)
+
+ # prepare to move stack pointer back up to 6 arguments
+ # note that 'bytes' is already aligned
+
+ movi a10, 6*4
+ sub a11, a6, a10
+ movgez a6, a10, a11
+ add a6, a1, a6
+
+
+ # we can pass up to 6 arguments in registers
+ # for simplicity, just load 6 arguments
+	# (the stack size is at least 32 bytes, so there is no risk of crossing boundaries)
+
+ l32i a10, a1, 0
+ l32i a11, a1, 4
+ l32i a12, a1, 8
+ l32i a13, a1, 12
+ l32i a14, a1, 16
+ l32i a15, a1, 20
+
+ # move stack pointer
+
+ movsp a1, a6
+
+ callx8 a5 # (*fn)(args...)
+
+ # Handle return value(s)
+
+ beqz a2, .Lexit
+
+ movi a5, FFI_TYPE_STRUCT
+ bne a4, a5, .Lstore
+ movi a5, 16
+ blt a5, a3, .Lexit
+
+ s32i a10, a2, 0
+ blti a3, 5, .Lexit
+ addi a3, a3, -1
+ s32i a11, a2, 4
+ blti a3, 8, .Lexit
+ s32i a12, a2, 8
+ blti a3, 12, .Lexit
+ s32i a13, a2, 12
+
+.Lexit: retw
+
+.Lstore:
+ addi a4, a4, -FFI_TYPE_UINT8
+ bgei a4, 7, .Lexit # should never happen
+ movi a6, store_calls
+ add a4, a4, a4
+ addx4 a6, a4, a6 # store_table + idx * 8
+ jx a6
+
+ .align 8
+store_calls:
+ # UINT8
+ s8i a10, a2, 0
+ retw
+
+ # SINT8
+ .align 8
+ s8i a10, a2, 0
+ retw
+
+ # UINT16
+ .align 8
+ s16i a10, a2, 0
+ retw
+
+ # SINT16
+ .align 8
+ s16i a10, a2, 0
+ retw
+
+ # UINT32
+ .align 8
+ s32i a10, a2, 0
+ retw
+
+ # SINT32
+ .align 8
+ s32i a10, a2, 0
+ retw
+
+ # UINT64
+ .align 8
+ s32i a10, a2, 0
+ s32i a11, a2, 4
+ retw
+
+END(ffi_call_SYSV)
+
+
+/*
+ * void ffi_cacheflush (unsigned long start, unsigned long end)
+ */
+
+#define EXTRA_ARGS_SIZE 24
+
+ENTRY(ffi_cacheflush)
+
+ entry a1, 16
+
+1: dhwbi a2, 0
+ ihi a2, 0
+ addi a2, a2, 4
+ blt a2, a3, 1b
+
+ retw
+
+END(ffi_cacheflush)
+
+/* ffi_trampoline is copied to the stack */
+
+ENTRY(ffi_trampoline)
+
+ entry a1, 16 + (FFI_REGISTER_NARGS * 4) + (4 * 4) # [ 0]
+ j 2f # [ 3]
+ .align 4 # [ 6]
+1: .long 0 # [ 8]
+2: l32r a15, 1b # [12]
+ _mov a14, a0 # [15]
+ callx0 a15 # [18]
+ # [21]
+END(ffi_trampoline)
+
+/*
+ * ffi_closure()
+ *
+ * a0: closure + 21
+ * a14: return address (a0)
+ */
+
+ENTRY(ffi_closure_SYSV)
+
+ /* intentionally omitting entry here */
+
+ # restore return address (a0) and move pointer to closure to a10
+ addi a10, a0, -21
+ mov a0, a14
+
+ # allow up to 4 arguments as return values
+ addi a11, a1, 4 * 4
+
+ # save up to 6 arguments to stack (allocated by entry below)
+ s32i a2, a11, 0
+ s32i a3, a11, 4
+ s32i a4, a11, 8
+ s32i a5, a11, 12
+ s32i a6, a11, 16
+ s32i a7, a11, 20
+
+ movi a8, ffi_closure_SYSV_inner
+ mov a12, a1
+ callx8 a8 # .._inner(*closure, **avalue, *rvalue)
+
+ # load up to four return arguments
+ l32i a2, a1, 0
+ l32i a3, a1, 4
+ l32i a4, a1, 8
+ l32i a5, a1, 12
+
+ # (sign-)extend return value
+ movi a11, FFI_TYPE_UINT8
+ bne a10, a11, 1f
+ extui a2, a2, 0, 8
+ retw
+
+1: movi a11, FFI_TYPE_SINT8
+ bne a10, a11, 1f
+ sext a2, a2, 7
+ retw
+
+1: movi a11, FFI_TYPE_UINT16
+ bne a10, a11, 1f
+ extui a2, a2, 0, 16
+ retw
+
+1: movi a11, FFI_TYPE_SINT16
+ bne a10, a11, 1f
+ sext a2, a2, 15
+
+1: retw
+
+END(ffi_closure_SYSV)
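The tail of ffi_closure_SYSV widens the raw a2 result according to the FFI return type: unsigned 8/16-bit values are zero-extended with extui, signed ones sign-extended with sext, before returning to the caller. An illustrative C equivalent of that widening, assuming a 32-bit ffi_arg and the FFI_TYPE_* constants from <ffi.h> (the #if check at the top of this file pins their values):

    /* Illustrative only: C equivalent of the extui/sext dispatch above. */
    #include <ffi.h>
    #include <stdint.h>

    static uint32_t widen_return(uint32_t raw, int rtype)
    {
        switch (rtype) {
        case FFI_TYPE_UINT8:  return (uint8_t)raw;            /* extui a2, a2, 0, 8  */
        case FFI_TYPE_SINT8:  return (uint32_t)(int8_t)raw;   /* sext  a2, a2, 7     */
        case FFI_TYPE_UINT16: return (uint16_t)raw;           /* extui a2, a2, 0, 16 */
        case FFI_TYPE_SINT16: return (uint32_t)(int16_t)raw;  /* sext  a2, a2, 15    */
        default:              return raw;                     /* wider types pass through */
        }
    }

This mirrors the ca_ret*/cs_ret* tables in the win32 port earlier in the patch, which perform the same promotion with movzx/movsx.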
diff --git a/Modules/_ctypes/libffi/stamp-h.in b/Modules/_ctypes/libffi/stamp-h.in
new file mode 100644
index 0000000..9788f70
--- /dev/null
+++ b/Modules/_ctypes/libffi/stamp-h.in
@@ -0,0 +1 @@
+timestamp
diff --git a/Modules/_ctypes/libffi/testsuite/Makefile.am b/Modules/_ctypes/libffi/testsuite/Makefile.am
index eae3d74..edc6e61 100644
--- a/Modules/_ctypes/libffi/testsuite/Makefile.am
+++ b/Modules/_ctypes/libffi/testsuite/Makefile.am
@@ -13,68 +13,82 @@ RUNTEST = `if [ -f $(top_srcdir)/../dejagnu/runtest ] ; then \
AM_RUNTESTFLAGS =
+EXTRA_DEJAGNU_SITE_CONFIG=../local.exp
+
CLEANFILES = *.exe core* *.log *.sum
-EXTRA_DIST = libffi.special/special.exp \
-libffi.special/unwindtest_ffi_call.cc libffi.special/unwindtest.cc \
-libffi.special/ffitestcxx.h config/default.exp lib/target-libpath.exp \
-lib/libffi-dg.exp lib/wrapper.exp libffi.call/float.c \
-libffi.call/cls_multi_schar.c libffi.call/float3.c \
-libffi.call/cls_3_1byte.c libffi.call/stret_large2.c \
-libffi.call/cls_5_1_byte.c libffi.call/stret_medium.c \
-libffi.call/promotion.c libffi.call/cls_dbls_struct.c \
-libffi.call/nested_struct.c libffi.call/closure_fn1.c \
-libffi.call/cls_4_1byte.c libffi.call/cls_float.c \
-libffi.call/cls_2byte.c libffi.call/closure_fn4.c \
-libffi.call/return_fl2.c libffi.call/nested_struct7.c \
-libffi.call/cls_uint.c libffi.call/cls_align_sint64.c \
-libffi.call/float1.c libffi.call/cls_19byte.c \
-libffi.call/nested_struct1.c libffi.call/cls_4byte.c \
-libffi.call/return_fl1.c libffi.call/cls_align_pointer.c \
-libffi.call/nested_struct4.c libffi.call/nested_struct3.c \
-libffi.call/struct7.c libffi.call/nested_struct9.c \
-libffi.call/cls_sshort.c libffi.call/cls_ulonglong.c \
-libffi.call/cls_pointer_stack.c libffi.call/cls_multi_uchar.c \
-libffi.call/testclosure.c libffi.call/cls_3byte1.c \
-libffi.call/struct6.c libffi.call/return_uc.c libffi.call/return_ll1.c \
-libffi.call/cls_ushort.c libffi.call/stret_medium2.c \
-libffi.call/cls_multi_ushortchar.c libffi.call/return_dbl2.c \
-libffi.call/closure_loc_fn0.c libffi.call/return_sc.c \
-libffi.call/nested_struct8.c libffi.call/cls_7_1_byte.c \
-libffi.call/return_ll.c libffi.call/cls_pointer.c \
-libffi.call/err_bad_abi.c libffi.call/return_dbl1.c \
-libffi.call/call.exp libffi.call/ffitest.h libffi.call/strlen.c \
-libffi.call/return_sl.c libffi.call/cls_1_1byte.c \
-libffi.call/struct1.c libffi.call/cls_64byte.c libffi.call/return_ul.c \
-libffi.call/cls_double.c libffi.call/many_win32.c \
-libffi.call/cls_16byte.c libffi.call/cls_align_double.c \
-libffi.call/cls_align_uint16.c libffi.call/cls_9byte1.c \
-libffi.call/cls_multi_sshortchar.c libffi.call/cls_multi_ushort.c \
-libffi.call/closure_stdcall.c libffi.call/return_fl.c \
-libffi.call/strlen_win32.c libffi.call/return_ldl.c \
-libffi.call/cls_align_float.c libffi.call/struct3.c \
-libffi.call/cls_uchar.c libffi.call/cls_sint.c libffi.call/float2.c \
-libffi.call/cls_align_longdouble_split.c \
-libffi.call/cls_longdouble_va.c libffi.call/cls_multi_sshort.c \
-libffi.call/stret_large.c libffi.call/cls_align_sint16.c \
-libffi.call/nested_struct6.c libffi.call/cls_5byte.c \
-libffi.call/return_dbl.c libffi.call/cls_20byte.c \
-libffi.call/cls_8byte.c libffi.call/pyobjc-tc.c \
-libffi.call/cls_24byte.c libffi.call/cls_align_longdouble_split2.c \
-libffi.call/cls_6_1_byte.c libffi.call/cls_schar.c \
-libffi.call/cls_18byte.c libffi.call/closure_fn3.c \
-libffi.call/err_bad_typedef.c libffi.call/closure_fn2.c \
-libffi.call/struct2.c libffi.call/cls_3byte2.c \
-libffi.call/cls_align_longdouble.c libffi.call/cls_20byte1.c \
-libffi.call/return_fl3.c libffi.call/cls_align_uint32.c \
-libffi.call/problem1.c libffi.call/float4.c \
-libffi.call/cls_align_uint64.c libffi.call/struct9.c \
-libffi.call/closure_fn5.c libffi.call/cls_align_sint32.c \
-libffi.call/closure_fn0.c libffi.call/closure_fn6.c \
-libffi.call/struct4.c libffi.call/nested_struct2.c \
-libffi.call/cls_6byte.c libffi.call/cls_7byte.c libffi.call/many.c \
-libffi.call/struct8.c libffi.call/negint.c libffi.call/struct5.c \
-libffi.call/cls_12byte.c libffi.call/cls_double_va.c \
-libffi.call/cls_longdouble.c libffi.call/cls_9byte2.c \
-libffi.call/nested_struct10.c libffi.call/nested_struct5.c \
-libffi.call/huge_struct.c
+EXTRA_DIST = config/default.exp libffi.call/cls_19byte.c \
+libffi.call/cls_align_longdouble_split.c \
+libffi.call/closure_loc_fn0.c libffi.call/cls_schar.c \
+libffi.call/closure_fn1.c libffi.call/many2_win32.c \
+libffi.call/return_ul.c libffi.call/cls_align_double.c \
+libffi.call/return_fl2.c libffi.call/cls_1_1byte.c \
+libffi.call/cls_64byte.c libffi.call/nested_struct7.c \
+libffi.call/cls_align_sint32.c libffi.call/nested_struct2.c \
+libffi.call/ffitest.h libffi.call/nested_struct4.c \
+libffi.call/cls_multi_ushort.c libffi.call/struct3.c \
+libffi.call/cls_3byte1.c libffi.call/cls_16byte.c \
+libffi.call/struct8.c libffi.call/nested_struct8.c \
+libffi.call/cls_multi_sshort.c libffi.call/cls_3byte2.c \
+libffi.call/fastthis2_win32.c libffi.call/cls_pointer.c \
+libffi.call/err_bad_typedef.c libffi.call/cls_4_1byte.c \
+libffi.call/cls_9byte2.c libffi.call/cls_multi_schar.c \
+libffi.call/stret_medium2.c libffi.call/cls_5_1_byte.c \
+libffi.call/call.exp libffi.call/cls_double.c \
+libffi.call/cls_align_sint16.c libffi.call/cls_uint.c \
+libffi.call/return_ll1.c libffi.call/nested_struct3.c \
+libffi.call/cls_20byte1.c libffi.call/closure_fn4.c \
+libffi.call/cls_uchar.c libffi.call/struct2.c libffi.call/cls_7byte.c \
+libffi.call/strlen.c libffi.call/many.c libffi.call/testclosure.c \
+libffi.call/return_fl.c libffi.call/struct5.c \
+libffi.call/cls_12byte.c libffi.call/cls_multi_sshortchar.c \
+libffi.call/cls_align_longdouble_split2.c libffi.call/return_dbl2.c \
+libffi.call/return_fl3.c libffi.call/stret_medium.c \
+libffi.call/nested_struct6.c libffi.call/closure_fn3.c \
+libffi.call/float3.c libffi.call/many2.c \
+libffi.call/closure_stdcall.c libffi.call/cls_align_uint16.c \
+libffi.call/cls_9byte1.c libffi.call/closure_fn6.c \
+libffi.call/cls_double_va.c libffi.call/cls_align_pointer.c \
+libffi.call/cls_align_longdouble.c libffi.call/closure_fn2.c \
+libffi.call/cls_sshort.c libffi.call/many_win32.c \
+libffi.call/nested_struct.c libffi.call/cls_20byte.c \
+libffi.call/cls_longdouble.c libffi.call/cls_multi_uchar.c \
+libffi.call/return_uc.c libffi.call/closure_thiscall.c \
+libffi.call/cls_18byte.c libffi.call/cls_8byte.c \
+libffi.call/promotion.c libffi.call/struct1_win32.c \
+libffi.call/return_dbl.c libffi.call/cls_24byte.c \
+libffi.call/struct4.c libffi.call/cls_6byte.c \
+libffi.call/cls_align_uint32.c libffi.call/float.c \
+libffi.call/float1.c libffi.call/float_va.c libffi.call/negint.c \
+libffi.call/return_dbl1.c libffi.call/cls_3_1byte.c \
+libffi.call/cls_align_float.c libffi.call/return_fl1.c \
+libffi.call/nested_struct10.c libffi.call/nested_struct5.c \
+libffi.call/fastthis1_win32.c libffi.call/cls_align_sint64.c \
+libffi.call/stret_large2.c libffi.call/return_sl.c \
+libffi.call/closure_fn0.c libffi.call/cls_5byte.c \
+libffi.call/cls_2byte.c libffi.call/float2.c \
+libffi.call/cls_dbls_struct.c libffi.call/cls_sint.c \
+libffi.call/stret_large.c libffi.call/cls_ulonglong.c \
+libffi.call/cls_ushort.c libffi.call/nested_struct1.c \
+libffi.call/err_bad_abi.c libffi.call/cls_longdouble_va.c \
+libffi.call/cls_float.c libffi.call/cls_pointer_stack.c \
+libffi.call/pyobjc-tc.c libffi.call/cls_multi_ushortchar.c \
+libffi.call/struct1.c libffi.call/nested_struct9.c \
+libffi.call/huge_struct.c libffi.call/problem1.c \
+libffi.call/float4.c libffi.call/fastthis3_win32.c \
+libffi.call/return_ldl.c libffi.call/strlen2_win32.c \
+libffi.call/closure_fn5.c libffi.call/struct2_win32.c \
+libffi.call/struct6.c libffi.call/return_ll.c libffi.call/struct9.c \
+libffi.call/return_sc.c libffi.call/struct7.c \
+libffi.call/cls_align_uint64.c libffi.call/cls_4byte.c \
+libffi.call/strlen_win32.c libffi.call/cls_6_1_byte.c \
+libffi.call/cls_7_1_byte.c libffi.special/unwindtest.cc \
+libffi.special/special.exp libffi.special/unwindtest_ffi_call.cc \
+libffi.special/ffitestcxx.h lib/wrapper.exp lib/target-libpath.exp \
+lib/libffi.exp libffi.call/cls_struct_va1.c \
+libffi.call/cls_uchar_va.c libffi.call/cls_uint_va.c \
+libffi.call/cls_ulong_va.c libffi.call/cls_ushort_va.c \
+libffi.call/nested_struct11.c libffi.call/uninitialized.c \
+libffi.call/va_1.c libffi.call/va_struct1.c libffi.call/va_struct2.c \
+libffi.call/va_struct3.c
+
diff --git a/Modules/_ctypes/libffi/testsuite/Makefile.in b/Modules/_ctypes/libffi/testsuite/Makefile.in
index 698c461..a3ba066 100644
--- a/Modules/_ctypes/libffi/testsuite/Makefile.in
+++ b/Modules/_ctypes/libffi/testsuite/Makefile.in
@@ -1,9 +1,8 @@
-# Makefile.in generated by automake 1.11 from Makefile.am.
+# Makefile.in generated by automake 1.12.2 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
-# Inc.
+# Copyright (C) 1994-2012 Free Software Foundation, Inc.
+
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -15,6 +14,23 @@
@SET_MAKE@
VPATH = @srcdir@
+am__make_dryrun = \
+ { \
+ am__dry=no; \
+ case $$MAKEFLAGS in \
+ *\\[\ \ ]*) \
+ echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \
+ | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
+ *) \
+ for am__flg in $$MAKEFLAGS; do \
+ case $$am__flg in \
+ *=*|--*) ;; \
+ *n*) am__dry=yes; break;; \
+ esac; \
+ done;; \
+ esac; \
+ test $$am__dry = yes; \
+ }
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
@@ -37,7 +53,19 @@ target_triplet = @target@
subdir = testsuite
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/acinclude.m4 \
+am__aclocal_m4_deps = $(top_srcdir)/m4/asmcfi.m4 \
+ $(top_srcdir)/m4/ax_append_flag.m4 \
+ $(top_srcdir)/m4/ax_cc_maxopt.m4 \
+ $(top_srcdir)/m4/ax_cflags_warn_all.m4 \
+ $(top_srcdir)/m4/ax_check_compile_flag.m4 \
+ $(top_srcdir)/m4/ax_compiler_vendor.m4 \
+ $(top_srcdir)/m4/ax_configure_args.m4 \
+ $(top_srcdir)/m4/ax_enable_builddir.m4 \
+ $(top_srcdir)/m4/ax_gcc_archflag.m4 \
+ $(top_srcdir)/m4/ax_gcc_x86_cpuid.m4 \
+ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
+ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
+ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
@@ -47,12 +75,18 @@ CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
SOURCES =
DIST_SOURCES =
+am__can_run_installinfo = \
+ case $$AM_UPDATE_INFO_DIR in \
+ n|no|NO) false;; \
+ *) (install-info --version) >/dev/null 2>&1;; \
+ esac
DEJATOOL = $(PACKAGE)
RUNTESTDEFAULTFLAGS = --tool $$tool --srcdir $$srcdir
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
ALLOCA = @ALLOCA@
AMTAR = @AMTAR@
+AM_LTLDFLAGS = @AM_LTLDFLAGS@
AM_RUNTESTFLAGS =
AR = @AR@
AUTOCONF = @AUTOCONF@
@@ -70,6 +104,7 @@ CPPFLAGS = @CPPFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
@@ -77,6 +112,7 @@ ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
+FFI_EXEC_TRAMPOLINE_TABLE = @FFI_EXEC_TRAMPOLINE_TABLE@
FGREP = @FGREP@
GREP = @GREP@
HAVE_LONG_DOUBLE = @HAVE_LONG_DOUBLE@
@@ -95,6 +131,7 @@ LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
MAINT = @MAINT@
MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
MKDIR_P = @MKDIR_P@
NM = @NM@
NMEDIT = @NMEDIT@
@@ -107,8 +144,10 @@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
+PRTDIAG = @PRTDIAG@
RANLIB = @RANLIB@
SED = @SED@
SET_MAKE = @SET_MAKE@
@@ -121,6 +160,7 @@ abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
@@ -128,6 +168,7 @@ am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
+ax_enable_builddir_sed = @ax_enable_builddir_sed@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
@@ -153,7 +194,6 @@ libdir = @libdir@
libexecdir = @libexecdir@
localedir = @localedir@
localstatedir = @localstatedir@
-lt_ECHO = @lt_ECHO@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
@@ -164,6 +204,7 @@ psdir = @psdir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
+sys_symbol_underscore = @sys_symbol_underscore@
sysconfdir = @sysconfdir@
target = @target@
target_alias = @target_alias@
@@ -186,70 +227,82 @@ RUNTEST = `if [ -f $(top_srcdir)/../dejagnu/runtest ] ; then \
echo $(top_srcdir)/../dejagnu/runtest ; \
else echo runtest; fi`
+EXTRA_DEJAGNU_SITE_CONFIG = ../local.exp
CLEANFILES = *.exe core* *.log *.sum
-EXTRA_DIST = libffi.special/special.exp \
-libffi.special/unwindtest_ffi_call.cc libffi.special/unwindtest.cc \
-libffi.special/ffitestcxx.h config/default.exp lib/target-libpath.exp \
-lib/libffi-dg.exp lib/wrapper.exp libffi.call/float.c \
-libffi.call/cls_multi_schar.c libffi.call/float3.c \
-libffi.call/cls_3_1byte.c libffi.call/stret_large2.c \
-libffi.call/cls_5_1_byte.c libffi.call/stret_medium.c \
-libffi.call/promotion.c libffi.call/cls_dbls_struct.c \
-libffi.call/nested_struct.c libffi.call/closure_fn1.c \
-libffi.call/cls_4_1byte.c libffi.call/cls_float.c \
-libffi.call/cls_2byte.c libffi.call/closure_fn4.c \
-libffi.call/return_fl2.c libffi.call/nested_struct7.c \
-libffi.call/cls_uint.c libffi.call/cls_align_sint64.c \
-libffi.call/float1.c libffi.call/cls_19byte.c \
-libffi.call/nested_struct1.c libffi.call/cls_4byte.c \
-libffi.call/return_fl1.c libffi.call/cls_align_pointer.c \
-libffi.call/nested_struct4.c libffi.call/nested_struct3.c \
-libffi.call/struct7.c libffi.call/nested_struct9.c \
-libffi.call/cls_sshort.c libffi.call/cls_ulonglong.c \
-libffi.call/cls_pointer_stack.c libffi.call/cls_multi_uchar.c \
-libffi.call/testclosure.c libffi.call/cls_3byte1.c \
-libffi.call/struct6.c libffi.call/return_uc.c libffi.call/return_ll1.c \
-libffi.call/cls_ushort.c libffi.call/stret_medium2.c \
-libffi.call/cls_multi_ushortchar.c libffi.call/return_dbl2.c \
-libffi.call/closure_loc_fn0.c libffi.call/return_sc.c \
-libffi.call/nested_struct8.c libffi.call/cls_7_1_byte.c \
-libffi.call/return_ll.c libffi.call/cls_pointer.c \
-libffi.call/err_bad_abi.c libffi.call/return_dbl1.c \
-libffi.call/call.exp libffi.call/ffitest.h libffi.call/strlen.c \
-libffi.call/return_sl.c libffi.call/cls_1_1byte.c \
-libffi.call/struct1.c libffi.call/cls_64byte.c libffi.call/return_ul.c \
-libffi.call/cls_double.c libffi.call/many_win32.c \
-libffi.call/cls_16byte.c libffi.call/cls_align_double.c \
-libffi.call/cls_align_uint16.c libffi.call/cls_9byte1.c \
-libffi.call/cls_multi_sshortchar.c libffi.call/cls_multi_ushort.c \
-libffi.call/closure_stdcall.c libffi.call/return_fl.c \
-libffi.call/strlen_win32.c libffi.call/return_ldl.c \
-libffi.call/cls_align_float.c libffi.call/struct3.c \
-libffi.call/cls_uchar.c libffi.call/cls_sint.c libffi.call/float2.c \
-libffi.call/cls_align_longdouble_split.c \
-libffi.call/cls_longdouble_va.c libffi.call/cls_multi_sshort.c \
-libffi.call/stret_large.c libffi.call/cls_align_sint16.c \
-libffi.call/nested_struct6.c libffi.call/cls_5byte.c \
-libffi.call/return_dbl.c libffi.call/cls_20byte.c \
-libffi.call/cls_8byte.c libffi.call/pyobjc-tc.c \
-libffi.call/cls_24byte.c libffi.call/cls_align_longdouble_split2.c \
-libffi.call/cls_6_1_byte.c libffi.call/cls_schar.c \
-libffi.call/cls_18byte.c libffi.call/closure_fn3.c \
-libffi.call/err_bad_typedef.c libffi.call/closure_fn2.c \
-libffi.call/struct2.c libffi.call/cls_3byte2.c \
-libffi.call/cls_align_longdouble.c libffi.call/cls_20byte1.c \
-libffi.call/return_fl3.c libffi.call/cls_align_uint32.c \
-libffi.call/problem1.c libffi.call/float4.c \
-libffi.call/cls_align_uint64.c libffi.call/struct9.c \
-libffi.call/closure_fn5.c libffi.call/cls_align_sint32.c \
-libffi.call/closure_fn0.c libffi.call/closure_fn6.c \
-libffi.call/struct4.c libffi.call/nested_struct2.c \
-libffi.call/cls_6byte.c libffi.call/cls_7byte.c libffi.call/many.c \
-libffi.call/struct8.c libffi.call/negint.c libffi.call/struct5.c \
-libffi.call/cls_12byte.c libffi.call/cls_double_va.c \
-libffi.call/cls_longdouble.c libffi.call/cls_9byte2.c \
-libffi.call/nested_struct10.c libffi.call/nested_struct5.c \
-libffi.call/huge_struct.c
+EXTRA_DIST = config/default.exp libffi.call/cls_19byte.c \
+libffi.call/cls_align_longdouble_split.c \
+libffi.call/closure_loc_fn0.c libffi.call/cls_schar.c \
+libffi.call/closure_fn1.c libffi.call/many2_win32.c \
+libffi.call/return_ul.c libffi.call/cls_align_double.c \
+libffi.call/return_fl2.c libffi.call/cls_1_1byte.c \
+libffi.call/cls_64byte.c libffi.call/nested_struct7.c \
+libffi.call/cls_align_sint32.c libffi.call/nested_struct2.c \
+libffi.call/ffitest.h libffi.call/nested_struct4.c \
+libffi.call/cls_multi_ushort.c libffi.call/struct3.c \
+libffi.call/cls_3byte1.c libffi.call/cls_16byte.c \
+libffi.call/struct8.c libffi.call/nested_struct8.c \
+libffi.call/cls_multi_sshort.c libffi.call/cls_3byte2.c \
+libffi.call/fastthis2_win32.c libffi.call/cls_pointer.c \
+libffi.call/err_bad_typedef.c libffi.call/cls_4_1byte.c \
+libffi.call/cls_9byte2.c libffi.call/cls_multi_schar.c \
+libffi.call/stret_medium2.c libffi.call/cls_5_1_byte.c \
+libffi.call/call.exp libffi.call/cls_double.c \
+libffi.call/cls_align_sint16.c libffi.call/cls_uint.c \
+libffi.call/return_ll1.c libffi.call/nested_struct3.c \
+libffi.call/cls_20byte1.c libffi.call/closure_fn4.c \
+libffi.call/cls_uchar.c libffi.call/struct2.c libffi.call/cls_7byte.c \
+libffi.call/strlen.c libffi.call/many.c libffi.call/testclosure.c \
+libffi.call/return_fl.c libffi.call/struct5.c \
+libffi.call/cls_12byte.c libffi.call/cls_multi_sshortchar.c \
+libffi.call/cls_align_longdouble_split2.c libffi.call/return_dbl2.c \
+libffi.call/return_fl3.c libffi.call/stret_medium.c \
+libffi.call/nested_struct6.c libffi.call/closure_fn3.c \
+libffi.call/float3.c libffi.call/many2.c \
+libffi.call/closure_stdcall.c libffi.call/cls_align_uint16.c \
+libffi.call/cls_9byte1.c libffi.call/closure_fn6.c \
+libffi.call/cls_double_va.c libffi.call/cls_align_pointer.c \
+libffi.call/cls_align_longdouble.c libffi.call/closure_fn2.c \
+libffi.call/cls_sshort.c libffi.call/many_win32.c \
+libffi.call/nested_struct.c libffi.call/cls_20byte.c \
+libffi.call/cls_longdouble.c libffi.call/cls_multi_uchar.c \
+libffi.call/return_uc.c libffi.call/closure_thiscall.c \
+libffi.call/cls_18byte.c libffi.call/cls_8byte.c \
+libffi.call/promotion.c libffi.call/struct1_win32.c \
+libffi.call/return_dbl.c libffi.call/cls_24byte.c \
+libffi.call/struct4.c libffi.call/cls_6byte.c \
+libffi.call/cls_align_uint32.c libffi.call/float.c \
+libffi.call/float1.c libffi.call/float_va.c libffi.call/negint.c \
+libffi.call/return_dbl1.c libffi.call/cls_3_1byte.c \
+libffi.call/cls_align_float.c libffi.call/return_fl1.c \
+libffi.call/nested_struct10.c libffi.call/nested_struct5.c \
+libffi.call/fastthis1_win32.c libffi.call/cls_align_sint64.c \
+libffi.call/stret_large2.c libffi.call/return_sl.c \
+libffi.call/closure_fn0.c libffi.call/cls_5byte.c \
+libffi.call/cls_2byte.c libffi.call/float2.c \
+libffi.call/cls_dbls_struct.c libffi.call/cls_sint.c \
+libffi.call/stret_large.c libffi.call/cls_ulonglong.c \
+libffi.call/cls_ushort.c libffi.call/nested_struct1.c \
+libffi.call/err_bad_abi.c libffi.call/cls_longdouble_va.c \
+libffi.call/cls_float.c libffi.call/cls_pointer_stack.c \
+libffi.call/pyobjc-tc.c libffi.call/cls_multi_ushortchar.c \
+libffi.call/struct1.c libffi.call/nested_struct9.c \
+libffi.call/huge_struct.c libffi.call/problem1.c \
+libffi.call/float4.c libffi.call/fastthis3_win32.c \
+libffi.call/return_ldl.c libffi.call/strlen2_win32.c \
+libffi.call/closure_fn5.c libffi.call/struct2_win32.c \
+libffi.call/struct6.c libffi.call/return_ll.c libffi.call/struct9.c \
+libffi.call/return_sc.c libffi.call/struct7.c \
+libffi.call/cls_align_uint64.c libffi.call/cls_4byte.c \
+libffi.call/strlen_win32.c libffi.call/cls_6_1_byte.c \
+libffi.call/cls_7_1_byte.c libffi.special/unwindtest.cc \
+libffi.special/special.exp libffi.special/unwindtest_ffi_call.cc \
+libffi.special/ffitestcxx.h lib/wrapper.exp lib/target-libpath.exp \
+lib/libffi.exp libffi.call/cls_struct_va1.c \
+libffi.call/cls_uchar_va.c libffi.call/cls_uint_va.c \
+libffi.call/cls_ulong_va.c libffi.call/cls_ushort_va.c \
+libffi.call/nested_struct11.c libffi.call/uninitialized.c \
+libffi.call/va_1.c libffi.call/va_struct1.c libffi.call/va_struct2.c \
+libffi.call/va_struct3.c
all: all-am
@@ -296,9 +349,11 @@ TAGS:
ctags: CTAGS
CTAGS:
+cscope cscopelist:
+
check-DEJAGNU: site.exp
- srcdir=`$(am__cd) $(srcdir) && pwd`; export srcdir; \
+ srcdir='$(srcdir)'; export srcdir; \
EXPECT=$(EXPECT); export EXPECT; \
runtest=$(RUNTEST); \
if $(SHELL) -c "$$runtest --version" > /dev/null 2>&1; then \
@@ -306,15 +361,15 @@ check-DEJAGNU: site.exp
if $$runtest $(AM_RUNTESTFLAGS) $(RUNTESTDEFAULTFLAGS) $(RUNTESTFLAGS); \
then :; else exit_status=1; fi; \
done; \
- else echo "WARNING: could not find \`runtest'" 1>&2; :;\
+ else echo "WARNING: could not find 'runtest'" 1>&2; :;\
fi; \
exit $$exit_status
-site.exp: Makefile
- @echo 'Making a new site.exp file...'
+site.exp: Makefile $(EXTRA_DEJAGNU_SITE_CONFIG)
+ @echo 'Making a new site.exp file ...'
@echo '## these variables are automatically generated by make ##' >site.tmp
@echo '# Do not edit here. If you wish to override these values' >>site.tmp
@echo '# edit the last section' >>site.tmp
- @echo 'set srcdir $(srcdir)' >>site.tmp
+ @echo 'set srcdir "$(srcdir)"' >>site.tmp
@echo "set objdir `pwd`" >>site.tmp
@echo 'set build_alias "$(build_alias)"' >>site.tmp
@echo 'set build_triplet $(build_triplet)' >>site.tmp
@@ -322,9 +377,16 @@ site.exp: Makefile
@echo 'set host_triplet $(host_triplet)' >>site.tmp
@echo 'set target_alias "$(target_alias)"' >>site.tmp
@echo 'set target_triplet $(target_triplet)' >>site.tmp
- @echo '## All variables above are generated by configure. Do Not Edit ##' >>site.tmp
- @test ! -f site.exp || \
- sed '1,/^## All variables above are.*##/ d' site.exp >> site.tmp
+ @list='$(EXTRA_DEJAGNU_SITE_CONFIG)'; for f in $$list; do \
+ echo "## Begin content included from file $$f. Do not modify. ##" \
+ && cat `test -f "$$f" || echo '$(srcdir)/'`$$f \
+ && echo "## End content included from file $$f. ##" \
+ || exit 1; \
+ done >> site.tmp
+ @echo "## End of auto-generated content; you can edit from here. ##" >> site.tmp
+ @if test -f site.exp; then \
+ sed -e '1,/^## End of auto-generated content.*##/d' site.exp >> site.tmp; \
+ fi
@-rm -f site.bak
@test ! -f site.exp || mv site.exp site.bak
@mv site.tmp site.exp
@@ -380,10 +442,15 @@ install-am: all-am
installcheck: installcheck-am
install-strip:
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- `test -z '$(STRIP)' || \
- echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+ if test -z '$(STRIP)'; then \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ install; \
+ else \
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+ fi
mostlyclean-generic:
clean-generic:
diff --git a/Modules/_ctypes/libffi/testsuite/lib/libffi-dg.exp b/Modules/_ctypes/libffi/testsuite/lib/libffi.exp
index 838a306..1ac2c36 100644
--- a/Modules/_ctypes/libffi/testsuite/lib/libffi-dg.exp
+++ b/Modules/_ctypes/libffi/testsuite/lib/libffi.exp
@@ -1,4 +1,4 @@
-# Copyright (C) 2003, 2005, 2008, 2009, 2010 Free Software Foundation, Inc.
+# Copyright (C) 2003, 2005, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -11,8 +11,8 @@
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# along with this program; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
proc load_gcc_lib { filename } {
global srcdir
@@ -101,9 +101,17 @@ proc libffi-init { args } {
global tool_root_dir
global ld_library_path
- set blddirffi [pwd]/..
+ global using_gcc
+
+ set blddirffi [pwd]/..
verbose "libffi $blddirffi"
+ # Are we building with GCC?
+ set tmp [grep ../config.status "GCC='yes'"]
+ if { [string match $tmp "GCC='yes'"] } {
+
+ set using_gcc "yes"
+
set gccdir [lookfor_file $tool_root_dir gcc/libgcc.a]
if {$gccdir != ""} {
set gccdir [file dirname $gccdir]
@@ -127,6 +135,13 @@ proc libffi-init { args } {
}
}
}
+
+ } else {
+
+ set using_gcc "no"
+
+ }
+
# add the library path for libffi.
append ld_library_path ":${blddirffi}/.libs"
@@ -203,6 +218,10 @@ proc libffi_target_compile { source dest type options } {
lappend options "libs= -lffi"
+ if { [string match "aarch64*-*-linux*" $target_triplet] } {
+ lappend options "libs= -lpthread"
+ }
+
verbose "options: $options"
return [target_compile $source $dest $type $options]
}
@@ -266,6 +285,56 @@ proc dg-xfail-if { args } {
}
}
+proc check-flags { args } {
+
+ # The args are within another list; pull them out.
+ set args [lindex $args 0]
+
+ # The next two arguments are optional. If they were not specified,
+ # use the defaults.
+ if { [llength $args] == 2 } {
+ lappend $args [list "*"]
+ }
+ if { [llength $args] == 3 } {
+ lappend $args [list ""]
+ }
+
+ # If the option strings are the defaults, or the same as the
+ # defaults, there is no need to call check_conditional_xfail to
+ # compare them to the actual options.
+ if { [string compare [lindex $args 2] "*"] == 0
+ && [string compare [lindex $args 3] "" ] == 0 } {
+ set result 1
+ } else {
+ # The target list might be an effective-target keyword, so replace
+ # the original list with "*-*-*", since we already know it matches.
+ set result [check_conditional_xfail [lreplace $args 1 1 "*-*-*"]]
+ }
+
+ return $result
+}
+
+proc dg-skip-if { args } {
+ # Verify the number of arguments. The last two are optional.
+ set args [lreplace $args 0 0]
+ if { [llength $args] < 2 || [llength $args] > 4 } {
+ error "dg-skip-if 2: need 2, 3, or 4 arguments"
+ }
+
+ # Don't bother if we're already skipping the test.
+ upvar dg-do-what dg-do-what
+ if { [lindex ${dg-do-what} 1] == "N" } {
+ return
+ }
+
+ set selector [list target [lindex $args 1]]
+ if { [dg-process-target $selector] == "S" } {
+ if [check-flags $args] {
+ upvar dg-do-what dg-do-what
+ set dg-do-what [list [lindex ${dg-do-what} 0] "N" "P"]
+ }
+ }
+}
# We need to make sure that additional_files and additional_sources
# are both cleared out after every test. It is not enough to clear
diff --git a/Modules/_ctypes/libffi/testsuite/lib/target-libpath.exp b/Modules/_ctypes/libffi/testsuite/lib/target-libpath.exp
index 8999aa4..6b7beba 100644
--- a/Modules/_ctypes/libffi/testsuite/lib/target-libpath.exp
+++ b/Modules/_ctypes/libffi/testsuite/lib/target-libpath.exp
@@ -25,7 +25,7 @@ set orig_ld_library64_path_saved 0
set orig_ld_library_path_32_saved 0
set orig_ld_library_path_64_saved 0
set orig_dyld_library_path_saved 0
-
+set orig_path_saved 0
#######################################
# proc set_ld_library_path_env_vars { }
@@ -42,6 +42,7 @@ proc set_ld_library_path_env_vars { } {
global orig_ld_library_path_32_saved
global orig_ld_library_path_64_saved
global orig_dyld_library_path_saved
+ global orig_path_saved
global orig_ld_library_path
global orig_ld_run_path
global orig_shlib_path
@@ -50,6 +51,7 @@ proc set_ld_library_path_env_vars { } {
global orig_ld_library_path_32
global orig_ld_library_path_64
global orig_dyld_library_path
+ global orig_path
global GCC_EXEC_PREFIX
# Set the relocated compiler prefix, but only if the user hasn't specified one.
@@ -100,6 +102,10 @@ proc set_ld_library_path_env_vars { } {
set orig_dyld_library_path "$env(DYLD_LIBRARY_PATH)"
set orig_dyld_library_path_saved 1
}
+ if [info exists env(PATH)] {
+ set orig_path "$env(PATH)"
+ set orig_path_saved 1
+ }
}
# We need to set ld library path in the environment. Currently,
@@ -169,6 +175,13 @@ proc set_ld_library_path_env_vars { } {
} else {
setenv DYLD_LIBRARY_PATH "$ld_library_path"
}
+ if { [istarget *-*-cygwin*] || [istarget *-*-mingw*] } {
+ if { $orig_path_saved } {
+ setenv PATH "$ld_library_path:$orig_path"
+ } else {
+ setenv PATH "$ld_library_path"
+ }
+ }
verbose -log "set_ld_library_path_env_vars: ld_library_path=$ld_library_path"
}
@@ -187,6 +200,7 @@ proc restore_ld_library_path_env_vars { } {
global orig_ld_library_path_32_saved
global orig_ld_library_path_64_saved
global orig_dyld_library_path_saved
+ global orig_path_saved
global orig_ld_library_path
global orig_ld_run_path
global orig_shlib_path
@@ -195,6 +209,7 @@ proc restore_ld_library_path_env_vars { } {
global orig_ld_library_path_32
global orig_ld_library_path_64
global orig_dyld_library_path
+ global orig_path
if { $orig_environment_saved == 0 } {
return
@@ -240,6 +255,11 @@ proc restore_ld_library_path_env_vars { } {
} elseif [info exists env(DYLD_LIBRARY_PATH)] {
unsetenv DYLD_LIBRARY_PATH
}
+ if { $orig_path_saved } {
+ setenv PATH "$orig_path"
+ } elseif [info exists env(PATH)] {
+ unsetenv PATH
+ }
}
#######################################
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/call.exp b/Modules/_ctypes/libffi/testsuite/libffi.call/call.exp
index 1e9985e..c334685 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/call.exp
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/call.exp
@@ -1,4 +1,4 @@
-# Copyright (C) 2003, 2006, 2009 Free Software Foundation, Inc.
+# Copyright (C) 2003, 2006, 2009, 2010 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -14,20 +14,25 @@
# along with this program; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>.
-# libffi testsuite that uses the 'dg.exp' driver.
-
-load_lib libffi-dg.exp
-
dg-init
libffi-init
global srcdir subdir
-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] "-O0 -W -Wall" ""
-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] "-O2" ""
-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] "-O3" ""
-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] "-Os" ""
-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] "-O2 -fomit-frame-pointer" ""
+if { [string match $using_gcc "yes"] } {
+
+ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] "-O0 -W -Wall" ""
+ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] "-O2" ""
+ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] "-O3" ""
+ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] "-Os" ""
+ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] "-O2 -fomit-frame-pointer" ""
+
+} else {
+
+ # Assume we are using the vendor compiler.
+ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] "" ""
+
+}
dg-finish
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/closure_stdcall.c b/Modules/_ctypes/libffi/testsuite/libffi.call/closure_stdcall.c
index 6bfcc1f..1407f02 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/closure_stdcall.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/closure_stdcall.c
@@ -49,9 +49,17 @@ int main (void)
CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_stdcall,
(void *) 3 /* userdata */, code) == FFI_OK);
+#ifdef _MSC_VER
+ __asm { mov sp_pre, esp }
+#else
asm volatile (" movl %%esp,%0" : "=g" (sp_pre));
+#endif
res = (*(closure_test_type0)code)(0, 1, 2, 3);
+#ifdef _MSC_VER
+ __asm { mov sp_post, esp }
+#else
asm volatile (" movl %%esp,%0" : "=g" (sp_post));
+#endif
/* { dg-output "0 1 2 3: 9" } */
printf("res: %d\n",res);
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/closure_thiscall.c b/Modules/_ctypes/libffi/testsuite/libffi.call/closure_thiscall.c
new file mode 100644
index 0000000..0f93649
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/closure_thiscall.c
@@ -0,0 +1,72 @@
+/* Area: closure_call (thiscall convention)
+ Purpose: Check handling when caller expects thiscall callee
+ Limitations: none.
+ PR: none.
+ Originator: <ktietz@redhat.com> */
+
+/* { dg-do run { target i?86-*-cygwin* i?86-*-mingw* } } */
+#include "ffitest.h"
+
+static void
+closure_test_thiscall(ffi_cif* cif __UNUSED__, void* resp, void** args,
+ void* userdata)
+{
+ *(ffi_arg*)resp =
+ (int)*(int *)args[0] + (int)(*(int *)args[1])
+ + (int)(*(int *)args[2]) + (int)(*(int *)args[3])
+ + (int)(intptr_t)userdata;
+
+ printf("%d %d %d %d: %d\n",
+ (int)*(int *)args[0], (int)(*(int *)args[1]),
+ (int)(*(int *)args[2]), (int)(*(int *)args[3]),
+ (int)*(ffi_arg *)resp);
+
+}
+
+typedef int (__thiscall *closure_test_type0)(int, int, int, int);
+
+int main (void)
+{
+ ffi_cif cif;
+ void *code;
+ ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code);
+ ffi_type * cl_arg_types[17];
+ int res;
+ void* sp_pre;
+ void* sp_post;
+ char buf[1024];
+
+ cl_arg_types[0] = &ffi_type_uint;
+ cl_arg_types[1] = &ffi_type_uint;
+ cl_arg_types[2] = &ffi_type_uint;
+ cl_arg_types[3] = &ffi_type_uint;
+ cl_arg_types[4] = NULL;
+
+ /* Initialize the cif */
+ CHECK(ffi_prep_cif(&cif, FFI_THISCALL, 4,
+ &ffi_type_sint, cl_arg_types) == FFI_OK);
+
+ CHECK(ffi_prep_closure_loc(pcl, &cif, closure_test_thiscall,
+ (void *) 3 /* userdata */, code) == FFI_OK);
+
+#ifdef _MSC_VER
+ __asm { mov sp_pre, esp }
+#else
+ asm volatile (" movl %%esp,%0" : "=g" (sp_pre));
+#endif
+ res = (*(closure_test_type0)code)(0, 1, 2, 3);
+#ifdef _MSC_VER
+ __asm { mov sp_post, esp }
+#else
+ asm volatile (" movl %%esp,%0" : "=g" (sp_post));
+#endif
+ /* { dg-output "0 1 2 3: 9" } */
+
+ printf("res: %d\n",res);
+ /* { dg-output "\nres: 9" } */
+
+ sprintf(buf, "mismatch: pre=%p vs post=%p", sp_pre, sp_post);
+ printf("stack pointer %s\n", (sp_pre == sp_post ? "match" : buf));
+ /* { dg-output "\nstack pointer match" } */
+ exit(0);
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_12byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_12byte.c
index f0a334f..ea0825d 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_12byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_12byte.c
@@ -49,15 +49,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_12byte h_dbl = { 7, 4, 9 };
+ struct cls_struct_12byte j_dbl = { 1, 5, 3 };
+ struct cls_struct_12byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_12byte h_dbl = { 7, 4, 9 };
- struct cls_struct_12byte j_dbl = { 1, 5, 3 };
- struct cls_struct_12byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_sint;
cls_struct_fields[1] = &ffi_type_sint;
cls_struct_fields[2] = &ffi_type_sint;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_16byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_16byte.c
index 9b9292a..89a08a2 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_16byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_16byte.c
@@ -50,15 +50,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_16byte h_dbl = { 7, 8.0, 9 };
+ struct cls_struct_16byte j_dbl = { 1, 9.0, 3 };
+ struct cls_struct_16byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_16byte h_dbl = { 7, 8.0, 9 };
- struct cls_struct_16byte j_dbl = { 1, 9.0, 3 };
- struct cls_struct_16byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_sint;
cls_struct_fields[1] = &ffi_type_double;
cls_struct_fields[2] = &ffi_type_sint;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_18byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_18byte.c
index 40c8c6d..9f75da8 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_18byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_18byte.c
@@ -54,15 +54,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[3];
+ struct cls_struct_18byte g_dbl = { 1.0, 127, 126, 3.0 };
+ struct cls_struct_18byte f_dbl = { 4.0, 125, 124, 5.0 };
+ struct cls_struct_18byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_18byte g_dbl = { 1.0, 127, 126, 3.0 };
- struct cls_struct_18byte f_dbl = { 4.0, 125, 124, 5.0 };
- struct cls_struct_18byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_double;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_19byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_19byte.c
index aa64248..278794b 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_19byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_19byte.c
@@ -57,15 +57,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[3];
+ struct cls_struct_19byte g_dbl = { 1.0, 127, 126, 3.0, 120 };
+ struct cls_struct_19byte f_dbl = { 4.0, 125, 124, 5.0, 119 };
+ struct cls_struct_19byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_19byte g_dbl = { 1.0, 127, 126, 3.0, 120 };
- struct cls_struct_19byte f_dbl = { 4.0, 125, 124, 5.0, 119 };
- struct cls_struct_19byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_double;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_1_1byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_1_1byte.c
index b9402d6..82492c0 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_1_1byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_1_1byte.c
@@ -50,15 +50,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_1_1byte g_dbl = { 12 };
+ struct cls_struct_1_1byte f_dbl = { 178 };
+ struct cls_struct_1_1byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_1_1byte g_dbl = { 12 };
- struct cls_struct_1_1byte f_dbl = { 178 };
- struct cls_struct_1_1byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_20byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_20byte.c
index 80dd7ac..3f8bb28 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_20byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_20byte.c
@@ -50,15 +50,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_20byte g_dbl = { 1.0, 2.0, 3 };
+ struct cls_struct_20byte f_dbl = { 4.0, 5.0, 7 };
+ struct cls_struct_20byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_20byte g_dbl = { 1.0, 2.0, 3 };
- struct cls_struct_20byte f_dbl = { 4.0, 5.0, 7 };
- struct cls_struct_20byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_double;
cls_struct_fields[1] = &ffi_type_double;
cls_struct_fields[2] = &ffi_type_sint;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_20byte1.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_20byte1.c
index 50bcbbf..6562727 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_20byte1.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_20byte1.c
@@ -52,15 +52,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[3];
+ struct cls_struct_20byte g_dbl = { 1, 2.0, 3.0 };
+ struct cls_struct_20byte f_dbl = { 4, 5.0, 7.0 };
+ struct cls_struct_20byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_20byte g_dbl = { 1, 2.0, 3.0 };
- struct cls_struct_20byte f_dbl = { 4, 5.0, 7.0 };
- struct cls_struct_20byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_sint;
cls_struct_fields[1] = &ffi_type_double;
cls_struct_fields[2] = &ffi_type_double;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_24byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_24byte.c
index 46a6eb4..1d82f6e 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_24byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_24byte.c
@@ -61,17 +61,17 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
- cls_struct_type.size = 0;
- cls_struct_type.alignment = 0;
- cls_struct_type.type = FFI_TYPE_STRUCT;
- cls_struct_type.elements = cls_struct_fields;
-
struct cls_struct_24byte e_dbl = { 9.0, 2.0, 6, 5.0 };
struct cls_struct_24byte f_dbl = { 1.0, 2.0, 3, 7.0 };
struct cls_struct_24byte g_dbl = { 4.0, 5.0, 7, 9.0 };
struct cls_struct_24byte h_dbl = { 8.0, 6.0, 1, 4.0 };
struct cls_struct_24byte res_dbl;
+ cls_struct_type.size = 0;
+ cls_struct_type.alignment = 0;
+ cls_struct_type.type = FFI_TYPE_STRUCT;
+ cls_struct_type.elements = cls_struct_fields;
+
cls_struct_fields[0] = &ffi_type_double;
cls_struct_fields[1] = &ffi_type_double;
cls_struct_fields[2] = &ffi_type_sint;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_2byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_2byte.c
index 101e130..81bb0a6 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_2byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_2byte.c
@@ -50,15 +50,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_2byte g_dbl = { 12, 127 };
+ struct cls_struct_2byte f_dbl = { 1, 13 };
+ struct cls_struct_2byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_2byte g_dbl = { 12, 127 };
- struct cls_struct_2byte f_dbl = { 1, 13 };
- struct cls_struct_2byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_3_1byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_3_1byte.c
index fc780c3..b782746 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_3_1byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_3_1byte.c
@@ -54,15 +54,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_3_1byte g_dbl = { 12, 13, 14 };
+ struct cls_struct_3_1byte f_dbl = { 178, 179, 180 };
+ struct cls_struct_3_1byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_3_1byte g_dbl = { 12, 13, 14 };
- struct cls_struct_3_1byte f_dbl = { 178, 179, 180 };
- struct cls_struct_3_1byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_3byte1.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_3byte1.c
index 5705ce3..a02c463 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_3byte1.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_3byte1.c
@@ -50,15 +50,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_3byte g_dbl = { 12, 119 };
+ struct cls_struct_3byte f_dbl = { 1, 15 };
+ struct cls_struct_3byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_3byte g_dbl = { 12, 119 };
- struct cls_struct_3byte f_dbl = { 1, 15 };
- struct cls_struct_3byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_ushort;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_3byte2.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_3byte2.c
index 01770a0..c7251ce 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_3byte2.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_3byte2.c
@@ -50,15 +50,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_3byte_1 g_dbl = { 15, 125 };
+ struct cls_struct_3byte_1 f_dbl = { 9, 19 };
+ struct cls_struct_3byte_1 res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_3byte_1 g_dbl = { 15, 125 };
- struct cls_struct_3byte_1 f_dbl = { 9, 19 };
- struct cls_struct_3byte_1 res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_ushort;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_4_1byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_4_1byte.c
index f3806d7..2d6d8b6 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_4_1byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_4_1byte.c
@@ -56,15 +56,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_4_1byte g_dbl = { 12, 13, 14, 15 };
+ struct cls_struct_4_1byte f_dbl = { 178, 179, 180, 181 };
+ struct cls_struct_4_1byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_4_1byte g_dbl = { 12, 13, 14, 15 };
- struct cls_struct_4_1byte f_dbl = { 178, 179, 180, 181 };
- struct cls_struct_4_1byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_4byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_4byte.c
index a1aba3c..4ac3787 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_4byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_4byte.c
@@ -50,15 +50,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_4byte g_dbl = { 127, 120 };
+ struct cls_struct_4byte f_dbl = { 12, 128 };
+ struct cls_struct_4byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_4byte g_dbl = { 127, 120 };
- struct cls_struct_4byte f_dbl = { 12, 128 };
- struct cls_struct_4byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_ushort;
cls_struct_fields[1] = &ffi_type_ushort;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_5_1_byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_5_1_byte.c
index 2ceba3d..ad9d51c 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_5_1_byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_5_1_byte.c
@@ -58,15 +58,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_5byte g_dbl = { 127, 120, 1, 3, 4 };
+ struct cls_struct_5byte f_dbl = { 12, 128, 9, 3, 4 };
+ struct cls_struct_5byte res_dbl = { 0, 0, 0, 0, 0 };
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_5byte g_dbl = { 127, 120, 1, 3, 4 };
- struct cls_struct_5byte f_dbl = { 12, 128, 9, 3, 4 };
- struct cls_struct_5byte res_dbl = { 0, 0, 0, 0, 0 };
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_5byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_5byte.c
index 61d595c..4e0c000 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_5byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_5byte.c
@@ -53,15 +53,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_5byte g_dbl = { 127, 120, 1 };
+ struct cls_struct_5byte f_dbl = { 12, 128, 9 };
+ struct cls_struct_5byte res_dbl = { 0, 0, 0 };
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_5byte g_dbl = { 127, 120, 1 };
- struct cls_struct_5byte f_dbl = { 12, 128, 9 };
- struct cls_struct_5byte res_dbl = { 0, 0, 0 };
-
cls_struct_fields[0] = &ffi_type_ushort;
cls_struct_fields[1] = &ffi_type_ushort;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_64byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_64byte.c
index 576ebe0..a55edc2 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_64byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_64byte.c
@@ -66,17 +66,17 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
- cls_struct_type.size = 0;
- cls_struct_type.alignment = 0;
- cls_struct_type.type = FFI_TYPE_STRUCT;
- cls_struct_type.elements = cls_struct_fields;
-
struct cls_struct_64byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0 };
struct cls_struct_64byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0 };
struct cls_struct_64byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0 };
struct cls_struct_64byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0 };
struct cls_struct_64byte res_dbl;
+ cls_struct_type.size = 0;
+ cls_struct_type.alignment = 0;
+ cls_struct_type.type = FFI_TYPE_STRUCT;
+ cls_struct_type.elements = cls_struct_fields;
+
cls_struct_fields[0] = &ffi_type_double;
cls_struct_fields[1] = &ffi_type_double;
cls_struct_fields[2] = &ffi_type_double;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_6_1_byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_6_1_byte.c
index 9f2eff6..b4dcdba 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_6_1_byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_6_1_byte.c
@@ -60,15 +60,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_6byte g_dbl = { 127, 120, 1, 3, 4, 5 };
+ struct cls_struct_6byte f_dbl = { 12, 128, 9, 3, 4, 5 };
+ struct cls_struct_6byte res_dbl = { 0, 0, 0, 0, 0, 0 };
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_6byte g_dbl = { 127, 120, 1, 3, 4, 5 };
- struct cls_struct_6byte f_dbl = { 12, 128, 9, 3, 4, 5 };
- struct cls_struct_6byte res_dbl = { 0, 0, 0, 0, 0, 0 };
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_6byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_6byte.c
index 73257b0..7406780 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_6byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_6byte.c
@@ -56,15 +56,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_6byte g_dbl = { 127, 120, 1, 128 };
+ struct cls_struct_6byte f_dbl = { 12, 128, 9, 127 };
+ struct cls_struct_6byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_6byte g_dbl = { 127, 120, 1, 128 };
- struct cls_struct_6byte f_dbl = { 12, 128, 9, 127 };
- struct cls_struct_6byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_ushort;
cls_struct_fields[1] = &ffi_type_ushort;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_7_1_byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_7_1_byte.c
index 50d09c9..14a7e96 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_7_1_byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_7_1_byte.c
@@ -62,15 +62,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_7byte g_dbl = { 127, 120, 1, 3, 4, 5, 6 };
+ struct cls_struct_7byte f_dbl = { 12, 128, 9, 3, 4, 5, 6 };
+ struct cls_struct_7byte res_dbl = { 0, 0, 0, 0, 0, 0, 0 };
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_7byte g_dbl = { 127, 120, 1, 3, 4, 5, 6 };
- struct cls_struct_7byte f_dbl = { 12, 128, 9, 3, 4, 5, 6 };
- struct cls_struct_7byte res_dbl = { 0, 0, 0, 0, 0, 0, 0 };
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_7byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_7byte.c
index f5c0000..1645cc6 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_7byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_7byte.c
@@ -55,15 +55,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_7byte g_dbl = { 127, 120, 1, 254 };
+ struct cls_struct_7byte f_dbl = { 12, 128, 9, 255 };
+ struct cls_struct_7byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_7byte g_dbl = { 127, 120, 1, 254 };
- struct cls_struct_7byte f_dbl = { 12, 128, 9, 255 };
- struct cls_struct_7byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_ushort;
cls_struct_fields[1] = &ffi_type_ushort;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_8byte.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_8byte.c
index 4aa99d1..f6c1ea5 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_8byte.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_8byte.c
@@ -49,15 +49,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_8byte g_dbl = { 1, 2.0 };
+ struct cls_struct_8byte f_dbl = { 4, 5.0 };
+ struct cls_struct_8byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_8byte g_dbl = { 1, 2.0 };
- struct cls_struct_8byte f_dbl = { 4, 5.0 };
- struct cls_struct_8byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_sint;
cls_struct_fields[1] = &ffi_type_float;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_9byte1.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_9byte1.c
index cc5e9d6..0b85722 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_9byte1.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_9byte1.c
@@ -50,15 +50,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[3];
+ struct cls_struct_9byte h_dbl = { 7, 8.0};
+ struct cls_struct_9byte j_dbl = { 1, 9.0};
+ struct cls_struct_9byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_9byte h_dbl = { 7, 8.0};
- struct cls_struct_9byte j_dbl = { 1, 9.0};
- struct cls_struct_9byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_sint;
cls_struct_fields[1] = &ffi_type_double;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_9byte2.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_9byte2.c
index 5c0ba0d..edf991d 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_9byte2.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_9byte2.c
@@ -50,15 +50,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[3];
+ struct cls_struct_9byte h_dbl = { 7.0, 8};
+ struct cls_struct_9byte j_dbl = { 1.0, 9};
+ struct cls_struct_9byte res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_9byte h_dbl = { 7.0, 8};
- struct cls_struct_9byte j_dbl = { 1.0, 9};
- struct cls_struct_9byte res_dbl;
-
cls_struct_fields[0] = &ffi_type_double;
cls_struct_fields[1] = &ffi_type_sint;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_double.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_double.c
index 22b94d5..aad5f3c 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_double.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_double.c
@@ -52,15 +52,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_align g_dbl = { 12, 4951, 127 };
+ struct cls_struct_align f_dbl = { 1, 9320, 13 };
+ struct cls_struct_align res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_align g_dbl = { 12, 4951, 127 };
- struct cls_struct_align f_dbl = { 1, 9320, 13 };
- struct cls_struct_align res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_double;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_float.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_float.c
index 62637f2..37e0855 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_float.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_float.c
@@ -50,15 +50,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_align g_dbl = { 12, 4951, 127 };
+ struct cls_struct_align f_dbl = { 1, 9320, 13 };
+ struct cls_struct_align res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_align g_dbl = { 12, 4951, 127 };
- struct cls_struct_align f_dbl = { 1, 9320, 13 };
- struct cls_struct_align res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_float;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_longdouble.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_longdouble.c
index af38060..b3322d8 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_longdouble.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_longdouble.c
@@ -51,15 +51,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_align g_dbl = { 12, 4951, 127 };
+ struct cls_struct_align f_dbl = { 1, 9320, 13 };
+ struct cls_struct_align res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_align g_dbl = { 12, 4951, 127 };
- struct cls_struct_align f_dbl = { 1, 9320, 13 };
- struct cls_struct_align res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_longdouble;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split.c
index 4274af1..15f9365 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split.c
@@ -6,7 +6,7 @@
/* { dg-excess-errors "no long double format" { xfail x86_64-*-mingw* x86_64-*-cygwin* } } */
/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */
-/* { dg-options -mlong-double-128 { target powerpc64*-*-* } } */
+/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */
/* { dg-output "" { xfail x86_64-*-mingw* x86_64-*-cygwin* } } */
#include "ffitest.h"
@@ -87,15 +87,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[3];
+ struct cls_struct_align g_dbl = { 1, 2, 3, 4, 5, 6, 7 };
+ struct cls_struct_align f_dbl = { 8, 9, 10, 11, 12, 13, 14 };
+ struct cls_struct_align res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_align g_dbl = { 1, 2, 3, 4, 5, 6, 7 };
- struct cls_struct_align f_dbl = { 8, 9, 10, 11, 12, 13, 14 };
- struct cls_struct_align res_dbl;
-
cls_struct_fields[0] = &ffi_type_longdouble;
cls_struct_fields[1] = &ffi_type_longdouble;
cls_struct_fields[2] = &ffi_type_longdouble;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split2.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split2.c
index 088f0d3..ca1c356 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split2.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split2.c
@@ -7,7 +7,7 @@
/* { dg-excess-errors "no long double format" { xfail x86_64-*-mingw* x86_64-*-cygwin* } } */
/* { dg-do run { xfail strongarm*-*-* } } */
-/* { dg-options -mlong-double-128 { target powerpc64*-*-* } } */
+/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */
/* { dg-output "" { xfail x86_64-*-mingw* x86_64-*-cygwin* } } */
#include "ffitest.h"
@@ -67,15 +67,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[3];
+ struct cls_struct_align g_dbl = { 1, 2, 3, 4, 5, 6, 7 };
+ struct cls_struct_align f_dbl = { 8, 9, 10, 11, 12, 13, 14 };
+ struct cls_struct_align res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_align g_dbl = { 1, 2, 3, 4, 5, 6, 7 };
- struct cls_struct_align f_dbl = { 8, 9, 10, 11, 12, 13, 14 };
- struct cls_struct_align res_dbl;
-
cls_struct_fields[0] = &ffi_type_longdouble;
cls_struct_fields[1] = &ffi_type_longdouble;
cls_struct_fields[2] = &ffi_type_longdouble;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_pointer.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_pointer.c
index cbc4f95..8fbf36a 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_pointer.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_pointer.c
@@ -54,15 +54,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_align g_dbl = { 12, (void *)4951, 127 };
+ struct cls_struct_align f_dbl = { 1, (void *)9320, 13 };
+ struct cls_struct_align res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_align g_dbl = { 12, (void *)4951, 127 };
- struct cls_struct_align f_dbl = { 1, (void *)9320, 13 };
- struct cls_struct_align res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_pointer;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_sint16.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_sint16.c
index 383ea41..039b874 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_sint16.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_sint16.c
@@ -50,15 +50,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_align g_dbl = { 12, 4951, 127 };
+ struct cls_struct_align f_dbl = { 1, 9320, 13 };
+ struct cls_struct_align res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_align g_dbl = { 12, 4951, 127 };
- struct cls_struct_align f_dbl = { 1, 9320, 13 };
- struct cls_struct_align res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_sshort;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_sint32.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_sint32.c
index 705d78c..c96c6d1 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_sint32.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_sint32.c
@@ -50,15 +50,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_align g_dbl = { 12, 4951, 127 };
+ struct cls_struct_align f_dbl = { 1, 9320, 13 };
+ struct cls_struct_align res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_align g_dbl = { 12, 4951, 127 };
- struct cls_struct_align f_dbl = { 1, 9320, 13 };
- struct cls_struct_align res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_sint;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_sint64.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_sint64.c
index 31d53af..9aa7bdd 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_sint64.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_sint64.c
@@ -51,15 +51,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_align g_dbl = { 12, 4951, 127 };
+ struct cls_struct_align f_dbl = { 1, 9320, 13 };
+ struct cls_struct_align res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_align g_dbl = { 12, 4951, 127 };
- struct cls_struct_align f_dbl = { 1, 9320, 13 };
- struct cls_struct_align res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_sint64;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_uint16.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_uint16.c
index cb6b748..97620b7 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_uint16.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_uint16.c
@@ -50,15 +50,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_align g_dbl = { 12, 4951, 127 };
+ struct cls_struct_align f_dbl = { 1, 9320, 13 };
+ struct cls_struct_align res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_align g_dbl = { 12, 4951, 127 };
- struct cls_struct_align f_dbl = { 1, 9320, 13 };
- struct cls_struct_align res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_ushort;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_uint32.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_uint32.c
index e453d3e..5766fad 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_uint32.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_uint32.c
@@ -50,15 +50,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_align g_dbl = { 12, 4951, 127 };
+ struct cls_struct_align f_dbl = { 1, 9320, 13 };
+ struct cls_struct_align res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_align g_dbl = { 12, 4951, 127 };
- struct cls_struct_align f_dbl = { 1, 9320, 13 };
- struct cls_struct_align res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_uint;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_uint64.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_uint64.c
index 495c79f..a52cb89 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_uint64.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_align_uint64.c
@@ -52,15 +52,15 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_align g_dbl = { 12, 4951, 127 };
+ struct cls_struct_align f_dbl = { 1, 9320, 13 };
+ struct cls_struct_align res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
cls_struct_type.elements = cls_struct_fields;
- struct cls_struct_align g_dbl = { 12, 4951, 127 };
- struct cls_struct_align f_dbl = { 1, 9320, 13 };
- struct cls_struct_align res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_uint64;
cls_struct_fields[2] = &ffi_type_uchar;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_dbls_struct.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_dbls_struct.c
index 660dabb..d663791 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_dbls_struct.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_dbls_struct.c
@@ -37,6 +37,8 @@ int main(int argc __UNUSED__, char** argv __UNUSED__)
ffi_type ts1_type;
ffi_type* ts1_type_elements[4];
+ Dbls arg = { 1.0, 2.0 };
+
ts1_type.size = 0;
ts1_type.alignment = 0;
ts1_type.type = FFI_TYPE_STRUCT;
@@ -48,8 +50,6 @@ int main(int argc __UNUSED__, char** argv __UNUSED__)
cl_arg_types[0] = &ts1_type;
- Dbls arg = { 1.0, 2.0 };
-
/* Initialize the cif */
CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
&ffi_type_void, cl_arg_types) == FFI_OK);
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_double_va.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_double_va.c
index 0695874..43167b6 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_double_va.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_double_va.c
@@ -6,6 +6,8 @@
/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */
/* { dg-output "" { xfail avr32*-*-* } } */
+/* { dg-output "" { xfail mips-sgi-irix6* } } PR libffi/46660 */
+
#include "ffitest.h"
static void
@@ -34,7 +36,8 @@ int main (void)
arg_types[1] = &ffi_type_double;
arg_types[2] = NULL;
- CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint,
+ /* This printf call is variadic */
+ CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, &ffi_type_sint,
arg_types) == FFI_OK);
args[0] = &format;
@@ -42,16 +45,19 @@ int main (void)
args[2] = NULL;
ffi_call(&cif, FFI_FN(printf), &res, args);
- // { dg-output "7.0" }
+ /* { dg-output "7.0" } */
printf("res: %d\n", (int) res);
- // { dg-output "\nres: 4" }
+ /* { dg-output "\nres: 4" } */
+
+ /* The call to cls_double_va_fn is static, so have to use a normal prep_cif */
+ CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, arg_types) == FFI_OK);
CHECK(ffi_prep_closure_loc(pcl, &cif, cls_double_va_fn, NULL, code) == FFI_OK);
res = ((int(*)(char*, double))(code))(format, doubleArg);
- // { dg-output "\n7.0" }
+ /* { dg-output "\n7.0" } */
printf("res: %d\n", (int) res);
- // { dg-output "\nres: 4" }
+ /* { dg-output "\nres: 4" } */
exit(0);
}
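
Note: the switch above from ffi_prep_cif to ffi_prep_cif_var is how libffi describes a variadic callee: the fixed and total argument counts are passed separately. A minimal self-contained sketch of the same pattern, calling printf directly (an illustrative example, not part of the test suite):

#include <ffi.h>
#include <stdio.h>

int main(void)
{
    ffi_cif cif;
    ffi_type *argtypes[2];
    void *argvalues[2];
    char *fmt = "%.1f\n";
    double d = 7.0;
    ffi_arg rc;

    argtypes[0] = &ffi_type_pointer;   /* fixed argument: the format string */
    argtypes[1] = &ffi_type_double;    /* variadic argument */
    argvalues[0] = &fmt;
    argvalues[1] = &d;

    /* 1 fixed argument, 2 arguments in total. */
    if (ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2,
                         &ffi_type_sint, argtypes) != FFI_OK)
        return 1;

    ffi_call(&cif, FFI_FN(printf), &rc, argvalues);   /* prints "7.0" */
    return 0;
}
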
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_longdouble.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_longdouble.c
index 52af6cf..5dc9ac7 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_longdouble.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_longdouble.c
@@ -5,8 +5,10 @@
Originator: Blake Chaffin */
/* { dg-excess-errors "no long double format" { xfail x86_64-*-mingw* x86_64-*-cygwin* } } */
-/* { dg-do run { xfail arm*-*-* strongarm*-*-* xscale*-*-* } } */
-/* { dg-options -mlong-double-128 { target powerpc64*-*-* } } */
+/* This test is known to PASS on armv7l-unknown-linux-gnueabihf, so I have
+ removed the xfail for arm*-*-* below, until we know more. */
+/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */
+/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */
/* { dg-output "" { xfail x86_64-*-mingw* x86_64-*-cygwin* } } */
#include "ffitest.h"
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_longdouble_va.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_longdouble_va.c
index 38564cb..7126b13 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_longdouble_va.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_longdouble_va.c
@@ -6,6 +6,8 @@
/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */
/* { dg-output "" { xfail avr32*-*-* x86_64-*-mingw* } } */
+/* { dg-output "" { xfail mips-sgi-irix6* } } PR libffi/46660 */
+
#include "ffitest.h"
static void
@@ -34,7 +36,8 @@ int main (void)
arg_types[1] = &ffi_type_longdouble;
arg_types[2] = NULL;
- CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint,
+ /* This printf call is variadic */
+ CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2, &ffi_type_sint,
arg_types) == FFI_OK);
args[0] = &format;
@@ -42,16 +45,20 @@ int main (void)
args[2] = NULL;
ffi_call(&cif, FFI_FN(printf), &res, args);
- // { dg-output "7.0" }
+ /* { dg-output "7.0" } */
printf("res: %d\n", (int) res);
- // { dg-output "\nres: 4" }
+ /* { dg-output "\nres: 4" } */
+
+ /* The call to cls_longdouble_va_fn is static, so have to use a normal prep_cif */
+ CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint,
+ arg_types) == FFI_OK);
CHECK(ffi_prep_closure_loc(pcl, &cif, cls_longdouble_va_fn, NULL, code) == FFI_OK);
res = ((int(*)(char*, long double))(code))(format, ldArg);
- // { dg-output "\n7.0" }
+ /* { dg-output "\n7.0" } */
printf("res: %d\n", (int) res);
- // { dg-output "\nres: 4" }
+ /* { dg-output "\nres: 4" } */
exit(0);
}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_pointer.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_pointer.c
index fadd353..d82a87a 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_pointer.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_pointer.c
@@ -35,7 +35,7 @@ int main (void)
void *code;
ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code);
void* args[3];
-// ffi_type cls_pointer_type;
+ /* ffi_type cls_pointer_type; */
ffi_type* arg_types[3];
/* cls_pointer_type.size = sizeof(void*);
@@ -65,7 +65,7 @@ int main (void)
CHECK(ffi_prep_closure_loc(pcl, &cif, cls_pointer_gn, NULL, code) == FFI_OK);
- res = (ffi_arg)((void*(*)(void*, void*))(code))(arg1, arg2);
+ res = (ffi_arg)(uintptr_t)((void*(*)(void*, void*))(code))(arg1, arg2);
/* { dg-output "\n0x12345678 0x89abcdef: 0x9be02467" } */
printf("res: 0x%08x\n", (unsigned int) res);
/* { dg-output "\nres: 0x9be02467" } */
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_pointer_stack.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_pointer_stack.c
index 697f271..1f1d915 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_pointer_stack.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_pointer_stack.c
@@ -28,11 +28,12 @@ void* cls_pointer_fn2(void* a1, void* a2)
char trample6 = trample4 + ((char*)&a2)[1];
long double trample7 = (intptr_t)trample5 + (intptr_t)trample1;
char trample8 = trample6 + trample2;
+ void* result;
dummyVar = dummy_func(trample1, trample2, trample3, trample4,
trample5, trample6, trample7, trample8);
- void* result = (void*)((intptr_t)a1 + (intptr_t)a2);
+ result = (void*)((intptr_t)a1 + (intptr_t)a2);
printf("0x%08x 0x%08x: 0x%08x\n",
(unsigned int)(uintptr_t) a1,
@@ -52,11 +53,12 @@ void* cls_pointer_fn1(void* a1, void* a2)
char trample6 = trample4 + ((char*)&a2)[1];
long double trample7 = (intptr_t)trample5 + (intptr_t)trample1;
char trample8 = trample6 + trample2;
+ void* result;
dummyVar = dummy_func(trample1, trample2, trample3, trample4,
trample5, trample6, trample7, trample8);
- void* result = (void*)((intptr_t)a1 + (intptr_t)a2);
+ result = (void*)((intptr_t)a1 + (intptr_t)a2);
printf("0x%08x 0x%08x: 0x%08x\n",
(unsigned int)(intptr_t) a1,
@@ -96,7 +98,7 @@ int main (void)
void *code;
ffi_closure* pcl = ffi_closure_alloc(sizeof(ffi_closure), &code);
void* args[3];
-// ffi_type cls_pointer_type;
+ /* ffi_type cls_pointer_type; */
ffi_type* arg_types[3];
/* cls_pointer_type.size = sizeof(void*);
@@ -123,18 +125,18 @@ int main (void)
ffi_call(&cif, FFI_FN(cls_pointer_fn1), &res, args);
printf("res: 0x%08x\n", (unsigned int) res);
- // { dg-output "\n0x01234567 0x89abcdef: 0x8acf1356" }
- // { dg-output "\n0x8acf1356 0x01234567: 0x8bf258bd" }
- // { dg-output "\nres: 0x8bf258bd" }
+ /* { dg-output "\n0x01234567 0x89abcdef: 0x8acf1356" } */
+ /* { dg-output "\n0x8acf1356 0x01234567: 0x8bf258bd" } */
+ /* { dg-output "\nres: 0x8bf258bd" } */
CHECK(ffi_prep_closure_loc(pcl, &cif, cls_pointer_gn, NULL, code) == FFI_OK);
- res = (ffi_arg)((void*(*)(void*, void*))(code))(arg1, arg2);
+ res = (ffi_arg)(uintptr_t)((void*(*)(void*, void*))(code))(arg1, arg2);
printf("res: 0x%08x\n", (unsigned int) res);
- // { dg-output "\n0x01234567 0x89abcdef: 0x8acf1356" }
- // { dg-output "\n0x8acf1356 0x01234567: 0x8bf258bd" }
- // { dg-output "\nres: 0x8bf258bd" }
+ /* { dg-output "\n0x01234567 0x89abcdef: 0x8acf1356" } */
+ /* { dg-output "\n0x8acf1356 0x01234567: 0x8bf258bd" } */
+ /* { dg-output "\nres: 0x8bf258bd" } */
exit(0);
}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_struct_va1.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_struct_va1.c
new file mode 100644
index 0000000..175ed96
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_struct_va1.c
@@ -0,0 +1,114 @@
+/* Area: ffi_call, closure_call
+ Purpose: Test doubles passed in variable argument lists.
+ Limitations: none.
+ PR: none.
+ Originator: Blake Chaffin 6/6/2007 */
+
+/* { dg-do run } */
+/* { dg-output "" { xfail avr32*-*-* } } */
+#include "ffitest.h"
+
+struct small_tag
+{
+ unsigned char a;
+ unsigned char b;
+};
+
+struct large_tag
+{
+ unsigned a;
+ unsigned b;
+ unsigned c;
+ unsigned d;
+ unsigned e;
+};
+
+static void
+test_fn (ffi_cif* cif __UNUSED__, void* resp,
+ void** args, void* userdata __UNUSED__)
+{
+ int n = *(int*)args[0];
+ struct small_tag s1 = * (struct small_tag *) args[1];
+ struct large_tag l1 = * (struct large_tag *) args[2];
+ struct small_tag s2 = * (struct small_tag *) args[3];
+
+ printf ("%d %d %d %d %d %d %d %d %d %d\n", n, s1.a, s1.b,
+ l1.a, l1.b, l1.c, l1.d, l1.e,
+ s2.a, s2.b);
+ * (int*) resp = 42;
+}
+
+int
+main (void)
+{
+ ffi_cif cif;
+ void *code;
+ ffi_closure *pcl = ffi_closure_alloc (sizeof (ffi_closure), &code);
+ ffi_type* arg_types[5];
+
+ ffi_arg res = 0;
+
+ ffi_type s_type;
+ ffi_type *s_type_elements[3];
+
+ ffi_type l_type;
+ ffi_type *l_type_elements[6];
+
+ struct small_tag s1;
+ struct small_tag s2;
+ struct large_tag l1;
+
+ int si;
+
+ s_type.size = 0;
+ s_type.alignment = 0;
+ s_type.type = FFI_TYPE_STRUCT;
+ s_type.elements = s_type_elements;
+
+ s_type_elements[0] = &ffi_type_uchar;
+ s_type_elements[1] = &ffi_type_uchar;
+ s_type_elements[2] = NULL;
+
+ l_type.size = 0;
+ l_type.alignment = 0;
+ l_type.type = FFI_TYPE_STRUCT;
+ l_type.elements = l_type_elements;
+
+ l_type_elements[0] = &ffi_type_uint;
+ l_type_elements[1] = &ffi_type_uint;
+ l_type_elements[2] = &ffi_type_uint;
+ l_type_elements[3] = &ffi_type_uint;
+ l_type_elements[4] = &ffi_type_uint;
+ l_type_elements[5] = NULL;
+
+ arg_types[0] = &ffi_type_sint;
+ arg_types[1] = &s_type;
+ arg_types[2] = &l_type;
+ arg_types[3] = &s_type;
+ arg_types[4] = NULL;
+
+ CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &ffi_type_sint,
+ arg_types) == FFI_OK);
+
+ si = 4;
+ s1.a = 5;
+ s1.b = 6;
+
+ s2.a = 20;
+ s2.b = 21;
+
+ l1.a = 10;
+ l1.b = 11;
+ l1.c = 12;
+ l1.d = 13;
+ l1.e = 14;
+
+ CHECK(ffi_prep_closure_loc(pcl, &cif, test_fn, NULL, code) == FFI_OK);
+
+ res = ((int (*)(int, ...))(code))(si, s1, l1, s2);
+ /* { dg-output "4 5 6 10 11 12 13 14 20 21" } */
+ printf("res: %d\n", (int) res);
+ /* { dg-output "\nres: 42" } */
+
+ exit(0);
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_uchar_va.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_uchar_va.c
new file mode 100644
index 0000000..6491c5b
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_uchar_va.c
@@ -0,0 +1,44 @@
+/* Area: closure_call
+ Purpose: Test anonymous unsigned char argument.
+ Limitations: none.
+ PR: none.
+ Originator: ARM Ltd. */
+
+/* { dg-do run } */
+#include "ffitest.h"
+
+typedef unsigned char T;
+
+static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args,
+ void* userdata __UNUSED__)
+ {
+ *(ffi_arg *)resp = *(T *)args[0];
+
+ printf("%d: %d %d\n", (int)(*(ffi_arg *)resp), *(T *)args[0], *(T *)args[1]);
+ }
+
+typedef T (*cls_ret_T)(T, ...);
+
+int main (void)
+{
+ ffi_cif cif;
+ void *code;
+ ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code);
+ ffi_type * cl_arg_types[3];
+ T res;
+
+ cl_arg_types[0] = &ffi_type_uchar;
+ cl_arg_types[1] = &ffi_type_uchar;
+ cl_arg_types[2] = NULL;
+
+ /* Initialize the cif */
+ CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2,
+ &ffi_type_uchar, cl_arg_types) == FFI_OK);
+
+ CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK);
+ res = ((((cls_ret_T)code)(67, 4)));
+ /* { dg-output "67: 67 4" } */
+ printf("res: %d\n", res);
+ /* { dg-output "\nres: 67" } */
+ exit(0);
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_uint_va.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_uint_va.c
new file mode 100644
index 0000000..150fddd
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_uint_va.c
@@ -0,0 +1,45 @@
+/* Area: closure_call
+ Purpose: Test anonymous unsigned int argument.
+ Limitations: none.
+ PR: none.
+ Originator: ARM Ltd. */
+
+/* { dg-do run } */
+
+#include "ffitest.h"
+
+typedef unsigned int T;
+
+static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args,
+ void* userdata __UNUSED__)
+ {
+ *(T *)resp = *(T *)args[0];
+
+ printf("%d: %d %d\n", *(T *)resp, *(T *)args[0], *(T *)args[1]);
+ }
+
+typedef T (*cls_ret_T)(T, ...);
+
+int main (void)
+{
+ ffi_cif cif;
+ void *code;
+ ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code);
+ ffi_type * cl_arg_types[3];
+ T res;
+
+ cl_arg_types[0] = &ffi_type_uint;
+ cl_arg_types[1] = &ffi_type_uint;
+ cl_arg_types[2] = NULL;
+
+ /* Initialize the cif */
+ CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2,
+ &ffi_type_uint, cl_arg_types) == FFI_OK);
+
+ CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK);
+ res = ((((cls_ret_T)code)(67, 4)));
+ /* { dg-output "67: 67 4" } */
+ printf("res: %d\n", res);
+ /* { dg-output "\nres: 67" } */
+ exit(0);
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_ulong_va.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_ulong_va.c
new file mode 100644
index 0000000..0315082
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_ulong_va.c
@@ -0,0 +1,45 @@
+/* Area: closure_call
+ Purpose: Test anonymous unsigned long argument.
+ Limitations: none.
+ PR: none.
+ Originator: ARM Ltd. */
+
+/* { dg-do run } */
+
+#include "ffitest.h"
+
+typedef unsigned long T;
+
+static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args,
+ void* userdata __UNUSED__)
+ {
+ *(T *)resp = *(T *)args[0];
+
+ printf("%ld: %ld %ld\n", *(T *)resp, *(T *)args[0], *(T *)args[1]);
+ }
+
+typedef T (*cls_ret_T)(T, ...);
+
+int main (void)
+{
+ ffi_cif cif;
+ void *code;
+ ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code);
+ ffi_type * cl_arg_types[3];
+ T res;
+
+ cl_arg_types[0] = &ffi_type_ulong;
+ cl_arg_types[1] = &ffi_type_ulong;
+ cl_arg_types[2] = NULL;
+
+ /* Initialize the cif */
+ CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2,
+ &ffi_type_ulong, cl_arg_types) == FFI_OK);
+
+ CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK);
+ res = ((((cls_ret_T)code)(67, 4)));
+ /* { dg-output "67: 67 4" } */
+ printf("res: %ld\n", res);
+ /* { dg-output "\nres: 67" } */
+ exit(0);
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_ulonglong.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_ulonglong.c
index 235ab44..62f2cae 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_ulonglong.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_ulonglong.c
@@ -11,7 +11,7 @@
static void cls_ret_ulonglong_fn(ffi_cif* cif __UNUSED__, void* resp,
void** args, void* userdata __UNUSED__)
{
- *(unsigned long long *)resp= *(unsigned long long *)args[0];
+ *(unsigned long long *)resp= 0xfffffffffffffffLL ^ *(unsigned long long *)args[0];
printf("%" PRIuLL ": %" PRIuLL "\n",*(unsigned long long *)args[0],
*(unsigned long long *)(resp));
@@ -34,14 +34,14 @@ int main (void)
&ffi_type_uint64, cl_arg_types) == FFI_OK);
CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_ulonglong_fn, NULL, code) == FFI_OK);
res = (*((cls_ret_ulonglong)code))(214LL);
- /* { dg-output "214: 214" } */
+ /* { dg-output "214: 1152921504606846761" } */
printf("res: %" PRIdLL "\n", res);
- /* { dg-output "\nres: 214" } */
+ /* { dg-output "\nres: 1152921504606846761" } */
res = (*((cls_ret_ulonglong)code))(9223372035854775808LL);
- /* { dg-output "\n9223372035854775808: 9223372035854775808" } */
+ /* { dg-output "\n9223372035854775808: 8070450533247928831" } */
printf("res: %" PRIdLL "\n", res);
- /* { dg-output "\nres: 9223372035854775808" } */
+ /* { dg-output "\nres: 8070450533247928831" } */
exit(0);
}
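Aside (not part of the patch): the updated dg-output values follow directly from the XOR with 0xfffffffffffffff (fifteen F's, i.e. 1152921504606846975) that the closure now applies:

    1152921504606846975 ^ 214                 = 1152921504606846761
    1152921504606846975 ^ 9223372035854775808 = 8070450533247928831
    (0x0FFFFFFFFFFFFFFF  ^ 0x7FFFFFFFC4653600 = 0x700000003B9AC9FF)

so both the line printed inside cls_ret_ulonglong_fn and the "res:" lines change accordingly.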
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/cls_ushort_va.c b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_ushort_va.c
new file mode 100644
index 0000000..37aa106
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/cls_ushort_va.c
@@ -0,0 +1,44 @@
+/* Area: closure_call
+ Purpose: Test anonymous unsigned short argument.
+ Limitations: none.
+ PR: none.
+ Originator: ARM Ltd. */
+
+/* { dg-do run } */
+#include "ffitest.h"
+
+typedef unsigned short T;
+
+static void cls_ret_T_fn(ffi_cif* cif __UNUSED__, void* resp, void** args,
+ void* userdata __UNUSED__)
+ {
+ *(ffi_arg *)resp = *(T *)args[0];
+
+ printf("%d: %d %d\n", (int)(*(ffi_arg *)resp), *(T *)args[0], *(T *)args[1]);
+ }
+
+typedef T (*cls_ret_T)(T, ...);
+
+int main (void)
+{
+ ffi_cif cif;
+ void *code;
+ ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code);
+ ffi_type * cl_arg_types[3];
+ T res;
+
+ cl_arg_types[0] = &ffi_type_ushort;
+ cl_arg_types[1] = &ffi_type_ushort;
+ cl_arg_types[2] = NULL;
+
+ /* Initialize the cif */
+ CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 2,
+ &ffi_type_ushort, cl_arg_types) == FFI_OK);
+
+ CHECK(ffi_prep_closure_loc(pcl, &cif, cls_ret_T_fn, NULL, code) == FFI_OK);
+ res = ((((cls_ret_T)code)(67, 4)));
+ /* { dg-output "67: 67 4" } */
+ printf("res: %d\n", res);
+ /* { dg-output "\nres: 67" } */
+ exit(0);
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/err_bad_abi.c b/Modules/_ctypes/libffi/testsuite/libffi.call/err_bad_abi.c
index a21a3fd..f5a7317 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/err_bad_abi.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/err_bad_abi.c
@@ -4,7 +4,8 @@
PR: none.
Originator: Blake Chaffin 6/6/2007 */
-/* { dg-do run { xfail *-*-* } } */
+/* { dg-do run } */
+
#include "ffitest.h"
static void
@@ -17,11 +18,9 @@ int main (void)
ffi_cif cif;
void *code;
ffi_closure *pcl = ffi_closure_alloc(sizeof(ffi_closure), &code);
- void* args[1];
ffi_type* arg_types[1];
arg_types[0] = NULL;
- args[0] = NULL;
CHECK(ffi_prep_cif(&cif, 255, 0, &ffi_type_void,
arg_types) == FFI_BAD_ABI);
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/err_bad_typedef.c b/Modules/_ctypes/libffi/testsuite/libffi.call/err_bad_typedef.c
index bd2fc54..bf60161 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/err_bad_typedef.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/err_bad_typedef.c
@@ -4,7 +4,8 @@
PR: none.
Originator: Blake Chaffin 6/6/2007 */
-/* { dg-do run { xfail *-*-* } } */
+/* { dg-do run } */
+
#include "ffitest.h"
int main (void)
@@ -12,10 +13,10 @@ int main (void)
ffi_cif cif;
ffi_type* arg_types[1];
- arg_types[0] = NULL;
-
ffi_type badType = ffi_type_void;
+ arg_types[0] = NULL;
+
badType.size = 0;
CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, &badType,
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/fastthis1_win32.c b/Modules/_ctypes/libffi/testsuite/libffi.call/fastthis1_win32.c
new file mode 100644
index 0000000..cbc4724
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/fastthis1_win32.c
@@ -0,0 +1,50 @@
+/* Area: ffi_call
+ Purpose: Check fastcall fct call on X86_WIN32 systems.
+ Limitations: none.
+ PR: none.
+ Originator: From the original ffitest.c */
+
+/* { dg-do run { target i?86-*-cygwin* i?86-*-mingw* } } */
+
+#include "ffitest.h"
+
+static size_t __FASTCALL__ my_fastcall_f(char *s, float a)
+{
+ return (size_t) ((int) strlen(s) + (int) a);
+}
+
+int main (void)
+{
+ ffi_cif cif;
+ ffi_type *args[MAX_ARGS];
+ void *values[MAX_ARGS];
+ ffi_arg rint;
+ char *s;
+ float v2;
+ args[0] = &ffi_type_pointer;
+ args[1] = &ffi_type_float;
+ values[0] = (void*) &s;
+ values[1] = (void*) &v2;
+
+ /* Initialize the cif */
+ CHECK(ffi_prep_cif(&cif, FFI_FASTCALL, 2,
+ &ffi_type_sint, args) == FFI_OK);
+
+ s = "a";
+ v2 = 0.0;
+ ffi_call(&cif, FFI_FN(my_fastcall_f), &rint, values);
+ CHECK(rint == 1);
+
+ s = "1234567";
+ v2 = -1.0;
+ ffi_call(&cif, FFI_FN(my_fastcall_f), &rint, values);
+ CHECK(rint == 6);
+
+ s = "1234567890123456789012345";
+ v2 = 1.0;
+ ffi_call(&cif, FFI_FN(my_fastcall_f), &rint, values);
+ CHECK(rint == 26);
+
+ printf("fastcall fct1 tests passed\n");
+ exit(0);
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/fastthis2_win32.c b/Modules/_ctypes/libffi/testsuite/libffi.call/fastthis2_win32.c
new file mode 100644
index 0000000..7bdd0e1
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/fastthis2_win32.c
@@ -0,0 +1,50 @@
+/* Area: ffi_call
+ Purpose: Check fastcall fct call on X86_WIN32 systems.
+ Limitations: none.
+ PR: none.
+ Originator: From the original ffitest.c */
+
+/* { dg-do run { target i?86-*-cygwin* i?86-*-mingw* } } */
+
+#include "ffitest.h"
+
+static size_t __FASTCALL__ my_fastcall_f(float a, char *s)
+{
+ return (size_t) ((int) strlen(s) + (int) a);
+}
+
+int main (void)
+{
+ ffi_cif cif;
+ ffi_type *args[MAX_ARGS];
+ void *values[MAX_ARGS];
+ ffi_arg rint;
+ char *s;
+ float v2;
+ args[1] = &ffi_type_pointer;
+ args[0] = &ffi_type_float;
+ values[1] = (void*) &s;
+ values[0] = (void*) &v2;
+
+ /* Initialize the cif */
+ CHECK(ffi_prep_cif(&cif, FFI_FASTCALL, 2,
+ &ffi_type_sint, args) == FFI_OK);
+
+ s = "a";
+ v2 = 0.0;
+ ffi_call(&cif, FFI_FN(my_fastcall_f), &rint, values);
+ CHECK(rint == 1);
+
+ s = "1234567";
+ v2 = -1.0;
+ ffi_call(&cif, FFI_FN(my_fastcall_f), &rint, values);
+ CHECK(rint == 6);
+
+ s = "1234567890123456789012345";
+ v2 = 1.0;
+ ffi_call(&cif, FFI_FN(my_fastcall_f), &rint, values);
+ CHECK(rint == 26);
+
+ printf("fastcall fct2 tests passed\n");
+ exit(0);
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/fastthis3_win32.c b/Modules/_ctypes/libffi/testsuite/libffi.call/fastthis3_win32.c
new file mode 100644
index 0000000..b5d606d
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/fastthis3_win32.c
@@ -0,0 +1,56 @@
+/* Area: ffi_call
+ Purpose: Check fastcall f call on X86_WIN32 systems.
+ Limitations: none.
+ PR: none.
+ Originator: From the original ffitest.c */
+
+/* { dg-do run { target i?86-*-cygwin* i?86-*-mingw* } } */
+
+#include "ffitest.h"
+
+static size_t __FASTCALL__ my_fastcall_f(float a, char *s, int i)
+{
+ return (size_t) ((int) strlen(s) + (int) a + i);
+}
+
+int main (void)
+{
+ ffi_cif cif;
+ ffi_type *args[MAX_ARGS];
+ void *values[MAX_ARGS];
+ ffi_arg rint;
+ char *s;
+ int v1;
+ float v2;
+ args[2] = &ffi_type_sint;
+ args[1] = &ffi_type_pointer;
+ args[0] = &ffi_type_float;
+ values[2] = (void*) &v1;
+ values[1] = (void*) &s;
+ values[0] = (void*) &v2;
+
+ /* Initialize the cif */
+ CHECK(ffi_prep_cif(&cif, FFI_FASTCALL, 3,
+ &ffi_type_sint, args) == FFI_OK);
+
+ s = "a";
+ v1 = 1;
+ v2 = 0.0;
+ ffi_call(&cif, FFI_FN(my_fastcall_f), &rint, values);
+ CHECK(rint == 2);
+
+ s = "1234567";
+ v2 = -1.0;
+ v1 = -2;
+ ffi_call(&cif, FFI_FN(my_fastcall_f), &rint, values);
+ CHECK(rint == 4);
+
+ s = "1234567890123456789012345";
+ v2 = 1.0;
+ v1 = 2;
+ ffi_call(&cif, FFI_FN(my_fastcall_f), &rint, values);
+ CHECK(rint == 28);
+
+ printf("fastcall fct3 tests passed\n");
+ exit(0);
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/ffitest.h b/Modules/_ctypes/libffi/testsuite/libffi.call/ffitest.h
index 2cb9849..136a7a6 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/ffitest.h
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/ffitest.h
@@ -15,7 +15,7 @@
#define MAX_ARGS 256
-#define CHECK(x) !(x) ? abort() : 0
+#define CHECK(x) !(x) ? (abort(), 1) : 0
/* Define __UNUSED__ that also other compilers than gcc can run the tests. */
#undef __UNUSED__
@@ -25,6 +25,14 @@
#define __UNUSED__
#endif
+/* Define __FASTCALL__ so that compilers other than gcc can run the tests. */
+#undef __FASTCALL__
+#if defined _MSC_VER
+#define __FASTCALL__ __fastcall
+#else
+#define __FASTCALL__ __attribute__((fastcall))
+#endif
+
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
file open. */
#ifdef HAVE_MMAP_ANON
@@ -67,6 +75,8 @@
#define PRIdLL "ld"
#undef PRIuLL
#define PRIuLL "lu"
+#define PRId8 "hd"
+#define PRIu8 "hu"
#define PRId64 "ld"
#define PRIu64 "lu"
#define PRIuPTR "lu"
@@ -77,6 +87,28 @@
#define PRIuPTR "lu"
#endif
+/* IRIX kludge. */
+#if defined(__sgi)
+/* IRIX 6.5 <inttypes.h> provides all definitions, but only for C99
+ compilations. */
+#define PRId8 "hhd"
+#define PRIu8 "hhu"
+#if (_MIPS_SZLONG == 32)
+#define PRId64 "lld"
+#define PRIu64 "llu"
+#endif
+/* This doesn't match <inttypes.h>, which always has "lld" here, but the
+ arguments are uint64_t, int64_t, which are unsigned long, long for
+ 64-bit in <sgidefs.h>. */
+#if (_MIPS_SZLONG == 64)
+#define PRId64 "ld"
+#define PRIu64 "lu"
+#endif
+/* This doesn't match <inttypes.h>, which has "u" here, but the arguments
+ are uintptr_t, which is always unsigned long. */
+#define PRIuPTR "lu"
+#endif
+
/* Solaris < 10 kludge. */
#if defined(__sun__) && defined(__svr4__) && !defined(PRIuPTR)
#if defined(__arch64__) || defined (__x86_64__)
@@ -86,44 +118,15 @@
#endif
#endif
-#ifdef USING_MMAP
-static inline void *
-allocate_mmap (size_t size)
-{
- void *page;
-#if defined (HAVE_MMAP_DEV_ZERO)
- static int dev_zero_fd = -1;
-#endif
-
-#ifdef HAVE_MMAP_DEV_ZERO
- if (dev_zero_fd == -1)
- {
- dev_zero_fd = open ("/dev/zero", O_RDONLY);
- if (dev_zero_fd == -1)
- {
- perror ("open /dev/zero: %m");
- exit (1);
- }
- }
-#endif
-
-
-#ifdef HAVE_MMAP_ANON
- page = mmap (NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-#endif
-#ifdef HAVE_MMAP_DEV_ZERO
- page = mmap (NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE, dev_zero_fd, 0);
+/* MSVC kludge. */
+#if defined _MSC_VER
+#define PRIuPTR "lu"
+#define PRIu8 "u"
+#define PRId8 "d"
+#define PRIu64 "I64u"
+#define PRId64 "I64d"
#endif
- if (page == (void *) MAP_FAILED)
- {
- perror ("virtual memory exhausted");
- exit (1);
- }
-
- return page;
-}
-
+#ifndef PRIuPTR
+#define PRIuPTR "u"
#endif
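Aside (not part of the patch): two details of this ffitest.h hunk are worth spelling out. CHECK now uses the comma expression (abort(), 1) so that both arms of the conditional operator have type int; the old form mixed a void arm (abort()) with an int arm, which some of the compilers now targeted accept only as an extension, if at all. __FASTCALL__ follows the existing __UNUSED__ pattern so the new *_win32.c tests compile with both gcc and MSVC. A minimal sketch of how the two macros combine (meaningful on x86 only, since fastcall is an x86 convention; the function name is illustrative):

    #include <stdlib.h>

    #define CHECK(x) !(x) ? (abort(), 1) : 0

    #if defined _MSC_VER
    #define __FASTCALL__ __fastcall
    #else
    #define __FASTCALL__ __attribute__((fastcall))
    #endif

    static int __FASTCALL__ twice (int n) { return 2 * n; }

    int main (void)
    {
      CHECK (twice (21) == 42);   /* expands to !(...) ? (abort(), 1) : 0; */
      return 0;
    }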
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/float_va.c b/Modules/_ctypes/libffi/testsuite/libffi.call/float_va.c
new file mode 100644
index 0000000..5acff91
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/float_va.c
@@ -0,0 +1,107 @@
+/* Area: fp and variadics
+ Purpose: check fp inputs and returns work on variadics, even the fixed params
+ Limitations: None
+ PR: none
+ Originator: <david.gilbert@linaro.org> 2011-01-25
+
+ Intended to stress the difference in ABI on ARM vfp
+*/
+
+/* { dg-do run } */
+
+#include <stdarg.h>
+
+#include "ffitest.h"
+
+/* prints out all the parameters, and returns the sum of them all.
+ * 'x' is the number of variadic parameters all of which are double in this test
+ */
+double float_va_fn(unsigned int x, double y,...)
+{
+ double total=0.0;
+ va_list ap;
+ unsigned int i;
+
+ total+=(double)x;
+ total+=y;
+
+ printf("%u: %.1f :", x, y);
+
+ va_start(ap, y);
+ for(i=0;i<x;i++)
+ {
+ double arg=va_arg(ap, double);
+ total+=arg;
+ printf(" %d:%.1f ", i, arg);
+ }
+ va_end(ap);
+
+ printf(" total: %.1f\n", total);
+
+ return total;
+}
+
+int main (void)
+{
+ ffi_cif cif;
+
+ ffi_type *arg_types[5];
+ void *values[5];
+ double doubles[5];
+ unsigned int firstarg;
+ double resfp;
+
+ /* First test, pass float_va_fn(0,2.0) - note there are no actual
+ * variadic parameters, but it's declared variadic so the ABI may be
+ * different. */
+ /* Call it statically and then via ffi */
+ resfp=float_va_fn(0,2.0);
+ /* { dg-output "0: 2.0 : total: 2.0" } */
+ printf("compiled: %.1f\n", resfp);
+ /* { dg-output "\ncompiled: 2.0" } */
+
+ arg_types[0] = &ffi_type_uint;
+ arg_types[1] = &ffi_type_double;
+ arg_types[2] = NULL;
+ CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 2, 2,
+ &ffi_type_double, arg_types) == FFI_OK);
+
+ firstarg = 0;
+ doubles[0] = 2.0;
+ values[0] = &firstarg;
+ values[1] = &doubles[0];
+ ffi_call(&cif, FFI_FN(float_va_fn), &resfp, values);
+ /* { dg-output "\n0: 2.0 : total: 2.0" } */
+ printf("ffi: %.1f\n", resfp);
+ /* { dg-output "\nffi: 2.0" } */
+
+ /* Second test, float_va_fn(2,2.0,3.0,4.0), now with variadic params */
+ /* Call it statically and then via ffi */
+ resfp=float_va_fn(2,2.0,3.0,4.0);
+ /* { dg-output "\n2: 2.0 : 0:3.0 1:4.0 total: 11.0" } */
+ printf("compiled: %.1f\n", resfp);
+ /* { dg-output "\ncompiled: 11.0" } */
+
+ arg_types[0] = &ffi_type_uint;
+ arg_types[1] = &ffi_type_double;
+ arg_types[2] = &ffi_type_double;
+ arg_types[3] = &ffi_type_double;
+ arg_types[4] = NULL;
+ CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 2, 4,
+ &ffi_type_double, arg_types) == FFI_OK);
+
+ firstarg = 2;
+ doubles[0] = 2.0;
+ doubles[1] = 3.0;
+ doubles[2] = 4.0;
+ values[0] = &firstarg;
+ values[1] = &doubles[0];
+ values[2] = &doubles[1];
+ values[3] = &doubles[2];
+ ffi_call(&cif, FFI_FN(float_va_fn), &resfp, values);
+ /* { dg-output "\n2: 2.0 : 0:3.0 1:4.0 total: 11.0" } */
+ printf("ffi: %.1f\n", resfp);
+ /* { dg-output "\nffi: 11.0" } */
+
+ exit(0);
+}
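Aside (not part of the patch): the "difference in ABI on ARM vfp" mentioned in the header is that, under the hard-float AAPCS variant, fixed floating-point arguments travel in VFP registers while a variadic callee is always called with the base (integer-register) convention. That is why the first call still goes through ffi_prep_cif_var with nfixedargs == ntotalargs == 2: the CIF has to record that the callee is variadic even though no anonymous arguments are passed, otherwise the fixed double y would be marshalled in the wrong registers on such targets.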
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/huge_struct.c b/Modules/_ctypes/libffi/testsuite/libffi.call/huge_struct.c
index 9cffb71..657fe54 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/huge_struct.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/huge_struct.c
@@ -7,7 +7,8 @@
/* { dg-excess-errors "" { target x86_64-*-mingw* x86_64-*-cygwin* } } */
/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */
-/* { dg-options -mlong-double-128 { target powerpc64*-*-* } } */
+/* { dg-options -mlong-double-128 { target powerpc64*-*-linux* } } */
+/* { dg-options -Wformat=0 { target moxie*-*-elf } } */
/* { dg-output "" { xfail x86_64-*-mingw* x86_64-*-cygwin* } } */
#include "ffitest.h"
@@ -129,14 +130,14 @@ test_large_fn(
ui64_4 + 4, si64_4 + 4, f_4 + 4, d_4 + 4, ld_4 + 4, (char*)((intptr_t)p_4 + 4),
ui8_5 + 5, si8_5 + 5};
- printf("%hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
- "%hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
- "%hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
- "%hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %hhu %hhd: "
- "%hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
- "%hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
- "%hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
- "%hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %hhu %hhd\n",
+ printf("%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
+ "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
+ "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
+ "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 ": "
+ "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
+ "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
+ "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
+ "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 "\n",
ui8_1, si8_1, ui16_1, si16_1, ui32_1, si32_1, ui64_1, si64_1, f_1, d_1, ld_1, (unsigned long)p_1,
ui8_2, si8_2, ui16_2, si16_2, ui32_2, si32_2, ui64_2, si64_2, f_2, d_2, ld_2, (unsigned long)p_2,
ui8_3, si8_3, ui16_3, si16_3, ui32_3, si32_3, ui64_3, si64_3, f_3, d_3, ld_3, (unsigned long)p_3,
@@ -229,6 +230,19 @@ main(int argc __UNUSED__, const char** argv __UNUSED__)
ffi_type* st_fields[51];
BigStruct retVal;
+ uint8_t ui8 = 1;
+ int8_t si8 = 2;
+ uint16_t ui16 = 3;
+ int16_t si16 = 4;
+ uint32_t ui32 = 5;
+ int32_t si32 = 6;
+ uint64_t ui64 = 7;
+ int64_t si64 = 8;
+ float f = 9;
+ double d = 10;
+ long double ld = 11;
+ char* p = (char*)0x12345678;
+
memset (&retVal, 0, sizeof(retVal));
ret_struct_type.size = 0;
@@ -251,19 +265,6 @@ main(int argc __UNUSED__, const char** argv __UNUSED__)
st_fields[50] = NULL;
- uint8_t ui8 = 1;
- int8_t si8 = 2;
- uint16_t ui16 = 3;
- int16_t si16 = 4;
- uint32_t ui32 = 5;
- int32_t si32 = 6;
- uint64_t ui64 = 7;
- int64_t si64 = 8;
- float f = 9;
- double d = 10;
- long double ld = 11;
- char* p = (char*)0x12345678;
-
argTypes[0] = argTypes[12] = argTypes[24] = argTypes[36] = argTypes[48] = &ffi_type_uint8;
argValues[0] = argValues[12] = argValues[24] = argValues[36] = argValues[48] = &ui8;
argTypes[1] = argTypes[13] = argTypes[25] = argTypes[37] = argTypes[49] = &ffi_type_sint8;
@@ -295,11 +296,11 @@ main(int argc __UNUSED__, const char** argv __UNUSED__)
CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 50, &ret_struct_type, argTypes) == FFI_OK);
ffi_call(&cif, FFI_FN(test_large_fn), &retVal, argValues);
- // { dg-output "1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" }
- printf("res: %hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
- "%hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
- "%hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
- "%hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %hhu %hhd\n",
+ /* { dg-output "1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */
+ printf("res: %" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
+ "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
+ "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
+ "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 "\n",
retVal.a, retVal.b, retVal.c, retVal.d, retVal.e, retVal.f,
retVal.g, retVal.h, retVal.i, retVal.j, retVal.k, (unsigned long)retVal.l,
retVal.m, retVal.n, retVal.o, retVal.p, retVal.q, retVal.r,
@@ -308,7 +309,7 @@ main(int argc __UNUSED__, const char** argv __UNUSED__)
retVal.ee, retVal.ff, retVal.gg, retVal.hh, retVal.ii, (unsigned long)retVal.jj,
retVal.kk, retVal.ll, retVal.mm, retVal.nn, retVal.oo, retVal.pp,
retVal.qq, retVal.rr, retVal.ss, retVal.tt, retVal.uu, (unsigned long)retVal.vv, retVal.ww, retVal.xx);
- // { dg-output "\nres: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" }
+ /* { dg-output "\nres: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */
CHECK(ffi_prep_closure_loc(pcl, &cif, cls_large_fn, NULL, code) == FFI_OK);
@@ -323,11 +324,11 @@ main(int argc __UNUSED__, const char** argv __UNUSED__)
ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p,
ui8, si8, ui16, si16, ui32, si32, ui64, si64, f, d, ld, p,
ui8, si8);
- // { dg-output "\n1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" }
- printf("res: %hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
- "%hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
- "%hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
- "%hhu %hhd %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %hhu %hhd\n",
+ /* { dg-output "\n1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2 3 4 5 6 7 8 9 10 11 0x12345678 1 2: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */
+ printf("res: %" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
+ "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
+ "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx "
+ "%" PRIu8 " %" PRId8 " %hu %hd %u %d %" PRIu64 " %" PRId64 " %.0f %.0f %.0Lf %#lx %" PRIu8 " %" PRId8 "\n",
retVal.a, retVal.b, retVal.c, retVal.d, retVal.e, retVal.f,
retVal.g, retVal.h, retVal.i, retVal.j, retVal.k, (unsigned long)retVal.l,
retVal.m, retVal.n, retVal.o, retVal.p, retVal.q, retVal.r,
@@ -336,7 +337,7 @@ main(int argc __UNUSED__, const char** argv __UNUSED__)
retVal.ee, retVal.ff, retVal.gg, retVal.hh, retVal.ii, (unsigned long)retVal.jj,
retVal.kk, retVal.ll, retVal.mm, retVal.nn, retVal.oo, retVal.pp,
retVal.qq, retVal.rr, retVal.ss, retVal.tt, retVal.uu, (unsigned long)retVal.vv, retVal.ww, retVal.xx);
- // { dg-output "\nres: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" }
+ /* { dg-output "\nres: 2 3 4 5 6 7 8 9 10 11 12 0x12345679 3 4 5 6 7 8 9 10 11 12 13 0x1234567a 4 5 6 7 8 9 10 11 12 13 14 0x1234567b 5 6 7 8 9 10 11 12 13 14 15 0x1234567c 6 7" } */
return 0;
}
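Aside (not part of the patch): the printf format change swaps the C99 "hh" length modifier for the PRIu8/PRId8 macros that the ffitest.h hunk above now provides ("hu"/"hd" by default, "u"/"d" for MSVC, "hhu"/"hhd" on IRIX). The substitution is harmless because the uint8_t/int8_t arguments are promoted to int in the variadic call, so the wider conversion specifiers print the same values; for example:

    uint8_t b = 200;
    printf ("%" PRIu8 "\n", b);   /* b is promoted to int; prints 200 with "u", "hu" or "hhu" */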
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/many2.c b/Modules/_ctypes/libffi/testsuite/libffi.call/many2.c
new file mode 100644
index 0000000..98eac60
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/many2.c
@@ -0,0 +1,57 @@
+/* Area: ffi_call
+ Purpose: Check uint8_t arguments.
+ Limitations: none.
+ PR: PR45677.
+ Originator: Dan Witte <dwitte@gmail.com> 20100916 */
+
+/* { dg-do run } */
+
+#include "ffitest.h"
+
+#define NARGS 7
+
+typedef unsigned char u8;
+
+#ifdef __GNUC__
+__attribute__((noinline))
+#endif
+uint8_t
+foo (uint8_t a, uint8_t b, uint8_t c, uint8_t d,
+ uint8_t e, uint8_t f, uint8_t g)
+{
+ return a + b + c + d + e + f + g;
+}
+
+uint8_t
+bar (uint8_t a, uint8_t b, uint8_t c, uint8_t d,
+ uint8_t e, uint8_t f, uint8_t g)
+{
+ return foo (a, b, c, d, e, f, g);
+}
+
+int
+main (void)
+{
+ ffi_type *ffitypes[NARGS];
+ int i;
+ ffi_cif cif;
+ ffi_arg result = 0;
+ uint8_t args[NARGS];
+ void *argptrs[NARGS];
+
+ for (i = 0; i < NARGS; ++i)
+ ffitypes[i] = &ffi_type_uint8;
+
+ CHECK (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, NARGS,
+ &ffi_type_uint8, ffitypes) == FFI_OK);
+
+ for (i = 0; i < NARGS; ++i)
+ {
+ args[i] = i;
+ argptrs[i] = &args[i];
+ }
+ ffi_call (&cif, FFI_FN (bar), &result, argptrs);
+
+ CHECK (result == 21);
+ return 0;
+}
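Aside (not part of the patch): the expected value is simply the sum of the seven uint8_t arguments 0 through 6, i.e. 0 + 1 + 2 + 3 + 4 + 5 + 6 = 21, which is what CHECK (result == 21) verifies after routing the call through ffi_call and the extra bar/foo indirection.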
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/many2_win32.c b/Modules/_ctypes/libffi/testsuite/libffi.call/many2_win32.c
new file mode 100644
index 0000000..4adbe4d
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/many2_win32.c
@@ -0,0 +1,63 @@
+/* Area: ffi_call
+ Purpose: Check stdcall many call on X86_WIN32 systems.
+ Limitations: none.
+ PR: none.
+ Originator: From the original ffitest.c */
+
+/* { dg-do run { target i?86-*-cygwin* i?86-*-mingw* } } */
+
+#include "ffitest.h"
+#include <float.h>
+
+static float __attribute__((fastcall)) fastcall_many(float f1,
+ float f2,
+ float f3,
+ float f4,
+ float f5,
+ float f6,
+ float f7,
+ float f8,
+ float f9,
+ float f10,
+ float f11,
+ float f12,
+ float f13)
+{
+ return ((f1/f2+f3/f4+f5/f6+f7/f8+f9/f10+f11/f12) * f13);
+}
+
+int main (void)
+{
+ ffi_cif cif;
+ ffi_type *args[13];
+ void *values[13];
+ float fa[13];
+ float f, ff;
+ unsigned long ul;
+
+ for (ul = 0; ul < 13; ul++)
+ {
+ args[ul] = &ffi_type_float;
+ values[ul] = &fa[ul];
+ fa[ul] = (float) ul;
+ }
+
+ /* Initialize the cif */
+ CHECK(ffi_prep_cif(&cif, FFI_FASTCALL, 13,
+ &ffi_type_float, args) == FFI_OK);
+
+ ff = fastcall_many(fa[0], fa[1],
+ fa[2], fa[3],
+ fa[4], fa[5],
+ fa[6], fa[7],
+ fa[8], fa[9],
+ fa[10], fa[11], fa[12]);
+
+ ffi_call(&cif, FFI_FN(fastcall_many), &f, values);
+
+ if (f - ff < FLT_EPSILON)
+ printf("fastcall many arg tests ok!\n");
+ else
+ CHECK(0);
+ exit(0);
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/negint.c b/Modules/_ctypes/libffi/testsuite/libffi.call/negint.c
index 3168113..6e2f26f 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/negint.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/negint.c
@@ -5,7 +5,6 @@
Originator: From the original ffitest.c */
/* { dg-do run } */
-/* { dg-options -O2 } */
#include "ffitest.h"
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct.c b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct.c
index 8aa527e..c15e3a0 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct.c
@@ -77,6 +77,12 @@ int main (void)
ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_16byte1 e_dbl = { 9.0, 2.0, 6};
+ struct cls_struct_16byte2 f_dbl = { 1, 2.0, 3.0};
+ struct cls_struct_combined g_dbl = {{4.0, 5.0, 6},
+ {3, 1.0, 8.0}};
+ struct cls_struct_combined res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
@@ -92,12 +98,6 @@ int main (void)
cls_struct_type2.type = FFI_TYPE_STRUCT;
cls_struct_type2.elements = cls_struct_fields2;
- struct cls_struct_16byte1 e_dbl = { 9.0, 2.0, 6};
- struct cls_struct_16byte2 f_dbl = { 1, 2.0, 3.0};
- struct cls_struct_combined g_dbl = {{4.0, 5.0, 6},
- {3, 1.0, 8.0}};
- struct cls_struct_combined res_dbl;
-
cls_struct_fields[0] = &ffi_type_double;
cls_struct_fields[1] = &ffi_type_float;
cls_struct_fields[2] = &ffi_type_sint;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct1.c b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct1.c
index 2a9f515..477a6b9 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct1.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct1.c
@@ -81,6 +81,13 @@ int main (void)
ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_16byte1 e_dbl = { 9.0, 2.0, 6};
+ struct cls_struct_16byte2 f_dbl = { 1, 2.0, 3.0};
+ struct cls_struct_combined g_dbl = {{4.0, 5.0, 6},
+ {3, 1.0, 8.0}};
+ struct cls_struct_16byte1 h_dbl = { 3.0, 2.0, 4};
+ struct cls_struct_combined res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
@@ -96,13 +103,6 @@ int main (void)
cls_struct_type2.type = FFI_TYPE_STRUCT;
cls_struct_type2.elements = cls_struct_fields2;
- struct cls_struct_16byte1 e_dbl = { 9.0, 2.0, 6};
- struct cls_struct_16byte2 f_dbl = { 1, 2.0, 3.0};
- struct cls_struct_combined g_dbl = {{4.0, 5.0, 6},
- {3, 1.0, 8.0}};
- struct cls_struct_16byte1 h_dbl = { 3.0, 2.0, 4};
- struct cls_struct_combined res_dbl;
-
cls_struct_fields[0] = &ffi_type_double;
cls_struct_fields[1] = &ffi_type_float;
cls_struct_fields[2] = &ffi_type_sint;
@@ -156,6 +156,6 @@ int main (void)
CHECK( res_dbl.e.ii == (e_dbl.c + f_dbl.ii + g_dbl.e.ii));
CHECK( res_dbl.e.dd == (e_dbl.a + f_dbl.dd + g_dbl.e.dd));
CHECK( res_dbl.e.ff == (e_dbl.b + f_dbl.ff + g_dbl.e.ff));
- // CHECK( 1 == 0);
+ /* CHECK( 1 == 0); */
exit(0);
}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct10.c b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct10.c
index d6a718b..34a74e7 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct10.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct10.c
@@ -67,6 +67,12 @@ int main (void)
ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2;
ffi_type* dbl_arg_types[4];
+ struct A e_dbl = { 1LL, 7};
+ struct B f_dbl = { 99, {12LL , 127}, 255};
+ struct C g_dbl = { 2LL, 9};
+
+ struct B res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
@@ -82,12 +88,6 @@ int main (void)
cls_struct_type2.type = FFI_TYPE_STRUCT;
cls_struct_type2.elements = cls_struct_fields2;
- struct A e_dbl = { 1LL, 7};
- struct B f_dbl = { 99, {12LL , 127}, 255};
- struct C g_dbl = { 2LL, 9};
-
- struct B res_dbl;
-
cls_struct_fields[0] = &ffi_type_uint64;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct11.c b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct11.c
new file mode 100644
index 0000000..fce6948
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct11.c
@@ -0,0 +1,121 @@
+/* Area: ffi_call, closure_call
+ Purpose: Check parameter passing with nested structs
+ of a single type. This tests the special cases
+   for homogeneous floating-point aggregates in the
+ AArch64 PCS.
+ Limitations: none.
+ PR: none.
+ Originator: ARM Ltd. */
+
+/* { dg-do run } */
+#include "ffitest.h"
+
+typedef struct A {
+ float a_x;
+ float a_y;
+} A;
+
+typedef struct B {
+ float b_x;
+ float b_y;
+} B;
+
+typedef struct C {
+ A a;
+ B b;
+} C;
+
+static C C_fn (int x, int y, int z, C source, int i, int j, int k)
+{
+ C result;
+ result.a.a_x = source.a.a_x;
+ result.a.a_y = source.a.a_y;
+ result.b.b_x = source.b.b_x;
+ result.b.b_y = source.b.b_y;
+
+ printf ("%d, %d, %d, %d, %d, %d\n", x, y, z, i, j, k);
+
+ printf ("%.1f, %.1f, %.1f, %.1f, "
+ "%.1f, %.1f, %.1f, %.1f\n",
+ source.a.a_x, source.a.a_y,
+ source.b.b_x, source.b.b_y,
+ result.a.a_x, result.a.a_y,
+ result.b.b_x, result.b.b_y);
+
+ return result;
+}
+
+int main (void)
+{
+ ffi_cif cif;
+
+ ffi_type* struct_fields_source_a[3];
+ ffi_type* struct_fields_source_b[3];
+ ffi_type* struct_fields_source_c[3];
+ ffi_type* arg_types[8];
+
+ ffi_type struct_type_a, struct_type_b, struct_type_c;
+
+ struct A source_fld_a = {1.0, 2.0};
+ struct B source_fld_b = {4.0, 8.0};
+ int k = 1;
+
+ struct C result;
+ struct C source = {source_fld_a, source_fld_b};
+
+ struct_type_a.size = 0;
+ struct_type_a.alignment = 0;
+ struct_type_a.type = FFI_TYPE_STRUCT;
+ struct_type_a.elements = struct_fields_source_a;
+
+ struct_type_b.size = 0;
+ struct_type_b.alignment = 0;
+ struct_type_b.type = FFI_TYPE_STRUCT;
+ struct_type_b.elements = struct_fields_source_b;
+
+ struct_type_c.size = 0;
+ struct_type_c.alignment = 0;
+ struct_type_c.type = FFI_TYPE_STRUCT;
+ struct_type_c.elements = struct_fields_source_c;
+
+ struct_fields_source_a[0] = &ffi_type_float;
+ struct_fields_source_a[1] = &ffi_type_float;
+ struct_fields_source_a[2] = NULL;
+
+ struct_fields_source_b[0] = &ffi_type_float;
+ struct_fields_source_b[1] = &ffi_type_float;
+ struct_fields_source_b[2] = NULL;
+
+ struct_fields_source_c[0] = &struct_type_a;
+ struct_fields_source_c[1] = &struct_type_b;
+ struct_fields_source_c[2] = NULL;
+
+ arg_types[0] = &ffi_type_sint32;
+ arg_types[1] = &ffi_type_sint32;
+ arg_types[2] = &ffi_type_sint32;
+ arg_types[3] = &struct_type_c;
+ arg_types[4] = &ffi_type_sint32;
+ arg_types[5] = &ffi_type_sint32;
+ arg_types[6] = &ffi_type_sint32;
+ arg_types[7] = NULL;
+
+ void *args[7];
+ args[0] = &k;
+ args[1] = &k;
+ args[2] = &k;
+ args[3] = &source;
+ args[4] = &k;
+ args[5] = &k;
+ args[6] = &k;
+ CHECK (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 7, &struct_type_c,
+ arg_types) == FFI_OK);
+
+ ffi_call (&cif, FFI_FN (C_fn), &result, args);
+ /* { dg-output "1, 1, 1, 1, 1, 1\n" } */
+ /* { dg-output "1.0, 2.0, 4.0, 8.0, 1.0, 2.0, 4.0, 8.0" } */
+ CHECK (result.a.a_x == source.a.a_x);
+ CHECK (result.a.a_y == source.a.a_y);
+ CHECK (result.b.b_x == source.b.b_x);
+ CHECK (result.b.b_y == source.b.b_y);
+ exit (0);
+}
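Aside (not part of the patch): under the AArch64 PCS a homogeneous floating-point aggregate is a struct that, after flattening any nested structs, consists of one to four members of the same floating-point type. The C used above flattens to four floats:

    /* what struct C amounts to for parameter passing */
    struct C_flat { float a_x, a_y, b_x, b_y; };   /* 4 x float => HFA */

so it is passed and returned in SIMD/FP registers (one element per register, s0-s3) rather than indirectly through memory, which is exactly the special case this test exercises.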
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct2.c b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct2.c
index de1584c..69268cd 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct2.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct2.c
@@ -57,6 +57,11 @@ int main (void)
ffi_type cls_struct_type, cls_struct_type1;
ffi_type* dbl_arg_types[3];
+ struct A e_dbl = { 1, 7};
+ struct B f_dbl = {{12 , 127}, 99};
+
+ struct B res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
@@ -67,11 +72,6 @@ int main (void)
cls_struct_type1.type = FFI_TYPE_STRUCT;
cls_struct_type1.elements = cls_struct_fields1;
- struct A e_dbl = { 1, 7};
- struct B f_dbl = {{12 , 127}, 99};
-
- struct B res_dbl;
-
cls_struct_fields[0] = &ffi_type_ulong;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct3.c b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct3.c
index 58aa853..ab18cad 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct3.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct3.c
@@ -58,6 +58,11 @@ int main (void)
ffi_type cls_struct_type, cls_struct_type1;
ffi_type* dbl_arg_types[3];
+ struct A e_dbl = { 1LL, 7};
+ struct B f_dbl = {{12LL , 127}, 99};
+
+ struct B res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
@@ -68,11 +73,6 @@ int main (void)
cls_struct_type1.type = FFI_TYPE_STRUCT;
cls_struct_type1.elements = cls_struct_fields1;
- struct A e_dbl = { 1LL, 7};
- struct B f_dbl = {{12LL , 127}, 99};
-
- struct B res_dbl;
-
cls_struct_fields[0] = &ffi_type_uint64;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct4.c b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct4.c
index 98e491e..2ffb4d6 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct4.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct4.c
@@ -58,6 +58,11 @@ int main (void)
ffi_type cls_struct_type, cls_struct_type1;
ffi_type* dbl_arg_types[3];
+ struct A e_dbl = { 1.0, 7};
+ struct B f_dbl = {{12.0 , 127}, 99};
+
+ struct B res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
@@ -68,11 +73,6 @@ int main (void)
cls_struct_type1.type = FFI_TYPE_STRUCT;
cls_struct_type1.elements = cls_struct_fields1;
- struct A e_dbl = { 1.0, 7};
- struct B f_dbl = {{12.0 , 127}, 99};
-
- struct B res_dbl;
-
cls_struct_fields[0] = &ffi_type_double;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct5.c b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct5.c
index d8e3537..6c79845 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct5.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct5.c
@@ -58,6 +58,11 @@ int main (void)
ffi_type cls_struct_type, cls_struct_type1;
ffi_type* dbl_arg_types[3];
+ struct A e_dbl = { 1.0, 7};
+ struct B f_dbl = {{12.0 , 127}, 99};
+
+ struct B res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
@@ -68,11 +73,6 @@ int main (void)
cls_struct_type1.type = FFI_TYPE_STRUCT;
cls_struct_type1.elements = cls_struct_fields1;
- struct A e_dbl = { 1.0, 7};
- struct B f_dbl = {{12.0 , 127}, 99};
-
- struct B res_dbl;
-
cls_struct_fields[0] = &ffi_type_longdouble;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct6.c b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct6.c
index 2f2b25a..59d3579 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct6.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct6.c
@@ -66,6 +66,12 @@ int main (void)
ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2;
ffi_type* dbl_arg_types[4];
+ struct A e_dbl = { 1.0, 7};
+ struct B f_dbl = {{12.0 , 127}, 99};
+ struct C g_dbl = { 2, 9};
+
+ struct B res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
@@ -81,12 +87,6 @@ int main (void)
cls_struct_type2.type = FFI_TYPE_STRUCT;
cls_struct_type2.elements = cls_struct_fields2;
- struct A e_dbl = { 1.0, 7};
- struct B f_dbl = {{12.0 , 127}, 99};
- struct C g_dbl = { 2, 9};
-
- struct B res_dbl;
-
cls_struct_fields[0] = &ffi_type_double;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct7.c b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct7.c
index 14c7023..27595e6 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct7.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct7.c
@@ -58,6 +58,11 @@ int main (void)
ffi_type cls_struct_type, cls_struct_type1;
ffi_type* dbl_arg_types[3];
+ struct A e_dbl = { 1LL, 7};
+ struct B f_dbl = {{12.0 , 127}, 99};
+
+ struct B res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
@@ -68,11 +73,6 @@ int main (void)
cls_struct_type1.type = FFI_TYPE_STRUCT;
cls_struct_type1.elements = cls_struct_fields1;
- struct A e_dbl = { 1LL, 7};
- struct B f_dbl = {{12.0 , 127}, 99};
-
- struct B res_dbl;
-
cls_struct_fields[0] = &ffi_type_uint64;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct8.c b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct8.c
index bb77ead..0e6c682 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct8.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct8.c
@@ -66,6 +66,12 @@ int main (void)
ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2;
ffi_type* dbl_arg_types[4];
+ struct A e_dbl = { 1LL, 7};
+ struct B f_dbl = {{12LL , 127}, 99};
+ struct C g_dbl = { 2LL, 9};
+
+ struct B res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
@@ -81,12 +87,6 @@ int main (void)
cls_struct_type2.type = FFI_TYPE_STRUCT;
cls_struct_type2.elements = cls_struct_fields2;
- struct A e_dbl = { 1LL, 7};
- struct B f_dbl = {{12LL , 127}, 99};
- struct C g_dbl = { 2LL, 9};
-
- struct B res_dbl;
-
cls_struct_fields[0] = &ffi_type_uint64;
cls_struct_fields[1] = &ffi_type_uchar;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct9.c b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct9.c
index e9f541c..5f7ac67 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct9.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/nested_struct9.c
@@ -66,6 +66,12 @@ int main (void)
ffi_type cls_struct_type, cls_struct_type1, cls_struct_type2;
ffi_type* dbl_arg_types[4];
+ struct A e_dbl = { 1, 7LL};
+ struct B f_dbl = {{12.0 , 127}, 99};
+ struct C g_dbl = { 2, 9};
+
+ struct B res_dbl;
+
cls_struct_type.size = 0;
cls_struct_type.alignment = 0;
cls_struct_type.type = FFI_TYPE_STRUCT;
@@ -81,12 +87,6 @@ int main (void)
cls_struct_type2.type = FFI_TYPE_STRUCT;
cls_struct_type2.elements = cls_struct_fields2;
- struct A e_dbl = { 1, 7LL};
- struct B f_dbl = {{12.0 , 127}, 99};
- struct C g_dbl = { 2, 9};
-
- struct B res_dbl;
-
cls_struct_fields[0] = &ffi_type_uchar;
cls_struct_fields[1] = &ffi_type_uint64;
cls_struct_fields[2] = NULL;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/return_dbl.c b/Modules/_ctypes/libffi/testsuite/libffi.call/return_dbl.c
index 1aab403..fd07e50 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/return_dbl.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/return_dbl.c
@@ -9,6 +9,7 @@
static double return_dbl(double dbl)
{
+ printf ("%f\n", dbl);
return 2 * dbl;
}
int main (void)
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/return_sc.c b/Modules/_ctypes/libffi/testsuite/libffi.call/return_sc.c
index 19608ee..a36cf3e 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/return_sc.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/return_sc.c
@@ -30,7 +30,7 @@ int main (void)
sc < (signed char) 127; sc++)
{
ffi_call(&cif, FFI_FN(return_sc), &rint, values);
- CHECK(rint == (ffi_arg) sc);
+ CHECK((signed char)rint == sc);
}
exit(0);
}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/return_uc.c b/Modules/_ctypes/libffi/testsuite/libffi.call/return_uc.c
index 07c45de..6fe5546 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/return_uc.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/return_uc.c
@@ -32,7 +32,7 @@ int main (void)
uc < (unsigned char) '\xff'; uc++)
{
ffi_call(&cif, FFI_FN(return_uc), &rint, values);
- CHECK(rint == (signed int) uc);
+ CHECK((unsigned char)rint == uc);
}
exit(0);
}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/stret_large.c b/Modules/_ctypes/libffi/testsuite/libffi.call/stret_large.c
index 23a93b9..71c2469 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/stret_large.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/stret_large.c
@@ -9,8 +9,8 @@
/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */
#include "ffitest.h"
-// 13 FPRs: 104 bytes
-// 14 FPRs: 112 bytes
+/* 13 FPRs: 104 bytes */
+/* 14 FPRs: 112 bytes */
typedef struct struct_108byte {
double a;
@@ -82,17 +82,17 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
- cls_struct_type.size = 0;
- cls_struct_type.alignment = 0;
- cls_struct_type.type = FFI_TYPE_STRUCT;
- cls_struct_type.elements = cls_struct_fields;
-
struct_108byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 1.0, 2.0, 3.0, 7.0, 2.0, 7 };
struct_108byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4.0, 5.0, 7.0, 9.0, 1.0, 4 };
struct_108byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 8.0, 6.0, 1.0, 4.0, 0.0, 3 };
struct_108byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 9.0, 2.0, 6.0, 5.0, 3.0, 2 };
struct_108byte res_dbl;
+ cls_struct_type.size = 0;
+ cls_struct_type.alignment = 0;
+ cls_struct_type.type = FFI_TYPE_STRUCT;
+ cls_struct_type.elements = cls_struct_fields;
+
cls_struct_fields[0] = &ffi_type_double;
cls_struct_fields[1] = &ffi_type_double;
cls_struct_fields[2] = &ffi_type_double;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/stret_large2.c b/Modules/_ctypes/libffi/testsuite/libffi.call/stret_large2.c
index e2599d2..d9c750e 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/stret_large2.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/stret_large2.c
@@ -9,8 +9,8 @@
/* { dg-do run { xfail strongarm*-*-* xscale*-*-* } } */
#include "ffitest.h"
-// 13 FPRs: 104 bytes
-// 14 FPRs: 112 bytes
+/* 13 FPRs: 104 bytes */
+/* 14 FPRs: 112 bytes */
typedef struct struct_116byte {
double a;
@@ -84,17 +84,17 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
- cls_struct_type.size = 0;
- cls_struct_type.alignment = 0;
- cls_struct_type.type = FFI_TYPE_STRUCT;
- cls_struct_type.elements = cls_struct_fields;
-
struct_116byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 7 };
struct_116byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4.0, 5.0, 7.0, 9.0, 1.0, 6.0, 4 };
struct_116byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 8.0, 6.0, 1.0, 4.0, 0.0, 7.0, 3 };
struct_116byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 9.0, 2.0, 6.0, 5.0, 3.0, 8.0, 2 };
struct_116byte res_dbl;
+ cls_struct_type.size = 0;
+ cls_struct_type.alignment = 0;
+ cls_struct_type.type = FFI_TYPE_STRUCT;
+ cls_struct_type.elements = cls_struct_fields;
+
cls_struct_fields[0] = &ffi_type_double;
cls_struct_fields[1] = &ffi_type_double;
cls_struct_fields[2] = &ffi_type_double;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/stret_medium.c b/Modules/_ctypes/libffi/testsuite/libffi.call/stret_medium.c
index 1fc6a9e..973ee02 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/stret_medium.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/stret_medium.c
@@ -68,17 +68,17 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
- cls_struct_type.size = 0;
- cls_struct_type.alignment = 0;
- cls_struct_type.type = FFI_TYPE_STRUCT;
- cls_struct_type.elements = cls_struct_fields;
-
struct_72byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 7.0 };
struct_72byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4.0 };
struct_72byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 3.0 };
struct_72byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 2.0 };
struct_72byte res_dbl;
+ cls_struct_type.size = 0;
+ cls_struct_type.alignment = 0;
+ cls_struct_type.type = FFI_TYPE_STRUCT;
+ cls_struct_type.elements = cls_struct_fields;
+
cls_struct_fields[0] = &ffi_type_double;
cls_struct_fields[1] = &ffi_type_double;
cls_struct_fields[2] = &ffi_type_double;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/stret_medium2.c b/Modules/_ctypes/libffi/testsuite/libffi.call/stret_medium2.c
index cb2f2fb..84323d1 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/stret_medium2.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/stret_medium2.c
@@ -69,17 +69,17 @@ int main (void)
ffi_type cls_struct_type;
ffi_type* dbl_arg_types[5];
- cls_struct_type.size = 0;
- cls_struct_type.alignment = 0;
- cls_struct_type.type = FFI_TYPE_STRUCT;
- cls_struct_type.elements = cls_struct_fields;
-
struct_72byte e_dbl = { 9.0, 2.0, 6.0, 5.0, 3.0, 4.0, 8.0, 1.0, 7 };
struct_72byte f_dbl = { 1.0, 2.0, 3.0, 7.0, 2.0, 5.0, 6.0, 7.0, 4 };
struct_72byte g_dbl = { 4.0, 5.0, 7.0, 9.0, 1.0, 1.0, 2.0, 9.0, 3 };
struct_72byte h_dbl = { 8.0, 6.0, 1.0, 4.0, 0.0, 3.0, 3.0, 1.0, 2 };
struct_72byte res_dbl;
+ cls_struct_type.size = 0;
+ cls_struct_type.alignment = 0;
+ cls_struct_type.type = FFI_TYPE_STRUCT;
+ cls_struct_type.elements = cls_struct_fields;
+
cls_struct_fields[0] = &ffi_type_double;
cls_struct_fields[1] = &ffi_type_double;
cls_struct_fields[2] = &ffi_type_double;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/strlen2_win32.c b/Modules/_ctypes/libffi/testsuite/libffi.call/strlen2_win32.c
new file mode 100644
index 0000000..0d81061
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/strlen2_win32.c
@@ -0,0 +1,44 @@
+/* Area: ffi_call
+ Purpose: Check fastcall strlen call on X86_WIN32 systems.
+ Limitations: none.
+ PR: none.
+ Originator: From the original ffitest.c */
+
+/* { dg-do run { target i?86-*-cygwin* i?86-*-mingw* } } */
+
+#include "ffitest.h"
+
+static size_t __FASTCALL__ my_fastcall_strlen(char *s)
+{
+ return (strlen(s));
+}
+
+int main (void)
+{
+ ffi_cif cif;
+ ffi_type *args[MAX_ARGS];
+ void *values[MAX_ARGS];
+ ffi_arg rint;
+ char *s;
+ args[0] = &ffi_type_pointer;
+ values[0] = (void*) &s;
+
+ /* Initialize the cif */
+ CHECK(ffi_prep_cif(&cif, FFI_FASTCALL, 1,
+ &ffi_type_sint, args) == FFI_OK);
+
+ s = "a";
+ ffi_call(&cif, FFI_FN(my_fastcall_strlen), &rint, values);
+ CHECK(rint == 1);
+
+ s = "1234567";
+ ffi_call(&cif, FFI_FN(my_fastcall_strlen), &rint, values);
+ CHECK(rint == 7);
+
+ s = "1234567890123456789012345";
+ ffi_call(&cif, FFI_FN(my_fastcall_strlen), &rint, values);
+ CHECK(rint == 25);
+
+ printf("fastcall strlen tests passed\n");
+ exit(0);
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/struct1.c b/Modules/_ctypes/libffi/testsuite/libffi.call/struct1.c
index ea76c85..bfc23f6 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/struct1.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/struct1.c
@@ -30,6 +30,13 @@ int main (void)
void *values[MAX_ARGS];
ffi_type ts1_type;
ffi_type *ts1_type_elements[4];
+
+ test_structure_1 ts1_arg;
+
+ /* This is a hack to get a properly aligned result buffer */
+ test_structure_1 *ts1_result =
+ (test_structure_1 *) malloc (sizeof(test_structure_1));
+
ts1_type.size = 0;
ts1_type.alignment = 0;
ts1_type.type = FFI_TYPE_STRUCT;
@@ -39,11 +46,6 @@ int main (void)
ts1_type_elements[2] = &ffi_type_uint;
ts1_type_elements[3] = NULL;
- test_structure_1 ts1_arg;
- /* This is a hack to get a properly aligned result buffer */
- test_structure_1 *ts1_result =
- (test_structure_1 *) malloc (sizeof(test_structure_1));
-
args[0] = &ts1_type;
values[0] = &ts1_arg;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/struct1_win32.c b/Modules/_ctypes/libffi/testsuite/libffi.call/struct1_win32.c
new file mode 100644
index 0000000..b756f5a
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/struct1_win32.c
@@ -0,0 +1,67 @@
+/* Area: ffi_call
+ Purpose: Check structures with fastcall/thiscall convention.
+ Limitations: none.
+ PR: none.
+ Originator: From the original ffitest.c */
+
+/* { dg-do run { target i?86-*-cygwin* i?86-*-mingw* } } */
+#include "ffitest.h"
+
+typedef struct
+{
+ unsigned char uc;
+ double d;
+ unsigned int ui;
+} test_structure_1;
+
+static test_structure_1 __FASTCALL__ struct1(test_structure_1 ts)
+{
+ ts.uc++;
+ ts.d--;
+ ts.ui++;
+
+ return ts;
+}
+
+int main (void)
+{
+ ffi_cif cif;
+ ffi_type *args[MAX_ARGS];
+ void *values[MAX_ARGS];
+ ffi_type ts1_type;
+ ffi_type *ts1_type_elements[4];
+
+ test_structure_1 ts1_arg;
+
+ /* This is a hack to get a properly aligned result buffer */
+ test_structure_1 *ts1_result =
+ (test_structure_1 *) malloc (sizeof(test_structure_1));
+
+ ts1_type.size = 0;
+ ts1_type.alignment = 0;
+ ts1_type.type = FFI_TYPE_STRUCT;
+ ts1_type.elements = ts1_type_elements;
+ ts1_type_elements[0] = &ffi_type_uchar;
+ ts1_type_elements[1] = &ffi_type_double;
+ ts1_type_elements[2] = &ffi_type_uint;
+ ts1_type_elements[3] = NULL;
+
+ args[0] = &ts1_type;
+ values[0] = &ts1_arg;
+
+ /* Initialize the cif */
+ CHECK(ffi_prep_cif(&cif, FFI_FASTCALL, 1,
+ &ts1_type, args) == FFI_OK);
+
+ ts1_arg.uc = '\x01';
+ ts1_arg.d = 3.14159;
+ ts1_arg.ui = 555;
+
+ ffi_call(&cif, FFI_FN(struct1), ts1_result, values);
+
+ CHECK(ts1_result->ui == 556);
+ CHECK(ts1_result->d == 3.14159 - 1);
+
+ free (ts1_result);
+ exit(0);
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/struct2.c b/Modules/_ctypes/libffi/testsuite/libffi.call/struct2.c
index 14bc9fd..d85385e 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/struct2.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/struct2.c
@@ -29,6 +29,11 @@ int main (void)
test_structure_2 ts2_arg;
ffi_type ts2_type;
ffi_type *ts2_type_elements[3];
+
+ /* This is a hack to get a properly aligned result buffer */
+ test_structure_2 *ts2_result =
+ (test_structure_2 *) malloc (sizeof(test_structure_2));
+
ts2_type.size = 0;
ts2_type.alignment = 0;
ts2_type.type = FFI_TYPE_STRUCT;
@@ -37,11 +42,6 @@ int main (void)
ts2_type_elements[1] = &ffi_type_double;
ts2_type_elements[2] = NULL;
-
- /* This is a hack to get a properly aligned result buffer */
- test_structure_2 *ts2_result =
- (test_structure_2 *) malloc (sizeof(test_structure_2));
-
args[0] = &ts2_type;
values[0] = &ts2_arg;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/struct2_win32.c b/Modules/_ctypes/libffi/testsuite/libffi.call/struct2_win32.c
new file mode 100644
index 0000000..5d02285
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/struct2_win32.c
@@ -0,0 +1,67 @@
+/* Area: ffi_call
+ Purpose: Check structures in fastcall/stdcall function
+ Limitations: none.
+ PR: none.
+ Originator: From the original ffitest.c */
+
+/* { dg-do run { target i?86-*-cygwin* i?86-*-mingw* } } */
+#include "ffitest.h"
+
+typedef struct
+{
+ double d1;
+ double d2;
+} test_structure_2;
+
+static test_structure_2 __FASTCALL__ struct2(test_structure_2 ts)
+{
+ ts.d1--;
+ ts.d2--;
+
+ return ts;
+}
+
+int main (void)
+{
+ ffi_cif cif;
+ ffi_type *args[MAX_ARGS];
+ void *values[MAX_ARGS];
+ test_structure_2 ts2_arg;
+ ffi_type ts2_type;
+ ffi_type *ts2_type_elements[3];
+
+ /* This is a hack to get a properly aligned result buffer */
+ test_structure_2 *ts2_result =
+ (test_structure_2 *) malloc (sizeof(test_structure_2));
+
+ ts2_type.size = 0;
+ ts2_type.alignment = 0;
+ ts2_type.type = FFI_TYPE_STRUCT;
+ ts2_type.elements = ts2_type_elements;
+ ts2_type_elements[0] = &ffi_type_double;
+ ts2_type_elements[1] = &ffi_type_double;
+ ts2_type_elements[2] = NULL;
+
+ args[0] = &ts2_type;
+ values[0] = &ts2_arg;
+
+ /* Initialize the cif */
+ CHECK(ffi_prep_cif(&cif, FFI_FASTCALL, 1, &ts2_type, args) == FFI_OK);
+
+ ts2_arg.d1 = 5.55;
+ ts2_arg.d2 = 6.66;
+
+ printf ("%g\n", ts2_arg.d1);
+ printf ("%g\n", ts2_arg.d2);
+
+ ffi_call(&cif, FFI_FN(struct2), ts2_result, values);
+
+ printf ("%g\n", ts2_result->d1);
+ printf ("%g\n", ts2_result->d2);
+
+ CHECK(ts2_result->d1 == 5.55 - 1);
+ CHECK(ts2_result->d2 == 6.66 - 1);
+
+ free (ts2_result);
+ exit(0);
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/struct3.c b/Modules/_ctypes/libffi/testsuite/libffi.call/struct3.c
index e0bb09b..de883c2 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/struct3.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/struct3.c
@@ -27,6 +27,11 @@ int main (void)
int compare_value;
ffi_type ts3_type;
ffi_type *ts3_type_elements[2];
+
+ test_structure_3 ts3_arg;
+ test_structure_3 *ts3_result =
+ (test_structure_3 *) malloc (sizeof(test_structure_3));
+
ts3_type.size = 0;
ts3_type.alignment = 0;
ts3_type.type = FFI_TYPE_STRUCT;
@@ -34,10 +39,6 @@ int main (void)
ts3_type_elements[0] = &ffi_type_sint;
ts3_type_elements[1] = NULL;
- test_structure_3 ts3_arg;
- test_structure_3 *ts3_result =
- (test_structure_3 *) malloc (sizeof(test_structure_3));
-
args[0] = &ts3_type;
values[0] = &ts3_arg;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/struct4.c b/Modules/_ctypes/libffi/testsuite/libffi.call/struct4.c
index 0ad0a83..48e0349 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/struct4.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/struct4.c
@@ -28,21 +28,22 @@ int main (void)
void *values[MAX_ARGS];
ffi_type ts4_type;
ffi_type *ts4_type_elements[4];
+
+ test_structure_4 ts4_arg;
+
+ /* This is a hack to get a properly aligned result buffer */
+ test_structure_4 *ts4_result =
+ (test_structure_4 *) malloc (sizeof(test_structure_4));
+
ts4_type.size = 0;
ts4_type.alignment = 0;
ts4_type.type = FFI_TYPE_STRUCT;
- test_structure_4 ts4_arg;
ts4_type.elements = ts4_type_elements;
ts4_type_elements[0] = &ffi_type_uint;
ts4_type_elements[1] = &ffi_type_uint;
ts4_type_elements[2] = &ffi_type_uint;
ts4_type_elements[3] = NULL;
-
- /* This is a hack to get a properly aligned result buffer */
- test_structure_4 *ts4_result =
- (test_structure_4 *) malloc (sizeof(test_structure_4));
-
args[0] = &ts4_type;
values[0] = &ts4_arg;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/struct5.c b/Modules/_ctypes/libffi/testsuite/libffi.call/struct5.c
index c03cc97..28b1f0c 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/struct5.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/struct5.c
@@ -27,6 +27,13 @@ int main (void)
void *values[MAX_ARGS];
ffi_type ts5_type;
ffi_type *ts5_type_elements[3];
+
+ test_structure_5 ts5_arg1, ts5_arg2;
+
+ /* This is a hack to get a properly aligned result buffer */
+ test_structure_5 *ts5_result =
+ (test_structure_5 *) malloc (sizeof(test_structure_5));
+
ts5_type.size = 0;
ts5_type.alignment = 0;
ts5_type.type = FFI_TYPE_STRUCT;
@@ -35,12 +42,6 @@ int main (void)
ts5_type_elements[1] = &ffi_type_schar;
ts5_type_elements[2] = NULL;
- test_structure_5 ts5_arg1, ts5_arg2;
-
- /* This is a hack to get a properly aligned result buffer */
- test_structure_5 *ts5_result =
- (test_structure_5 *) malloc (sizeof(test_structure_5));
-
args[0] = &ts5_type;
args[1] = &ts5_type;
values[0] = &ts5_arg1;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/struct6.c b/Modules/_ctypes/libffi/testsuite/libffi.call/struct6.c
index 83db9af..0e26746 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/struct6.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/struct6.c
@@ -27,6 +27,13 @@ int main (void)
void *values[MAX_ARGS];
ffi_type ts6_type;
ffi_type *ts6_type_elements[3];
+
+ test_structure_6 ts6_arg;
+
+ /* This is a hack to get a properly aligned result buffer */
+ test_structure_6 *ts6_result =
+ (test_structure_6 *) malloc (sizeof(test_structure_6));
+
ts6_type.size = 0;
ts6_type.alignment = 0;
ts6_type.type = FFI_TYPE_STRUCT;
@@ -35,13 +42,6 @@ int main (void)
ts6_type_elements[1] = &ffi_type_double;
ts6_type_elements[2] = NULL;
-
- test_structure_6 ts6_arg;
-
- /* This is a hack to get a properly aligned result buffer */
- test_structure_6 *ts6_result =
- (test_structure_6 *) malloc (sizeof(test_structure_6));
-
args[0] = &ts6_type;
values[0] = &ts6_arg;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/struct7.c b/Modules/_ctypes/libffi/testsuite/libffi.call/struct7.c
index 58aac4c..8f2bbfd 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/struct7.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/struct7.c
@@ -29,6 +29,13 @@ int main (void)
void *values[MAX_ARGS];
ffi_type ts7_type;
ffi_type *ts7_type_elements[4];
+
+ test_structure_7 ts7_arg;
+
+ /* This is a hack to get a properly aligned result buffer */
+ test_structure_7 *ts7_result =
+ (test_structure_7 *) malloc (sizeof(test_structure_7));
+
ts7_type.size = 0;
ts7_type.alignment = 0;
ts7_type.type = FFI_TYPE_STRUCT;
@@ -38,13 +45,6 @@ int main (void)
ts7_type_elements[2] = &ffi_type_double;
ts7_type_elements[3] = NULL;
-
- test_structure_7 ts7_arg;
-
- /* This is a hack to get a properly aligned result buffer */
- test_structure_7 *ts7_result =
- (test_structure_7 *) malloc (sizeof(test_structure_7));
-
args[0] = &ts7_type;
values[0] = &ts7_arg;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/struct8.c b/Modules/_ctypes/libffi/testsuite/libffi.call/struct8.c
index c773ac7..266e1f0 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/struct8.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/struct8.c
@@ -31,6 +31,13 @@ int main (void)
void *values[MAX_ARGS];
ffi_type ts8_type;
ffi_type *ts8_type_elements[5];
+
+ test_structure_8 ts8_arg;
+
+ /* This is a hack to get a properly aligned result buffer */
+ test_structure_8 *ts8_result =
+ (test_structure_8 *) malloc (sizeof(test_structure_8));
+
ts8_type.size = 0;
ts8_type.alignment = 0;
ts8_type.type = FFI_TYPE_STRUCT;
@@ -41,12 +48,6 @@ int main (void)
ts8_type_elements[3] = &ffi_type_float;
ts8_type_elements[4] = NULL;
- test_structure_8 ts8_arg;
-
- /* This is a hack to get a properly aligned result buffer */
- test_structure_8 *ts8_result =
- (test_structure_8 *) malloc (sizeof(test_structure_8));
-
args[0] = &ts8_type;
values[0] = &ts8_arg;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/struct9.c b/Modules/_ctypes/libffi/testsuite/libffi.call/struct9.c
index f30091f..efeb716 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/struct9.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/struct9.c
@@ -28,6 +28,13 @@ int main (void)
void *values[MAX_ARGS];
ffi_type ts9_type;
ffi_type *ts9_type_elements[3];
+
+ test_structure_9 ts9_arg;
+
+ /* This is a hack to get a properly aligned result buffer */
+ test_structure_9 *ts9_result =
+ (test_structure_9 *) malloc (sizeof(test_structure_9));
+
ts9_type.size = 0;
ts9_type.alignment = 0;
ts9_type.type = FFI_TYPE_STRUCT;
@@ -36,12 +43,6 @@ int main (void)
ts9_type_elements[1] = &ffi_type_sint;
ts9_type_elements[2] = NULL;
- test_structure_9 ts9_arg;
-
- /* This is a hack to get a properly aligned result buffer */
- test_structure_9 *ts9_result =
- (test_structure_9 *) malloc (sizeof(test_structure_9));
-
args[0] = &ts9_type;
values[0] = &ts9_arg;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/testclosure.c b/Modules/_ctypes/libffi/testsuite/libffi.call/testclosure.c
index 161cc89..ca31056 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.call/testclosure.c
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/testclosure.c
@@ -43,13 +43,13 @@ int main (void)
ffi_type cls_struct_type0;
ffi_type* dbl_arg_types[5];
+ struct cls_struct_combined g_dbl = {4.0, 5.0, 1.0, 8.0};
+
cls_struct_type0.size = 0;
cls_struct_type0.alignment = 0;
cls_struct_type0.type = FFI_TYPE_STRUCT;
cls_struct_type0.elements = cls_struct_fields0;
- struct cls_struct_combined g_dbl = {4.0, 5.0, 1.0, 8.0};
-
cls_struct_fields0[0] = &ffi_type_float;
cls_struct_fields0[1] = &ffi_type_float;
cls_struct_fields0[2] = &ffi_type_float;
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/uninitialized.c b/Modules/_ctypes/libffi/testsuite/libffi.call/uninitialized.c
new file mode 100644
index 0000000..f00d830
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/uninitialized.c
@@ -0,0 +1,61 @@
+/* { dg-do run } */
+#include "ffitest.h"
+
+typedef struct
+{
+ unsigned char uc;
+ double d;
+ unsigned int ui;
+} test_structure_1;
+
+static test_structure_1 struct1(test_structure_1 ts)
+{
+ ts.uc++;
+ ts.d--;
+ ts.ui++;
+
+ return ts;
+}
+
+int main (void)
+{
+ ffi_cif cif;
+ ffi_type *args[MAX_ARGS];
+ void *values[MAX_ARGS];
+ ffi_type ts1_type;
+ ffi_type *ts1_type_elements[4];
+
+ memset(&cif, 1, sizeof(cif));
+ ts1_type.size = 0;
+ ts1_type.alignment = 0;
+ ts1_type.type = FFI_TYPE_STRUCT;
+ ts1_type.elements = ts1_type_elements;
+ ts1_type_elements[0] = &ffi_type_uchar;
+ ts1_type_elements[1] = &ffi_type_double;
+ ts1_type_elements[2] = &ffi_type_uint;
+ ts1_type_elements[3] = NULL;
+
+ test_structure_1 ts1_arg;
+ /* This is a hack to get a properly aligned result buffer */
+ test_structure_1 *ts1_result =
+ (test_structure_1 *) malloc (sizeof(test_structure_1));
+
+ args[0] = &ts1_type;
+ values[0] = &ts1_arg;
+
+ /* Initialize the cif */
+ CHECK(ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 1,
+ &ts1_type, args) == FFI_OK);
+
+ ts1_arg.uc = '\x01';
+ ts1_arg.d = 3.14159;
+ ts1_arg.ui = 555;
+
+ ffi_call(&cif, FFI_FN(struct1), ts1_result, values);
+
+ CHECK(ts1_result->ui == 556);
+ CHECK(ts1_result->d == 3.14159 - 1);
+
+ free (ts1_result);
+ exit(0);
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/va_1.c b/Modules/_ctypes/libffi/testsuite/libffi.call/va_1.c
new file mode 100644
index 0000000..cf4dd85
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/va_1.c
@@ -0,0 +1,196 @@
+/* Area: ffi_call
+ Purpose: Test passing struct in variable argument lists.
+ Limitations: none.
+ PR: none.
+ Originator: ARM Ltd. */
+
+/* { dg-do run } */
+/* { dg-output "" { xfail avr32*-*-* } } */
+
+#include "ffitest.h"
+#include <stdarg.h>
+
+struct small_tag
+{
+ unsigned char a;
+ unsigned char b;
+};
+
+struct large_tag
+{
+ unsigned a;
+ unsigned b;
+ unsigned c;
+ unsigned d;
+ unsigned e;
+};
+
+static int
+test_fn (int n, ...)
+{
+ va_list ap;
+ struct small_tag s1;
+ struct small_tag s2;
+ struct large_tag l;
+ unsigned char uc;
+ signed char sc;
+ unsigned short us;
+ signed short ss;
+ unsigned int ui;
+ signed int si;
+ unsigned long ul;
+ signed long sl;
+ float f;
+ double d;
+
+ va_start (ap, n);
+ s1 = va_arg (ap, struct small_tag);
+ l = va_arg (ap, struct large_tag);
+ s2 = va_arg (ap, struct small_tag);
+
+ uc = va_arg (ap, unsigned);
+ sc = va_arg (ap, signed);
+
+ us = va_arg (ap, unsigned);
+ ss = va_arg (ap, signed);
+
+ ui = va_arg (ap, unsigned int);
+ si = va_arg (ap, signed int);
+
+ ul = va_arg (ap, unsigned long);
+ sl = va_arg (ap, signed long);
+
+ f = va_arg (ap, double); /* C standard promotes float->double
+ when anonymous */
+ d = va_arg (ap, double);
+
+ printf ("%u %u %u %u %u %u %u %u %u uc=%u sc=%d %u %d %u %d %lu %ld %f %f\n",
+ s1.a, s1.b, l.a, l.b, l.c, l.d, l.e,
+ s2.a, s2.b,
+ uc, sc,
+ us, ss,
+ ui, si,
+ ul, sl,
+ f, d);
+ va_end (ap);
+ return n + 1;
+}
+
+int
+main (void)
+{
+ ffi_cif cif;
+ void* args[15];
+ ffi_type* arg_types[15];
+
+ ffi_type s_type;
+ ffi_type *s_type_elements[3];
+
+ ffi_type l_type;
+ ffi_type *l_type_elements[6];
+
+ struct small_tag s1;
+ struct small_tag s2;
+ struct large_tag l1;
+
+ int n;
+ int res;
+
+ unsigned char uc;
+ signed char sc;
+ unsigned short us;
+ signed short ss;
+ unsigned int ui;
+ signed int si;
+ unsigned long ul;
+ signed long sl;
+ double d1;
+ double f1;
+
+ s_type.size = 0;
+ s_type.alignment = 0;
+ s_type.type = FFI_TYPE_STRUCT;
+ s_type.elements = s_type_elements;
+
+ s_type_elements[0] = &ffi_type_uchar;
+ s_type_elements[1] = &ffi_type_uchar;
+ s_type_elements[2] = NULL;
+
+ l_type.size = 0;
+ l_type.alignment = 0;
+ l_type.type = FFI_TYPE_STRUCT;
+ l_type.elements = l_type_elements;
+
+ l_type_elements[0] = &ffi_type_uint;
+ l_type_elements[1] = &ffi_type_uint;
+ l_type_elements[2] = &ffi_type_uint;
+ l_type_elements[3] = &ffi_type_uint;
+ l_type_elements[4] = &ffi_type_uint;
+ l_type_elements[5] = NULL;
+
+ arg_types[0] = &ffi_type_sint;
+ arg_types[1] = &s_type;
+ arg_types[2] = &l_type;
+ arg_types[3] = &s_type;
+ arg_types[4] = &ffi_type_uchar;
+ arg_types[5] = &ffi_type_schar;
+ arg_types[6] = &ffi_type_ushort;
+ arg_types[7] = &ffi_type_sshort;
+ arg_types[8] = &ffi_type_uint;
+ arg_types[9] = &ffi_type_sint;
+ arg_types[10] = &ffi_type_ulong;
+ arg_types[11] = &ffi_type_slong;
+ arg_types[12] = &ffi_type_double;
+ arg_types[13] = &ffi_type_double;
+ arg_types[14] = NULL;
+
+ CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 14, &ffi_type_sint, arg_types) == FFI_OK);
+
+ s1.a = 5;
+ s1.b = 6;
+
+ l1.a = 10;
+ l1.b = 11;
+ l1.c = 12;
+ l1.d = 13;
+ l1.e = 14;
+
+ s2.a = 7;
+ s2.b = 8;
+
+ n = 41;
+
+ uc = 9;
+ sc = 10;
+ us = 11;
+ ss = 12;
+ ui = 13;
+ si = 14;
+ ul = 15;
+ sl = 16;
+ f1 = 2.12;
+ d1 = 3.13;
+
+ args[0] = &n;
+ args[1] = &s1;
+ args[2] = &l1;
+ args[3] = &s2;
+ args[4] = &uc;
+ args[5] = &sc;
+ args[6] = &us;
+ args[7] = &ss;
+ args[8] = &ui;
+ args[9] = &si;
+ args[10] = &ul;
+ args[11] = &sl;
+ args[12] = &f1;
+ args[13] = &d1;
+ args[14] = NULL;
+
+ ffi_call(&cif, FFI_FN(test_fn), &res, args);
+ /* { dg-output "5 6 10 11 12 13 14 7 8 uc=9 sc=10 11 12 13 14 15 16 2.120000 3.130000" } */
+ printf("res: %d\n", (int) res);
+ /* { dg-output "\nres: 42" } */
+
+ return 0;
+}
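
va_1.c is built around ffi_prep_cif_var, which differs from ffi_prep_cif in taking two counts: the number of fixed (named) arguments followed by the total number of arguments, so the backend can apply the platform's variadic conventions to the trailing ones (for instance, the float above is passed as a promoted double). A smaller self-contained sketch of the same call shape, using hypothetical names not taken from the testsuite:

#include <ffi.h>
#include <stdarg.h>
#include <stdio.h>

/* Variadic callee: one fixed argument (n), then two unsigned ints. */
static unsigned add_two (int n, ...)
{
  va_list ap;
  unsigned a, b;

  va_start (ap, n);
  a = va_arg (ap, unsigned);
  b = va_arg (ap, unsigned);
  va_end (ap);

  return (unsigned) n + a + b;
}

int main (void)
{
  ffi_cif cif;
  ffi_type *arg_types[3];
  void *args[3];
  int n = 1;
  unsigned a = 2, b = 3;
  ffi_arg result;

  arg_types[0] = &ffi_type_sint;
  arg_types[1] = &ffi_type_uint;
  arg_types[2] = &ffi_type_uint;

  args[0] = &n;
  args[1] = &a;
  args[2] = &b;

  /* 1 fixed argument, 3 arguments in total. */
  if (ffi_prep_cif_var (&cif, FFI_DEFAULT_ABI, 1, 3,
                        &ffi_type_uint, arg_types) != FFI_OK)
    return 1;

  ffi_call (&cif, FFI_FN (add_two), &result, args);
  printf ("%lu\n", (unsigned long) result);   /* prints 6 */

  return 0;
}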
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/va_struct1.c b/Modules/_ctypes/libffi/testsuite/libffi.call/va_struct1.c
new file mode 100644
index 0000000..11d1f10
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/va_struct1.c
@@ -0,0 +1,121 @@
+/* Area: ffi_call
+ Purpose: Test passing struct in variable argument lists.
+ Limitations: none.
+ PR: none.
+ Originator: ARM Ltd. */
+
+/* { dg-do run } */
+/* { dg-output "" { xfail avr32*-*-* } } */
+
+#include "ffitest.h"
+#include <stdarg.h>
+
+struct small_tag
+{
+ unsigned char a;
+ unsigned char b;
+};
+
+struct large_tag
+{
+ unsigned a;
+ unsigned b;
+ unsigned c;
+ unsigned d;
+ unsigned e;
+};
+
+static int
+test_fn (int n, ...)
+{
+ va_list ap;
+ struct small_tag s1;
+ struct small_tag s2;
+ struct large_tag l;
+
+ va_start (ap, n);
+ s1 = va_arg (ap, struct small_tag);
+ l = va_arg (ap, struct large_tag);
+ s2 = va_arg (ap, struct small_tag);
+ printf ("%u %u %u %u %u %u %u %u %u\n", s1.a, s1.b, l.a, l.b, l.c, l.d, l.e,
+ s2.a, s2.b);
+ va_end (ap);
+ return n + 1;
+}
+
+int
+main (void)
+{
+ ffi_cif cif;
+ void* args[5];
+ ffi_type* arg_types[5];
+
+ ffi_type s_type;
+ ffi_type *s_type_elements[3];
+
+ ffi_type l_type;
+ ffi_type *l_type_elements[6];
+
+ struct small_tag s1;
+ struct small_tag s2;
+ struct large_tag l1;
+
+ int n;
+ int res;
+
+ s_type.size = 0;
+ s_type.alignment = 0;
+ s_type.type = FFI_TYPE_STRUCT;
+ s_type.elements = s_type_elements;
+
+ s_type_elements[0] = &ffi_type_uchar;
+ s_type_elements[1] = &ffi_type_uchar;
+ s_type_elements[2] = NULL;
+
+ l_type.size = 0;
+ l_type.alignment = 0;
+ l_type.type = FFI_TYPE_STRUCT;
+ l_type.elements = l_type_elements;
+
+ l_type_elements[0] = &ffi_type_uint;
+ l_type_elements[1] = &ffi_type_uint;
+ l_type_elements[2] = &ffi_type_uint;
+ l_type_elements[3] = &ffi_type_uint;
+ l_type_elements[4] = &ffi_type_uint;
+ l_type_elements[5] = NULL;
+
+ arg_types[0] = &ffi_type_sint;
+ arg_types[1] = &s_type;
+ arg_types[2] = &l_type;
+ arg_types[3] = &s_type;
+ arg_types[4] = NULL;
+
+ CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &ffi_type_sint, arg_types) == FFI_OK);
+
+ s1.a = 5;
+ s1.b = 6;
+
+ l1.a = 10;
+ l1.b = 11;
+ l1.c = 12;
+ l1.d = 13;
+ l1.e = 14;
+
+ s2.a = 7;
+ s2.b = 8;
+
+ n = 41;
+
+ args[0] = &n;
+ args[1] = &s1;
+ args[2] = &l1;
+ args[3] = &s2;
+ args[4] = NULL;
+
+ ffi_call(&cif, FFI_FN(test_fn), &res, args);
+ /* { dg-output "5 6 10 11 12 13 14 7 8" } */
+ printf("res: %d\n", (int) res);
+ /* { dg-output "\nres: 42" } */
+
+ return 0;
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/va_struct2.c b/Modules/_ctypes/libffi/testsuite/libffi.call/va_struct2.c
new file mode 100644
index 0000000..56f5b9c
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/va_struct2.c
@@ -0,0 +1,123 @@
+/* Area: ffi_call
+ Purpose: Test passing struct in variable argument lists.
+ Limitations: none.
+ PR: none.
+ Originator: ARM Ltd. */
+
+/* { dg-do run } */
+/* { dg-output "" { xfail avr32*-*-* } } */
+
+#include "ffitest.h"
+#include <stdarg.h>
+
+struct small_tag
+{
+ unsigned char a;
+ unsigned char b;
+};
+
+struct large_tag
+{
+ unsigned a;
+ unsigned b;
+ unsigned c;
+ unsigned d;
+ unsigned e;
+};
+
+static struct small_tag
+test_fn (int n, ...)
+{
+ va_list ap;
+ struct small_tag s1;
+ struct small_tag s2;
+ struct large_tag l;
+
+ va_start (ap, n);
+ s1 = va_arg (ap, struct small_tag);
+ l = va_arg (ap, struct large_tag);
+ s2 = va_arg (ap, struct small_tag);
+ printf ("%u %u %u %u %u %u %u %u %u\n", s1.a, s1.b, l.a, l.b, l.c, l.d, l.e,
+ s2.a, s2.b);
+ va_end (ap);
+ s1.a += s2.a;
+ s1.b += s2.b;
+ return s1;
+}
+
+int
+main (void)
+{
+ ffi_cif cif;
+ void* args[5];
+ ffi_type* arg_types[5];
+
+ ffi_type s_type;
+ ffi_type *s_type_elements[3];
+
+ ffi_type l_type;
+ ffi_type *l_type_elements[6];
+
+ struct small_tag s1;
+ struct small_tag s2;
+ struct large_tag l1;
+
+ int n;
+ struct small_tag res;
+
+ s_type.size = 0;
+ s_type.alignment = 0;
+ s_type.type = FFI_TYPE_STRUCT;
+ s_type.elements = s_type_elements;
+
+ s_type_elements[0] = &ffi_type_uchar;
+ s_type_elements[1] = &ffi_type_uchar;
+ s_type_elements[2] = NULL;
+
+ l_type.size = 0;
+ l_type.alignment = 0;
+ l_type.type = FFI_TYPE_STRUCT;
+ l_type.elements = l_type_elements;
+
+ l_type_elements[0] = &ffi_type_uint;
+ l_type_elements[1] = &ffi_type_uint;
+ l_type_elements[2] = &ffi_type_uint;
+ l_type_elements[3] = &ffi_type_uint;
+ l_type_elements[4] = &ffi_type_uint;
+ l_type_elements[5] = NULL;
+
+ arg_types[0] = &ffi_type_sint;
+ arg_types[1] = &s_type;
+ arg_types[2] = &l_type;
+ arg_types[3] = &s_type;
+ arg_types[4] = NULL;
+
+ CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &s_type, arg_types) == FFI_OK);
+
+ s1.a = 5;
+ s1.b = 6;
+
+ l1.a = 10;
+ l1.b = 11;
+ l1.c = 12;
+ l1.d = 13;
+ l1.e = 14;
+
+ s2.a = 7;
+ s2.b = 8;
+
+ n = 41;
+
+ args[0] = &n;
+ args[1] = &s1;
+ args[2] = &l1;
+ args[3] = &s2;
+ args[4] = NULL;
+
+ ffi_call(&cif, FFI_FN(test_fn), &res, args);
+ /* { dg-output "5 6 10 11 12 13 14 7 8" } */
+ printf("res: %d %d\n", res.a, res.b);
+ /* { dg-output "\nres: 12 14" } */
+
+ return 0;
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.call/va_struct3.c b/Modules/_ctypes/libffi/testsuite/libffi.call/va_struct3.c
new file mode 100644
index 0000000..9a27e7f
--- /dev/null
+++ b/Modules/_ctypes/libffi/testsuite/libffi.call/va_struct3.c
@@ -0,0 +1,125 @@
+/* Area: ffi_call
+ Purpose: Test passing struct in variable argument lists.
+ Limitations: none.
+ PR: none.
+ Originator: ARM Ltd. */
+
+/* { dg-do run } */
+/* { dg-output "" { xfail avr32*-*-* } } */
+
+#include "ffitest.h"
+#include <stdarg.h>
+
+struct small_tag
+{
+ unsigned char a;
+ unsigned char b;
+};
+
+struct large_tag
+{
+ unsigned a;
+ unsigned b;
+ unsigned c;
+ unsigned d;
+ unsigned e;
+};
+
+static struct large_tag
+test_fn (int n, ...)
+{
+ va_list ap;
+ struct small_tag s1;
+ struct small_tag s2;
+ struct large_tag l;
+
+ va_start (ap, n);
+ s1 = va_arg (ap, struct small_tag);
+ l = va_arg (ap, struct large_tag);
+ s2 = va_arg (ap, struct small_tag);
+ printf ("%u %u %u %u %u %u %u %u %u\n", s1.a, s1.b, l.a, l.b, l.c, l.d, l.e,
+ s2.a, s2.b);
+ va_end (ap);
+ l.a += s1.a;
+ l.b += s1.b;
+ l.c += s2.a;
+ l.d += s2.b;
+ return l;
+}
+
+int
+main (void)
+{
+ ffi_cif cif;
+ void* args[5];
+ ffi_type* arg_types[5];
+
+ ffi_type s_type;
+ ffi_type *s_type_elements[3];
+
+ ffi_type l_type;
+ ffi_type *l_type_elements[6];
+
+ struct small_tag s1;
+ struct small_tag s2;
+ struct large_tag l1;
+
+ int n;
+ struct large_tag res;
+
+ s_type.size = 0;
+ s_type.alignment = 0;
+ s_type.type = FFI_TYPE_STRUCT;
+ s_type.elements = s_type_elements;
+
+ s_type_elements[0] = &ffi_type_uchar;
+ s_type_elements[1] = &ffi_type_uchar;
+ s_type_elements[2] = NULL;
+
+ l_type.size = 0;
+ l_type.alignment = 0;
+ l_type.type = FFI_TYPE_STRUCT;
+ l_type.elements = l_type_elements;
+
+ l_type_elements[0] = &ffi_type_uint;
+ l_type_elements[1] = &ffi_type_uint;
+ l_type_elements[2] = &ffi_type_uint;
+ l_type_elements[3] = &ffi_type_uint;
+ l_type_elements[4] = &ffi_type_uint;
+ l_type_elements[5] = NULL;
+
+ arg_types[0] = &ffi_type_sint;
+ arg_types[1] = &s_type;
+ arg_types[2] = &l_type;
+ arg_types[3] = &s_type;
+ arg_types[4] = NULL;
+
+ CHECK(ffi_prep_cif_var(&cif, FFI_DEFAULT_ABI, 1, 4, &l_type, arg_types) == FFI_OK);
+
+ s1.a = 5;
+ s1.b = 6;
+
+ l1.a = 10;
+ l1.b = 11;
+ l1.c = 12;
+ l1.d = 13;
+ l1.e = 14;
+
+ s2.a = 7;
+ s2.b = 8;
+
+ n = 41;
+
+ args[0] = &n;
+ args[1] = &s1;
+ args[2] = &l1;
+ args[3] = &s2;
+ args[4] = NULL;
+
+ ffi_call(&cif, FFI_FN(test_fn), &res, args);
+ /* { dg-output "5 6 10 11 12 13 14 7 8" } */
+ printf("res: %d %d %d %d %d\n", res.a, res.b, res.c, res.d, res.e);
+ /* { dg-output "\nres: 15 17 19 21 14" } */
+
+ return 0;
+}
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.special/ffitestcxx.h b/Modules/_ctypes/libffi/testsuite/libffi.special/ffitestcxx.h
index 83f5442..c6da7ef 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.special/ffitestcxx.h
+++ b/Modules/_ctypes/libffi/testsuite/libffi.special/ffitestcxx.h
@@ -53,44 +53,3 @@
#define PRIuLL "llu"
#endif
-#ifdef USING_MMAP
-static inline void *
-allocate_mmap (size_t size)
-{
- void *page;
-#if defined (HAVE_MMAP_DEV_ZERO)
- static int dev_zero_fd = -1;
-#endif
-
-#ifdef HAVE_MMAP_DEV_ZERO
- if (dev_zero_fd == -1)
- {
- dev_zero_fd = open ("/dev/zero", O_RDONLY);
- if (dev_zero_fd == -1)
- {
- perror ("open /dev/zero: %m");
- exit (1);
- }
- }
-#endif
-
-
-#ifdef HAVE_MMAP_ANON
- page = mmap (NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-#endif
-#ifdef HAVE_MMAP_DEV_ZERO
- page = mmap (NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_PRIVATE, dev_zero_fd, 0);
-#endif
-
- if (page == (char *) MAP_FAILED)
- {
- perror ("virtual memory exhausted");
- exit (1);
- }
-
- return page;
-}
-
-#endif
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.special/special.exp b/Modules/_ctypes/libffi/testsuite/libffi.special/special.exp
index e167e86..f1a5fa6 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.special/special.exp
+++ b/Modules/_ctypes/libffi/testsuite/libffi.special/special.exp
@@ -1,4 +1,4 @@
-# Copyright (C) 2003, 2006, 2009 Free Software Foundation, Inc.
+# Copyright (C) 2003, 2006, 2009, 2010 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -14,8 +14,6 @@
# along with this program; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>.
-load_lib libffi-dg.exp
-
dg-init
libffi-init
@@ -25,10 +23,14 @@ global cxx_options
set cxx_options " -shared-libgcc -lstdc++"
-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.cc]] $cxx_options "-O0 -W -Wall"
-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.cc]] $cxx_options "-O2"
-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.cc]] $cxx_options "-O3"
-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.cc]] $cxx_options "-Os"
+if { [string match $using_gcc "yes"] } {
+
+ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.cc]] $cxx_options "-O0 -W -Wall"
+ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.cc]] $cxx_options "-O2"
+ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.cc]] $cxx_options "-O3"
+ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.cc]] $cxx_options "-Os"
+
+}
dg-finish
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.special/unwindtest.cc b/Modules/_ctypes/libffi/testsuite/libffi.special/unwindtest.cc
index d7ffd4a..a78f4e7 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.special/unwindtest.cc
+++ b/Modules/_ctypes/libffi/testsuite/libffi.special/unwindtest.cc
@@ -5,6 +5,7 @@
Originator: Jeff Sturm <jsturm@one-point.com> */
/* { dg-do run } */
+
#include "ffitestcxx.h"
#if defined HAVE_STDINT_H
diff --git a/Modules/_ctypes/libffi/testsuite/libffi.special/unwindtest_ffi_call.cc b/Modules/_ctypes/libffi/testsuite/libffi.special/unwindtest_ffi_call.cc
index 29739cd..57191f2 100644
--- a/Modules/_ctypes/libffi/testsuite/libffi.special/unwindtest_ffi_call.cc
+++ b/Modules/_ctypes/libffi/testsuite/libffi.special/unwindtest_ffi_call.cc
@@ -5,6 +5,7 @@
Originator: Andreas Tobler <andreast@gcc.gnu.org> 20061213 */
/* { dg-do run } */
+
#include "ffitestcxx.h"
static int checking(int a __UNUSED__, short b __UNUSED__,
diff --git a/Modules/_ctypes/libffi/texinfo.tex b/Modules/_ctypes/libffi/texinfo.tex
index ff2c406..a5a7b2b 100644
--- a/Modules/_ctypes/libffi/texinfo.tex
+++ b/Modules/_ctypes/libffi/texinfo.tex
@@ -1,18 +1,18 @@
% texinfo.tex -- TeX macros to handle Texinfo files.
-%
+%
% Load plain if necessary, i.e., if running under initex.
\expandafter\ifx\csname fmtname\endcsname\relax\input plain\fi
%
-\def\texinfoversion{2005-07-05.19}
+\def\texinfoversion{2012-06-05.14}
%
-% Copyright (C) 1985, 1986, 1988, 1990, 1991, 1992, 1993, 1994, 1995,
-% 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software
-% Foundation, Inc.
+% Copyright 1985, 1986, 1988, 1990, 1991, 1992, 1993, 1994, 1995,
+% 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
+% 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
%
-% This texinfo.tex file is free software; you can redistribute it and/or
+% This texinfo.tex file is free software: you can redistribute it and/or
% modify it under the terms of the GNU General Public License as
-% published by the Free Software Foundation; either version 2, or (at
-% your option) any later version.
+% published by the Free Software Foundation, either version 3 of the
+% License, or (at your option) any later version.
%
% This texinfo.tex file is distributed in the hope that it will be
% useful, but WITHOUT ANY WARRANTY; without even the implied warranty
@@ -20,9 +20,7 @@
% General Public License for more details.
%
% You should have received a copy of the GNU General Public License
-% along with this texinfo.tex file; see the file COPYING. If not, write
-% to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
-% Boston, MA 02110-1301, USA.
+% along with this program. If not, see <http://www.gnu.org/licenses/>.
%
% As a special exception, when this file is read by TeX when processing
% a Texinfo source document, you may use the result without
@@ -30,9 +28,9 @@
%
% Please try the latest version of texinfo.tex before submitting bug
% reports; you can get the latest version from:
-% http://www.gnu.org/software/texinfo/ (the Texinfo home page), or
-% ftp://tug.org/tex/texinfo.tex
-% (and all CTAN mirrors, see http://www.ctan.org).
+% http://ftp.gnu.org/gnu/texinfo/ (the Texinfo release area), or
+% http://ftpmirror.gnu.org/texinfo/ (same, via a mirror), or
+% http://www.gnu.org/software/texinfo/ (the Texinfo home page)
% The texinfo.tex in any given distribution could well be out
% of date, so if that's what you're using, please check.
%
@@ -67,7 +65,6 @@
\everyjob{\message{[Texinfo version \texinfoversion]}%
\catcode`+=\active \catcode`\_=\active}
-\message{Basics,}
\chardef\other=12
% We never want plain's \outer definition of \+ in Texinfo.
@@ -95,10 +92,13 @@
\let\ptexnewwrite\newwrite
\let\ptexnoindent=\noindent
\let\ptexplus=+
+\let\ptexraggedright=\raggedright
\let\ptexrbrace=\}
\let\ptexslash=\/
\let\ptexstar=\*
\let\ptext=\t
+\let\ptextop=\top
+{\catcode`\'=\active \global\let\ptexquoteright'}% active in plain's math mode
% If this character appears in an error message or help string, it
% starts a new line in the output.
@@ -116,10 +116,11 @@
% Set up fixed words for English if not already set.
\ifx\putwordAppendix\undefined \gdef\putwordAppendix{Appendix}\fi
\ifx\putwordChapter\undefined \gdef\putwordChapter{Chapter}\fi
+\ifx\putworderror\undefined \gdef\putworderror{error}\fi
\ifx\putwordfile\undefined \gdef\putwordfile{file}\fi
\ifx\putwordin\undefined \gdef\putwordin{in}\fi
-\ifx\putwordIndexIsEmpty\undefined \gdef\putwordIndexIsEmpty{(Index is empty)}\fi
-\ifx\putwordIndexNonexistent\undefined \gdef\putwordIndexNonexistent{(Index is nonexistent)}\fi
+\ifx\putwordIndexIsEmpty\undefined \gdef\putwordIndexIsEmpty{(Index is empty)}\fi
+\ifx\putwordIndexNonexistent\undefined \gdef\putwordIndexNonexistent{(Index is nonexistent)}\fi
\ifx\putwordInfo\undefined \gdef\putwordInfo{Info}\fi
\ifx\putwordInstanceVariableof\undefined \gdef\putwordInstanceVariableof{Instance Variable of}\fi
\ifx\putwordMethodon\undefined \gdef\putwordMethodon{Method on}\fi
@@ -153,28 +154,25 @@
\ifx\putwordDefopt\undefined \gdef\putwordDefopt{User Option}\fi
\ifx\putwordDeffunc\undefined \gdef\putwordDeffunc{Function}\fi
-% In some macros, we cannot use the `\? notation---the left quote is
-% in some cases the escape char.
-\chardef\backChar = `\\
+% Since the category of space is not known, we have to be careful.
+\chardef\spacecat = 10
+\def\spaceisspace{\catcode`\ =\spacecat}
+
+% sometimes characters are active, so we need control sequences.
+\chardef\ampChar = `\&
\chardef\colonChar = `\:
\chardef\commaChar = `\,
+\chardef\dashChar = `\-
\chardef\dotChar = `\.
\chardef\exclamChar= `\!
-\chardef\plusChar = `\+
+\chardef\hashChar = `\#
+\chardef\lquoteChar= `\`
\chardef\questChar = `\?
+\chardef\rquoteChar= `\'
\chardef\semiChar = `\;
+\chardef\slashChar = `\/
\chardef\underChar = `\_
-\chardef\spaceChar = `\ %
-\chardef\spacecat = 10
-\def\spaceisspace{\catcode\spaceChar=\spacecat}
-
-{% for help with debugging.
- % example usage: \expandafter\show\activebackslash
- \catcode`\! = 0 \catcode`\\ = \active
- !global!def!activebackslash{\}
-}
-
% Ignore a token.
%
\def\gobble#1{}
@@ -203,36 +201,7 @@
% that mark overfull boxes (in case you have decided
% that the text looks ok even though it passes the margin).
%
-\def\finalout{\overfullrule=0pt}
-
-% @| inserts a changebar to the left of the current line. It should
-% surround any changed text. This approach does *not* work if the
-% change spans more than two lines of output. To handle that, we would
-% have adopt a much more difficult approach (putting marks into the main
-% vertical list for the beginning and end of each change).
-%
-\def\|{%
- % \vadjust can only be used in horizontal mode.
- \leavevmode
- %
- % Append this vertical mode material after the current line in the output.
- \vadjust{%
- % We want to insert a rule with the height and depth of the current
- % leading; that is exactly what \strutbox is supposed to record.
- \vskip-\baselineskip
- %
- % \vadjust-items are inserted at the left edge of the type. So
- % the \llap here moves out into the left-hand margin.
- \llap{%
- %
- % For a thicker or thinner bar, change the `1pt'.
- \vrule height\baselineskip width1pt
- %
- % This is the space between the bar and the text.
- \hskip 12pt
- }%
- }%
-}
+\def\finalout{\overfullrule=0pt }
% Sometimes it is convenient to have everything in the transcript file
% and nothing on the terminal. We don't just call \tracingall here,
@@ -250,7 +219,7 @@
\tracingmacros2
\tracingrestores1
\showboxbreadth\maxdimen \showboxdepth\maxdimen
- \ifx\eTeXversion\undefined\else % etex gives us more logging
+ \ifx\eTeXversion\thisisundefined\else % etex gives us more logging
\tracingscantokens1
\tracingifs1
\tracinggroups1
@@ -261,6 +230,13 @@
\errorcontextlines16
}%
+% @errormsg{MSG}. Do the index-like expansions on MSG, but if things
+% aren't perfect, it's not the end of the world, being an error message,
+% after all.
+%
+\def\errormsg{\begingroup \indexnofonts \doerrormsg}
+\def\doerrormsg#1{\errmessage{#1}}
+
% add check for \lastpenalty to plain's definitions. If the last thing
% we did was a \nobreak, we don't want to insert more space.
%
@@ -271,7 +247,6 @@
\def\bigbreak{\ifnum\lastpenalty<10000\par\ifdim\lastskip<\bigskipamount
\removelastskip\penalty-200\bigskip\fi\fi}
-% For @cropmarks command.
% Do @cropmarks to get crop marks.
%
\newif\ifcropmarks
@@ -285,6 +260,50 @@
\newdimen\cornerthick \cornerthick=.3pt
\newdimen\topandbottommargin \topandbottommargin=.75in
+% Output a mark which sets \thischapter, \thissection and \thiscolor.
+% We dump everything together because we only have one kind of mark.
+% This works because we only use \botmark / \topmark, not \firstmark.
+%
+% A mark contains a subexpression of the \ifcase ... \fi construct.
+% \get*marks macros below extract the needed part using \ifcase.
+%
+% Another complication is to let the user choose whether \thischapter
+% (\thissection) refers to the chapter (section) in effect at the top
+% of a page, or that at the bottom of a page. The solution is
+% described on page 260 of The TeXbook. It involves outputting two
+% marks for the sectioning macros, one before the section break, and
+% one after. I won't pretend I can describe this better than DEK...
+\def\domark{%
+ \toks0=\expandafter{\lastchapterdefs}%
+ \toks2=\expandafter{\lastsectiondefs}%
+ \toks4=\expandafter{\prevchapterdefs}%
+ \toks6=\expandafter{\prevsectiondefs}%
+ \toks8=\expandafter{\lastcolordefs}%
+ \mark{%
+ \the\toks0 \the\toks2
+ \noexpand\or \the\toks4 \the\toks6
+ \noexpand\else \the\toks8
+ }%
+}
+% \topmark doesn't work for the very first chapter (after the title
+% page or the contents), so we use \firstmark there -- this gets us
+% the mark with the chapter defs, unless the user sneaks in, e.g.,
+% @setcolor (or @url, or @link, etc.) between @contents and the very
+% first @chapter.
+\def\gettopheadingmarks{%
+ \ifcase0\topmark\fi
+ \ifx\thischapter\empty \ifcase0\firstmark\fi \fi
+}
+\def\getbottomheadingmarks{\ifcase1\botmark\fi}
+\def\getcolormarks{\ifcase2\topmark\fi}
+
+% Avoid "undefined control sequence" errors.
+\def\lastchapterdefs{}
+\def\lastsectiondefs{}
+\def\prevchapterdefs{}
+\def\prevsectiondefs{}
+\def\lastcolordefs{}
+
% Main output routine.
\chardef\PAGE = 255
\output = {\onepageout{\pagecontents\PAGE}}
@@ -302,7 +321,9 @@
%
% Do this outside of the \shipout so @code etc. will be expanded in
% the headline as they should be, not taken literally (outputting ''code).
+ \ifodd\pageno \getoddheadingmarks \else \getevenheadingmarks \fi
\setbox\headlinebox = \vbox{\let\hsize=\pagewidth \makeheadline}%
+ \ifodd\pageno \getoddfootingmarks \else \getevenfootingmarks \fi
\setbox\footlinebox = \vbox{\let\hsize=\pagewidth \makefootline}%
%
{%
@@ -311,6 +332,13 @@
% before the \shipout runs.
%
\indexdummies % don't expand commands in the output.
+ \normalturnoffactive % \ in index entries must not stay \, e.g., if
+ % the page break happens to be in the middle of an example.
+ % We don't want .vr (or whatever) entries like this:
+ % \entry{{\tt \indexbackslash }acronym}{32}{\code {\acronym}}
+ % "\acronym" won't work when it's read back in;
+ % it needs to be
+ % {\code {{\tt \backslashcurfont }acronym}
\shipout\vbox{%
% Do this early so pdf references go to the beginning of the page.
\ifpdfmakepagedest \pdfdest name{\the\pageno} xyz\fi
@@ -338,9 +366,9 @@
\pagebody{#1}%
\ifdim\ht\footlinebox > 0pt
% Only leave this space if the footline is nonempty.
- % (We lessened \vsize for it in \oddfootingxxx.)
+ % (We lessened \vsize for it in \oddfootingyyy.)
% The \baselineskip=24pt in plain's \makefootline has no effect.
- \vskip 2\baselineskip
+ \vskip 24pt
\unvbox\footlinebox
\fi
%
@@ -374,7 +402,7 @@
% marginal hacks, juha@viisa.uucp (Juha Takala)
\ifvoid\margin\else % marginal info is present
\rlap{\kern\hsize\vbox to\z@{\kern1pt\box\margin \vss}}\fi
-\dimen@=\dp#1 \unvbox#1
+\dimen@=\dp#1\relax \unvbox#1\relax
\ifvoid\footins\else\vskip\skip\footins\footnoterule \unvbox\footins\fi
\ifr@ggedbottom \kern-\dimen@ \vfil \fi}
}
@@ -396,7 +424,7 @@
%
\def\parsearg{\parseargusing{}}
\def\parseargusing#1#2{%
- \def\next{#2}%
+ \def\argtorun{#2}%
\begingroup
\obeylines
\spaceisspace
@@ -415,7 +443,7 @@
\def\argremovecomment#1\comment#2\ArgTerm{\argremovec #1\c\ArgTerm}
\def\argremovec#1\c#2\ArgTerm{\argcheckspaces#1\^^M\ArgTerm}
-% Each occurence of `\^^M' or `<space>\^^M' is replaced by a single space.
+% Each occurrence of `\^^M' or `<space>\^^M' is replaced by a single space.
%
% \argremovec might leave us with trailing space, e.g.,
% @end itemize @c foo
@@ -427,8 +455,7 @@
\def\argcheckspacesY#1\^^M#2\^^M#3\ArgTerm{%
\def\temp{#3}%
\ifx\temp\empty
- % We cannot use \next here, as it holds the macro to run;
- % thus we reuse \temp.
+ % Do not use \next, perhaps the caller of \parsearg uses it; reuse \temp:
\let\temp\finishparsearg
\else
\let\temp\argcheckspaces
@@ -440,14 +467,14 @@
% If a _delimited_ argument is enclosed in braces, they get stripped; so
% to get _exactly_ the rest of the line, we had to prevent such situation.
% We prepended an \empty token at the very beginning and we expand it now,
-% just before passing the control to \next.
-% (Similarily, we have to think about #3 of \argcheckspacesY above: it is
+% just before passing the control to \argtorun.
+% (Similarly, we have to think about #3 of \argcheckspacesY above: it is
% either the null string, or it ends with \^^M---thus there is no danger
% that a pair of braces would be stripped.
%
% But first, we have to remove the trailing space token.
%
-\def\finishparsearg#1 \ArgTerm{\expandafter\next\expandafter{#1}}
+\def\finishparsearg#1 \ArgTerm{\expandafter\argtorun\expandafter{#1}}
% \parseargdef\foo{...}
% is roughly equivalent to
@@ -498,12 +525,12 @@
% used to check whether the current environment is the one expected.
%
% Non-false conditionals (@iftex, @ifset) don't fit into this, so they
-% are not treated as enviroments; they don't open a group. (The
+% are not treated as environments; they don't open a group. (The
% implementation of @end takes care not to call \endgroup in this
% special case.)
-% At runtime, environments start with this:
+% At run-time, environments start with this:
\def\startenvironment#1{\begingroup\def\thisenv{#1}}
% initialize
\let\thisenv\empty
@@ -521,7 +548,7 @@
\fi
}
-% Evironment mismatch, #1 expected:
+% Environment mismatch, #1 expected:
\def\badenverr{%
\errhelp = \EMsimple
\errmessage{This command can appear only \inenvironment\temp,
@@ -529,7 +556,7 @@
}
\def\inenvironment#1{%
\ifx#1\empty
- out of any environment%
+ outside of any environment%
\else
in environment \expandafter\string#1%
\fi
@@ -541,7 +568,7 @@
\parseargdef\end{%
\if 1\csname iscond.#1\endcsname
\else
- % The general wording of \badenverr may not be ideal, but... --kasal, 06nov03
+ % The general wording of \badenverr may not be ideal.
\expandafter\checkenv\csname#1\endcsname
\csname E#1\endcsname
\endgroup
@@ -551,85 +578,6 @@
\newhelp\EMsimple{Press RETURN to continue.}
-%% Simple single-character @ commands
-
-% @@ prints an @
-% Kludge this until the fonts are right (grr).
-\def\@{{\tt\char64}}
-
-% This is turned off because it was never documented
-% and you can use @w{...} around a quote to suppress ligatures.
-%% Define @` and @' to be the same as ` and '
-%% but suppressing ligatures.
-%\def\`{{`}}
-%\def\'{{'}}
-
-% Used to generate quoted braces.
-\def\mylbrace {{\tt\char123}}
-\def\myrbrace {{\tt\char125}}
-\let\{=\mylbrace
-\let\}=\myrbrace
-\begingroup
- % Definitions to produce \{ and \} commands for indices,
- % and @{ and @} for the aux/toc files.
- \catcode`\{ = \other \catcode`\} = \other
- \catcode`\[ = 1 \catcode`\] = 2
- \catcode`\! = 0 \catcode`\\ = \other
- !gdef!lbracecmd[\{]%
- !gdef!rbracecmd[\}]%
- !gdef!lbraceatcmd[@{]%
- !gdef!rbraceatcmd[@}]%
-!endgroup
-
-% @comma{} to avoid , parsing problems.
-\let\comma = ,
-
-% Accents: @, @dotaccent @ringaccent @ubaraccent @udotaccent
-% Others are defined by plain TeX: @` @' @" @^ @~ @= @u @v @H.
-\let\, = \c
-\let\dotaccent = \.
-\def\ringaccent#1{{\accent23 #1}}
-\let\tieaccent = \t
-\let\ubaraccent = \b
-\let\udotaccent = \d
-
-% Other special characters: @questiondown @exclamdown @ordf @ordm
-% Plain TeX defines: @AA @AE @O @OE @L (plus lowercase versions) @ss.
-\def\questiondown{?`}
-\def\exclamdown{!`}
-\def\ordf{\leavevmode\raise1ex\hbox{\selectfonts\lllsize \underbar{a}}}
-\def\ordm{\leavevmode\raise1ex\hbox{\selectfonts\lllsize \underbar{o}}}
-
-% Dotless i and dotless j, used for accents.
-\def\imacro{i}
-\def\jmacro{j}
-\def\dotless#1{%
- \def\temp{#1}%
- \ifx\temp\imacro \ptexi
- \else\ifx\temp\jmacro \j
- \else \errmessage{@dotless can be used only with i or j}%
- \fi\fi
-}
-
-% The \TeX{} logo, as in plain, but resetting the spacing so that a
-% period following counts as ending a sentence. (Idea found in latex.)
-%
-\edef\TeX{\TeX \spacefactor=1000 }
-
-% @LaTeX{} logo. Not quite the same results as the definition in
-% latex.ltx, since we use a different font for the raised A; it's most
-% convenient for us to use an explicitly smaller font, rather than using
-% the \scriptstyle font (since we don't reset \scriptstyle and
-% \scriptscriptstyle).
-%
-\def\LaTeX{%
- L\kern-.36em
- {\setbox0=\hbox{T}%
- \vbox to \ht0{\hbox{\selectfonts\lllsize A}\vss}}%
- \kern-.15em
- \TeX
-}
-
% Be sure we're in horizontal mode when doing a tie, since we make space
% equivalent to this in @example-like environments. Otherwise, a space
% at the beginning of a line will start with \penalty -- and
@@ -661,7 +609,7 @@
\def\?{?\spacefactor=\endofsentencespacefactor\space}
% @frenchspacing on|off says whether to put extra space after punctuation.
-%
+%
\def\onword{on}
\def\offword{off}
%
@@ -671,7 +619,7 @@
\else\ifx\temp\offword \plainnonfrenchspacing
\else
\errhelp = \EMsimple
- \errmessage{Unknown @frenchspacing option `\temp', must be on/off}%
+ \errmessage{Unknown @frenchspacing option `\temp', must be on|off}%
\fi\fi
}
@@ -753,15 +701,6 @@ where each line of input produces a line of output.}
\newdimen\mil \mil=0.001in
-% Old definition--didn't work.
-%\parseargdef\need{\par %
-%% This method tries to make TeX break the page naturally
-%% if the depth of the box does not fit.
-%{\baselineskip=0pt%
-%\vtop to #1\mil{\vfil}\kern -#1\mil\nobreak
-%\prevdepth=-1000pt
-%}}
-
\parseargdef\need{%
% Ensure vertical mode, so we don't make a big box in the middle of a
% paragraph.
@@ -825,7 +764,7 @@ where each line of input produces a line of output.}
% @inmargin{WHICH}{TEXT} puts TEXT in the WHICH margin next to the current
% paragraph. For more general purposes, use the \margin insertion
-% class. WHICH is `l' or `r'.
+% class. WHICH is `l' or `r'. Not documented, written for gawk manual.
%
\newskip\inmarginspacing \inmarginspacing=1cm
\def\strutdepth{\dp\strutbox}
@@ -872,15 +811,51 @@ where each line of input produces a line of output.}
\temp
}
-% @include file insert text of that file as input.
+% @| inserts a changebar to the left of the current line. It should
+% surround any changed text. This approach does *not* work if the
+% change spans more than two lines of output. To handle that, we would
+% have adopt a much more difficult approach (putting marks into the main
+% vertical list for the beginning and end of each change). This command
+% is not documented, not supported, and doesn't work.
+%
+\def\|{%
+ % \vadjust can only be used in horizontal mode.
+ \leavevmode
+ %
+ % Append this vertical mode material after the current line in the output.
+ \vadjust{%
+ % We want to insert a rule with the height and depth of the current
+ % leading; that is exactly what \strutbox is supposed to record.
+ \vskip-\baselineskip
+ %
+ % \vadjust-items are inserted at the left edge of the type. So
+ % the \llap here moves out into the left-hand margin.
+ \llap{%
+ %
+ % For a thicker or thinner bar, change the `1pt'.
+ \vrule height\baselineskip width1pt
+ %
+ % This is the space between the bar and the text.
+ \hskip 12pt
+ }%
+ }%
+}
+
+% @include FILE -- \input text of FILE.
%
\def\include{\parseargusing\filenamecatcodes\includezzz}
\def\includezzz#1{%
\pushthisfilestack
\def\thisfile{#1}%
{%
- \makevalueexpandable
- \def\temp{\input #1 }%
+ \makevalueexpandable % we want to expand any @value in FILE.
+ \turnoffactive % and allow special characters in the expansion
+ \indexnofonts % Allow `@@' and other weird things in file names.
+ \wlog{texinfo.tex: doing @include of #1^^J}%
+ \edef\temp{\noexpand\input #1 }%
+ %
+ % This trickery is to read FILE outside of a group, in case it makes
+ % definitions, etc.
\expandafter
}\temp
\popthisfilestack
@@ -895,6 +870,8 @@ where each line of input produces a line of output.}
\catcode`>=\other
\catcode`+=\other
\catcode`-=\other
+ \catcode`\`=\other
+ \catcode`\'=\other
}
\def\pushthisfilestack{%
@@ -910,7 +887,7 @@ where each line of input produces a line of output.}
\def\popthisfilestack{\errthisfilestackempty}
\def\errthisfilestackempty{\errmessage{Internal error:
the stack of filenames is empty.}}
-
+%
\def\thisfile{}
% @center line
@@ -918,36 +895,46 @@ where each line of input produces a line of output.}
%
\parseargdef\center{%
\ifhmode
- \let\next\centerH
+ \let\centersub\centerH
\else
- \let\next\centerV
+ \let\centersub\centerV
\fi
- \next{\hfil \ignorespaces#1\unskip \hfil}%
+ \centersub{\hfil \ignorespaces#1\unskip \hfil}%
+ \let\centersub\relax % don't let the definition persist, just in case
}
-\def\centerH#1{%
- {%
- \hfil\break
- \advance\hsize by -\leftskip
- \advance\hsize by -\rightskip
- \line{#1}%
- \break
- }%
+\def\centerH#1{{%
+ \hfil\break
+ \advance\hsize by -\leftskip
+ \advance\hsize by -\rightskip
+ \line{#1}%
+ \break
+}}
+%
+\newcount\centerpenalty
+\def\centerV#1{%
+ % The idea here is the same as in \startdefun, \cartouche, etc.: if
+ % @center is the first thing after a section heading, we need to wipe
+ % out the negative parskip inserted by \sectionheading, but still
+ % prevent a page break here.
+ \centerpenalty = \lastpenalty
+ \ifnum\centerpenalty>10000 \vskip\parskip \fi
+ \ifnum\centerpenalty>9999 \penalty\centerpenalty \fi
+ \line{\kern\leftskip #1\kern\rightskip}%
}
-\def\centerV#1{\line{\kern\leftskip #1\kern\rightskip}}
% @sp n outputs n lines of vertical space
-
+%
\parseargdef\sp{\vskip #1\baselineskip}
% @comment ...line which is ignored...
% @c is the same as @comment
% @ignore ... @end ignore is another way to write a comment
-
+%
\def\comment{\begingroup \catcode`\^^M=\other%
\catcode`\@=\other \catcode`\{=\other \catcode`\}=\other%
\commentxxx}
{\catcode`\^^M=\other \gdef\commentxxx#1^^M{\endgroup}}
-
+%
\let\c=\comment
% @paragraphindent NCHARS
@@ -1040,86 +1027,6 @@ where each line of input produces a line of output.}
}
-% @asis just yields its argument. Used with @table, for example.
-%
-\def\asis#1{#1}
-
-% @math outputs its argument in math mode.
-%
-% One complication: _ usually means subscripts, but it could also mean
-% an actual _ character, as in @math{@var{some_variable} + 1}. So make
-% _ active, and distinguish by seeing if the current family is \slfam,
-% which is what @var uses.
-{
- \catcode\underChar = \active
- \gdef\mathunderscore{%
- \catcode\underChar=\active
- \def_{\ifnum\fam=\slfam \_\else\sb\fi}%
- }
-}
-% Another complication: we want \\ (and @\) to output a \ character.
-% FYI, plain.tex uses \\ as a temporary control sequence (why?), but
-% this is not advertised and we don't care. Texinfo does not
-% otherwise define @\.
-%
-% The \mathchar is class=0=ordinary, family=7=ttfam, position=5C=\.
-\def\mathbackslash{\ifnum\fam=\ttfam \mathchar"075C \else\backslash \fi}
-%
-\def\math{%
- \tex
- \mathunderscore
- \let\\ = \mathbackslash
- \mathactive
- $\finishmath
-}
-\def\finishmath#1{#1$\endgroup} % Close the group opened by \tex.
-
-% Some active characters (such as <) are spaced differently in math.
-% We have to reset their definitions in case the @math was an argument
-% to a command which sets the catcodes (such as @item or @section).
-%
-{
- \catcode`^ = \active
- \catcode`< = \active
- \catcode`> = \active
- \catcode`+ = \active
- \gdef\mathactive{%
- \let^ = \ptexhat
- \let< = \ptexless
- \let> = \ptexgtr
- \let+ = \ptexplus
- }
-}
-
-% @bullet and @minus need the same treatment as @math, just above.
-\def\bullet{$\ptexbullet$}
-\def\minus{$-$}
-
-% @dots{} outputs an ellipsis using the current font.
-% We do .5em per period so that it has the same spacing in a typewriter
-% font as three actual period characters.
-%
-\def\dots{%
- \leavevmode
- \hbox to 1.5em{%
- \hskip 0pt plus 0.25fil
- .\hfil.\hfil.%
- \hskip 0pt plus 0.5fil
- }%
-}
-
-% @enddots{} is an end-of-sentence ellipsis.
-%
-\def\enddots{%
- \dots
- \spacefactor=\endofsentencespacefactor
-}
-
-% @comma{} is so commas can be inserted into text without messing up
-% Texinfo's parsing.
-%
-\let\comma = ,
-
% @refill is a no-op.
\let\refill=\relax
@@ -1184,9 +1091,8 @@ where each line of input produces a line of output.}
\newif\ifpdfmakepagedest
% when pdftex is run in dvi mode, \pdfoutput is defined (so \pdfoutput=1
-% can be set). So we test for \relax and 0 as well as \undefined,
-% borrowed from ifpdf.sty.
-\ifx\pdfoutput\undefined
+% can be set). So we test for \relax and 0 as well as being undefined.
+\ifx\pdfoutput\thisisundefined
\else
\ifx\pdfoutput\relax
\else
@@ -1197,99 +1103,156 @@ where each line of input produces a line of output.}
\fi
\fi
-% PDF uses PostScript string constants for the names of xref targets, to
+% PDF uses PostScript string constants for the names of xref targets,
% for display in the outlines, and in other places. Thus, we have to
% double any backslashes. Otherwise, a name like "\node" will be
% interpreted as a newline (\n), followed by o, d, e. Not good.
-% http://www.ntg.nl/pipermail/ntg-pdftex/2004-July/000654.html
-% (and related messages, the final outcome is that it is up to the TeX
-% user to double the backslashes and otherwise make the string valid, so
-% that's we do).
-
-% double active backslashes.
-%
-{\catcode`\@=0 \catcode`\\=\active
- @gdef@activebackslash{@catcode`@\=@active @otherbackslash}
- @gdef@activebackslashdouble{%
- @catcode@backChar=@active
- @let\=@doublebackslash}
-}
-
-% To handle parens, we must adopt a different approach, since parens are
-% not active characters. hyperref.dtx (which has the same problem as
-% us) handles it with this amazing macro to replace tokens. I've
-% tinkered with it a little for texinfo, but it's definitely from there.
-%
-% #1 is the tokens to replace.
-% #2 is the replacement.
-% #3 is the control sequence with the string.
%
-\def\HyPsdSubst#1#2#3{%
- \def\HyPsdReplace##1#1##2\END{%
- ##1%
- \ifx\\##2\\%
- \else
- #2%
- \HyReturnAfterFi{%
- \HyPsdReplace##2\END
- }%
- \fi
- }%
- \xdef#3{\expandafter\HyPsdReplace#3#1\END}%
-}
-\long\def\HyReturnAfterFi#1\fi{\fi#1}
-
-% #1 is a control sequence in which to do the replacements.
-\def\backslashparens#1{%
- \xdef#1{#1}% redefine it as its expansion; the definition is simply
- % \lastnode when called from \setref -> \pdfmkdest.
- \HyPsdSubst{(}{\backslashlparen}{#1}%
- \HyPsdSubst{)}{\backslashrparen}{#1}%
+% See http://www.ntg.nl/pipermail/ntg-pdftex/2004-July/000654.html and
+% related messages. The final outcome is that it is up to the TeX user
+% to double the backslashes and otherwise make the string valid, so
+% that's what we do. pdftex 1.30.0 (ca.2005) introduced a primitive to
+% do this reliably, so we use it.
+
+% #1 is a control sequence in which to do the replacements,
+% which we \xdef.
+\def\txiescapepdf#1{%
+ \ifx\pdfescapestring\thisisundefined
+ % No primitive available; should we give a warning or log?
+ % Many times it won't matter.
+ \else
+ % The expandable \pdfescapestring primitive escapes parentheses,
+ % backslashes, and other special chars.
+ \xdef#1{\pdfescapestring{#1}}%
+ \fi
}
-{\catcode\exclamChar = 0 \catcode\backChar = \other
- !gdef!backslashlparen{\(}%
- !gdef!backslashrparen{\)}%
-}
+\newhelp\nopdfimagehelp{Texinfo supports .png, .jpg, .jpeg, and .pdf images
+with PDF output, and none of those formats could be found. (.eps cannot
+be supported due to the design of the PDF format; use regular TeX (DVI
+output) for that.)}
\ifpdf
- \input pdfcolor
- \pdfcatalog{/PageMode /UseOutlines}%
+ %
+ % Color manipulation macros based on pdfcolor.tex,
+ % except using rgb instead of cmyk; the latter is said to render as a
+ % very dark gray on-screen and a very dark halftone in print, instead
+ % of actual black.
+ \def\rgbDarkRed{0.50 0.09 0.12}
+ \def\rgbBlack{0 0 0}
+ %
+ % k sets the color for filling (usual text, etc.);
+ % K sets the color for stroking (thin rules, e.g., normal _'s).
+ \def\pdfsetcolor#1{\pdfliteral{#1 rg #1 RG}}
+ %
+ % Set color, and create a mark which defines \thiscolor accordingly,
+ % so that \makeheadline knows which color to restore.
+ \def\setcolor#1{%
+ \xdef\lastcolordefs{\gdef\noexpand\thiscolor{#1}}%
+ \domark
+ \pdfsetcolor{#1}%
+ }
+ %
+ \def\maincolor{\rgbBlack}
+ \pdfsetcolor{\maincolor}
+ \edef\thiscolor{\maincolor}
+ \def\lastcolordefs{}
+ %
+ \def\makefootline{%
+ \baselineskip24pt
+ \line{\pdfsetcolor{\maincolor}\the\footline}%
+ }
+ %
+ \def\makeheadline{%
+ \vbox to 0pt{%
+ \vskip-22.5pt
+ \line{%
+ \vbox to8.5pt{}%
+ % Extract \thiscolor definition from the marks.
+ \getcolormarks
+ % Typeset the headline with \maincolor, then restore the color.
+ \pdfsetcolor{\maincolor}\the\headline\pdfsetcolor{\thiscolor}%
+ }%
+ \vss
+ }%
+ \nointerlineskip
+ }
+ %
+ %
+ \pdfcatalog{/PageMode /UseOutlines}
+ %
+ % #1 is image name, #2 width (might be empty/whitespace), #3 height (ditto).
\def\dopdfimage#1#2#3{%
- \def\imagewidth{#2}%
- \def\imageheight{#3}%
- % without \immediate, pdftex seg faults when the same image is
+ \def\pdfimagewidth{#2}\setbox0 = \hbox{\ignorespaces #2}%
+ \def\pdfimageheight{#3}\setbox2 = \hbox{\ignorespaces #3}%
+ %
+ % pdftex (and the PDF format) support .pdf, .png, .jpg (among
+ % others). Let's try in that order, PDF first since if
+ % someone has a scalable image, presumably better to use that than a
+ % bitmap.
+ \let\pdfimgext=\empty
+ \begingroup
+ \openin 1 #1.pdf \ifeof 1
+ \openin 1 #1.PDF \ifeof 1
+ \openin 1 #1.png \ifeof 1
+ \openin 1 #1.jpg \ifeof 1
+ \openin 1 #1.jpeg \ifeof 1
+ \openin 1 #1.JPG \ifeof 1
+ \errhelp = \nopdfimagehelp
+ \errmessage{Could not find image file #1 for pdf}%
+ \else \gdef\pdfimgext{JPG}%
+ \fi
+ \else \gdef\pdfimgext{jpeg}%
+ \fi
+ \else \gdef\pdfimgext{jpg}%
+ \fi
+ \else \gdef\pdfimgext{png}%
+ \fi
+ \else \gdef\pdfimgext{PDF}%
+ \fi
+ \else \gdef\pdfimgext{pdf}%
+ \fi
+ \closein 1
+ \endgroup
+ %
+ % without \immediate, ancient pdftex seg faults when the same image is
% included twice. (Version 3.14159-pre-1.0-unofficial-20010704.)
\ifnum\pdftexversion < 14
\immediate\pdfimage
\else
\immediate\pdfximage
\fi
- \ifx\empty\imagewidth\else width \imagewidth \fi
- \ifx\empty\imageheight\else height \imageheight \fi
+ \ifdim \wd0 >0pt width \pdfimagewidth \fi
+ \ifdim \wd2 >0pt height \pdfimageheight \fi
\ifnum\pdftexversion<13
- #1.pdf%
+ #1.\pdfimgext
\else
- {#1.pdf}%
+ {#1.\pdfimgext}%
\fi
\ifnum\pdftexversion < 14 \else
\pdfrefximage \pdflastximage
\fi}
+ %
\def\pdfmkdest#1{{%
% We have to set dummies so commands such as @code, and characters
% such as \, aren't expanded when present in a section title.
- \atdummies
- \activebackslashdouble
+ \indexnofonts
+ \turnoffactive
+ \makevalueexpandable
\def\pdfdestname{#1}%
- \backslashparens\pdfdestname
- \pdfdest name{\pdfdestname} xyz%
- }}%
+ \txiescapepdf\pdfdestname
+ \safewhatsit{\pdfdest name{\pdfdestname} xyz}%
+ }}
%
% used to mark target names; must be expandable.
- \def\pdfmkpgn#1{#1}%
+ \def\pdfmkpgn#1{#1}
+ %
+ % by default, use a color that is dark enough to print on paper as
+ % nearly black, but still distinguishable for online viewing.
+ \def\urlcolor{\rgbDarkRed}
+ \def\linkcolor{\rgbDarkRed}
+ \def\endlink{\setcolor{\maincolor}\pdfendlink}
%
- \let\linkcolor = \Blue % was Cyan, but that seems light?
- \def\endlink{\Black\pdfendlink}
% Adding outlines to PDF; macros for calculating structure of outlines
% come from Petr Olsak
\def\expnumber#1{\expandafter\ifx\csname#1\endcsname\relax 0%
@@ -1309,29 +1272,24 @@ where each line of input produces a line of output.}
% page number. We could generate a destination for the section
% text in the case where a section has no node, but it doesn't
% seem worth the trouble, since most documents are normally structured.
- \def\pdfoutlinedest{#3}%
+ \edef\pdfoutlinedest{#3}%
\ifx\pdfoutlinedest\empty
\def\pdfoutlinedest{#4}%
\else
- % Doubled backslashes in the name.
- {\activebackslashdouble \xdef\pdfoutlinedest{#3}%
- \backslashparens\pdfoutlinedest}%
+ \txiescapepdf\pdfoutlinedest
\fi
%
- % Also double the backslashes in the display string.
- {\activebackslashdouble \xdef\pdfoutlinetext{#1}%
- \backslashparens\pdfoutlinetext}%
+ % Also escape PDF chars in the display string.
+ \edef\pdfoutlinetext{#1}%
+ \txiescapepdf\pdfoutlinetext
%
\pdfoutline goto name{\pdfmkpgn{\pdfoutlinedest}}#2{\pdfoutlinetext}%
}
%
\def\pdfmakeoutlines{%
\begingroup
- % Thanh's hack / proper braces in bookmarks
- \edef\mylbrace{\iftrue \string{\else}\fi}\let\{=\mylbrace
- \edef\myrbrace{\iffalse{\else\string}\fi}\let\}=\myrbrace
- %
% Read toc silently, to get counts of subentries for \pdfoutline.
+ \def\partentry##1##2##3##4{}% ignore parts in the outlines
\def\numchapentry##1##2##3##4{%
\def\thischapnum{##2}%
\def\thissecnum{0}%
@@ -1385,35 +1343,63 @@ where each line of input produces a line of output.}
% Latin 2 (0xea) gets translated to a | character. Info from
% Staszek Wawrykiewicz, 19 Jan 2004 04:09:24 +0100.
%
- % xx to do this right, we have to translate 8-bit characters to
- % their "best" equivalent, based on the @documentencoding. Right
- % now, I guess we'll just let the pdf reader have its way.
+ % TODO: to do this right, we have to translate 8-bit characters to
+ % their "best" equivalent, based on the @documentencoding. Too
+ % much work for too little return. Just use the ASCII equivalents
+ % we use for the index sort strings.
+ %
\indexnofonts
\setupdatafile
- \activebackslash
- \input \jobname.toc
+ % We can have normal brace characters in the PDF outlines, unlike
+ % Texinfo index files. So set that up.
+ \def\{{\lbracecharliteral}%
+ \def\}{\rbracecharliteral}%
+ \catcode`\\=\active \otherbackslash
+ \input \tocreadfilename
\endgroup
}
+ {\catcode`[=1 \catcode`]=2
+ \catcode`{=\other \catcode`}=\other
+ \gdef\lbracecharliteral[{]%
+ \gdef\rbracecharliteral[}]%
+ ]
%
\def\skipspaces#1{\def\PP{#1}\def\D{|}%
\ifx\PP\D\let\nextsp\relax
\else\let\nextsp\skipspaces
- \ifx\p\space\else\addtokens{\filename}{\PP}%
- \advance\filenamelength by 1
- \fi
+ \addtokens{\filename}{\PP}%
+ \advance\filenamelength by 1
\fi
\nextsp}
- \def\getfilename#1{\filenamelength=0\expandafter\skipspaces#1|\relax}
+ \def\getfilename#1{%
+ \filenamelength=0
+ % If we don't expand the argument now, \skipspaces will get
+ % snagged on things like "@value{foo}".
+ \edef\temp{#1}%
+ \expandafter\skipspaces\temp|\relax
+ }
\ifnum\pdftexversion < 14
\let \startlink \pdfannotlink
\else
\let \startlink \pdfstartlink
\fi
+ % make a live url in pdf output.
\def\pdfurl#1{%
\begingroup
- \normalturnoffactive\def\@{@}%
+ % it seems we really need yet another set of dummies; have not
+ % tried to figure out what each command should do in the context
+ % of @url. for now, just make @/ a no-op, that's the only one
+ % people have actually reported a problem with.
+ %
+ \normalturnoffactive
+ \def\@{@}%
+ \let\/=\empty
\makevalueexpandable
- \leavevmode\Red
+ % do we want to go so far as to use \indexnofonts instead of just
+ % special-casing \var here?
+ \def\var##1{##1}%
+ %
+ \leavevmode\setcolor{\urlcolor}%
\startlink attr{/Border [0 0 0]}%
user{/Subtype /Link /A << /S /URI /URI (#1) >>}%
\endgroup}
@@ -1440,13 +1426,15 @@ where each line of input produces a line of output.}
{\noexpand\pdflink{\the\toksC}}\toksC={}\global\countA=0}
\def\pdflink#1{%
\startlink attr{/Border [0 0 0]} goto name{\pdfmkpgn{#1}}
- \linkcolor #1\endlink}
+ \setcolor{\linkcolor}#1\endlink}
\def\done{\edef\st{\global\noexpand\toksA={\the\toksB}}\st}
\else
+ % non-pdf mode
\let\pdfmkdest = \gobble
\let\pdfurl = \gobble
\let\endlink = \relax
- \let\linkcolor = \relax
+ \let\setcolor = \gobble
+ \let\pdfsetcolor = \gobble
\let\pdfmakeoutlines = \relax
\fi % \ifx\pdfoutput
@@ -1472,6 +1460,10 @@ where each line of input produces a line of output.}
\def\bf{\fam=\bffam \setfontstyle{bf}}\def\bfstylename{bf}
\def\tt{\fam=\ttfam \setfontstyle{tt}}
+% Unfortunately, we have to override this for titles and the like, since
+% in those cases "rm" is bold. Sigh.
+\def\rmisbold{\rm\def\curfontstyle{bf}}
+
% Texinfo sort of supports the sans serif font style, which plain TeX does not.
% So we set up a \sf.
\newfam\sffam
@@ -1481,8 +1473,6 @@ where each line of input produces a line of output.}
% We don't need math for this font style.
\def\ttsl{\setfontstyle{ttsl}}
-% Default leading.
-\newdimen\textleading \textleading = 13.2pt
% Set the baselineskip to #1, and the lineskip and strut size
% correspondingly. There is no deep meaning behind these magic numbers
@@ -1492,8 +1482,13 @@ where each line of input produces a line of output.}
\def\strutheightpercent{.70833}
\def\strutdepthpercent {.29167}
%
+% can get a sort of poor man's double spacing by redefining this.
+\def\baselinefactor{1}
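+%
+% For example (an illustrative sketch, not a documented switch), a manual
+% could approximate one-and-a-half line spacing from its source with
+%   @tex \global\def\baselinefactor{1.5} @end tex
+% near the top; the next \setleading call then scales the baselineskip.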
+%
+\newdimen\textleading
\def\setleading#1{%
- \normalbaselineskip = #1\relax
+ \dimen0 = #1\relax
+ \normalbaselineskip = \baselinefactor\dimen0
\normallineskip = \lineskipfactor\normalbaselineskip
\normalbaselines
\setbox\strutbox =\hbox{%
@@ -1502,20 +1497,295 @@ where each line of input produces a line of output.}
}%
}
-% Set the font macro #1 to the font named #2, adding on the
-% specified font prefix (normally `cm').
-% #3 is the font's design size, #4 is a scale factor
-\def\setfont#1#2#3#4{\font#1=\fontprefix#2#3 scaled #4}
+% PDF CMaps. See also LaTeX's t1.cmap.
+%
+% do nothing with this by default.
+\expandafter\let\csname cmapOT1\endcsname\gobble
+\expandafter\let\csname cmapOT1IT\endcsname\gobble
+\expandafter\let\csname cmapOT1TT\endcsname\gobble
+
+% if we are producing pdf, and we have \pdffontattr, then define cmaps.
+% (\pdffontattr was introduced many years ago, but people still run
+% older versions of pdftex; it's easy to conditionalize, so we do.)
+\ifpdf \ifx\pdffontattr\thisisundefined \else
+ \begingroup
+ \catcode`\^^M=\active \def^^M{^^J}% Output line endings as the ^^J char.
+ \catcode`\%=12 \immediate\pdfobj stream {%!PS-Adobe-3.0 Resource-CMap
+%%DocumentNeededResources: ProcSet (CIDInit)
+%%IncludeResource: ProcSet (CIDInit)
+%%BeginResource: CMap (TeX-OT1-0)
+%%Title: (TeX-OT1-0 TeX OT1 0)
+%%Version: 1.000
+%%EndComments
+/CIDInit /ProcSet findresource begin
+12 dict begin
+begincmap
+/CIDSystemInfo
+<< /Registry (TeX)
+/Ordering (OT1)
+/Supplement 0
+>> def
+/CMapName /TeX-OT1-0 def
+/CMapType 2 def
+1 begincodespacerange
+<00> <7F>
+endcodespacerange
+8 beginbfrange
+<00> <01> <0393>
+<09> <0A> <03A8>
+<23> <26> <0023>
+<28> <3B> <0028>
+<3F> <5B> <003F>
+<5D> <5E> <005D>
+<61> <7A> <0061>
+<7B> <7C> <2013>
+endbfrange
+40 beginbfchar
+<02> <0398>
+<03> <039B>
+<04> <039E>
+<05> <03A0>
+<06> <03A3>
+<07> <03D2>
+<08> <03A6>
+<0B> <00660066>
+<0C> <00660069>
+<0D> <0066006C>
+<0E> <006600660069>
+<0F> <00660066006C>
+<10> <0131>
+<11> <0237>
+<12> <0060>
+<13> <00B4>
+<14> <02C7>
+<15> <02D8>
+<16> <00AF>
+<17> <02DA>
+<18> <00B8>
+<19> <00DF>
+<1A> <00E6>
+<1B> <0153>
+<1C> <00F8>
+<1D> <00C6>
+<1E> <0152>
+<1F> <00D8>
+<21> <0021>
+<22> <201D>
+<27> <2019>
+<3C> <00A1>
+<3D> <003D>
+<3E> <00BF>
+<5C> <201C>
+<5F> <02D9>
+<60> <2018>
+<7D> <02DD>
+<7E> <007E>
+<7F> <00A8>
+endbfchar
+endcmap
+CMapName currentdict /CMap defineresource pop
+end
+end
+%%EndResource
+%%EOF
+ }\endgroup
+ \expandafter\edef\csname cmapOT1\endcsname#1{%
+ \pdffontattr#1{/ToUnicode \the\pdflastobj\space 0 R}%
+ }%
+%
+% \cmapOT1IT
+ \begingroup
+ \catcode`\^^M=\active \def^^M{^^J}% Output line endings as the ^^J char.
+ \catcode`\%=12 \immediate\pdfobj stream {%!PS-Adobe-3.0 Resource-CMap
+%%DocumentNeededResources: ProcSet (CIDInit)
+%%IncludeResource: ProcSet (CIDInit)
+%%BeginResource: CMap (TeX-OT1IT-0)
+%%Title: (TeX-OT1IT-0 TeX OT1IT 0)
+%%Version: 1.000
+%%EndComments
+/CIDInit /ProcSet findresource begin
+12 dict begin
+begincmap
+/CIDSystemInfo
+<< /Registry (TeX)
+/Ordering (OT1IT)
+/Supplement 0
+>> def
+/CMapName /TeX-OT1IT-0 def
+/CMapType 2 def
+1 begincodespacerange
+<00> <7F>
+endcodespacerange
+8 beginbfrange
+<00> <01> <0393>
+<09> <0A> <03A8>
+<25> <26> <0025>
+<28> <3B> <0028>
+<3F> <5B> <003F>
+<5D> <5E> <005D>
+<61> <7A> <0061>
+<7B> <7C> <2013>
+endbfrange
+42 beginbfchar
+<02> <0398>
+<03> <039B>
+<04> <039E>
+<05> <03A0>
+<06> <03A3>
+<07> <03D2>
+<08> <03A6>
+<0B> <00660066>
+<0C> <00660069>
+<0D> <0066006C>
+<0E> <006600660069>
+<0F> <00660066006C>
+<10> <0131>
+<11> <0237>
+<12> <0060>
+<13> <00B4>
+<14> <02C7>
+<15> <02D8>
+<16> <00AF>
+<17> <02DA>
+<18> <00B8>
+<19> <00DF>
+<1A> <00E6>
+<1B> <0153>
+<1C> <00F8>
+<1D> <00C6>
+<1E> <0152>
+<1F> <00D8>
+<21> <0021>
+<22> <201D>
+<23> <0023>
+<24> <00A3>
+<27> <2019>
+<3C> <00A1>
+<3D> <003D>
+<3E> <00BF>
+<5C> <201C>
+<5F> <02D9>
+<60> <2018>
+<7D> <02DD>
+<7E> <007E>
+<7F> <00A8>
+endbfchar
+endcmap
+CMapName currentdict /CMap defineresource pop
+end
+end
+%%EndResource
+%%EOF
+ }\endgroup
+ \expandafter\edef\csname cmapOT1IT\endcsname#1{%
+ \pdffontattr#1{/ToUnicode \the\pdflastobj\space 0 R}%
+ }%
+%
+% \cmapOT1TT
+ \begingroup
+ \catcode`\^^M=\active \def^^M{^^J}% Output line endings as the ^^J char.
+ \catcode`\%=12 \immediate\pdfobj stream {%!PS-Adobe-3.0 Resource-CMap
+%%DocumentNeededResources: ProcSet (CIDInit)
+%%IncludeResource: ProcSet (CIDInit)
+%%BeginResource: CMap (TeX-OT1TT-0)
+%%Title: (TeX-OT1TT-0 TeX OT1TT 0)
+%%Version: 1.000
+%%EndComments
+/CIDInit /ProcSet findresource begin
+12 dict begin
+begincmap
+/CIDSystemInfo
+<< /Registry (TeX)
+/Ordering (OT1TT)
+/Supplement 0
+>> def
+/CMapName /TeX-OT1TT-0 def
+/CMapType 2 def
+1 begincodespacerange
+<00> <7F>
+endcodespacerange
+5 beginbfrange
+<00> <01> <0393>
+<09> <0A> <03A8>
+<21> <26> <0021>
+<28> <5F> <0028>
+<61> <7E> <0061>
+endbfrange
+32 beginbfchar
+<02> <0398>
+<03> <039B>
+<04> <039E>
+<05> <03A0>
+<06> <03A3>
+<07> <03D2>
+<08> <03A6>
+<0B> <2191>
+<0C> <2193>
+<0D> <0027>
+<0E> <00A1>
+<0F> <00BF>
+<10> <0131>
+<11> <0237>
+<12> <0060>
+<13> <00B4>
+<14> <02C7>
+<15> <02D8>
+<16> <00AF>
+<17> <02DA>
+<18> <00B8>
+<19> <00DF>
+<1A> <00E6>
+<1B> <0153>
+<1C> <00F8>
+<1D> <00C6>
+<1E> <0152>
+<1F> <00D8>
+<20> <2423>
+<27> <2019>
+<60> <2018>
+<7F> <00A8>
+endbfchar
+endcmap
+CMapName currentdict /CMap defineresource pop
+end
+end
+%%EndResource
+%%EOF
+ }\endgroup
+ \expandafter\edef\csname cmapOT1TT\endcsname#1{%
+ \pdffontattr#1{/ToUnicode \the\pdflastobj\space 0 R}%
+ }%
+\fi\fi
+
+
+% Set the font macro #1 to the font named \fontprefix#2.
+% #3 is the font's design size, #4 is a scale factor, #5 is the CMap
+% encoding (only OT1, OT1IT and OT1TT are allowed, or empty to omit).
+% Example:
+% #1 = \textrm
+% #2 = \rmshape
+% #3 = 10
+% #4 = \mainmagstep
+% #5 = OT1
+%
+\def\setfont#1#2#3#4#5{%
+ \font#1=\fontprefix#2#3 scaled #4
+ \csname cmap#5\endcsname#1%
+}
+% This is what gets called when #5 of \setfont is empty.
+\let\cmap\gobble
+%
+% (end of cmaps)
% Use cm as the default font prefix.
% To specify the font prefix, you must define \fontprefix
% before you read in texinfo.tex.
-\ifx\fontprefix\undefined
+\ifx\fontprefix\thisisundefined
\def\fontprefix{cm}
\fi
% Support font families that don't use the same naming scheme as CM.
\def\rmshape{r}
-\def\rmbshape{bx} %where the normal face is bold
+\def\rmbshape{bx} % where the normal face is bold
\def\bfshape{b}
\def\bxshape{bx}
\def\ttshape{tt}
@@ -1530,118 +1800,291 @@ where each line of input produces a line of output.}
\def\scshape{csc}
\def\scbshape{csc}
+% Definitions for a main text size of 11pt. (The default in Texinfo.)
+%
+\def\definetextfontsizexi{%
% Text fonts (11.2pt, magstep1).
\def\textnominalsize{11pt}
\edef\mainmagstep{\magstephalf}
-\setfont\textrm\rmshape{10}{\mainmagstep}
-\setfont\texttt\ttshape{10}{\mainmagstep}
-\setfont\textbf\bfshape{10}{\mainmagstep}
-\setfont\textit\itshape{10}{\mainmagstep}
-\setfont\textsl\slshape{10}{\mainmagstep}
-\setfont\textsf\sfshape{10}{\mainmagstep}
-\setfont\textsc\scshape{10}{\mainmagstep}
-\setfont\textttsl\ttslshape{10}{\mainmagstep}
+\setfont\textrm\rmshape{10}{\mainmagstep}{OT1}
+\setfont\texttt\ttshape{10}{\mainmagstep}{OT1TT}
+\setfont\textbf\bfshape{10}{\mainmagstep}{OT1}
+\setfont\textit\itshape{10}{\mainmagstep}{OT1IT}
+\setfont\textsl\slshape{10}{\mainmagstep}{OT1}
+\setfont\textsf\sfshape{10}{\mainmagstep}{OT1}
+\setfont\textsc\scshape{10}{\mainmagstep}{OT1}
+\setfont\textttsl\ttslshape{10}{\mainmagstep}{OT1TT}
\font\texti=cmmi10 scaled \mainmagstep
\font\textsy=cmsy10 scaled \mainmagstep
+\def\textecsize{1095}
% A few fonts for @defun names and args.
-\setfont\defbf\bfshape{10}{\magstep1}
-\setfont\deftt\ttshape{10}{\magstep1}
-\setfont\defttsl\ttslshape{10}{\magstep1}
+\setfont\defbf\bfshape{10}{\magstep1}{OT1}
+\setfont\deftt\ttshape{10}{\magstep1}{OT1TT}
+\setfont\defttsl\ttslshape{10}{\magstep1}{OT1TT}
\def\df{\let\tentt=\deftt \let\tenbf = \defbf \let\tenttsl=\defttsl \bf}
% Fonts for indices, footnotes, small examples (9pt).
\def\smallnominalsize{9pt}
-\setfont\smallrm\rmshape{9}{1000}
-\setfont\smalltt\ttshape{9}{1000}
-\setfont\smallbf\bfshape{10}{900}
-\setfont\smallit\itshape{9}{1000}
-\setfont\smallsl\slshape{9}{1000}
-\setfont\smallsf\sfshape{9}{1000}
-\setfont\smallsc\scshape{10}{900}
-\setfont\smallttsl\ttslshape{10}{900}
+\setfont\smallrm\rmshape{9}{1000}{OT1}
+\setfont\smalltt\ttshape{9}{1000}{OT1TT}
+\setfont\smallbf\bfshape{10}{900}{OT1}
+\setfont\smallit\itshape{9}{1000}{OT1IT}
+\setfont\smallsl\slshape{9}{1000}{OT1}
+\setfont\smallsf\sfshape{9}{1000}{OT1}
+\setfont\smallsc\scshape{10}{900}{OT1}
+\setfont\smallttsl\ttslshape{10}{900}{OT1TT}
\font\smalli=cmmi9
\font\smallsy=cmsy9
+\def\smallecsize{0900}
% Fonts for small examples (8pt).
\def\smallernominalsize{8pt}
-\setfont\smallerrm\rmshape{8}{1000}
-\setfont\smallertt\ttshape{8}{1000}
-\setfont\smallerbf\bfshape{10}{800}
-\setfont\smallerit\itshape{8}{1000}
-\setfont\smallersl\slshape{8}{1000}
-\setfont\smallersf\sfshape{8}{1000}
-\setfont\smallersc\scshape{10}{800}
-\setfont\smallerttsl\ttslshape{10}{800}
+\setfont\smallerrm\rmshape{8}{1000}{OT1}
+\setfont\smallertt\ttshape{8}{1000}{OT1TT}
+\setfont\smallerbf\bfshape{10}{800}{OT1}
+\setfont\smallerit\itshape{8}{1000}{OT1IT}
+\setfont\smallersl\slshape{8}{1000}{OT1}
+\setfont\smallersf\sfshape{8}{1000}{OT1}
+\setfont\smallersc\scshape{10}{800}{OT1}
+\setfont\smallerttsl\ttslshape{10}{800}{OT1TT}
\font\smalleri=cmmi8
\font\smallersy=cmsy8
+\def\smallerecsize{0800}
% Fonts for title page (20.4pt):
\def\titlenominalsize{20pt}
-\setfont\titlerm\rmbshape{12}{\magstep3}
-\setfont\titleit\itbshape{10}{\magstep4}
-\setfont\titlesl\slbshape{10}{\magstep4}
-\setfont\titlett\ttbshape{12}{\magstep3}
-\setfont\titlettsl\ttslshape{10}{\magstep4}
-\setfont\titlesf\sfbshape{17}{\magstep1}
+\setfont\titlerm\rmbshape{12}{\magstep3}{OT1}
+\setfont\titleit\itbshape{10}{\magstep4}{OT1IT}
+\setfont\titlesl\slbshape{10}{\magstep4}{OT1}
+\setfont\titlett\ttbshape{12}{\magstep3}{OT1TT}
+\setfont\titlettsl\ttslshape{10}{\magstep4}{OT1TT}
+\setfont\titlesf\sfbshape{17}{\magstep1}{OT1}
\let\titlebf=\titlerm
-\setfont\titlesc\scbshape{10}{\magstep4}
+\setfont\titlesc\scbshape{10}{\magstep4}{OT1}
\font\titlei=cmmi12 scaled \magstep3
\font\titlesy=cmsy10 scaled \magstep4
-\def\authorrm{\secrm}
-\def\authortt{\sectt}
+\def\titleecsize{2074}
% Chapter (and unnumbered) fonts (17.28pt).
\def\chapnominalsize{17pt}
-\setfont\chaprm\rmbshape{12}{\magstep2}
-\setfont\chapit\itbshape{10}{\magstep3}
-\setfont\chapsl\slbshape{10}{\magstep3}
-\setfont\chaptt\ttbshape{12}{\magstep2}
-\setfont\chapttsl\ttslshape{10}{\magstep3}
-\setfont\chapsf\sfbshape{17}{1000}
+\setfont\chaprm\rmbshape{12}{\magstep2}{OT1}
+\setfont\chapit\itbshape{10}{\magstep3}{OT1IT}
+\setfont\chapsl\slbshape{10}{\magstep3}{OT1}
+\setfont\chaptt\ttbshape{12}{\magstep2}{OT1TT}
+\setfont\chapttsl\ttslshape{10}{\magstep3}{OT1TT}
+\setfont\chapsf\sfbshape{17}{1000}{OT1}
\let\chapbf=\chaprm
-\setfont\chapsc\scbshape{10}{\magstep3}
+\setfont\chapsc\scbshape{10}{\magstep3}{OT1}
\font\chapi=cmmi12 scaled \magstep2
\font\chapsy=cmsy10 scaled \magstep3
+\def\chapecsize{1728}
% Section fonts (14.4pt).
\def\secnominalsize{14pt}
-\setfont\secrm\rmbshape{12}{\magstep1}
-\setfont\secit\itbshape{10}{\magstep2}
-\setfont\secsl\slbshape{10}{\magstep2}
-\setfont\sectt\ttbshape{12}{\magstep1}
-\setfont\secttsl\ttslshape{10}{\magstep2}
-\setfont\secsf\sfbshape{12}{\magstep1}
+\setfont\secrm\rmbshape{12}{\magstep1}{OT1}
+\setfont\secit\itbshape{10}{\magstep2}{OT1IT}
+\setfont\secsl\slbshape{10}{\magstep2}{OT1}
+\setfont\sectt\ttbshape{12}{\magstep1}{OT1TT}
+\setfont\secttsl\ttslshape{10}{\magstep2}{OT1TT}
+\setfont\secsf\sfbshape{12}{\magstep1}{OT1}
\let\secbf\secrm
-\setfont\secsc\scbshape{10}{\magstep2}
+\setfont\secsc\scbshape{10}{\magstep2}{OT1}
\font\seci=cmmi12 scaled \magstep1
\font\secsy=cmsy10 scaled \magstep2
+\def\sececsize{1440}
% Subsection fonts (13.15pt).
\def\ssecnominalsize{13pt}
-\setfont\ssecrm\rmbshape{12}{\magstephalf}
-\setfont\ssecit\itbshape{10}{1315}
-\setfont\ssecsl\slbshape{10}{1315}
-\setfont\ssectt\ttbshape{12}{\magstephalf}
-\setfont\ssecttsl\ttslshape{10}{1315}
-\setfont\ssecsf\sfbshape{12}{\magstephalf}
+\setfont\ssecrm\rmbshape{12}{\magstephalf}{OT1}
+\setfont\ssecit\itbshape{10}{1315}{OT1IT}
+\setfont\ssecsl\slbshape{10}{1315}{OT1}
+\setfont\ssectt\ttbshape{12}{\magstephalf}{OT1TT}
+\setfont\ssecttsl\ttslshape{10}{1315}{OT1TT}
+\setfont\ssecsf\sfbshape{12}{\magstephalf}{OT1}
\let\ssecbf\ssecrm
-\setfont\ssecsc\scbshape{10}{1315}
+\setfont\ssecsc\scbshape{10}{1315}{OT1}
\font\sseci=cmmi12 scaled \magstephalf
\font\ssecsy=cmsy10 scaled 1315
+\def\ssececsize{1200}
% Reduced fonts for @acro in text (10pt).
\def\reducednominalsize{10pt}
-\setfont\reducedrm\rmshape{10}{1000}
-\setfont\reducedtt\ttshape{10}{1000}
-\setfont\reducedbf\bfshape{10}{1000}
-\setfont\reducedit\itshape{10}{1000}
-\setfont\reducedsl\slshape{10}{1000}
-\setfont\reducedsf\sfshape{10}{1000}
-\setfont\reducedsc\scshape{10}{1000}
-\setfont\reducedttsl\ttslshape{10}{1000}
+\setfont\reducedrm\rmshape{10}{1000}{OT1}
+\setfont\reducedtt\ttshape{10}{1000}{OT1TT}
+\setfont\reducedbf\bfshape{10}{1000}{OT1}
+\setfont\reducedit\itshape{10}{1000}{OT1IT}
+\setfont\reducedsl\slshape{10}{1000}{OT1}
+\setfont\reducedsf\sfshape{10}{1000}{OT1}
+\setfont\reducedsc\scshape{10}{1000}{OT1}
+\setfont\reducedttsl\ttslshape{10}{1000}{OT1TT}
\font\reducedi=cmmi10
\font\reducedsy=cmsy10
+\def\reducedecsize{1000}
+
+\textleading = 13.2pt % line spacing for 11pt CM
+\textfonts % reset the current fonts
+\rm
+} % end of 11pt text font size definitions, \definetextfontsizexi
+
+
+% Definitions to make the main text be 10pt Computer Modern, with
+% section, chapter, etc., sizes following suit. This is for the GNU
+% Press printing of the Emacs 22 manual. Maybe other manuals in the
+% future. Used with @smallbook, which sets the leading to 12pt.
+%
+\def\definetextfontsizex{%
+% Text fonts (10pt).
+\def\textnominalsize{10pt}
+\edef\mainmagstep{1000}
+\setfont\textrm\rmshape{10}{\mainmagstep}{OT1}
+\setfont\texttt\ttshape{10}{\mainmagstep}{OT1TT}
+\setfont\textbf\bfshape{10}{\mainmagstep}{OT1}
+\setfont\textit\itshape{10}{\mainmagstep}{OT1IT}
+\setfont\textsl\slshape{10}{\mainmagstep}{OT1}
+\setfont\textsf\sfshape{10}{\mainmagstep}{OT1}
+\setfont\textsc\scshape{10}{\mainmagstep}{OT1}
+\setfont\textttsl\ttslshape{10}{\mainmagstep}{OT1TT}
+\font\texti=cmmi10 scaled \mainmagstep
+\font\textsy=cmsy10 scaled \mainmagstep
+\def\textecsize{1000}
+
+% A few fonts for @defun names and args.
+\setfont\defbf\bfshape{10}{\magstephalf}{OT1}
+\setfont\deftt\ttshape{10}{\magstephalf}{OT1TT}
+\setfont\defttsl\ttslshape{10}{\magstephalf}{OT1TT}
+\def\df{\let\tentt=\deftt \let\tenbf = \defbf \let\tenttsl=\defttsl \bf}
+
+% Fonts for indices, footnotes, small examples (9pt).
+\def\smallnominalsize{9pt}
+\setfont\smallrm\rmshape{9}{1000}{OT1}
+\setfont\smalltt\ttshape{9}{1000}{OT1TT}
+\setfont\smallbf\bfshape{10}{900}{OT1}
+\setfont\smallit\itshape{9}{1000}{OT1IT}
+\setfont\smallsl\slshape{9}{1000}{OT1}
+\setfont\smallsf\sfshape{9}{1000}{OT1}
+\setfont\smallsc\scshape{10}{900}{OT1}
+\setfont\smallttsl\ttslshape{10}{900}{OT1TT}
+\font\smalli=cmmi9
+\font\smallsy=cmsy9
+\def\smallecsize{0900}
+
+% Fonts for small examples (8pt).
+\def\smallernominalsize{8pt}
+\setfont\smallerrm\rmshape{8}{1000}{OT1}
+\setfont\smallertt\ttshape{8}{1000}{OT1TT}
+\setfont\smallerbf\bfshape{10}{800}{OT1}
+\setfont\smallerit\itshape{8}{1000}{OT1IT}
+\setfont\smallersl\slshape{8}{1000}{OT1}
+\setfont\smallersf\sfshape{8}{1000}{OT1}
+\setfont\smallersc\scshape{10}{800}{OT1}
+\setfont\smallerttsl\ttslshape{10}{800}{OT1TT}
+\font\smalleri=cmmi8
+\font\smallersy=cmsy8
+\def\smallerecsize{0800}
+
+% Fonts for title page (20.4pt):
+\def\titlenominalsize{20pt}
+\setfont\titlerm\rmbshape{12}{\magstep3}{OT1}
+\setfont\titleit\itbshape{10}{\magstep4}{OT1IT}
+\setfont\titlesl\slbshape{10}{\magstep4}{OT1}
+\setfont\titlett\ttbshape{12}{\magstep3}{OT1TT}
+\setfont\titlettsl\ttslshape{10}{\magstep4}{OT1TT}
+\setfont\titlesf\sfbshape{17}{\magstep1}{OT1}
+\let\titlebf=\titlerm
+\setfont\titlesc\scbshape{10}{\magstep4}{OT1}
+\font\titlei=cmmi12 scaled \magstep3
+\font\titlesy=cmsy10 scaled \magstep4
+\def\titleecsize{2074}
+
+% Chapter fonts (14.4pt).
+\def\chapnominalsize{14pt}
+\setfont\chaprm\rmbshape{12}{\magstep1}{OT1}
+\setfont\chapit\itbshape{10}{\magstep2}{OT1IT}
+\setfont\chapsl\slbshape{10}{\magstep2}{OT1}
+\setfont\chaptt\ttbshape{12}{\magstep1}{OT1TT}
+\setfont\chapttsl\ttslshape{10}{\magstep2}{OT1TT}
+\setfont\chapsf\sfbshape{12}{\magstep1}{OT1}
+\let\chapbf\chaprm
+\setfont\chapsc\scbshape{10}{\magstep2}{OT1}
+\font\chapi=cmmi12 scaled \magstep1
+\font\chapsy=cmsy10 scaled \magstep2
+\def\chapecsize{1440}
+
+% Section fonts (12pt).
+\def\secnominalsize{12pt}
+\setfont\secrm\rmbshape{12}{1000}{OT1}
+\setfont\secit\itbshape{10}{\magstep1}{OT1IT}
+\setfont\secsl\slbshape{10}{\magstep1}{OT1}
+\setfont\sectt\ttbshape{12}{1000}{OT1TT}
+\setfont\secttsl\ttslshape{10}{\magstep1}{OT1TT}
+\setfont\secsf\sfbshape{12}{1000}{OT1}
+\let\secbf\secrm
+\setfont\secsc\scbshape{10}{\magstep1}{OT1}
+\font\seci=cmmi12
+\font\secsy=cmsy10 scaled \magstep1
+\def\sececsize{1200}
+
+% Subsection fonts (10pt).
+\def\ssecnominalsize{10pt}
+\setfont\ssecrm\rmbshape{10}{1000}{OT1}
+\setfont\ssecit\itbshape{10}{1000}{OT1IT}
+\setfont\ssecsl\slbshape{10}{1000}{OT1}
+\setfont\ssectt\ttbshape{10}{1000}{OT1TT}
+\setfont\ssecttsl\ttslshape{10}{1000}{OT1TT}
+\setfont\ssecsf\sfbshape{10}{1000}{OT1}
+\let\ssecbf\ssecrm
+\setfont\ssecsc\scbshape{10}{1000}{OT1}
+\font\sseci=cmmi10
+\font\ssecsy=cmsy10
+\def\ssececsize{1000}
+
+% Reduced fonts for @acro in text (9pt).
+\def\reducednominalsize{9pt}
+\setfont\reducedrm\rmshape{9}{1000}{OT1}
+\setfont\reducedtt\ttshape{9}{1000}{OT1TT}
+\setfont\reducedbf\bfshape{10}{900}{OT1}
+\setfont\reducedit\itshape{9}{1000}{OT1IT}
+\setfont\reducedsl\slshape{9}{1000}{OT1}
+\setfont\reducedsf\sfshape{9}{1000}{OT1}
+\setfont\reducedsc\scshape{10}{900}{OT1}
+\setfont\reducedttsl\ttslshape{10}{900}{OT1TT}
+\font\reducedi=cmmi9
+\font\reducedsy=cmsy9
+\def\reducedecsize{0900}
+
+\divide\parskip by 2 % reduce space between paragraphs
+\textleading = 12pt % line spacing for 10pt CM
+\textfonts % reset the current fonts
+\rm
+} % end of 10pt text font size definitions, \definetextfontsizex
+
+
+% We provide the user-level command
+% @fonttextsize 10
+% (or 11) to redefine the text font size. pt is assumed.
+%
+\def\xiword{11}
+\def\xword{10}
+\def\xwordpt{10pt}
+%
+\parseargdef\fonttextsize{%
+ \def\textsizearg{#1}%
+ %\wlog{doing @fonttextsize \textsizearg}%
+ %
+ % Set \globaldefs so that documents can use this inside @tex, since
+ % makeinfo 4.8 does not support it, but we need it nonetheless.
+ %
+ \begingroup \globaldefs=1
+ \ifx\textsizearg\xword \definetextfontsizex
+ \else \ifx\textsizearg\xiword \definetextfontsizexi
+ \else
+ \errhelp=\EMsimple
+ \errmessage{@fonttextsize only supports `10' or `11', not `\textsizearg'}
+ \fi\fi
+ \endgroup
+}
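+
+% For example, a manual that wants 10pt body text (as for @smallbook
+% printings) would put, near the beginning of its source:
+%   @fonttextsize 10
+% The default, used when @fonttextsize is absent, remains 11pt.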
+
% In order for the font changes to affect most math symbols and letters,
% we have to define the \textfont of the standard families. Since
@@ -1681,8 +2124,8 @@ where each line of input produces a line of output.}
\let\tenttsl=\titlettsl
\def\curfontsize{title}%
\def\lsize{chap}\def\lllsize{subsec}%
- \resetmathfonts \setleading{25pt}}
-\def\titlefont#1{{\titlefonts\rm #1}}
+ \resetmathfonts \setleading{27pt}}
+\def\titlefont#1{{\titlefonts\rmisbold #1}}
\def\chapfonts{%
\let\tenrm=\chaprm \let\tenit=\chapit \let\tensl=\chapsl
\let\tenbf=\chapbf \let\tentt=\chaptt \let\smallcaps=\chapsc
@@ -1733,6 +2176,16 @@ where each line of input produces a line of output.}
\def\lsize{smaller}\def\lllsize{smaller}%
\resetmathfonts \setleading{9.5pt}}
+% Fonts for short table of contents.
+\setfont\shortcontrm\rmshape{12}{1000}{OT1}
+\setfont\shortcontbf\bfshape{10}{\magstep1}{OT1} % no cmb12
+\setfont\shortcontsl\slshape{12}{1000}{OT1}
+\setfont\shortconttt\ttshape{12}{1000}{OT1TT}
+
+% Define these just so they can be easily changed for other fonts.
+\def\angleleft{$\langle$}
+\def\angleright{$\rangle$}
+
% Set the fonts to use with the @small... environments.
\let\smallexamplefonts = \smallfonts
@@ -1746,53 +2199,215 @@ where each line of input produces a line of output.}
%
% By the way, for comparison, here's what fits with @example (10pt):
% 8.5x11=71 smallbook=60 a4=75 a5=58
-%
-% I wish the USA used A4 paper.
% --karl, 24jan03.
-
% Set up the default fonts, so we can use them for creating boxes.
%
-\textfonts \rm
+\definetextfontsizexi
-% Define these so they can be easily changed for other fonts.
-\def\angleleft{$\langle$}
-\def\angleright{$\rangle$}
+
+\message{markup,}
+
+% Check if we are currently using a typewriter font. Since all the
+% Computer Modern typewriter fonts have zero interword stretch (and
+% shrink), and it is reasonable to expect all typewriter fonts to have
+% this property, we can check that font parameter.
+%
+\def\ifmonospace{\ifdim\fontdimen3\font=0pt }
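+%
+% Usage sketch: \ifmonospace is a plain \ifdim test, so it is used as
+%   \ifmonospace <typewriter case> \else <other case> \fi
+% as in the \key and brace-character definitions further down.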
+
+% Markup style infrastructure. \defmarkupstylesetup\INITMACRO will
+% define and register \INITMACRO to be called on markup style changes.
+% \INITMACRO can check \currentmarkupstyle for the innermost
+% style and the set of \ifmarkupSTYLE switches for all styles
+% currently in effect.
+\newif\ifmarkupvar
+\newif\ifmarkupsamp
+\newif\ifmarkupkey
+%\newif\ifmarkupfile % @file == @samp.
+%\newif\ifmarkupoption % @option == @samp.
+\newif\ifmarkupcode
+\newif\ifmarkupkbd
+%\newif\ifmarkupenv % @env == @code.
+%\newif\ifmarkupcommand % @command == @code.
+\newif\ifmarkuptex % @tex (and part of @math, for now).
+\newif\ifmarkupexample
+\newif\ifmarkupverb
+\newif\ifmarkupverbatim
+
+\let\currentmarkupstyle\empty
+
+\def\setupmarkupstyle#1{%
+ \csname markup#1true\endcsname
+ \def\currentmarkupstyle{#1}%
+ \markupstylesetup
+}
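+
+% For example, \setupmarkupstyle{code} (as called from @code below) expands to
+%   \markupcodetrue \def\currentmarkupstyle{code}\markupstylesetup
+% all local to the group opened by the command using it.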
+
+\let\markupstylesetup\empty
+
+\def\defmarkupstylesetup#1{%
+ \expandafter\def\expandafter\markupstylesetup
+ \expandafter{\markupstylesetup #1}%
+ \def#1%
+}
+
+% Markup style setup for left and right quotes.
+\defmarkupstylesetup\markupsetuplq{%
+ \expandafter\let\expandafter \temp
+ \csname markupsetuplq\currentmarkupstyle\endcsname
+ \ifx\temp\relax \markupsetuplqdefault \else \temp \fi
+}
+
+\defmarkupstylesetup\markupsetuprq{%
+ \expandafter\let\expandafter \temp
+ \csname markupsetuprq\currentmarkupstyle\endcsname
+ \ifx\temp\relax \markupsetuprqdefault \else \temp \fi
+}
+
+{
+\catcode`\'=\active
+\catcode`\`=\active
+
+\gdef\markupsetuplqdefault{\let`\lq}
+\gdef\markupsetuprqdefault{\let'\rq}
+
+\gdef\markupsetcodequoteleft{\let`\codequoteleft}
+\gdef\markupsetcodequoteright{\let'\codequoteright}
+
+\gdef\markupsetnoligaturesquoteleft{\let`\noligaturesquoteleft}
+}
+
+\let\markupsetuplqcode \markupsetcodequoteleft
+\let\markupsetuprqcode \markupsetcodequoteright
+%
+\let\markupsetuplqexample \markupsetcodequoteleft
+\let\markupsetuprqexample \markupsetcodequoteright
+%
+\let\markupsetuplqsamp \markupsetcodequoteleft
+\let\markupsetuprqsamp \markupsetcodequoteright
+%
+\let\markupsetuplqverb \markupsetcodequoteleft
+\let\markupsetuprqverb \markupsetcodequoteright
+%
+\let\markupsetuplqverbatim \markupsetcodequoteleft
+\let\markupsetuprqverbatim \markupsetcodequoteright
+
+\let\markupsetuplqkbd \markupsetnoligaturesquoteleft
+
+% Allow an option to not use regular directed right quote/apostrophe
+% (char 0x27), but instead the undirected quote from cmtt (char 0x0d).
+% The undirected quote is ugly, so don't make it the default, but it
+% works for pasting with more pdf viewers (at least evince), the
+% lilypond developers report. xpdf does work with the regular 0x27.
+%
+\def\codequoteright{%
+ \expandafter\ifx\csname SETtxicodequoteundirected\endcsname\relax
+ \expandafter\ifx\csname SETcodequoteundirected\endcsname\relax
+ '%
+ \else \char'15 \fi
+ \else \char'15 \fi
+}
+%
+% and a similar option for the left quote char vs. a grave accent.
+% Modern fonts display ASCII 0x60 as a grave accent, so some people like
+% the code environments to do likewise.
+%
+\def\codequoteleft{%
+ \expandafter\ifx\csname SETtxicodequotebacktick\endcsname\relax
+ \expandafter\ifx\csname SETcodequotebacktick\endcsname\relax
+ % [Knuth] pp. 380,381,391
+ % \relax disables Spanish ligatures ?` and !` of \tt font.
+ \relax`%
+ \else \char'22 \fi
+ \else \char'22 \fi
+}
+
+% Commands to set the quote options.
+%
+\parseargdef\codequoteundirected{%
+ \def\temp{#1}%
+ \ifx\temp\onword
+ \expandafter\let\csname SETtxicodequoteundirected\endcsname
+ = t%
+ \else\ifx\temp\offword
+ \expandafter\let\csname SETtxicodequoteundirected\endcsname
+ = \relax
+ \else
+ \errhelp = \EMsimple
+ \errmessage{Unknown @codequoteundirected value `\temp', must be on|off}%
+ \fi\fi
+}
+%
+\parseargdef\codequotebacktick{%
+ \def\temp{#1}%
+ \ifx\temp\onword
+ \expandafter\let\csname SETtxicodequotebacktick\endcsname
+ = t%
+ \else\ifx\temp\offword
+ \expandafter\let\csname SETtxicodequotebacktick\endcsname
+ = \relax
+ \else
+ \errhelp = \EMsimple
+ \errmessage{Unknown @codequotebacktick value `\temp', must be on|off}%
+ \fi\fi
+}
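+
+% For example, a manual that prefers the undirected quote and the grave
+% accent inside code can say, in its Texinfo source:
+%   @codequoteundirected on
+%   @codequotebacktick on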
+
+% [Knuth] pp. 380,381,391, disable Spanish ligatures ?` and !` of \tt font.
+\def\noligaturesquoteleft{\relax\lq}
% Count depth in font-changes, for error checks
\newcount\fontdepth \fontdepth=0
-% Fonts for short table of contents.
-\setfont\shortcontrm\rmshape{12}{1000}
-\setfont\shortcontbf\bfshape{10}{\magstep1} % no cmb12
-\setfont\shortcontsl\slshape{12}{1000}
-\setfont\shortconttt\ttshape{12}{1000}
-
-%% Add scribe-like font environments, plus @l for inline lisp (usually sans
-%% serif) and @ii for TeX italic
-
-% \smartitalic{ARG} outputs arg in italics, followed by an italic correction
-% unless the following character is such as not to need one.
-\def\smartitalicx{\ifx\next,\else\ifx\next-\else\ifx\next.\else
- \ptexslash\fi\fi\fi}
-\def\smartslanted#1{{\ifusingtt\ttsl\sl #1}\futurelet\next\smartitalicx}
-\def\smartitalic#1{{\ifusingtt\ttsl\it #1}\futurelet\next\smartitalicx}
-
-% like \smartslanted except unconditionally uses \ttsl.
+% Font commands.
+
+% #1 is the font command (\sl or \it), #2 is the text to slant.
+% If we are in a monospaced environment, however, 1) always use \ttsl,
+% and 2) do not add an italic correction.
+\def\dosmartslant#1#2{%
+ \ifusingtt
+ {{\ttsl #2}\let\next=\relax}%
+ {\def\next{{#1#2}\futurelet\next\smartitaliccorrection}}%
+ \next
+}
+\def\smartslanted{\dosmartslant\sl}
+\def\smartitalic{\dosmartslant\it}
+
+% Output an italic correction unless \next (presumed to be the following
+% character) is such as not to need one.
+\def\smartitaliccorrection{%
+ \ifx\next,%
+ \else\ifx\next-%
+ \else\ifx\next.%
+ \else\ptexslash
+ \fi\fi\fi
+ \aftersmartic
+}
+
+% Like \smartslanted, except it unconditionally uses \ttsl and adds no
+% italic correction.
% @var is set to this for defun arguments.
-\def\ttslanted#1{{\ttsl #1}\futurelet\next\smartitalicx}
+\def\ttslanted#1{{\ttsl #1}}
-% like \smartslanted except unconditionally use \sl. We never want
+% @cite is like \smartslanted, except it unconditionally uses \sl.  We never want
% ttsl for book titles, do we?
-\def\cite#1{{\sl #1}\futurelet\next\smartitalicx}
+\def\cite#1{{\sl #1}\futurelet\next\smartitaliccorrection}
+
+\def\aftersmartic{}
+\def\var#1{%
+ \let\saveaftersmartic = \aftersmartic
+ \def\aftersmartic{\null\let\aftersmartic=\saveaftersmartic}%
+ \smartslanted{#1}%
+}
\let\i=\smartitalic
\let\slanted=\smartslanted
-\let\var=\smartslanted
\let\dfn=\smartslanted
\let\emph=\smartitalic
-% @b, explicit bold.
+% Explicit font changes: @r, @sc, undocumented @ii.
+\def\r#1{{\rm #1}} % roman font
+\def\sc#1{{\smallcaps#1}} % smallcaps font
+\def\ii#1{{\it #1}} % italic font
+
+% @b, explicit bold. Also @strong.
\def\b#1{{\bf #1}}
\let\strong=\b
@@ -1824,21 +2439,35 @@ where each line of input produces a line of output.}
\catcode`@=\other
\def\endofsentencespacefactor{3000}% default
+% @t, explicit typewriter.
\def\t#1{%
{\tt \rawbackslash \plainfrenchspacing #1}%
\null
}
-\def\samp#1{`\tclose{#1}'\null}
-\setfont\keyrm\rmshape{8}{1000}
-\font\keysy=cmsy9
-\def\key#1{{\keyrm\textfont2=\keysy \leavevmode\hbox{%
- \raise0.4pt\hbox{\angleleft}\kern-.08em\vtop{%
- \vbox{\hrule\kern-0.4pt
- \hbox{\raise0.4pt\hbox{\vphantom{\angleleft}}#1}}%
- \kern-0.4pt\hrule}%
- \kern-.06em\raise0.4pt\hbox{\angleright}}}}
-% The old definition, with no lozenge:
-%\def\key #1{{\ttsl \nohyphenation \uppercase{#1}}\null}
+
+% @samp.
+\def\samp#1{{\setupmarkupstyle{samp}\lq\tclose{#1}\rq\null}}
+
+% definition of @key that produces a lozenge. Doesn't adjust to text size.
+%\setfont\keyrm\rmshape{8}{1000}{OT1}
+%\font\keysy=cmsy9
+%\def\key#1{{\keyrm\textfont2=\keysy \leavevmode\hbox{%
+% \raise0.4pt\hbox{\angleleft}\kern-.08em\vtop{%
+% \vbox{\hrule\kern-0.4pt
+% \hbox{\raise0.4pt\hbox{\vphantom{\angleleft}}#1}}%
+% \kern-0.4pt\hrule}%
+% \kern-.06em\raise0.4pt\hbox{\angleright}}}}
+
+% definition of @key with no lozenge. If the current font is already
+% monospace, don't change it; that way, we respect @kbdinputstyle. But
+% if it isn't monospace, then use \tt.
+%
+\def\key#1{{\setupmarkupstyle{key}%
+ \nohyphenation
+ \ifmonospace\else\tt\fi
+ #1}\null}
+
+% ctrl is no longer a Texinfo command.
\def\ctrl #1{{\tt \rawbackslash \hat}#1}
% @file, @option are the same as @samp.
@@ -1865,7 +2494,7 @@ where each line of input produces a line of output.}
\plainfrenchspacing
#1%
}%
- \null
+ \null % reset spacefactor to 1000
}
% We *must* turn on hyphenation at `-' and `_' in @code.
@@ -1878,11 +2507,14 @@ where each line of input produces a line of output.}
% and arrange explicitly to hyphenate at a dash.
% -- rms.
{
- \catcode`\-=\active
- \catcode`\_=\active
+ \catcode`\-=\active \catcode`\_=\active
+ \catcode`\'=\active \catcode`\`=\active
+ \global\let'=\rq \global\let`=\lq % default definitions
%
\global\def\code{\begingroup
- \catcode`\-=\active \catcode`\_=\active
+ \setupmarkupstyle{code}%
+ % The following should really be moved into \setupmarkupstyle handlers.
+ \catcode\dashChar=\active \catcode\underChar=\active
\ifallowcodebreaks
\let-\codedash
\let_\codeunder
@@ -1894,6 +2526,8 @@ where each line of input produces a line of output.}
}
}
+\def\codex #1{\tclose{#1}\endgroup}
+
\def\realdash{-}
\def\codedash{-\discretionary{}{}{}}
\def\codeunder{%
@@ -1907,13 +2541,12 @@ where each line of input produces a line of output.}
\discretionary{}{}{}}%
{\_}%
}
-\def\codex #1{\tclose{#1}\endgroup}
% An additional complication: the above will allow breaks after, e.g.,
% each of the four underscores in __typeof__. This is undesirable in
% some manuals, especially if they don't have long identifiers in
% general. @allowcodebreaks provides a way to control this.
-%
+%
\newif\ifallowcodebreaks \allowcodebreakstrue
\def\keywordtrue{true}
@@ -1927,55 +2560,18 @@ where each line of input produces a line of output.}
\allowcodebreaksfalse
\else
\errhelp = \EMsimple
- \errmessage{Unknown @allowcodebreaks option `\txiarg'}%
+ \errmessage{Unknown @allowcodebreaks option `\txiarg', must be true|false}%
\fi\fi
}
-% @kbd is like @code, except that if the argument is just one @key command,
-% then @kbd has no effect.
-
-% @kbdinputstyle -- arg is `distinct' (@kbd uses slanted tty font always),
-% `example' (@kbd uses ttsl only inside of @example and friends),
-% or `code' (@kbd uses normal tty font always).
-\parseargdef\kbdinputstyle{%
- \def\txiarg{#1}%
- \ifx\txiarg\worddistinct
- \gdef\kbdexamplefont{\ttsl}\gdef\kbdfont{\ttsl}%
- \else\ifx\txiarg\wordexample
- \gdef\kbdexamplefont{\ttsl}\gdef\kbdfont{\tt}%
- \else\ifx\txiarg\wordcode
- \gdef\kbdexamplefont{\tt}\gdef\kbdfont{\tt}%
- \else
- \errhelp = \EMsimple
- \errmessage{Unknown @kbdinputstyle option `\txiarg'}%
- \fi\fi\fi
-}
-\def\worddistinct{distinct}
-\def\wordexample{example}
-\def\wordcode{code}
-
-% Default is `distinct.'
-\kbdinputstyle distinct
-
-\def\xkey{\key}
-\def\kbdfoo#1#2#3\par{\def\one{#1}\def\three{#3}\def\threex{??}%
-\ifx\one\xkey\ifx\threex\three \key{#2}%
-\else{\tclose{\kbdfont\look}}\fi
-\else{\tclose{\kbdfont\look}}\fi}
-
-% For @indicateurl, @env, @command quotes seem unnecessary, so use \code.
-\let\indicateurl=\code
-\let\env=\code
-\let\command=\code
-
% @uref (abbreviation for `urlref') takes an optional (comma-separated)
% second argument specifying the text to display and an optional third
% arg as text to display instead of (rather than in addition to) the url
-% itself. First (mandatory) arg is the url. Perhaps eventually put in
-% a hypertex \special here.
-%
-\def\uref#1{\douref #1,,,\finish}
-\def\douref#1,#2,#3,#4\finish{\begingroup
+% itself. First (mandatory) arg is the url.
+% (This \urefnobreak definition isn't used now, leaving it for a while
+% for comparison.)
+\def\urefnobreak#1{\dourefnobreak #1,,,\finish}
+\def\dourefnobreak#1,#2,#3,#4\finish{\begingroup
\unsepspaces
\pdfurl{#1}%
\setbox0 = \hbox{\ignorespaces #3}%
@@ -1996,6 +2592,103 @@ where each line of input produces a line of output.}
\endlink
\endgroup}
+% This \urefbreak definition is the active one.
+\def\urefbreak{\begingroup \urefcatcodes \dourefbreak}
+\let\uref=\urefbreak
+\def\dourefbreak#1{\urefbreakfinish #1,,,\finish}
+\def\urefbreakfinish#1,#2,#3,#4\finish{% doesn't work in @example
+ \unsepspaces
+ \pdfurl{#1}%
+ \setbox0 = \hbox{\ignorespaces #3}%
+ \ifdim\wd0 > 0pt
+ \unhbox0 % third arg given, show only that
+ \else
+ \setbox0 = \hbox{\ignorespaces #2}%
+ \ifdim\wd0 > 0pt
+ \ifpdf
+ \unhbox0 % PDF: 2nd arg given, show only it
+ \else
+ \unhbox0\ (\urefcode{#1})% DVI: 2nd arg given, show both it and url
+ \fi
+ \else
+ \urefcode{#1}% only url given, so show it
+ \fi
+ \fi
+ \endlink
+\endgroup}
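+
+% Usage sketch, from the Texinfo source side:
+%   @uref{http://www.gnu.org/}                    url alone
+%   @uref{http://www.gnu.org/, the GNU project}   url, plus text to display
+%   @uref{http://www.gnu.org/,, GNU}              third arg shown instead of url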
+
+% Allow line breaks around only a few characters.
+\def\urefcatcodes{%
+ \catcode\ampChar=\active \catcode\dotChar=\active
+ \catcode\hashChar=\active \catcode\questChar=\active
+ \catcode\slashChar=\active
+}
+{
+ \urefcatcodes
+ %
+ \global\def\urefcode{\begingroup
+ \setupmarkupstyle{code}%
+ \urefcatcodes
+ \let&\urefcodeamp
+ \let.\urefcodedot
+ \let#\urefcodehash
+ \let?\urefcodequest
+ \let/\urefcodeslash
+ \codex
+ }
+ %
+ % By default, they are just regular characters.
+ \global\def&{\normalamp}
+ \global\def.{\normaldot}
+ \global\def#{\normalhash}
+ \global\def?{\normalquest}
+ \global\def/{\normalslash}
+}
+
+% We put a little stretch before and after the breakable chars, to help
+% the line breaking of long urls.  The unequal skips make it look better in
+% cmtt at least, especially for dots.
+\def\urefprestretch{\urefprebreak \hskip0pt plus.13em }
+\def\urefpoststretch{\urefpostbreak \hskip0pt plus.1em }
+%
+\def\urefcodeamp{\urefprestretch \&\urefpoststretch}
+\def\urefcodedot{\urefprestretch .\urefpoststretch}
+\def\urefcodehash{\urefprestretch \#\urefpoststretch}
+\def\urefcodequest{\urefprestretch ?\urefpoststretch}
+\def\urefcodeslash{\futurelet\next\urefcodeslashfinish}
+{
+ \catcode`\/=\active
+ \global\def\urefcodeslashfinish{%
+ \urefprestretch \slashChar
+ % Allow line break only after the final / in a sequence of
+ % slashes, to avoid line break between the slashes in http://.
+ \ifx\next/\else \urefpoststretch \fi
+ }
+}
+
+% One more complication: by default we'll break after the special
+% characters, but some people like to break before the special chars, so
+% allow that. Also allow no breaking at all, for manual control.
+%
+\parseargdef\urefbreakstyle{%
+ \def\txiarg{#1}%
+ \ifx\txiarg\wordnone
+ \def\urefprebreak{\nobreak}\def\urefpostbreak{\nobreak}
+ \else\ifx\txiarg\wordbefore
+ \def\urefprebreak{\allowbreak}\def\urefpostbreak{\nobreak}
+ \else\ifx\txiarg\wordafter
+ \def\urefprebreak{\nobreak}\def\urefpostbreak{\allowbreak}
+ \else
+ \errhelp = \EMsimple
+ \errmessage{Unknown @urefbreakstyle setting `\txiarg'}%
+ \fi\fi\fi
+}
+\def\wordafter{after}
+\def\wordbefore{before}
+\def\wordnone{none}
+
+\urefbreakstyle after
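+
+% For example, @urefbreakstyle before moves the allowed break to just
+% before each special character, and @urefbreakstyle none suppresses
+% these extra break points entirely.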
+
% @url synonym for @uref, since that's how everyone uses it.
%
\let\url=\uref
@@ -2017,34 +2710,65 @@ where each line of input produces a line of output.}
\let\email=\uref
\fi
-% Check if we are currently using a typewriter font. Since all the
-% Computer Modern typewriter fonts have zero interword stretch (and
-% shrink), and it is reasonable to expect all typewriter fonts to have
-% this property, we can check that font parameter.
-%
-\def\ifmonospace{\ifdim\fontdimen3\font=0pt }
+% @kbd is like @code, except that if the argument is just one @key command,
+% then @kbd has no effect.
+\def\kbd#1{{\setupmarkupstyle{kbd}\def\look{#1}\expandafter\kbdfoo\look??\par}}
+
+% @kbdinputstyle -- arg is `distinct' (@kbd uses slanted tty font always),
+% `example' (@kbd uses ttsl only inside of @example and friends),
+% or `code' (@kbd uses normal tty font always).
+\parseargdef\kbdinputstyle{%
+ \def\txiarg{#1}%
+ \ifx\txiarg\worddistinct
+ \gdef\kbdexamplefont{\ttsl}\gdef\kbdfont{\ttsl}%
+ \else\ifx\txiarg\wordexample
+ \gdef\kbdexamplefont{\ttsl}\gdef\kbdfont{\tt}%
+ \else\ifx\txiarg\wordcode
+ \gdef\kbdexamplefont{\tt}\gdef\kbdfont{\tt}%
+ \else
+ \errhelp = \EMsimple
+ \errmessage{Unknown @kbdinputstyle setting `\txiarg'}%
+ \fi\fi\fi
+}
+\def\worddistinct{distinct}
+\def\wordexample{example}
+\def\wordcode{code}
+
+% Default is `distinct'.
+\kbdinputstyle distinct
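+
+% For example, a manual that wants @kbd to use the normal typewriter font
+% everywhere (i.e., to look like @code) can say:
+%   @kbdinputstyle code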
+
+\def\xkey{\key}
+\def\kbdfoo#1#2#3\par{\def\one{#1}\def\three{#3}\def\threex{??}%
+\ifx\one\xkey\ifx\threex\three \key{#2}%
+\else{\tclose{\kbdfont\setupmarkupstyle{kbd}\look}}\fi
+\else{\tclose{\kbdfont\setupmarkupstyle{kbd}\look}}\fi}
+
+% For @indicateurl, @env, @command quotes seem unnecessary, so use \code.
+\let\indicateurl=\code
+\let\env=\code
+\let\command=\code
+
+% @clicksequence{File @click{} Open ...}
+\def\clicksequence#1{\begingroup #1\endgroup}
+
+% @clickstyle @arrow (by default)
+\parseargdef\clickstyle{\def\click{#1}}
+\def\click{\arrow}
% Typeset a dimension, e.g., `in' or `pt'. The only reason for the
% argument is to make the input look right: @dmn{pt} instead of @dmn{}pt.
%
\def\dmn#1{\thinspace #1}
-\def\kbd#1{\def\look{#1}\expandafter\kbdfoo\look??\par}
-
% @l was never documented to mean ``switch to the Lisp font'',
% and it is not used as such in any manual I can find. We need it for
% Polish suppressed-l. --karl, 22sep96.
%\def\l#1{{\li #1}\null}
-% Explicit font changes: @r, @sc, undocumented @ii.
-\def\r#1{{\rm #1}} % roman font
-\def\sc#1{{\smallcaps#1}} % smallcaps font
-\def\ii#1{{\it #1}} % italic font
-
% @acronym for "FBI", "NATO", and the like.
% We print this one point size smaller, since it's intended for
% all-uppercase.
-%
+%
\def\acronym#1{\doacronym #1,,\finish}
\def\doacronym#1,#2,#3\finish{%
{\selectfonts\lsize #1}%
@@ -2052,11 +2776,12 @@ where each line of input produces a line of output.}
\ifx\temp\empty \else
\space ({\unsepspaces \ignorespaces \temp \unskip})%
\fi
+ \null % reset \spacefactor=1000
}
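+%
+% Usage sketch: @acronym{NATO} just prints NATO one point size smaller;
+% @acronym{NATO, North Atlantic Treaty Organization} also prints the
+% expansion in parentheses after it.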
% @abbr for "Comput. J." and the like.
% No font change, but don't do end-of-sentence spacing.
-%
+%
\def\abbr#1{\doabbr #1,,\finish}
\def\doabbr#1,#2,#3\finish{%
{\plainfrenchspacing #1}%
@@ -2064,8 +2789,255 @@ where each line of input produces a line of output.}
\ifx\temp\empty \else
\space ({\unsepspaces \ignorespaces \temp \unskip})%
\fi
+ \null % reset \spacefactor=1000
+}
+
+% @asis just yields its argument. Used with @table, for example.
+%
+\def\asis#1{#1}
+
+% @math outputs its argument in math mode.
+%
+% One complication: _ usually means subscripts, but it could also mean
+% an actual _ character, as in @math{@var{some_variable} + 1}. So make
+% _ active, and distinguish by seeing if the current family is \slfam,
+% which is what @var uses.
+{
+ \catcode`\_ = \active
+ \gdef\mathunderscore{%
+ \catcode`\_=\active
+ \def_{\ifnum\fam=\slfam \_\else\sb\fi}%
+ }
+}
+% Another complication: we want \\ (and @\) to output a math (or tt) \.
+% FYI, plain.tex uses \\ as a temporary control sequence (for no
+% particular reason), but this is not advertised and we don't care.
+%
+% The \mathchar is class=0=ordinary, family=7=ttfam, position=5C=\.
+\def\mathbackslash{\ifnum\fam=\ttfam \mathchar"075C \else\backslash \fi}
+%
+\def\math{%
+ \tex
+ \mathunderscore
+ \let\\ = \mathbackslash
+ \mathactive
+ % make the texinfo accent commands work in math mode
+ \let\"=\ddot
+ \let\'=\acute
+ \let\==\bar
+ \let\^=\hat
+ \let\`=\grave
+ \let\u=\breve
+ \let\v=\check
+ \let\~=\tilde
+ \let\dotaccent=\dot
+ $\finishmath
+}
+\def\finishmath#1{#1$\endgroup} % Close the group opened by \tex.
+
+% Some active characters (such as <) are spaced differently in math.
+% We have to reset their definitions in case the @math was an argument
+% to a command which sets the catcodes (such as @item or @section).
+%
+{
+ \catcode`^ = \active
+ \catcode`< = \active
+ \catcode`> = \active
+ \catcode`+ = \active
+ \catcode`' = \active
+ \gdef\mathactive{%
+ \let^ = \ptexhat
+ \let< = \ptexless
+ \let> = \ptexgtr
+ \let+ = \ptexplus
+ \let' = \ptexquoteright
+ }
}
+% @inlinefmt{FMTNAME,PROCESSED-TEXT} and @inlineraw{FMTNAME,RAW-TEXT}.
+% Ignore unless FMTNAME == tex; then it is like @iftex and @tex,
+% except specified as a normal braced arg, so no newlines to worry about.
+%
+\def\outfmtnametex{tex}
+%
+\long\def\inlinefmt#1{\doinlinefmt #1,\finish}
+\long\def\doinlinefmt#1,#2,\finish{%
+ \def\inlinefmtname{#1}%
+ \ifx\inlinefmtname\outfmtnametex \ignorespaces #2\fi
+}
+% For raw, must switch into @tex before parsing the argument, to avoid
+% setting catcodes prematurely. Doing it this way means that, for
+% example, @inlineraw{html, foo{bar} gets a parse error instead of being
+% ignored. But this isn't important because if people want a literal
+% *right* brace they would have to use a command anyway, so they may as
+% well use a command to get a left brace too. We could re-use the
+% delimiter character idea from \verb, but it seems like overkill.
+%
+\long\def\inlineraw{\tex \doinlineraw}
+\long\def\doinlineraw#1{\doinlinerawtwo #1,\finish}
+\def\doinlinerawtwo#1,#2,\finish{%
+ \def\inlinerawname{#1}%
+ \ifx\inlinerawname\outfmtnametex \ignorespaces #2\fi
+ \endgroup % close group opened by \tex.
+}
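+
+% Usage sketch:
+%   @inlinefmt{tex, this phrase appears only in the @TeX{} output}
+%   @inlineraw{tex, $x^2$}
+% Both expand to nothing here when FMTNAME is anything other than tex.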
+
+
+\message{glyphs,}
+% and logos.
+
+% @@ prints an @, as does @atchar{}.
+\def\@{\char64 }
+\let\atchar=\@
+
+% @{ @} @lbracechar{} @rbracechar{} all generate brace characters.
+% Unless we're in typewriter, use \ecfont because the CM text fonts do
+% not have braces, and we don't want to switch into math.
+\def\mylbrace{{\ifmonospace\else\ecfont\fi \char123}}
+\def\myrbrace{{\ifmonospace\else\ecfont\fi \char125}}
+\let\{=\mylbrace \let\lbracechar=\{
+\let\}=\myrbrace \let\rbracechar=\}
+\begingroup
+ % Definitions to produce \{ and \} commands for indices,
+ % and @{ and @} for the aux/toc files.
+ \catcode`\{ = \other \catcode`\} = \other
+ \catcode`\[ = 1 \catcode`\] = 2
+ \catcode`\! = 0 \catcode`\\ = \other
+ !gdef!lbracecmd[\{]%
+ !gdef!rbracecmd[\}]%
+ !gdef!lbraceatcmd[@{]%
+ !gdef!rbraceatcmd[@}]%
+!endgroup
+
+% @comma{} to avoid , parsing problems.
+\let\comma = ,
+
+% Accents: @, @dotaccent @ringaccent @ubaraccent @udotaccent
+% Others are defined by plain TeX: @` @' @" @^ @~ @= @u @v @H.
+\let\, = \ptexc
+\let\dotaccent = \ptexdot
+\def\ringaccent#1{{\accent23 #1}}
+\let\tieaccent = \ptext
+\let\ubaraccent = \ptexb
+\let\udotaccent = \d
+
+% Other special characters: @questiondown @exclamdown @ordf @ordm
+% Plain TeX defines: @AA @AE @O @OE @L (plus lowercase versions) @ss.
+\def\questiondown{?`}
+\def\exclamdown{!`}
+\def\ordf{\leavevmode\raise1ex\hbox{\selectfonts\lllsize \underbar{a}}}
+\def\ordm{\leavevmode\raise1ex\hbox{\selectfonts\lllsize \underbar{o}}}
+
+% Dotless i and dotless j, used for accents.
+\def\imacro{i}
+\def\jmacro{j}
+\def\dotless#1{%
+ \def\temp{#1}%
+ \ifx\temp\imacro \ifmmode\imath \else\ptexi \fi
+ \else\ifx\temp\jmacro \ifmmode\jmath \else\j \fi
+ \else \errmessage{@dotless can be used only with i or j}%
+ \fi\fi
+}
+
+% The \TeX{} logo, as in plain, but resetting the spacing so that a
+% period following counts as ending a sentence. (Idea found in latex.)
+%
+\edef\TeX{\TeX \spacefactor=1000 }
+
+% @LaTeX{} logo. Not quite the same results as the definition in
+% latex.ltx, since we use a different font for the raised A; it's most
+% convenient for us to use an explicitly smaller font, rather than using
+% the \scriptstyle font (since we don't reset \scriptstyle and
+% \scriptscriptstyle).
+%
+\def\LaTeX{%
+ L\kern-.36em
+ {\setbox0=\hbox{T}%
+ \vbox to \ht0{\hbox{%
+ \ifx\textnominalsize\xwordpt
+ % for 10pt running text, \lllsize (8pt) is too small for the A in LaTeX.
+ % Revert to plain's \scriptsize, which is 7pt.
+ \count255=\the\fam $\fam\count255 \scriptstyle A$%
+ \else
+ % For 11pt, we can use our lllsize.
+ \selectfonts\lllsize A%
+ \fi
+ }%
+ \vss
+ }}%
+ \kern-.15em
+ \TeX
+}
+
+% Some math mode symbols.
+\def\bullet{$\ptexbullet$}
+\def\geq{\ifmmode \ge\else $\ge$\fi}
+\def\leq{\ifmmode \le\else $\le$\fi}
+\def\minus{\ifmmode -\else $-$\fi}
+
+% @dots{} outputs an ellipsis using the current font.
+% We do .5em per period so that it has the same spacing in the cm
+% typewriter fonts as three actual period characters; on the other hand,
+% in other typewriter fonts three periods are wider than 1.5em. So do
+% whichever is larger.
+%
+\def\dots{%
+ \leavevmode
+ \setbox0=\hbox{...}% get width of three periods
+ \ifdim\wd0 > 1.5em
+ \dimen0 = \wd0
+ \else
+ \dimen0 = 1.5em
+ \fi
+ \hbox to \dimen0{%
+ \hskip 0pt plus.25fil
+ .\hskip 0pt plus1fil
+ .\hskip 0pt plus1fil
+ .\hskip 0pt plus.5fil
+ }%
+}
+
+% @enddots{} is an end-of-sentence ellipsis.
+%
+\def\enddots{%
+ \dots
+ \spacefactor=\endofsentencespacefactor
+}
+
+% @point{}, @result{}, @expansion{}, @print{}, @equiv{}.
+%
+% Since these characters are used in examples, they should be an even number of
+% \tt widths. Each \tt character is 1en, so two makes it 1em.
+%
+\def\point{$\star$}
+\def\arrow{\leavevmode\raise.05ex\hbox to 1em{\hfil$\rightarrow$\hfil}}
+\def\result{\leavevmode\raise.05ex\hbox to 1em{\hfil$\Rightarrow$\hfil}}
+\def\expansion{\leavevmode\hbox to 1em{\hfil$\mapsto$\hfil}}
+\def\print{\leavevmode\lower.1ex\hbox to 1em{\hfil$\dashv$\hfil}}
+\def\equiv{\leavevmode\hbox to 1em{\hfil$\ptexequiv$\hfil}}
+
+% The @error{} command.
+% Adapted from the TeXbook's \boxit.
+%
+\newbox\errorbox
+%
+{\tentt \global\dimen0 = 3em}% Width of the box.
+\dimen2 = .55pt % Thickness of rules
+% The text. (`r' is open on the right, `e' somewhat less so on the left.)
+\setbox0 = \hbox{\kern-.75pt \reducedsf \putworderror\kern-1.5pt}
+%
+\setbox\errorbox=\hbox to \dimen0{\hfil
+ \hsize = \dimen0 \advance\hsize by -5.8pt % Space to left+right.
+ \advance\hsize by -2\dimen2 % Rules.
+ \vbox{%
+ \hrule height\dimen2
+ \hbox{\vrule width\dimen2 \kern3pt % Space to left of text.
+ \vtop{\kern2.4pt \box0 \kern2.4pt}% Space above/below.
+ \kern3pt\vrule width\dimen2}% Space to right.
+ \hrule height\dimen2}
+ \hfil}
+%
+\def\error{\leavevmode\lower.7ex\copy\errorbox}
+
% @pounds{} is a sterling sign, which Knuth put in the CM italic font.
%
\def\pounds{{\it\$}}
@@ -2075,49 +3047,113 @@ where each line of input produces a line of output.}
% Theiling, which support regular, slanted, bold and bold slanted (and
% "outlined" (blackboard board, sort of) versions, which we don't need).
% It is available from http://www.ctan.org/tex-archive/fonts/eurosym.
-%
+%
% Although only regular is the truly official Euro symbol, we ignore
% that. The Euro is designed to be slightly taller than the regular
% font height.
-%
+%
% feymr - regular
% feymo - slanted
% feybr - bold
% feybo - bold slanted
-%
+%
% There is no good (free) typewriter version, to my knowledge.
% A feymr10 euro is ~7.3pt wide, while a normal cmtt10 char is ~5.25pt wide.
% Hmm.
-%
+%
% Also doesn't work in math. Do we need to do math with euro symbols?
% Hope not.
-%
-%
+%
+%
\def\euro{{\eurofont e}}
\def\eurofont{%
% We set the font at each command, rather than predefining it in
% \textfonts and the other font-switching commands, so that
% installations which never need the symbol don't have to have the
% font installed.
- %
+ %
% There is only one designed size (nominal 10pt), so we always scale
% that to the current nominal size.
- %
+ %
% By the way, simply using "at 1em" works for cmr10 and the like, but
% does not work for cmbx10 and other extended/shrunken fonts.
- %
+ %
\def\eurosize{\csname\curfontsize nominalsize\endcsname}%
%
- \ifx\curfontstyle\bfstylename
+ \ifx\curfontstyle\bfstylename
% bold:
\font\thiseurofont = \ifusingit{feybo10}{feybr10} at \eurosize
- \else
+ \else
% regular:
\font\thiseurofont = \ifusingit{feymo10}{feymr10} at \eurosize
\fi
\thiseurofont
}
+% Glyphs from the EC fonts. We don't use \let for the aliases, because
+% sometimes we redefine the original macro, and the alias should reflect
+% the redefinition.
+%
+% Use LaTeX names for the Icelandic letters.
+\def\DH{{\ecfont \char"D0}} % Eth
+\def\dh{{\ecfont \char"F0}} % eth
+\def\TH{{\ecfont \char"DE}} % Thorn
+\def\th{{\ecfont \char"FE}} % thorn
+%
+\def\guillemetleft{{\ecfont \char"13}}
+\def\guillemotleft{\guillemetleft}
+\def\guillemetright{{\ecfont \char"14}}
+\def\guillemotright{\guillemetright}
+\def\guilsinglleft{{\ecfont \char"0E}}
+\def\guilsinglright{{\ecfont \char"0F}}
+\def\quotedblbase{{\ecfont \char"12}}
+\def\quotesinglbase{{\ecfont \char"0D}}
+%
+% This positioning is not perfect (see the ogonek LaTeX package), but
+% we have the precomposed glyphs for the most common cases. We put the
+% tests to use those glyphs in the single \ogonek macro so we have fewer
+% dummy definitions to worry about for index entries, etc.
+%
+% ogonek is also used with other letters in Lithuanian (IOU), but using
+% the precomposed glyphs for those is not so easy since they aren't in
+% the same EC font.
+\def\ogonek#1{{%
+ \def\temp{#1}%
+ \ifx\temp\macrocharA\Aogonek
+ \else\ifx\temp\macrochara\aogonek
+ \else\ifx\temp\macrocharE\Eogonek
+ \else\ifx\temp\macrochare\eogonek
+ \else
+ \ecfont \setbox0=\hbox{#1}%
+ \ifdim\ht0=1ex\accent"0C #1%
+ \else\ooalign{\unhbox0\crcr\hidewidth\char"0C \hidewidth}%
+ \fi
+ \fi\fi\fi\fi
+ }%
+}
+\def\Aogonek{{\ecfont \char"81}}\def\macrocharA{A}
+\def\aogonek{{\ecfont \char"A1}}\def\macrochara{a}
+\def\Eogonek{{\ecfont \char"86}}\def\macrocharE{E}
+\def\eogonek{{\ecfont \char"A6}}\def\macrochare{e}
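+%
+% Usage sketch: @ogonek{a} and @ogonek{e} (and the capitals) use the
+% precomposed EC glyphs above; any other letter falls back to the
+% \accent construction with the ogonek character.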
+%
+% Use the ec* fonts (cm-super in outline format) for non-CM glyphs.
+\def\ecfont{%
+ % We can't distinguish serif/sans and italic/slanted, but this
+ % is used for crude hacks anyway (like adding French and German
+ % quotes to documents typeset with CM, where we lose kerning), so
+ % hopefully nobody will notice/care.
+ \edef\ecsize{\csname\curfontsize ecsize\endcsname}%
+ \edef\nominalsize{\csname\curfontsize nominalsize\endcsname}%
+ \ifx\curfontstyle\bfstylename
+ % bold:
+ \font\thisecfont = ecb\ifusingit{i}{x}\ecsize \space at \nominalsize
+ \else
+ % regular:
+ \font\thisecfont = ec\ifusingit{ti}{rm}\ecsize \space at \nominalsize
+ \fi
+ \thisecfont
+}
+
% @registeredsymbol - R in a circle. The font for the R should really
% be smaller yet, but lllsize is the best we can do for now.
% Adapted from the plain.tex definition of \copyright.
@@ -2128,14 +3164,24 @@ where each line of input produces a line of output.}
}$%
}
+% @textdegree - the normal degrees sign.
+%
+\def\textdegree{$^\circ$}
+
% Laurent Siebenmann reports \Orb undefined with:
% Textures 1.7.7 (preloaded format=plain 93.10.14) (68K) 16 APR 2004 02:38
% so we'll define it if necessary.
-%
-\ifx\Orb\undefined
+%
+\ifx\Orb\thisisundefined
\def\Orb{\mathhexbox20D}
\fi
+% Quotes.
+\chardef\quotedblleft="5C
+\chardef\quotedblright=`\"
+\chardef\quoteleft=`\`
+\chardef\quoteright=`\'
+
\message{page headings,}
@@ -2154,8 +3200,9 @@ where each line of input produces a line of output.}
\newif\ifsetshortcontentsaftertitlepage
\let\setshortcontentsaftertitlepage = \setshortcontentsaftertitlepagetrue
-\parseargdef\shorttitlepage{\begingroup\hbox{}\vskip 1.5in \chaprm \centerline{#1}%
- \endgroup\page\hbox{}\page}
+\parseargdef\shorttitlepage{%
+ \begingroup \hbox{}\vskip 1.5in \chaprm \centerline{#1}%
+ \endgroup\page\hbox{}\page}
\envdef\titlepage{%
% Open one extra group, as we want to close it in the middle of \Etitlepage.
@@ -2215,17 +3262,14 @@ where each line of input produces a line of output.}
\finishedtitlepagetrue
}
-%%% Macros to be used within @titlepage:
+% Macros to be used within @titlepage:
\let\subtitlerm=\tenrm
\def\subtitlefont{\subtitlerm \normalbaselineskip = 13pt \normalbaselines}
-\def\authorfont{\authorrm \normalbaselineskip = 16pt \normalbaselines
- \let\tt=\authortt}
-
\parseargdef\title{%
\checkenv\titlepage
- \leftline{\titlefonts\rm #1}
+ \leftline{\titlefonts\rmisbold #1}
% print a rule at the page bottom also.
\finishedtitlepagefalse
\vskip4pt \hrule height 4pt width \hsize \vskip4pt
@@ -2246,12 +3290,12 @@ where each line of input produces a line of output.}
\else
\checkenv\titlepage
\ifseenauthor\else \vskip 0pt plus 1filll \seenauthortrue \fi
- {\authorfont \leftline{#1}}%
+ {\secfonts\rmisbold \leftline{#1}}%
\fi
}
-%%% Set up page headings and footings.
+% Set up page headings and footings.
\let\thispage=\folio
@@ -2299,12 +3343,39 @@ where each line of input produces a line of output.}
%
% Leave some space for the footline. Hopefully ok to assume
% @evenfooting will not be used by itself.
- \global\advance\pageheight by -\baselineskip
- \global\advance\vsize by -\baselineskip
+ \global\advance\pageheight by -12pt
+ \global\advance\vsize by -12pt
}
\parseargdef\everyfooting{\oddfootingxxx{#1}\evenfootingxxx{#1}}
+% @evenheadingmarks top \thischapter <- chapter at the top of a page
+% @evenheadingmarks bottom \thischapter <- chapter at the bottom of a page
+%
+% The same set of arguments for:
+%
+% @oddheadingmarks
+% @evenfootingmarks
+% @oddfootingmarks
+% @everyheadingmarks
+% @everyfootingmarks
+
+\def\evenheadingmarks{\headingmarks{even}{heading}}
+\def\oddheadingmarks{\headingmarks{odd}{heading}}
+\def\evenfootingmarks{\headingmarks{even}{footing}}
+\def\oddfootingmarks{\headingmarks{odd}{footing}}
+\def\everyheadingmarks#1 {\headingmarks{even}{heading}{#1}
+ \headingmarks{odd}{heading}{#1} }
+\def\everyfootingmarks#1 {\headingmarks{even}{footing}{#1}
+ \headingmarks{odd}{footing}{#1} }
+% #1 = even/odd, #2 = heading/footing, #3 = top/bottom.
+\def\headingmarks#1#2#3 {%
+ \expandafter\let\expandafter\temp \csname get#3headingmarks\endcsname
+ \global\expandafter\let\csname get#1#2marks\endcsname \temp
+}
+
+\everyheadingmarks bottom
+\everyfootingmarks bottom
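+% As an illustrative combination, a document that wants \thischapter in
+% even-page headings to name the chapter at the top of the page might
+% say something like:
+%   @evenheadingmarks top
+%   @everyheading @thischapter @| @| @thispage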
% @headings double turns headings on for double-sided printing.
% @headings single turns headings on for single-sided printing.
@@ -2318,10 +3389,14 @@ where each line of input produces a line of output.}
\def\headings #1 {\csname HEADINGS#1\endcsname}
-\def\HEADINGSoff{%
-\global\evenheadline={\hfil} \global\evenfootline={\hfil}
-\global\oddheadline={\hfil} \global\oddfootline={\hfil}}
-\HEADINGSoff
+\def\headingsoff{% non-global headings elimination
+ \evenheadline={\hfil}\evenfootline={\hfil}%
+ \oddheadline={\hfil}\oddfootline={\hfil}%
+}
+
+\def\HEADINGSoff{{\globaldefs=1 \headingsoff}} % global setting
+\HEADINGSoff % it's the default
+
% When we turn headings on, set the page number to 1.
% For double-sided printing, put current file name in lower left corner,
% chapter name on inside top of right hand pages, document
@@ -2372,7 +3447,7 @@ where each line of input produces a line of output.}
% This produces Day Month Year style of output.
% Only define if not already defined, in case a txi-??.tex file has set
% up a different format (e.g., txi-cs.tex does this).
-\ifx\today\undefined
+\ifx\today\thisisundefined
\def\today{%
\number\day\space
\ifcase\month
@@ -2433,7 +3508,7 @@ where each line of input produces a line of output.}
\begingroup
\advance\leftskip by-\tableindent
\advance\hsize by\tableindent
- \advance\rightskip by0pt plus1fil
+ \advance\rightskip by0pt plus1fil\relax
\leavevmode\unhbox0\par
\endgroup
%
@@ -2447,7 +3522,7 @@ where each line of input produces a line of output.}
% cause the example and the item to crash together. So we use this
% bizarre value of 10001 as a signal to \aboveenvbreak to insert
% \parskip glue after all. Section titles are handled this way also.
- %
+ %
\penalty 10001
\endgroup
\itemxneedsnegativevskipfalse
@@ -2541,9 +3616,18 @@ where each line of input produces a line of output.}
\parindent=0pt
\parskip=\smallskipamount
\ifdim\parskip=0pt \parskip=2pt \fi
+ %
+ % Try typesetting the item mark so that if the document erroneously says
+ % something like @itemize @samp (intending @table), there's an error
+ % right away at the @itemize. It's not the best error message in the
+ % world, but it's better than leaving it to the @item. This means if
+ % the user wants an empty mark, they have to say @w{} not just @w.
\def\itemcontents{#1}%
+ \setbox0 = \hbox{\itemcontents}%
+ %
% @itemize with no arg is equivalent to @itemize @bullet.
\ifx\itemcontents\empty\def\itemcontents{\bullet}\fi
+ %
\let\item=\itemizeitem
}
@@ -2564,6 +3648,7 @@ where each line of input produces a line of output.}
\ifnum\lastpenalty<10000 \parskip=0in \fi
\noindent
\hbox to 0pt{\hss \itemcontents \kern\itemmargin}%
+ %
\vadjust{\penalty 1200}}% not good to break after first line of item.
\flushcr
}
@@ -2785,12 +3870,19 @@ where each line of input produces a line of output.}
%
% @headitem starts a heading row, which we typeset in bold.
% Assignments have to be global since we are inside the implicit group
-% of an alignment entry. Note that \everycr resets \everytab.
-\def\headitem{\checkenv\multitable \crcr \global\everytab={\bf}\the\everytab}%
+% of an alignment entry. \everycr resets \everytab so we don't have to
+% undo it ourselves.
+\def\headitemfont{\b}% for people to use in the template row; not changeable
+\def\headitem{%
+ \checkenv\multitable
+ \crcr
+ \global\everytab={\bf}% can't use \headitemfont since the parsing differs
+ \the\everytab % for the first item
+}%
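+% Typical use (sketch): inside a @multitable, a row such as
+%   @headitem Command @tab Effect
+% gets its entries set in bold via the \everytab assignment above.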
%
% A \tab used to include \hskip1sp. But then the space in a template
% line is not enough. That is bad. So let's go back to just `&' until
-% we encounter the problem it was intended to solve again.
+% we again encounter the problem the 1sp was intended to solve.
% --karl, nathan@acm.org, 20apr99.
\def\tab{\checkenv\multitable &\the\everytab}%
@@ -2902,18 +3994,18 @@ where each line of input produces a line of output.}
\setbox0=\vbox{X}\global\multitablelinespace=\the\baselineskip
\global\advance\multitablelinespace by-\ht0
\fi
-%% Test to see if parskip is larger than space between lines of
-%% table. If not, do nothing.
-%% If so, set to same dimension as multitablelinespace.
+% Test to see if parskip is larger than space between lines of
+% table. If not, do nothing.
+% If so, set to same dimension as multitablelinespace.
\ifdim\multitableparskip>\multitablelinespace
\global\multitableparskip=\multitablelinespace
-\global\advance\multitableparskip-7pt %% to keep parskip somewhat smaller
- %% than skip between lines in the table.
+\global\advance\multitableparskip-7pt % to keep parskip somewhat smaller
+ % than skip between lines in the table.
\fi%
\ifdim\multitableparskip=0pt
\global\multitableparskip=\multitablelinespace
-\global\advance\multitableparskip-7pt %% to keep parskip somewhat smaller
- %% than skip between lines in the table.
+\global\advance\multitableparskip-7pt % to keep parskip somewhat smaller
+ % than skip between lines in the table.
\fi}
@@ -2959,6 +4051,7 @@ where each line of input produces a line of output.}
\def\doignore#1{\begingroup
% Scan in ``verbatim'' mode:
+ \obeylines
\catcode`\@ = \other
\catcode`\{ = \other
\catcode`\} = \other
@@ -2979,16 +4072,16 @@ where each line of input produces a line of output.}
\gdef\dodoignore#1{%
% #1 contains the command name as a string, e.g., `ifinfo'.
%
- % Define a command to find the next `@end #1', which must be on a line
- % by itself.
- \long\def\doignoretext##1^^M@end #1{\doignoretextyyy##1^^M@#1\_STOP_}%
+ % Define a command to find the next `@end #1'.
+ \long\def\doignoretext##1^^M@end #1{%
+ \doignoretextyyy##1^^M@#1\_STOP_}%
+ %
% And this command to find another #1 command, at the beginning of a
% line. (Otherwise, we would consider a line `@c @ifset', for
% example, to count as an @ifset for nesting.)
\long\def\doignoretextyyy##1^^M@#1##2\_STOP_{\doignoreyyy{##2}\_STOP_}%
%
% And now expand that command.
- \obeylines %
\doignoretext ^^M%
}%
}
@@ -3018,7 +4111,12 @@ where each line of input produces a line of output.}
}
% Finish off ignored text.
-\def\enddoignore{\endgroup\ignorespaces}
+{ \obeylines%
+ % Ignore anything after the last `@end #1'; this matters in verbatim
+ % environments, where otherwise the newline after an ignored conditional
+ % would result in a blank line in the output.
+ \gdef\enddoignore#1^^M{\endgroup\ignorespaces}%
+}
% @set VAR sets the variable VAR to an empty value.
@@ -3183,11 +4281,11 @@ where each line of input produces a line of output.}
\def\dosynindex#1#2#3{%
% Only do \closeout if we haven't already done it, else we'll end up
% closing the target index.
- \expandafter \ifx\csname donesynindex#2\endcsname \undefined
+ \expandafter \ifx\csname donesynindex#2\endcsname \relax
% The \closeout helps reduce unnecessary open files; the limit on the
% Acorn RISC OS is a mere 16 files.
\expandafter\closeout\csname#2indfile\endcsname
- \expandafter\let\csname\donesynindex#2\endcsname = 1
+ \expandafter\let\csname donesynindex#2\endcsname = 1
\fi
% redefine \fooindfile:
\expandafter\let\expandafter\temp\expandafter=\csname#3indfile\endcsname
@@ -3221,11 +4319,41 @@ where each line of input produces a line of output.}
\escapechar = `\\ % use backslash in output files.
\def\@{@}% change to @@ when we switch to @ as escape char in index files.
\def\ {\realbackslash\space }%
- % Need these in case \tex is in effect and \{ is a \delimiter again.
- % But can't use \lbracecmd and \rbracecmd because texindex assumes
- % braces and backslashes are used only as delimiters.
- \let\{ = \mylbrace
- \let\} = \myrbrace
+ %
+ % Need these unexpandable (because we define \tt as a dummy)
+ % definitions when @{ or @} appear in index entry text. Also, more
+ % complicated, when \tex is in effect and \{ is a \delimiter again.
+ % We can't use \lbracecmd and \rbracecmd because texindex assumes
+ % braces and backslashes are used only as delimiters. Perhaps we
+ % should define @lbrace and @rbrace commands a la @comma.
+ \def\{{{\tt\char123}}%
+ \def\}{{\tt\char125}}%
+ %
+ % I don't entirely understand this, but when an index entry is
+ % generated from a macro call, the \endinput which \scanmacro inserts
+ % causes processing to be prematurely terminated. This is,
+ % apparently, because \indexsorttmp is fully expanded, and \endinput
+ % is an expandable command. The redefinition below makes \endinput
+ % disappear altogether for that purpose -- although logging shows that
+ % processing continues to some further point. On the other hand, it
+ % seems \endinput does not hurt in the printed index arg, since that
+ % is still getting written without apparent harm.
+ %
+ % Sample source (mac-idx3.tex, reported by Graham Percival to
+ % help-texinfo, 22may06):
+ % @macro funindex {WORD}
+ % @findex xyz
+ % @end macro
+ % ...
+ % @funindex commtest
+ %
+ % The above is not enough to reproduce the bug, but it gives the flavor.
+ %
+ % Sample whatsit resulting:
+ % .@write3{\entry{xyz}{@folio }{@code {xyz@endinput }}}
+ %
+ % So:
+ \let\endinput = \empty
%
% Do the redefinitions.
\commondummies
@@ -3244,6 +4372,7 @@ where each line of input produces a line of output.}
%
% Do the redefinitions.
\commondummies
+ \otherbackslash
}
% Called from \indexdummies and \atdummies.
@@ -3251,7 +4380,7 @@ where each line of input produces a line of output.}
\def\commondummies{%
%
% \definedummyword defines \#1 as \string\#1\space, thus effectively
- % preventing its expansion. This is used only for control% words,
+ % preventing its expansion. This is used only for control words,
% not control letters, because the \space would be incorrect for
% control characters, but is needed to separate the control word
% from whatever follows.
@@ -3270,23 +4399,28 @@ where each line of input produces a line of output.}
\commondummiesnofonts
%
\definedummyletter\_%
+ \definedummyletter\-%
%
% Non-English letters.
\definedummyword\AA
\definedummyword\AE
+ \definedummyword\DH
\definedummyword\L
- \definedummyword\OE
\definedummyword\O
+ \definedummyword\OE
+ \definedummyword\TH
\definedummyword\aa
\definedummyword\ae
+ \definedummyword\dh
+ \definedummyword\exclamdown
\definedummyword\l
- \definedummyword\oe
\definedummyword\o
- \definedummyword\ss
- \definedummyword\exclamdown
- \definedummyword\questiondown
+ \definedummyword\oe
\definedummyword\ordf
\definedummyword\ordm
+ \definedummyword\questiondown
+ \definedummyword\ss
+ \definedummyword\th
%
% Although these internal commands shouldn't show up, sometimes they do.
\definedummyword\bf
@@ -3302,21 +4436,39 @@ where each line of input produces a line of output.}
\definedummyword\TeX
%
% Assorted special characters.
+ \definedummyword\arrow
\definedummyword\bullet
\definedummyword\comma
\definedummyword\copyright
\definedummyword\registeredsymbol
\definedummyword\dots
\definedummyword\enddots
+ \definedummyword\entrybreak
\definedummyword\equiv
\definedummyword\error
\definedummyword\euro
\definedummyword\expansion
+ \definedummyword\geq
+ \definedummyword\guillemetleft
+ \definedummyword\guillemetright
+ \definedummyword\guilsinglleft
+ \definedummyword\guilsinglright
+ \definedummyword\lbracechar
+ \definedummyword\leq
\definedummyword\minus
+ \definedummyword\ogonek
\definedummyword\pounds
\definedummyword\point
\definedummyword\print
+ \definedummyword\quotedblbase
+ \definedummyword\quotedblleft
+ \definedummyword\quotedblright
+ \definedummyword\quoteleft
+ \definedummyword\quoteright
+ \definedummyword\quotesinglbase
+ \definedummyword\rbracechar
\definedummyword\result
+ \definedummyword\textdegree
%
% We want to disable all macros so that they are not expanded by \write.
\macrolist
@@ -3330,63 +4482,72 @@ where each line of input produces a line of output.}
% \commondummiesnofonts: common to \commondummies and \indexnofonts.
%
-% Better have this without active chars.
-{
- \catcode`\~=\other
- \gdef\commondummiesnofonts{%
- % Control letters and accents.
- \definedummyletter\!%
- \definedummyaccent\"%
- \definedummyaccent\'%
- \definedummyletter\*%
- \definedummyaccent\,%
- \definedummyletter\.%
- \definedummyletter\/%
- \definedummyletter\:%
- \definedummyaccent\=%
- \definedummyletter\?%
- \definedummyaccent\^%
- \definedummyaccent\`%
- \definedummyaccent\~%
- \definedummyword\u
- \definedummyword\v
- \definedummyword\H
- \definedummyword\dotaccent
- \definedummyword\ringaccent
- \definedummyword\tieaccent
- \definedummyword\ubaraccent
- \definedummyword\udotaccent
- \definedummyword\dotless
- %
- % Texinfo font commands.
- \definedummyword\b
- \definedummyword\i
- \definedummyword\r
- \definedummyword\sc
- \definedummyword\t
- %
- % Commands that take arguments.
- \definedummyword\acronym
- \definedummyword\cite
- \definedummyword\code
- \definedummyword\command
- \definedummyword\dfn
- \definedummyword\emph
- \definedummyword\env
- \definedummyword\file
- \definedummyword\kbd
- \definedummyword\key
- \definedummyword\math
- \definedummyword\option
- \definedummyword\samp
- \definedummyword\strong
- \definedummyword\tie
- \definedummyword\uref
- \definedummyword\url
- \definedummyword\var
- \definedummyword\verb
- \definedummyword\w
- }
+\def\commondummiesnofonts{%
+ % Control letters and accents.
+ \definedummyletter\!%
+ \definedummyaccent\"%
+ \definedummyaccent\'%
+ \definedummyletter\*%
+ \definedummyaccent\,%
+ \definedummyletter\.%
+ \definedummyletter\/%
+ \definedummyletter\:%
+ \definedummyaccent\=%
+ \definedummyletter\?%
+ \definedummyaccent\^%
+ \definedummyaccent\`%
+ \definedummyaccent\~%
+ \definedummyword\u
+ \definedummyword\v
+ \definedummyword\H
+ \definedummyword\dotaccent
+ \definedummyword\ogonek
+ \definedummyword\ringaccent
+ \definedummyword\tieaccent
+ \definedummyword\ubaraccent
+ \definedummyword\udotaccent
+ \definedummyword\dotless
+ %
+ % Texinfo font commands.
+ \definedummyword\b
+ \definedummyword\i
+ \definedummyword\r
+ \definedummyword\sansserif
+ \definedummyword\sc
+ \definedummyword\slanted
+ \definedummyword\t
+ %
+ % Commands that take arguments.
+ \definedummyword\abbr
+ \definedummyword\acronym
+ \definedummyword\anchor
+ \definedummyword\cite
+ \definedummyword\code
+ \definedummyword\command
+ \definedummyword\dfn
+ \definedummyword\dmn
+ \definedummyword\email
+ \definedummyword\emph
+ \definedummyword\env
+ \definedummyword\file
+ \definedummyword\image
+ \definedummyword\indicateurl
+ \definedummyword\inforef
+ \definedummyword\kbd
+ \definedummyword\key
+ \definedummyword\math
+ \definedummyword\option
+ \definedummyword\pxref
+ \definedummyword\ref
+ \definedummyword\samp
+ \definedummyword\strong
+ \definedummyword\tie
+ \definedummyword\uref
+ \definedummyword\url
+ \definedummyword\var
+ \definedummyword\verb
+ \definedummyword\w
+ \definedummyword\xref
}
% \indexnofonts is used when outputting the strings to sort the index
@@ -3399,7 +4560,7 @@ where each line of input produces a line of output.}
\def\definedummyaccent##1{\let##1\asis}%
% We can just ignore other control letters.
\def\definedummyletter##1{\let##1\empty}%
- % Hopefully, all control words can become @asis.
+ % All control words become @asis by default; overrides below.
\let\definedummyword\definedummyaccent
%
\commondummiesnofonts
@@ -3411,60 +4572,95 @@ where each line of input produces a line of output.}
%
\def\ { }%
\def\@{@}%
- % how to handle braces?
\def\_{\normalunderscore}%
+ \def\-{}% @- shouldn't affect sorting
+ %
+ % Unfortunately, texindex is not prepared to handle braces in the
+ % content at all. So for index sorting, we map @{ and @} to strings
+ % starting with |, since that ASCII character is between ASCII { and }.
+ \def\{{|a}%
+ \def\lbracechar{|a}%
+ %
+ \def\}{|b}%
+ \def\rbracechar{|b}%
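+ % Thus an index entry containing @{ sorts with the string "|a" and one
+ % containing @} with "|b", keeping both between ASCII { and } for texindex.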
%
% Non-English letters.
\def\AA{AA}%
\def\AE{AE}%
+ \def\DH{DZZ}%
\def\L{L}%
\def\OE{OE}%
\def\O{O}%
+ \def\TH{ZZZ}%
\def\aa{aa}%
\def\ae{ae}%
+ \def\dh{dzz}%
+ \def\exclamdown{!}%
\def\l{l}%
\def\oe{oe}%
- \def\o{o}%
- \def\ss{ss}%
- \def\exclamdown{!}%
- \def\questiondown{?}%
\def\ordf{a}%
\def\ordm{o}%
+ \def\o{o}%
+ \def\questiondown{?}%
+ \def\ss{ss}%
+ \def\th{zzz}%
%
\def\LaTeX{LaTeX}%
\def\TeX{TeX}%
%
% Assorted special characters.
% (The following {} will end up in the sort string, but that's ok.)
+ \def\arrow{->}%
\def\bullet{bullet}%
\def\comma{,}%
\def\copyright{copyright}%
- \def\registeredsymbol{R}%
\def\dots{...}%
\def\enddots{...}%
\def\equiv{==}%
\def\error{error}%
\def\euro{euro}%
\def\expansion{==>}%
+ \def\geq{>=}%
+ \def\guillemetleft{<<}%
+ \def\guillemetright{>>}%
+ \def\guilsinglleft{<}%
+ \def\guilsinglright{>}%
+ \def\leq{<=}%
\def\minus{-}%
- \def\pounds{pounds}%
\def\point{.}%
+ \def\pounds{pounds}%
\def\print{-|}%
+ \def\quotedblbase{"}%
+ \def\quotedblleft{"}%
+ \def\quotedblright{"}%
+ \def\quoteleft{`}%
+ \def\quoteright{'}%
+ \def\quotesinglbase{,}%
+ \def\registeredsymbol{R}%
\def\result{=>}%
+ \def\textdegree{o}%
+ %
+ \expandafter\ifx\csname SETtxiindexlquoteignore\endcsname\relax
+ \else \indexlquoteignore \fi
%
% We need to get rid of all macros, leaving only the arguments (if present).
% Of course this is not nearly correct, but it is the best we can do for now.
% makeinfo does not expand macros in the argument to @deffn, which ends up
% writing an index entry, and texindex isn't prepared for an index sort entry
% that starts with \.
- %
+ %
% Since macro invocations are followed by braces, we can just redefine them
% to take a single TeX argument. The case of a macro invocation that
% goes to end-of-line is not handled.
- %
+ %
\macrolist
}
+% Undocumented (for FSFS 2nd ed.): @set txiindexlquoteignore makes us
+% ignore left quotes in the sort term.
+{\catcode`\`=\active
+ \gdef\indexlquoteignore{\let`=\empty}}
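+% For example, after "@set txiindexlquoteignore" an entry such as
+% @findex `foo is sorted as if it read "foo", without the left quote.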
+
\let\indexbackslash=0 %overridden during \printindex.
\let\SETmarginindex=\relax % put index entries in margin (undocumented)?
@@ -3490,11 +4686,7 @@ where each line of input produces a line of output.}
%
\edef\writeto{\csname#1indfile\endcsname}%
%
- \ifvmode
- \dosubindsanitize
- \else
- \dosubindwrite
- \fi
+ \safewhatsit\dosubindwrite
}%
\fi
}
@@ -3531,13 +4723,13 @@ where each line of input produces a line of output.}
\temp
}
-% Take care of unwanted page breaks:
+% Take care of unwanted page breaks/skips around a whatsit:
%
% If a skip is the last thing on the list now, preserve it
% by backing up by \lastskip, doing the \write, then inserting
% the skip again. Otherwise, the whatsit generated by the
-% \write will make \lastskip zero. The result is that sequences
-% like this:
+% \write or \pdfdest will make \lastskip zero. The result is that
+% sequences like this:
% @end defun
% @tindex whatever
% @defun ...
@@ -3561,25 +4753,30 @@ where each line of input produces a line of output.}
%
\edef\zeroskipmacro{\expandafter\the\csname z@skip\endcsname}
%
+\newskip\whatsitskip
+\newcount\whatsitpenalty
+%
% ..., ready, GO:
%
-\def\dosubindsanitize{%
+\def\safewhatsit#1{\ifhmode
+ #1%
+ \else
% \lastskip and \lastpenalty cannot both be nonzero simultaneously.
- \skip0 = \lastskip
+ \whatsitskip = \lastskip
\edef\lastskipmacro{\the\lastskip}%
- \count255 = \lastpenalty
+ \whatsitpenalty = \lastpenalty
%
% If \lastskip is nonzero, that means the last item was a
% skip. And since a skip is discardable, that means this
- % -\skip0 glue we're inserting is preceded by a
+ % -\whatsitskip glue we're inserting is preceded by a
% non-discardable item, therefore it is not a potential
% breakpoint, therefore no \nobreak needed.
\ifx\lastskipmacro\zeroskipmacro
\else
- \vskip-\skip0
+ \vskip-\whatsitskip
\fi
%
- \dosubindwrite
+ #1%
%
\ifx\lastskipmacro\zeroskipmacro
% If \lastskip was zero, perhaps the last item was a penalty, and
@@ -3587,20 +4784,19 @@ where each line of input produces a line of output.}
% to re-insert the same penalty (values >10000 are used for various
% signals); since we just inserted a non-discardable item, any
% following glue (such as a \parskip) would be a breakpoint. For example:
- %
% @deffn deffn-whatever
% @vindex index-whatever
% Description.
% would allow a break between the index-whatever whatsit
% and the "Description." paragraph.
- \ifnum\count255>9999 \penalty\count255 \fi
+ \ifnum\whatsitpenalty>9999 \penalty\whatsitpenalty \fi
\else
% On the other hand, if we had a nonzero \lastskip,
% this make-up glue would be preceded by a non-discardable item
% (the whatsit from the \write), so we must insert a \nobreak.
- \nobreak\vskip\skip0
+ \nobreak\vskip\whatsitskip
\fi
-}
+\fi}
% The index entry written in the file actually looks like
% \entry {sortstring}{page}{topic}
@@ -3642,6 +4838,7 @@ where each line of input produces a line of output.}
%
\smallfonts \rm
\tolerance = 9500
+ \plainfrenchspacing
\everypar = {}% don't want the \kern\-parindent from indentation suppression.
%
% See if the index file exists and is nonempty.
@@ -3715,10 +4912,9 @@ where each line of input produces a line of output.}
%
% A straightforward implementation would start like this:
% \def\entry#1#2{...
-% But this frozes the catcodes in the argument, and can cause problems to
+% But this freezes the catcodes in the argument, and can cause problems to
% @code, which sets - active. This problem was fixed by a kludge---
% ``-'' was active throughout whole index, but this isn't really right.
-%
% The right solution is to prevent \entry from swallowing the whole text.
% --kasal, 21nov03
\def\entry{%
@@ -3755,10 +4951,17 @@ where each line of input produces a line of output.}
% columns.
\vskip 0pt plus1pt
%
+ % When reading the text of entry, convert explicit line breaks
+ % from @* into spaces. The user might give these in long section
+ % titles, for instance.
+ \def\*{\unskip\space\ignorespaces}%
+ \def\entrybreak{\hfil\break}%
+ %
% Swallow the left brace of the text (first parameter):
\afterassignment\doentry
\let\temp =
}
+\def\entrybreak{\unskip\space\ignorespaces}%
\def\doentry{%
\bgroup % Instead of the swallowed brace.
\noindent
@@ -3771,11 +4974,8 @@ where each line of input produces a line of output.}
% The following is kludged to not output a line of dots in the index if
% there are no page numbers. The next person who breaks this will be
% cursed by a Unix daemon.
- \def\tempa{{\rm }}%
- \def\tempb{#1}%
- \edef\tempc{\tempa}%
- \edef\tempd{\tempb}%
- \ifx\tempc\tempd
+ \setbox\boxA = \hbox{#1}%
+ \ifdim\wd\boxA = 0pt
\ %
\else
%
@@ -3799,9 +4999,9 @@ where each line of input produces a line of output.}
\endgroup
}
-% Like \dotfill except takes at least 1 em.
+% Like plain.tex's \dotfill, except uses up at least 1 em.
\def\indexdotfill{\cleaders
- \hbox{$\mathsurround=0pt \mkern1.5mu ${\it .}$ \mkern1.5mu$}\hskip 1em plus 1fill}
+ \hbox{$\mathsurround=0pt \mkern1.5mu.\mkern1.5mu$}\hskip 1em plus 1fill}
\def\primary #1{\line{#1\hfil}}
@@ -3911,6 +5111,34 @@ where each line of input produces a line of output.}
%
% All done with double columns.
\def\enddoublecolumns{%
+ % The following penalty ensures that the page builder is exercised
+ % _before_ we change the output routine. This is necessary in the
+ % following situation:
+ %
+ % The last section of the index consists only of a single entry.
+ % Before this section, \pagetotal is less than \pagegoal, so no
+ % break occurs before the last section starts. However, the last
+ % section, consisting of \initial and the single \entry, does not
+ % fit on the page and has to be broken off. Without the following
+ % penalty the page builder will not be exercised until \eject
+ % below, and by that time we'll already have changed the output
+ % routine to the \balancecolumns version, so the next-to-last
+ % double-column page will be processed with \balancecolumns, which
+ % is wrong: The two columns will go to the main vertical list, with
+ % the broken-off section in the recent contributions. As soon as
+ % the output routine finishes, TeX starts reconsidering the page
+ % break. The two columns and the broken-off section both fit on the
+ % page, because the two columns now take up only half of the page
+ % goal. When TeX sees \eject from below which follows the final
+ % section, it invokes the new output routine that we've set after
+ % \balancecolumns below; \onepageout will try to fit the two columns
+ % and the final section into the vbox of \pageheight (see
+ % \pagebody), causing an overfull box.
+ %
+ % Note that glue won't work here, because glue does not exercise the
+ % page builder, unlike penalties (see The TeXbook, pp. 280-281).
+ \penalty0
+ %
\output = {%
% Split the last of the double-column material. Leave it on the
% current page, no automatic page break.
@@ -3966,7 +5194,22 @@ where each line of input produces a line of output.}
\message{sectioning,}
% Chapters, sections, etc.
-% \unnumberedno is an oxymoron, of course. But we count the unnumbered
+% Let's start with @part.
+\outer\parseargdef\part{\partzzz{#1}}
+\def\partzzz#1{%
+ \chapoddpage
+ \null
+ \vskip.3\vsize % move it down on the page a bit
+ \begingroup
+ \noindent \titlefonts\rmisbold #1\par % the text
+ \let\lastnode=\empty % no node to associate with
+ \writetocentry{part}{#1}{}% but put it in the toc
+ \headingsoff % no headline or footline on the part page
+ \chapoddpage
+ \endgroup
+}
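+% Illustrative use in a manual:
+%   @part The Basics
+% which forces an odd page, prints the title in the title fonts, and
+% writes a "part" line into the toc without associating a node.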
+
+% \unnumberedno is an oxymoron. But we count the unnumbered
% sections so that we can refer to them unambiguously in the pdf
% outlines by their "section number". We avoid collisions with chapter
% numbers by starting them at 10000. (If a document ever has 10000
@@ -4020,11 +5263,15 @@ where each line of input produces a line of output.}
\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi
\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi}
-% Each @chapter defines this as the name of the chapter.
-% page headings and footings can use it. @section does likewise.
-% However, they are not reliable, because we don't use marks.
+% Each @chapter defines these (using marks) as the number+name, number
+% and name of the chapter. Page headings and footings can use
+% these. @section does likewise.
\def\thischapter{}
+\def\thischapternum{}
+\def\thischaptername{}
\def\thissection{}
+\def\thissectionnum{}
+\def\thissectionname{}
\newcount\absseclevel % used to calculate proper heading level
\newcount\secbase\secbase=0 % @raisesections/@lowersections modify this count
@@ -4041,8 +5288,8 @@ where each line of input produces a line of output.}
\chardef\maxseclevel = 3
%
% A numbered section within an unnumbered changes to unnumbered too.
-% To achive this, remember the "biggest" unnum. sec. we are currently in:
-\chardef\unmlevel = \maxseclevel
+% To achieve this, remember the "biggest" unnum. sec. we are currently in:
+\chardef\unnlevel = \maxseclevel
%
% Trace whether the current chapter is an appendix or not:
% \chapheadtype is "N" or "A", unnumbered chapters are ignored.
@@ -4067,8 +5314,8 @@ where each line of input produces a line of output.}
% The heading type:
\def\headtype{#1}%
\if \headtype U%
- \ifnum \absseclevel < \unmlevel
- \chardef\unmlevel = \absseclevel
+ \ifnum \absseclevel < \unnlevel
+ \chardef\unnlevel = \absseclevel
\fi
\else
% Check for appendix sections:
@@ -4080,10 +5327,10 @@ where each line of input produces a line of output.}
\fi\fi
\fi
% Check for numbered within unnumbered:
- \ifnum \absseclevel > \unmlevel
+ \ifnum \absseclevel > \unnlevel
\def\headtype{U}%
\else
- \chardef\unmlevel = 3
+ \chardef\unnlevel = 3
\fi
\fi
% Now print the heading:
@@ -4137,7 +5384,9 @@ where each line of input produces a line of output.}
\gdef\chaplevelprefix{\the\chapno.}%
\resetallfloatnos
%
- \message{\putwordChapter\space \the\chapno}%
+ % \putwordChapter can contain complex things in translations.
+ \toks0=\expandafter{\putwordChapter}%
+ \message{\the\toks0 \space \the\chapno}%
%
% Write the actual heading.
\chapmacro{#1}{Ynumbered}{\the\chapno}%
@@ -4148,15 +5397,17 @@ where each line of input produces a line of output.}
\global\let\subsubsection = \numberedsubsubsec
}
-\outer\parseargdef\appendix{\apphead0{#1}} % normally apphead0 calls appendixzzz
+\outer\parseargdef\appendix{\apphead0{#1}} % normally calls appendixzzz
+%
\def\appendixzzz#1{%
\global\secno=0 \global\subsecno=0 \global\subsubsecno=0
\global\advance\appendixno by 1
\gdef\chaplevelprefix{\appendixletter.}%
\resetallfloatnos
%
- \def\appendixnum{\putwordAppendix\space \appendixletter}%
- \message{\appendixnum}%
+ % \putwordAppendix can contain complex things in translations.
+ \toks0=\expandafter{\putwordAppendix}%
+ \message{\the\toks0 \space \appendixletter}%
%
\chapmacro{#1}{Yappendix}{\appendixletter}%
%
@@ -4165,7 +5416,8 @@ where each line of input produces a line of output.}
\global\let\subsubsection = \appendixsubsubsec
}
-\outer\parseargdef\unnumbered{\unnmhead0{#1}} % normally unnmhead0 calls unnumberedzzz
+% normally unnmhead0 calls unnumberedzzz:
+\outer\parseargdef\unnumbered{\unnmhead0{#1}}
\def\unnumberedzzz#1{%
\global\secno=0 \global\subsecno=0 \global\subsubsecno=0
\global\advance\unnumberedno by 1
@@ -4209,40 +5461,47 @@ where each line of input produces a line of output.}
\let\top\unnumbered
% Sections.
+%
\outer\parseargdef\numberedsec{\numhead1{#1}} % normally calls seczzz
\def\seczzz#1{%
\global\subsecno=0 \global\subsubsecno=0 \global\advance\secno by 1
\sectionheading{#1}{sec}{Ynumbered}{\the\chapno.\the\secno}%
}
-\outer\parseargdef\appendixsection{\apphead1{#1}} % normally calls appendixsectionzzz
+% normally calls appendixsectionzzz:
+\outer\parseargdef\appendixsection{\apphead1{#1}}
\def\appendixsectionzzz#1{%
\global\subsecno=0 \global\subsubsecno=0 \global\advance\secno by 1
\sectionheading{#1}{sec}{Yappendix}{\appendixletter.\the\secno}%
}
\let\appendixsec\appendixsection
-\outer\parseargdef\unnumberedsec{\unnmhead1{#1}} % normally calls unnumberedseczzz
+% normally calls unnumberedseczzz:
+\outer\parseargdef\unnumberedsec{\unnmhead1{#1}}
\def\unnumberedseczzz#1{%
\global\subsecno=0 \global\subsubsecno=0 \global\advance\secno by 1
\sectionheading{#1}{sec}{Ynothing}{\the\unnumberedno.\the\secno}%
}
% Subsections.
-\outer\parseargdef\numberedsubsec{\numhead2{#1}} % normally calls numberedsubseczzz
+%
+% normally calls numberedsubseczzz:
+\outer\parseargdef\numberedsubsec{\numhead2{#1}}
\def\numberedsubseczzz#1{%
\global\subsubsecno=0 \global\advance\subsecno by 1
\sectionheading{#1}{subsec}{Ynumbered}{\the\chapno.\the\secno.\the\subsecno}%
}
-\outer\parseargdef\appendixsubsec{\apphead2{#1}} % normally calls appendixsubseczzz
+% normally calls appendixsubseczzz:
+\outer\parseargdef\appendixsubsec{\apphead2{#1}}
\def\appendixsubseczzz#1{%
\global\subsubsecno=0 \global\advance\subsecno by 1
\sectionheading{#1}{subsec}{Yappendix}%
{\appendixletter.\the\secno.\the\subsecno}%
}
-\outer\parseargdef\unnumberedsubsec{\unnmhead2{#1}} %normally calls unnumberedsubseczzz
+% normally calls unnumberedsubseczzz:
+\outer\parseargdef\unnumberedsubsec{\unnmhead2{#1}}
\def\unnumberedsubseczzz#1{%
\global\subsubsecno=0 \global\advance\subsecno by 1
\sectionheading{#1}{subsec}{Ynothing}%
@@ -4250,21 +5509,25 @@ where each line of input produces a line of output.}
}
% Subsubsections.
-\outer\parseargdef\numberedsubsubsec{\numhead3{#1}} % normally numberedsubsubseczzz
+%
+% normally numberedsubsubseczzz:
+\outer\parseargdef\numberedsubsubsec{\numhead3{#1}}
\def\numberedsubsubseczzz#1{%
\global\advance\subsubsecno by 1
\sectionheading{#1}{subsubsec}{Ynumbered}%
{\the\chapno.\the\secno.\the\subsecno.\the\subsubsecno}%
}
-\outer\parseargdef\appendixsubsubsec{\apphead3{#1}} % normally appendixsubsubseczzz
+% normally appendixsubsubseczzz:
+\outer\parseargdef\appendixsubsubsec{\apphead3{#1}}
\def\appendixsubsubseczzz#1{%
\global\advance\subsubsecno by 1
\sectionheading{#1}{subsubsec}{Yappendix}%
{\appendixletter.\the\secno.\the\subsecno.\the\subsubsecno}%
}
-\outer\parseargdef\unnumberedsubsubsec{\unnmhead3{#1}} %normally unnumberedsubsubseczzz
+% normally unnumberedsubsubseczzz:
+\outer\parseargdef\unnumberedsubsubsec{\unnmhead3{#1}}
\def\unnumberedsubsubseczzz#1{%
\global\advance\subsubsecno by 1
\sectionheading{#1}{subsubsec}{Ynothing}%
@@ -4288,7 +5551,6 @@ where each line of input produces a line of output.}
% 3) Likewise, headings look best if no \parindent is used, and
% if justification is not attempted. Hence \raggedright.
-
\def\majorheading{%
{\advance\chapheadingskip by 10pt \chapbreak }%
\parsearg\chapheadingzzz
@@ -4297,8 +5559,8 @@ where each line of input produces a line of output.}
\def\chapheading{\chapbreak \parsearg\chapheadingzzz}
\def\chapheadingzzz#1{%
{\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000
- \parindent=0pt\raggedright
- \rm #1\hfill}}%
+ \parindent=0pt\ptexraggedright
+ \rmisbold #1\hfill}}%
\bigskip \par\penalty 200\relax
\suppressfirstparagraphindent
}
@@ -4315,17 +5577,28 @@ where each line of input produces a line of output.}
% (including whitespace, linebreaking, etc. around it),
% given all the information in convenient, parsed form.
-%%% Args are the skip and penalty (usually negative)
+% Args are the skip and penalty (usually negative)
\def\dobreak#1#2{\par\ifdim\lastskip<#1\removelastskip\penalty#2\vskip#1\fi}
-%%% Define plain chapter starts, and page on/off switching for it
% Parameter controlling skip before chapter headings (if needed)
-
\newskip\chapheadingskip
+% Define plain chapter starts, and page on/off switching for it.
\def\chapbreak{\dobreak \chapheadingskip {-4000}}
\def\chappager{\par\vfill\supereject}
-\def\chapoddpage{\chappager \ifodd\pageno \else \hbox to 0pt{} \chappager\fi}
+% Because \domark is called before \chapoddpage, the filler page will
+% get the headings for the next chapter, which is wrong. But we don't
+% care -- we just disable all headings on the filler page.
+\def\chapoddpage{%
+ \chappager
+ \ifodd\pageno \else
+ \begingroup
+ \headingsoff
+ \null
+ \chappager
+ \endgroup
+ \fi
+}
\def\setchapternewpage #1 {\csname CHAPPAG#1\endcsname}
@@ -4359,41 +5632,78 @@ where each line of input produces a line of output.}
\def\Yappendixkeyword{Yappendix}
%
\def\chapmacro#1#2#3{%
+ % Insert the first mark before the heading break (see notes for \domark).
+ \let\prevchapterdefs=\lastchapterdefs
+ \let\prevsectiondefs=\lastsectiondefs
+ \gdef\lastsectiondefs{\gdef\thissectionname{}\gdef\thissectionnum{}%
+ \gdef\thissection{}}%
+ %
+ \def\temptype{#2}%
+ \ifx\temptype\Ynothingkeyword
+ \gdef\lastchapterdefs{\gdef\thischaptername{#1}\gdef\thischapternum{}%
+ \gdef\thischapter{\thischaptername}}%
+ \else\ifx\temptype\Yomitfromtockeyword
+ \gdef\lastchapterdefs{\gdef\thischaptername{#1}\gdef\thischapternum{}%
+ \gdef\thischapter{}}%
+ \else\ifx\temptype\Yappendixkeyword
+ \toks0={#1}%
+ \xdef\lastchapterdefs{%
+ \gdef\noexpand\thischaptername{\the\toks0}%
+ \gdef\noexpand\thischapternum{\appendixletter}%
+ % \noexpand\putwordAppendix avoids expanding indigestible
+ % commands in some of the translations.
+ \gdef\noexpand\thischapter{\noexpand\putwordAppendix{}
+ \noexpand\thischapternum:
+ \noexpand\thischaptername}%
+ }%
+ \else
+ \toks0={#1}%
+ \xdef\lastchapterdefs{%
+ \gdef\noexpand\thischaptername{\the\toks0}%
+ \gdef\noexpand\thischapternum{\the\chapno}%
+ % \noexpand\putwordChapter avoids expanding indigestible
+ % commands in some of the translations.
+ \gdef\noexpand\thischapter{\noexpand\putwordChapter{}
+ \noexpand\thischapternum:
+ \noexpand\thischaptername}%
+ }%
+ \fi\fi\fi
+ %
+ % Output the mark. Pass it through \safewhatsit, to take care of
+ % the preceding space.
+ \safewhatsit\domark
+ %
+ % Insert the chapter heading break.
\pchapsepmacro
+ %
+ % Now the second mark, after the heading break. No break points
+ % between here and the heading.
+ \let\prevchapterdefs=\lastchapterdefs
+ \let\prevsectiondefs=\lastsectiondefs
+ \domark
+ %
{%
- \chapfonts \rm
+ \chapfonts \rmisbold
%
- % Have to define \thissection before calling \donoderef, because the
+ % Have to define \lastsection before calling \donoderef, because the
% xref code eventually uses it. On the other hand, it has to be called
% after \pchapsepmacro, or the headline will change too soon.
- \gdef\thissection{#1}%
- \gdef\thischaptername{#1}%
+ \gdef\lastsection{#1}%
%
% Only insert the separating space if we have a chapter/appendix
% number, and don't print the unnumbered ``number''.
- \def\temptype{#2}%
\ifx\temptype\Ynothingkeyword
\setbox0 = \hbox{}%
\def\toctype{unnchap}%
- \gdef\thischapter{#1}%
\else\ifx\temptype\Yomitfromtockeyword
\setbox0 = \hbox{}% contents like unnumbered, but no toc entry
\def\toctype{omit}%
- \gdef\thischapter{}%
\else\ifx\temptype\Yappendixkeyword
\setbox0 = \hbox{\putwordAppendix{} #3\enspace}%
\def\toctype{app}%
- % We don't substitute the actual chapter name into \thischapter
- % because we don't want its macros evaluated now. And we don't
- % use \thissection because that changes with each section.
- %
- \xdef\thischapter{\putwordAppendix{} \appendixletter:
- \noexpand\thischaptername}%
\else
\setbox0 = \hbox{#3\enspace}%
\def\toctype{numchap}%
- \xdef\thischapter{\putwordChapter{} \the\chapno:
- \noexpand\thischaptername}%
\fi\fi\fi
%
% Write the toc entry for this chapter. Must come before the
@@ -4409,7 +5719,8 @@ where each line of input produces a line of output.}
\donoderef{#2}%
%
% Typeset the actual heading.
- \vbox{\hyphenpenalty=10000 \tolerance=5000 \parindent=0pt \raggedright
+ \nobreak % Avoid page breaks at the interline glue.
+ \vbox{\hyphenpenalty=10000 \tolerance=5000 \parindent=0pt \ptexraggedright
\hangindent=\wd0 \centerparametersmaybe
\unhbox0 #1\par}%
}%
@@ -4433,8 +5744,8 @@ where each line of input produces a line of output.}
%
\def\unnchfopen #1{%
\chapoddpage {\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000
- \parindent=0pt\raggedright
- \rm #1\hfill}}\bigskip \par\nobreak
+ \parindent=0pt\ptexraggedright
+ \rmisbold #1\hfill}}\bigskip \par\nobreak
}
\def\chfopen #1#2{\chapoddpage {\chapfonts
\vbox to 3in{\vfil \hbox to\hsize{\hfil #2} \hbox to\hsize{\hfil #1} \vfil}}%
@@ -4443,7 +5754,7 @@ where each line of input produces a line of output.}
\def\centerchfopen #1{%
\chapoddpage {\chapfonts \vbox{\hyphenpenalty=10000\tolerance=5000
\parindent=0pt
- \hfill {\rm #1}\hfill}}\bigskip \par\nobreak
+ \hfill {\rmisbold #1}\hfill}}\bigskip \par\nobreak
}
\def\CHAPFopen{%
\global\let\chapmacro=\chfopen
@@ -4471,47 +5782,110 @@ where each line of input produces a line of output.}
% the section type for xrefs (Ynumbered, Ynothing, Yappendix), #4 is the
% section number.
%
+\def\seckeyword{sec}
+%
\def\sectionheading#1#2#3#4{%
{%
+ \checkenv{}% should not be in an environment.
+ %
% Switch to the right set of fonts.
- \csname #2fonts\endcsname \rm
+ \csname #2fonts\endcsname \rmisbold
+ %
+ \def\sectionlevel{#2}%
+ \def\temptype{#3}%
+ %
+ % Insert first mark before the heading break (see notes for \domark).
+ \let\prevsectiondefs=\lastsectiondefs
+ \ifx\temptype\Ynothingkeyword
+ \ifx\sectionlevel\seckeyword
+ \gdef\lastsectiondefs{\gdef\thissectionname{#1}\gdef\thissectionnum{}%
+ \gdef\thissection{\thissectionname}}%
+ \fi
+ \else\ifx\temptype\Yomitfromtockeyword
+ % Don't redefine \thissection.
+ \else\ifx\temptype\Yappendixkeyword
+ \ifx\sectionlevel\seckeyword
+ \toks0={#1}%
+ \xdef\lastsectiondefs{%
+ \gdef\noexpand\thissectionname{\the\toks0}%
+ \gdef\noexpand\thissectionnum{#4}%
+ % \noexpand\putwordSection avoids expanding indigestible
+ % commands in some of the translations.
+ \gdef\noexpand\thissection{\noexpand\putwordSection{}
+ \noexpand\thissectionnum:
+ \noexpand\thissectionname}%
+ }%
+ \fi
+ \else
+ \ifx\sectionlevel\seckeyword
+ \toks0={#1}%
+ \xdef\lastsectiondefs{%
+ \gdef\noexpand\thissectionname{\the\toks0}%
+ \gdef\noexpand\thissectionnum{#4}%
+ % \noexpand\putwordSection avoids expanding indigestible
+ % commands in some of the translations.
+ \gdef\noexpand\thissection{\noexpand\putwordSection{}
+ \noexpand\thissectionnum:
+ \noexpand\thissectionname}%
+ }%
+ \fi
+ \fi\fi\fi
+ %
+ % Go into vertical mode. Usually we'll already be there, but we
+ % don't want the following whatsit to end up in a preceding paragraph
+ % if the document didn't happen to have a blank line.
+ \par
+ %
+ % Output the mark. Pass it through \safewhatsit, to take care of
+ % the preceding space.
+ \safewhatsit\domark
%
% Insert space above the heading.
\csname #2headingbreak\endcsname
%
- % Only insert the space after the number if we have a section number.
- \def\sectionlevel{#2}%
- \def\temptype{#3}%
+ % Now the second mark, after the heading break. No break points
+ % between here and the heading.
+ \let\prevsectiondefs=\lastsectiondefs
+ \domark
%
+ % Only insert the space after the number if we have a section number.
\ifx\temptype\Ynothingkeyword
\setbox0 = \hbox{}%
\def\toctype{unn}%
- \gdef\thissection{#1}%
+ \gdef\lastsection{#1}%
\else\ifx\temptype\Yomitfromtockeyword
% for @headings -- no section number, don't include in toc,
- % and don't redefine \thissection.
+ % and don't redefine \lastsection.
\setbox0 = \hbox{}%
\def\toctype{omit}%
\let\sectionlevel=\empty
\else\ifx\temptype\Yappendixkeyword
\setbox0 = \hbox{#4\enspace}%
\def\toctype{app}%
- \gdef\thissection{#1}%
+ \gdef\lastsection{#1}%
\else
\setbox0 = \hbox{#4\enspace}%
\def\toctype{num}%
- \gdef\thissection{#1}%
+ \gdef\lastsection{#1}%
\fi\fi\fi
%
- % Write the toc entry (before \donoderef). See comments in \chfplain.
+ % Write the toc entry (before \donoderef). See comments in \chapmacro.
\writetocentry{\toctype\sectionlevel}{#1}{#4}%
%
% Write the node reference (= pdf destination for pdftex).
- % Again, see comments in \chfplain.
+ % Again, see comments in \chapmacro.
\donoderef{#3}%
%
+ % Interline glue will be inserted when the vbox is completed.
+ % That glue will be a valid breakpoint for the page, since it'll be
+ % preceded by a whatsit (usually from the \donoderef, or from the
+ % \writetocentry if there was no node). We don't want to allow that
+ % break, since then the whatsits could end up on page n while the
+ % section is on page n+1, thus toc/etc. are wrong. Debian bug 276000.
+ \nobreak
+ %
% Output the actual section heading.
- \vbox{\hyphenpenalty=10000 \tolerance=5000 \parindent=0pt \raggedright
+ \vbox{\hyphenpenalty=10000 \tolerance=5000 \parindent=0pt \ptexraggedright
\hangindent=\wd0 % zero if no section number
\unhbox0 #1}%
}%
@@ -4525,15 +5899,15 @@ where each line of input produces a line of output.}
%
% We'll almost certainly start a paragraph next, so don't let that
% glue accumulate. (Not a breakpoint because it's preceded by a
- % discardable item.)
+ % discardable item.) However, when a paragraph is not started next
+ % (\startdefun, \cartouche, \center, etc.), this needs to be wiped out
+ % or the negative glue will cause weirdly wrong output, typically
+ % obscuring the section heading with something else.
\vskip-\parskip
- %
- % This is purely so the last item on the list is a known \penalty >
- % 10000. This is so \startdefun can avoid allowing breakpoints after
- % section headings. Otherwise, it would insert a valid breakpoint between:
- %
- % @section sec-whatever
- % @deffn def-whatever
+ %
+ % This is so the last item on the main vertical list is a known
+ % \penalty > 10000, so \startdefun, etc., can recognize the situation
+ % and do the needful.
\penalty 10001
}
@@ -4572,7 +5946,7 @@ where each line of input produces a line of output.}
\edef\temp{%
\write\tocfile{@#1entry{#2}{#3}{\lastnode}{\noexpand\folio}}}%
\temp
- }
+ }%
\fi
\fi
%
@@ -4589,7 +5963,7 @@ where each line of input produces a line of output.}
% These characters do not print properly in the Computer Modern roman
% fonts, so we must take special care. This is more or less redundant
% with the Texinfo input format setup at the end of this file.
-%
+%
\def\activecatcodes{%
\catcode`\"=\active
\catcode`\$=\active
@@ -4607,7 +5981,7 @@ where each line of input produces a line of output.}
\def\readtocfile{%
\setupdatafile
\activecatcodes
- \input \jobname.toc
+ \input \tocreadfilename
}
\newskip\contentsrightmargin \contentsrightmargin=1in
@@ -4626,7 +6000,6 @@ where each line of input produces a line of output.}
%
% Don't need to put `Contents' or `Short Contents' in the headline.
% It is abundantly clear what they are.
- \def\thischapter{}%
\chapmacro{#1}{Yomitfromtoc}{}%
%
\savepageno = \pageno
@@ -4638,11 +6011,16 @@ where each line of input produces a line of output.}
\ifnum \pageno>0 \global\pageno = \lastnegativepageno \fi
}
+% redefined for the two-volume lispref. We always output on
+% \jobname.toc even if this is redefined.
+%
+\def\tocreadfilename{\jobname.toc}
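+% (A wrapper file could, hypothetically, say \def\tocreadfilename{vol1.toc}
+% to read a different table of contents while still writing \jobname.toc.)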
% Normal (long) toc.
+%
\def\contents{%
\startcontents{\putwordTOC}%
- \openin 1 \jobname.toc
+ \openin 1 \tocreadfilename\space
\ifeof 1 \else
\readtocfile
\fi
@@ -4661,6 +6039,7 @@ where each line of input produces a line of output.}
\def\summarycontents{%
\startcontents{\putwordShortTOC}%
%
+ \let\partentry = \shortpartentry
\let\numchapentry = \shortchapentry
\let\appentry = \shortchapentry
\let\unnchapentry = \shortunnchapentry
@@ -4680,7 +6059,7 @@ where each line of input produces a line of output.}
\let\numsubsubsecentry = \numsecentry
\let\appsubsubsecentry = \numsecentry
\let\unnsubsubsecentry = \numsecentry
- \openin 1 \jobname.toc
+ \openin 1 \tocreadfilename\space
\ifeof 1 \else
\readtocfile
\fi
@@ -4716,6 +6095,19 @@ where each line of input produces a line of output.}
% The last argument is the page number.
% The arguments in between are the chapter number, section number, ...
+% Parts, in the main contents. Replace the part number, which doesn't
+% exist, with an empty box. Let's hope all the numbers have the same width.
+% Also ignore the page number, which is conventionally not printed.
+\def\numeralbox{\setbox0=\hbox{8}\hbox to \wd0{\hfil}}
+\def\partentry#1#2#3#4{\dochapentry{\numeralbox\labelspace#1}{}}
+%
+% Parts, in the short toc.
+\def\shortpartentry#1#2#3#4{%
+ \penalty-300
+ \vskip.5\baselineskip plus.15\baselineskip minus.1\baselineskip
+ \shortchapentry{{\bf #1}}{\numeralbox}{}{}%
+}
+
% Chapters, in the main contents.
\def\numchapentry#1#2#3#4{\dochapentry{#2\labelspace#1}{#4}}
%
@@ -4805,45 +6197,12 @@ where each line of input produces a line of output.}
\message{environments,}
% @foo ... @end foo.
-% @point{}, @result{}, @expansion{}, @print{}, @equiv{}.
-%
-% Since these characters are used in examples, it should be an even number of
-% \tt widths. Each \tt character is 1en, so two makes it 1em.
-%
-\def\point{$\star$}
-\def\result{\leavevmode\raise.15ex\hbox to 1em{\hfil$\Rightarrow$\hfil}}
-\def\expansion{\leavevmode\raise.1ex\hbox to 1em{\hfil$\mapsto$\hfil}}
-\def\print{\leavevmode\lower.1ex\hbox to 1em{\hfil$\dashv$\hfil}}
-\def\equiv{\leavevmode\lower.1ex\hbox to 1em{\hfil$\ptexequiv$\hfil}}
-
-% The @error{} command.
-% Adapted from the TeXbook's \boxit.
-%
-\newbox\errorbox
-%
-{\tentt \global\dimen0 = 3em}% Width of the box.
-\dimen2 = .55pt % Thickness of rules
-% The text. (`r' is open on the right, `e' somewhat less so on the left.)
-\setbox0 = \hbox{\kern-.75pt \tensf error\kern-1.5pt}
-%
-\setbox\errorbox=\hbox to \dimen0{\hfil
- \hsize = \dimen0 \advance\hsize by -5.8pt % Space to left+right.
- \advance\hsize by -2\dimen2 % Rules.
- \vbox{%
- \hrule height\dimen2
- \hbox{\vrule width\dimen2 \kern3pt % Space to left of text.
- \vtop{\kern2.4pt \box0 \kern2.4pt}% Space above/below.
- \kern3pt\vrule width\dimen2}% Space to right.
- \hrule height\dimen2}
- \hfil}
-%
-\def\error{\leavevmode\lower.7ex\copy\errorbox}
-
-% @tex ... @end tex escapes into raw Tex temporarily.
+% @tex ... @end tex escapes into raw TeX temporarily.
% One exception: @ is still an escape character, so that @end tex works.
-% But \@ or @@ will get a plain tex @ character.
+% But \@ or @@ will get a plain @ character.
\envdef\tex{%
+ \setupmarkupstyle{tex}%
\catcode `\\=0 \catcode `\{=1 \catcode `\}=2
\catcode `\$=3 \catcode `\&=4 \catcode `\#=6
\catcode `\^=7 \catcode `\_=8 \catcode `\~=\active \let~=\tie
@@ -4853,8 +6212,14 @@ where each line of input produces a line of output.}
\catcode `\|=\other
\catcode `\<=\other
\catcode `\>=\other
+ \catcode`\`=\other
+ \catcode`\'=\other
\escapechar=`\\
%
+ % ' is active in math mode (mathcode"8000). So reset it, and all our
+ % other math active characters (just in case), to plain's definitions.
+ \mathactive
+ %
\let\b=\ptexb
\let\bullet=\ptexbullet
\let\c=\ptexc
@@ -4872,6 +6237,7 @@ where each line of input produces a line of output.}
\let\/=\ptexslash
\let\*=\ptexstar
\let\t=\ptext
+ \expandafter \let\csname top\endcsname=\ptextop % outer
\let\frenchspacing=\plainfrenchspacing
%
\def\endldots{\mathinner{\ldots\ldots\ldots\ldots}}%
@@ -4957,6 +6323,12 @@ where each line of input produces a line of output.}
\normbskip=\baselineskip \normpskip=\parskip \normlskip=\lineskip
% Flag to tell @lisp, etc., not to narrow margin.
\let\nonarrowing = t%
+ %
+ % If this cartouche directly follows a sectioning command, we need the
+ % \parskip glue (backspaced over by default) or the cartouche can
+ % collide with the section heading.
+ \ifnum\lastpenalty>10000 \vskip\parskip \penalty\lastpenalty \fi
+ %
\vbox\bgroup
\baselineskip=0pt\parskip=0pt\lineskip=0pt
\carttop
@@ -4970,7 +6342,7 @@ where each line of input produces a line of output.}
\lineskip=\normlskip
\parskip=\normpskip
\vskip -\parskip
- \comment % For explanation, see the end of \def\group.
+ \comment % For explanation, see the end of def\group.
}
\def\Ecartouche{%
\ifhmode\par\fi
@@ -4987,6 +6359,7 @@ where each line of input produces a line of output.}
% This macro is called at the beginning of all the @example variants,
% inside a group.
+\newdimen\nonfillparindent
\def\nonfillstart{%
\aboveenvbreak
\hfuzz = 12pt % Don't be fussy
@@ -4994,7 +6367,12 @@ where each line of input produces a line of output.}
\let\par = \lisppar % don't ignore blank lines
\obeylines % each line of input is a line of output
\parskip = 0pt
+ % Turn off paragraph indentation but redefine \indent to emulate
+ % the normal \indent.
+ \nonfillparindent=\parindent
\parindent = 0pt
+ \let\indent\nonfillindent
+ %
\emergencystretch = 0pt % don't try to avoid overfull boxes
\ifx\nonarrowing\relax
\advance \leftskip by \lispnarrowing
@@ -5005,6 +6383,24 @@ where each line of input produces a line of output.}
\let\exdent=\nofillexdent
}
+\begingroup
+\obeyspaces
+% We want to swallow spaces (but not other tokens) after the fake
+% @indent in our nonfill-environments, where spaces are normally
+% active and set to @tie, resulting in them not being ignored after
+% @indent.
+\gdef\nonfillindent{\futurelet\temp\nonfillindentcheck}%
+\gdef\nonfillindentcheck{%
+\ifx\temp %
+\expandafter\nonfillindentgobble%
+\else%
+\leavevmode\nonfillindentbox%
+\fi%
+}%
+\endgroup
+\def\nonfillindentgobble#1{\nonfillindent}
+\def\nonfillindentbox{\hbox to \nonfillparindent{\hss}}
+
% If you want all examples etc. small: @set dispenvsize small.
% If you want even small examples the full size: @set dispenvsize nosmall.
% This affects the following displayed environments:
@@ -5015,53 +6411,59 @@ where each line of input produces a line of output.}
\let\SETdispenvsize\relax
\def\setnormaldispenv{%
\ifx\SETdispenvsize\smallword
+ % end paragraph for sake of leading, in case document has no blank
+ % line. This is redundant with what happens in \aboveenvbreak, but
+ % we need to do it before changing the fonts, and it's inconvenient
+ % to change the fonts afterward.
+ \ifnum \lastpenalty=10000 \else \endgraf \fi
\smallexamplefonts \rm
\fi
}
\def\setsmalldispenv{%
\ifx\SETdispenvsize\nosmallword
\else
+ \ifnum \lastpenalty=10000 \else \endgraf \fi
\smallexamplefonts \rm
\fi
}
% We often define two environments, @foo and @smallfoo.
-% Let's do it by one command:
-\def\makedispenv #1#2{
- \expandafter\envdef\csname#1\endcsname {\setnormaldispenv #2}
- \expandafter\envdef\csname small#1\endcsname {\setsmalldispenv #2}
+% Let's do it in one command. #1 is the env name, #2 the definition.
+\def\makedispenvdef#1#2{%
+ \expandafter\envdef\csname#1\endcsname {\setnormaldispenv #2}%
+ \expandafter\envdef\csname small#1\endcsname {\setsmalldispenv #2}%
\expandafter\let\csname E#1\endcsname \afterenvbreak
\expandafter\let\csname Esmall#1\endcsname \afterenvbreak
}
-% Define two synonyms:
-\def\maketwodispenvs #1#2#3{
- \makedispenv{#1}{#3}
- \makedispenv{#2}{#3}
+% Define two environment synonyms (#1 and #2) for an environment.
+\def\maketwodispenvdef#1#2#3{%
+ \makedispenvdef{#1}{#3}%
+ \makedispenvdef{#2}{#3}%
}
-
-% @lisp: indented, narrowed, typewriter font; @example: same as @lisp.
+%
+% @lisp: indented, narrowed, typewriter font;
+% @example: same as @lisp.
%
% @smallexample and @smalllisp: use smaller fonts.
% Originally contributed by Pavel@xerox.
%
-\maketwodispenvs {lisp}{example}{%
+\maketwodispenvdef{lisp}{example}{%
\nonfillstart
- \tt
+ \tt\setupmarkupstyle{example}%
\let\kbdfont = \kbdexamplefont % Allow @kbd to do something special.
- \gobble % eat return
+ \gobble % eat return
}
-
% @display/@smalldisplay: same as @lisp except keep current font.
%
-\makedispenv {display}{%
+\makedispenvdef{display}{%
\nonfillstart
\gobble
}
% @format/@smallformat: same as @display except don't narrow margins.
%
-\makedispenv{format}{%
+\makedispenvdef{format}{%
\let\nonarrowing = t%
\nonfillstart
\gobble
@@ -5080,18 +6482,44 @@ where each line of input produces a line of output.}
\envdef\flushright{%
\let\nonarrowing = t%
\nonfillstart
- \advance\leftskip by 0pt plus 1fill
+ \advance\leftskip by 0pt plus 1fill\relax
\gobble
}
\let\Eflushright = \afterenvbreak
+% @raggedright does more-or-less normal line breaking but no right
+% justification. From plain.tex.
+\envdef\raggedright{%
+ \rightskip0pt plus2em \spaceskip.3333em \xspaceskip.5em\relax
+}
+\let\Eraggedright\par
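+% e.g.  @raggedright ... @end raggedright  keeps normal line breaking but
+% lets the right margin come out ragged.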
+
+\envdef\raggedleft{%
+ \parindent=0pt \leftskip0pt plus2em
+ \spaceskip.3333em \xspaceskip.5em \parfillskip=0pt
+ \hbadness=10000 % Last line will usually be underfull, so turn off
+ % badness reporting.
+}
+\let\Eraggedleft\par
+
+\envdef\raggedcenter{%
+ \parindent=0pt \rightskip0pt plus1em \leftskip0pt plus1em
+ \spaceskip.3333em \xspaceskip.5em \parfillskip=0pt
+ \hbadness=10000 % Last line will usually be underfull, so turn off
+ % badness reporting.
+}
+\let\Eraggedcenter\par
+
+
% @quotation does normal linebreaking (hence we can't use \nonfillstart)
% and narrows the margins. We keep \parskip nonzero in general, since
% we're doing normal filling. So, when using \aboveenvbreak and
% \afterenvbreak, temporarily make \parskip 0.
%
-\envdef\quotation{%
+\makedispenvdef{quotation}{\quotationstart}
+%
+\def\quotationstart{%
{\parskip=0pt \aboveenvbreak}% because \aboveenvbreak inserts \parskip
\parindent=0pt
%
@@ -5111,12 +6539,13 @@ where each line of input produces a line of output.}
%
\def\Equotation{%
\par
- \ifx\quotationauthor\undefined\else
+ \ifx\quotationauthor\thisisundefined\else
% indent a bit.
\leftline{\kern 2\leftskip \sl ---\quotationauthor}%
\fi
{\parskip=0pt \afterenvbreak}%
}
+\def\Esmallquotation{\Equotation}
% If we're given an argument, typeset it in bold with a colon after.
\def\quotationlabel#1{%
@@ -5141,18 +6570,16 @@ where each line of input produces a line of output.}
\do\ \do\\\do\{\do\}\do\$\do\&%
\do\#\do\^\do\^^K\do\_\do\^^A\do\%\do\~%
\do\<\do\>\do\|\do\@\do+\do\"%
+ % Don't do the quotes -- if we do, @set txicodequoteundirected and
+  % @set txicodequotebacktick will have no effect on @verb and
+ % @verbatim, and ?` and !` ligatures won't get disabled.
+ %\do\`\do\'%
}
%
% [Knuth] p. 380
\def\uncatcodespecials{%
\def\do##1{\catcode`##1=\other}\dospecials}
%
-% [Knuth] pp. 380,381,391
-% Disable Spanish ligatures ?` and !` of \tt font
-\begingroup
- \catcode`\`=\active\gdef`{\relax\lq}
-\endgroup
-%
% Setup for the @verb command.
%
% Eight spaces for a tab
@@ -5164,7 +6591,7 @@ where each line of input produces a line of output.}
\def\setupverb{%
\tt % easiest (and conventionally used) font for verbatim
\def\par{\leavevmode\endgraf}%
- \catcode`\`=\active
+ \setupmarkupstyle{verb}%
\tabeightspaces
% Respect line breaks,
% print special symbols as themselves, and
@@ -5175,35 +6602,46 @@ where each line of input produces a line of output.}
% Setup for the @verbatim environment
%
-% Real tab expansion
+% Real tab expansion.
\newdimen\tabw \setbox0=\hbox{\tt\space} \tabw=8\wd0 % tab amount
%
-\def\starttabbox{\setbox0=\hbox\bgroup}
+% We typeset each line of the verbatim in an \hbox, so we can handle
+% tabs. The \global is in case the verbatim line starts with an accent,
+% or some other command that starts with a begin-group. Otherwise, the
+% entire \verbbox would disappear at the corresponding end-group, before
+% it is typeset. Meanwhile, we can't have nested verbatim commands
+% (can we?), so the \global won't be overwriting itself.
+\newbox\verbbox
+\def\starttabbox{\global\setbox\verbbox=\hbox\bgroup}
+%
\begingroup
\catcode`\^^I=\active
\gdef\tabexpand{%
\catcode`\^^I=\active
\def^^I{\leavevmode\egroup
- \dimen0=\wd0 % the width so far, or since the previous tab
- \divide\dimen0 by\tabw
- \multiply\dimen0 by\tabw % compute previous multiple of \tabw
- \advance\dimen0 by\tabw % advance to next multiple of \tabw
- \wd0=\dimen0 \box0 \starttabbox
+ \dimen\verbbox=\wd\verbbox % the width so far, or since the previous tab
+ \divide\dimen\verbbox by\tabw
+ \multiply\dimen\verbbox by\tabw % compute previous multiple of \tabw
+ \advance\dimen\verbbox by\tabw % advance to next multiple of \tabw
+ \wd\verbbox=\dimen\verbbox \box\verbbox \starttabbox
}%
}
\endgroup
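+%
+% For example, with \tabw equal to 8pt and a line whose text so far is
+% 19pt wide, the divide/multiply pair above gives floor(19/8)*8pt = 16pt,
+% and advancing by \tabw moves the tab stop to 24pt, the next multiple
+% of \tabw.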
+
+% start the verbatim environment.
\def\setupverbatim{%
\let\nonarrowing = t%
\nonfillstart
- % Easiest (and conventionally used) font for verbatim
- \tt
- \def\par{\leavevmode\egroup\box0\endgraf}%
- \catcode`\`=\active
+ \tt % easiest (and conventionally used) font for verbatim
+ % The \leavevmode here is for blank lines. Otherwise, we would
+  % never \starttabbox and the \egroup would end verbatim mode.
+ \def\par{\leavevmode\egroup\box\verbbox\endgraf}%
\tabexpand
+ \setupmarkupstyle{verbatim}%
% Respect line breaks,
% print special symbols as themselves, and
- % make each space count
- % must do in this order:
+ % make each space count.
+ % Must do in this order:
\obeylines \uncatcodespecials \sepspaces
\everypar{\starttabbox}%
}
@@ -5259,6 +6697,8 @@ where each line of input produces a line of output.}
{%
\makevalueexpandable
\setupverbatim
+ \indexnofonts % Allow `@@' and other weird things in file names.
+ \wlog{texinfo.tex: doing @verbatiminclude of #1^^J}%
\input #1
\afterenvbreak
}%
@@ -5284,27 +6724,35 @@ where each line of input produces a line of output.}
\endgroup
}
+
\message{defuns,}
% @defun etc.
\newskip\defbodyindent \defbodyindent=.4in
\newskip\defargsindent \defargsindent=50pt
\newskip\deflastargmargin \deflastargmargin=18pt
+\newcount\defunpenalty
% Start the processing of @deffn:
\def\startdefun{%
\ifnum\lastpenalty<10000
\medbreak
+ \defunpenalty=10003 % Will keep this @deffn together with the
+ % following @def command, see below.
\else
% If there are two @def commands in a row, we'll have a \nobreak,
% which is there to keep the function description together with its
% header. But if there's nothing but headers, we need to allow a
% break somewhere. Check specifically for penalty 10002, inserted
- % by \defargscommonending, instead of 10000, since the sectioning
+ % by \printdefunline, instead of 10000, since the sectioning
% commands also insert a nobreak penalty, and we don't want to allow
% a break between a section heading and a defun.
- %
- \ifnum\lastpenalty=10002 \penalty2000 \fi
+ %
+ % As a further refinement, we avoid "club" headers by signalling
+ % with penalty of 10003 after the very first @deffn in the
+ % sequence (see above), and penalty of 10002 after any following
+ % @def command.
+ \ifnum\lastpenalty=10002 \penalty2000 \else \defunpenalty=10002 \fi
%
% Similarly, after a section heading, do not allow a break.
% But do insert the glue.
@@ -5322,7 +6770,7 @@ where each line of input produces a line of output.}
%
% As above, allow line break if we have multiple x headers in a row.
% It's not a great place, though.
- \ifnum\lastpenalty=10002 \penalty3000 \fi
+ \ifnum\lastpenalty=10002 \penalty3000 \else \defunpenalty=10002 \fi
%
% And now, it's time to reuse the body of the original defun:
\expandafter\gobbledefun#1%
@@ -5337,10 +6785,10 @@ where each line of input produces a line of output.}
#1#2 \endheader
% common ending:
\interlinepenalty = 10000
- \advance\rightskip by 0pt plus 1fil
+ \advance\rightskip by 0pt plus 1fil\relax
\endgraf
\nobreak\vskip -\parskip
- \penalty 10002 % signal to \startdefun and \dodefunx
+ \penalty\defunpenalty % signal to \startdefun and \dodefunx
% Some of the @defun-type tags do not enable magic parentheses,
% rendering the following check redundant. But we don't optimize.
\checkparencounts
@@ -5350,7 +6798,7 @@ where each line of input produces a line of output.}
\def\Edefun{\endgraf\medbreak}
% \makedefun{deffn} creates \deffn, \deffnx and \Edeffn;
-% the only thing remainnig is to define \deffnheader.
+% the only thing remaining is to define \deffnheader.
%
\def\makedefun#1{%
\expandafter\let\csname E#1\endcsname = \Edefun
@@ -5367,13 +6815,36 @@ where each line of input produces a line of output.}
\def\domakedefun#1#2#3{%
\envdef#1{%
\startdefun
+ \doingtypefnfalse % distinguish typed functions from all else
\parseargusing\activeparens{\printdefunline#3}%
}%
\def#2{\dodefunx#1}%
\def#3%
}
-%%% Untyped functions:
+\newif\ifdoingtypefn % doing typed function?
+\newif\ifrettypeownline % typeset return type on its own line?
+
+% @deftypefnnewline on|off says whether the return type of typed functions
+% are printed on their own line. This affects @deftypefn, @deftypefun,
+% @deftypeop, and @deftypemethod.
+%
+\parseargdef\deftypefnnewline{%
+ \def\temp{#1}%
+ \ifx\temp\onword
+ \expandafter\let\csname SETtxideftypefnnl\endcsname
+ = \empty
+ \else\ifx\temp\offword
+ \expandafter\let\csname SETtxideftypefnnl\endcsname
+ = \relax
+ \else
+ \errhelp = \EMsimple
+ \errmessage{Unknown @txideftypefnnl value `\temp',
+ must be on|off}%
+ \fi\fi
+}
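+%
+% For example, a Texinfo document might contain
+%   @deftypefnnewline on
+%   @deftypefn {Library Function} int foobar (int @var{foo}, float @var{bar})
+%   ...
+%   @end deftypefn
+% so that the return type `int' is typeset on a line of its own.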
+
+% Untyped functions:
% @deffn category name args
\makedefun{deffn}{\deffngeneral{}}
@@ -5392,7 +6863,7 @@ where each line of input produces a line of output.}
\defname{#2}{}{#3}\magicamp\defunargs{#4\unskip}%
}
-%%% Typed functions:
+% Typed functions:
% @deftypefn category type name args
\makedefun{deftypefn}{\deftypefngeneral{}}
@@ -5407,10 +6878,11 @@ where each line of input produces a line of output.}
%
\def\deftypefngeneral#1#2 #3 #4 #5\endheader{%
\dosubind{fn}{\code{#4}}{#1}%
+ \doingtypefntrue
\defname{#2}{#3}{#4}\defunargs{#5\unskip}%
}
-%%% Typed variables:
+% Typed variables:
% @deftypevr category type var args
\makedefun{deftypevr}{\deftypecvgeneral{}}
@@ -5428,7 +6900,7 @@ where each line of input produces a line of output.}
\defname{#2}{#3}{#4}\defunargs{#5\unskip}%
}
-%%% Untyped variables:
+% Untyped variables:
% @defvr category var args
\makedefun{defvr}#1 {\deftypevrheader{#1} {} }
@@ -5439,7 +6911,8 @@ where each line of input produces a line of output.}
% \defcvof {category of}class var args
\def\defcvof#1#2 {\deftypecvof{#1}#2 {} }
-%%% Type:
+% Types:
+
% @deftp category name args
\makedefun{deftp}#1 #2 #3\endheader{%
\doind{tp}{\code{#2}}%
@@ -5467,25 +6940,49 @@ where each line of input produces a line of output.}
% We are followed by (but not passed) the arguments, if any.
%
\def\defname#1#2#3{%
+ \par
% Get the values of \leftskip and \rightskip as they were outside the @def...
\advance\leftskip by -\defbodyindent
%
- % How we'll format the type name. Putting it in brackets helps
+ % Determine if we are typesetting the return type of a typed function
+ % on a line by itself.
+ \rettypeownlinefalse
+ \ifdoingtypefn % doing a typed function specifically?
+ % then check user option for putting return type on its own line:
+ \expandafter\ifx\csname SETtxideftypefnnl\endcsname\relax \else
+ \rettypeownlinetrue
+ \fi
+ \fi
+ %
+ % How we'll format the category name. Putting it in brackets helps
% distinguish it from the body text that may end up on the next line
% just below it.
\def\temp{#1}%
\setbox0=\hbox{\kern\deflastargmargin \ifx\temp\empty\else [\rm\temp]\fi}
%
- % Figure out line sizes for the paragraph shape.
+ % Figure out line sizes for the paragraph shape. We'll always have at
+ % least two.
+ \tempnum = 2
+ %
% The first line needs space for \box0; but if \rightskip is nonzero,
% we need only space for the part of \box0 which exceeds it:
\dimen0=\hsize \advance\dimen0 by -\wd0 \advance\dimen0 by \rightskip
+ %
+ % If doing a return type on its own line, we'll have another line.
+ \ifrettypeownline
+ \advance\tempnum by 1
+ \def\maybeshapeline{0in \hsize}%
+ \else
+ \def\maybeshapeline{}%
+ \fi
+ %
% The continuations:
\dimen2=\hsize \advance\dimen2 by -\defargsindent
- % (plain.tex says that \dimen1 should be used only as global.)
- \parshape 2 0in \dimen0 \defargsindent \dimen2
%
- % Put the type name to the right margin.
+ % The final paragraph shape:
+ \parshape \tempnum 0in \dimen0 \maybeshapeline \defargsindent \dimen2
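+  % (So when the return type gets a line of its own, the shape has three
+  % entries: a first line shortened to leave room for the bracketed
+  % category, a full-width line, and then the indented continuation lines.)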
+ %
+ % Put the category name at the right margin.
\noindent
\hbox to 0pt{%
\hfil\box0 \kern-\hsize
@@ -5507,8 +7004,16 @@ where each line of input produces a line of output.}
% . this still does not fix the ?` and !` ligatures, but so far no
% one has made identifiers using them :).
\df \tt
- \def\temp{#2}% return value type
- \ifx\temp\empty\else \tclose{\temp} \fi
+ \def\temp{#2}% text of the return type
+ \ifx\temp\empty\else
+ \tclose{\temp}% typeset the return type
+ \ifrettypeownline
+ % put return type on its own line; prohibit line break following:
+ \hfil\vadjust{\nobreak}\break
+ \else
+ \space % type on same line, so just followed by a space
+ \fi
+ \fi % no return type
#3% output function name
}%
{\rm\enskip}% hskip 0.5 em of \tenrm
@@ -5529,7 +7034,7 @@ where each line of input produces a line of output.}
%
% On the other hand, if an argument has two dashes (for instance), we
% want a way to get ttsl. Let's try @var for that.
- \let\var=\ttslanted
+ \def\var##1{{\setupmarkupstyle{var}\ttslanted{##1}}}%
#1%
\sl\hyphenchar\font=45
}
@@ -5609,12 +7114,14 @@ where each line of input produces a line of output.}
\ifnum\parencount=0 \else \badparencount \fi
\ifnum\brackcount=0 \else \badbrackcount \fi
}
+% these should not use \errmessage; the glibc manual, at least, actually
+% has such constructs (when documenting function pointers).
\def\badparencount{%
- \errmessage{Unbalanced parentheses in @def}%
+ \message{Warning: unbalanced parentheses in @def...}%
\global\parencount=0
}
\def\badbrackcount{%
- \errmessage{Unbalanced square braces in @def}%
+ \message{Warning: unbalanced square brackets in @def...}%
\global\brackcount=0
}
@@ -5624,7 +7131,7 @@ where each line of input produces a line of output.}
% To do this right we need a feature of e-TeX, \scantokens,
% which we arrange to emulate with a temporary file in ordinary TeX.
-\ifx\eTeXversion\undefined
+\ifx\eTeXversion\thisisundefined
\newwrite\macscribble
\def\scantokens#1{%
\toks0={#1}%
@@ -5635,26 +7142,30 @@ where each line of input produces a line of output.}
}
\fi
-\def\scanmacro#1{%
- \begingroup
- \newlinechar`\^^M
- \let\xeatspaces\eatspaces
- % Undo catcode changes of \startcontents and \doprintindex
- % When called from @insertcopying or (short)caption, we need active
- % backslash to get it printed correctly. Previously, we had
- % \catcode`\\=\other instead. We'll see whether a problem appears
- % with macro expansion. --kasal, 19aug04
- \catcode`\@=0 \catcode`\\=\active \escapechar=`\@
- % ... and \example
- \spaceisspace
- %
- % Append \endinput to make sure that TeX does not see the ending newline.
- %
- % I've verified that it is necessary both for e-TeX and for ordinary TeX
- % --kasal, 29nov03
- \scantokens{#1\endinput}%
- \endgroup
-}
+\def\scanmacro#1{\begingroup
+ \newlinechar`\^^M
+ \let\xeatspaces\eatspaces
+ %
+ % Undo catcode changes of \startcontents and \doprintindex
+ % When called from @insertcopying or (short)caption, we need active
+ % backslash to get it printed correctly. Previously, we had
+ % \catcode`\\=\other instead. We'll see whether a problem appears
+ % with macro expansion. --kasal, 19aug04
+ \catcode`\@=0 \catcode`\\=\active \escapechar=`\@
+ %
+ % ... and for \example:
+ \spaceisspace
+ %
+ % The \empty here causes a following catcode 5 newline to be eaten as
+ % part of reading whitespace after a control sequence. It does not
+ % eat a catcode 13 newline. There's no good way to handle the two
+ % cases (untried: maybe e-TeX's \everyeof could help, though plain TeX
+ % would then have different behavior). See the Macro Details node in
+ % the manual for the workaround we recommend for macros and
+ % line-oriented commands.
+ %
+ \scantokens{#1\empty}%
+\endgroup}
\def\scanexp#1{%
\edef\temp{\noexpand\scanmacro{#1}}%
@@ -5682,7 +7193,7 @@ where each line of input produces a line of output.}
% This does \let #1 = #2, with \csnames; that is,
% \let \csname#1\endcsname = \csname#2\endcsname
% (except of course we have to play expansion games).
-%
+%
\def\cslet#1#2{%
\expandafter\let
\csname#1\expandafter\endcsname
@@ -5708,13 +7219,18 @@ where each line of input produces a line of output.}
% Macro bodies are absorbed as an argument in a context where
% all characters are catcode 10, 11 or 12, except \ which is active
-% (as in normal texinfo). It is necessary to change the definition of \.
-
+% (as in normal texinfo). It is necessary to change the definition of \
+% to recognize macro arguments; this is the job of \mbodybackslash.
+%
+% Non-ASCII encodings make 8-bit characters active, so un-activate
+% them to avoid their expansion. Must do this non-globally, to
+% confine the change to the current group.
+%
% It's necessary to have hard CRs when the macro is executed. This is
-% done by making ^^M (\endlinechar) catcode 12 when reading the macro
+% done by making ^^M (\endlinechar) catcode 12 when reading the macro
% body, and then making it the \newlinechar in \scanmacro.
-
-\def\scanctxt{%
+%
+\def\scanctxt{% used as subroutine
\catcode`\"=\other
\catcode`\+=\other
\catcode`\<=\other
@@ -5724,15 +7240,16 @@ where each line of input produces a line of output.}
\catcode`\_=\other
\catcode`\|=\other
\catcode`\~=\other
+ \ifx\declaredencoding\ascii \else \setnonasciicharscatcodenonglobal\other \fi
}
-\def\scanargctxt{%
+\def\scanargctxt{% used for copying and captions, not macros.
\scanctxt
\catcode`\\=\other
\catcode`\^^M=\other
}
-\def\macrobodyctxt{%
+\def\macrobodyctxt{% used for @macro definitions
\scanctxt
\catcode`\{=\other
\catcode`\}=\other
@@ -5740,32 +7257,56 @@ where each line of input produces a line of output.}
\usembodybackslash
}
-\def\macroargctxt{%
+\def\macroargctxt{% used when scanning invocations
\scanctxt
- \catcode`\\=\other
+ \catcode`\\=0
}
+% why catcode 0 for \ in the above? To recognize \\ \{ \} as "escapes"
+% for the single characters \ { }. Thus, we end up with the "commands"
+% that would be written @\ @{ @} in a Texinfo document.
+%
+% We already have @{ and @}. For @\, we define it here, and only for
+% this purpose, to produce a typewriter backslash (so, the @\ that we
+% define for @math can't be used with @macro calls):
+%
+\def\\{\normalbackslash}%
+%
+% We would like to do this for \, too, since that is what makeinfo does.
+% But it is not possible, because Texinfo already has a command @, for a
+% cedilla accent. Documents must use @comma{} instead.
+%
+% \anythingelse will almost certainly be an error of some kind.
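+%
+% For example, with a macro defined as  @macro mym {arg} ... @end macro,
+% a call such as  @mym{x \{ y \\ z}  passes the argument text
+% `x { y \ z', the \{ and \\ standing for the single characters.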
+
% \mbodybackslash is the definition of \ in @macro bodies.
% It maps \foo\ => \csname macarg.foo\endcsname => #N
% where N is the macro parameter number.
% We define \csname macarg.\endcsname to be \realbackslash, so
% \\ in macro replacement text gets you a backslash.
-
+%
{\catcode`@=0 @catcode`@\=@active
@gdef@usembodybackslash{@let\=@mbodybackslash}
@gdef@mbodybackslash#1\{@csname macarg.#1@endcsname}
}
\expandafter\def\csname macarg.\endcsname{\realbackslash}
+\def\margbackslash#1{\char`\#1 }
+
\def\macro{\recursivefalse\parsearg\macroxxx}
\def\rmacro{\recursivetrue\parsearg\macroxxx}
\def\macroxxx#1{%
- \getargs{#1}% now \macname is the macname and \argl the arglist
+ \getargs{#1}% now \macname is the macname and \argl the arglist
\ifx\argl\empty % no arguments
- \paramno=0%
+ \paramno=0\relax
\else
\expandafter\parsemargdef \argl;%
+    \ifnum\paramno>256\relax
+ \ifx\eTeXversion\thisisundefined
+ \errhelp = \EMsimple
+ \errmessage{You need eTeX to compile a file with macros with more than 256 arguments}
+ \fi
+ \fi
\fi
\if1\csname ismacro.\the\macname\endcsname
\message{Warning: redefining \the\macname}%
@@ -5812,46 +7353,269 @@ where each line of input produces a line of output.}
% an opening brace, and that opening brace is not consumed.
\def\getargs#1{\getargsxxx#1{}}
\def\getargsxxx#1#{\getmacname #1 \relax\getmacargs}
-\def\getmacname #1 #2\relax{\macname={#1}}
+\def\getmacname#1 #2\relax{\macname={#1}}
\def\getmacargs#1{\def\argl{#1}}
+% For macro processing, make @ a letter so that we can construct
+% Texinfo-private macro names.
+\edef\texiatcatcode{\the\catcode`\@}
+\catcode `@=11\relax
+
% Parse the optional {params} list. Set up \paramno and \paramlist
-% so \defmacro knows what to do. Define \macarg.blah for each blah
-% in the params list, to be ##N where N is the position in that list.
+% so \defmacro knows what to do. Define \macarg.BLAH for each BLAH
+% in the params list to some hook where the argument is to be expanded. If
+% there are fewer than 10 arguments, that hook is replaced by ##N, where N
+% is the position in that list; that is to say, the macro arguments are
+% defined `a la TeX in the macro body.
+%
% That gets used by \mbodybackslash (above).
-
+%
% We need to get `macro parameter char #' into several definitions.
-% The technique used is stolen from LaTeX: let \hash be something
+% The technique used is stolen from LaTeX: let \hash be something
% unexpandable, insert that wherever you need a #, and then redefine
% it to # just before using the token list produced.
%
% The same technique is used to protect \eatspaces till just before
% the macro is used.
-
-\def\parsemargdef#1;{\paramno=0\def\paramlist{}%
- \let\hash\relax\let\xeatspaces\relax\parsemargdefxxx#1,;,}
+%
+% If there are 10 or more arguments, a different technique is used: the
+% hook remains in the body, and when the macro is to be expanded, the body
+% is processed again to replace the arguments.
+%
+% In that case, the hook is \the\toks N-1, and we simply set \toks N-1 to
+% the value of argument N and then \edef the body (nothing else will expand
+% because of the catcode regime under which the body was input).
+%
+% If you compile with TeX (not eTeX) and use macros with 10 or more
+% arguments, then no macro may have more than 256 arguments; otherwise an
+% error is produced.
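+%
+% For instance, for  @macro twoarg {a, b}  the hooks are (roughly)
+% \macarg.a -> ##1 and \macarg.b -> ##2, whereas for a macro with, say,
+% twelve parameters the hook for its first parameter is instead
+% \the\toks0, filled in at call time by \putargsintokens@ below.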
+\def\parsemargdef#1;{%
+ \paramno=0\def\paramlist{}%
+ \let\hash\relax
+ \let\xeatspaces\relax
+ \parsemargdefxxx#1,;,%
+  % If there are 10 or more arguments, we parse the argument list again to
+  % set new definitions for the \macarg.BLAH macros corresponding to each
+  % BLAH argument. The list already had to be parsed once just to count the
+  % arguments, and since macros with at most 9 arguments are far more
+  % frequent than macros with 10 or more, defining the \macarg.BLAH macros
+  % twice does not cost much processing power.
+ \ifnum\paramno<10\relax\else
+ \paramno0\relax
+ \parsemmanyargdef@@#1,;,% 10 or more arguments
+ \fi
+}
\def\parsemargdefxxx#1,{%
\if#1;\let\next=\relax
\else \let\next=\parsemargdefxxx
- \advance\paramno by 1%
+ \advance\paramno by 1
\expandafter\edef\csname macarg.\eatspaces{#1}\endcsname
{\xeatspaces{\hash\the\paramno}}%
\edef\paramlist{\paramlist\hash\the\paramno,}%
\fi\next}
+\def\parsemmanyargdef@@#1,{%
+ \if#1;\let\next=\relax
+ \else
+ \let\next=\parsemmanyargdef@@
+ \edef\tempb{\eatspaces{#1}}%
+ \expandafter\def\expandafter\tempa
+ \expandafter{\csname macarg.\tempb\endcsname}%
+    % Note that we need some extra \noexpand\noexpand here, because we
+    % don't want \the to be expanded in \parsermacbody, which uses an
+    % \xdef.
+ \expandafter\edef\tempa
+ {\noexpand\noexpand\noexpand\the\toks\the\paramno}%
+ \advance\paramno by 1\relax
+ \fi\next}
+
% These two commands read recursive and nonrecursive macro bodies.
% (They're different since rec and nonrec macros end differently.)
+%
+\catcode `\@\texiatcatcode
\long\def\parsemacbody#1@end macro%
{\xdef\temp{\eatcr{#1}}\endgroup\defmacro}%
\long\def\parsermacbody#1@end rmacro%
{\xdef\temp{\eatcr{#1}}\endgroup\defmacro}%
+\catcode `\@=11\relax
+
+\let\endargs@\relax
+\let\nil@\relax
+\def\nilm@{\nil@}%
+\long\def\nillm@{\nil@}%
+
+% This macro is expanded during the Texinfo macro expansion, not during its
+% definition. It gets all the argument values and assigns them to macros
+% macarg.ARGNAME
+%
+% #1 is the macro name
+% #2 is the list of argument names
+% #3 is the list of argument values
+\def\getargvals@#1#2#3{%
+ \def\macargdeflist@{}%
+ \def\saveparamlist@{#2}% Need to keep a copy for parameter expansion.
+ \def\paramlist{#2,\nil@}%
+ \def\macroname{#1}%
+ \begingroup
+ \macroargctxt
+ \def\argvaluelist{#3,\nil@}%
+ \def\@tempa{#3}%
+ \ifx\@tempa\empty
+ \setemptyargvalues@
+ \else
+ \getargvals@@
+ \fi
+}
+
+%
+\def\getargvals@@{%
+ \ifx\paramlist\nilm@
+    % Sanity check that \argvaluelist is also empty.
+ \ifx\argvaluelist\nillm@
+ \else
+ \errhelp = \EMsimple
+ \errmessage{Too many arguments in macro `\macroname'!}%
+ \fi
+ \let\next\macargexpandinbody@
+ \else
+ \ifx\argvaluelist\nillm@
+      % No more argument values passed to the macro. Set remaining named-arg
+ % macros to empty.
+ \let\next\setemptyargvalues@
+ \else
+ % pop current arg name into \@tempb
+ \def\@tempa##1{\pop@{\@tempb}{\paramlist}##1\endargs@}%
+ \expandafter\@tempa\expandafter{\paramlist}%
+ % pop current argument value into \@tempc
+ \def\@tempa##1{\longpop@{\@tempc}{\argvaluelist}##1\endargs@}%
+ \expandafter\@tempa\expandafter{\argvaluelist}%
+ % Here \@tempb is the current arg name and \@tempc is the current arg value.
+ % First place the new argument macro definition into \@tempd
+ \expandafter\macname\expandafter{\@tempc}%
+ \expandafter\let\csname macarg.\@tempb\endcsname\relax
+ \expandafter\def\expandafter\@tempe\expandafter{%
+ \csname macarg.\@tempb\endcsname}%
+ \edef\@tempd{\long\def\@tempe{\the\macname}}%
+ \push@\@tempd\macargdeflist@
+ \let\next\getargvals@@
+ \fi
+ \fi
+ \next
+}
+
+\def\push@#1#2{%
+ \expandafter\expandafter\expandafter\def
+ \expandafter\expandafter\expandafter#2%
+ \expandafter\expandafter\expandafter{%
+ \expandafter#1#2}%
+}
+
+% Replace arguments by their values in the macro body, and place the result
+% in macro \@tempa
+\def\macvalstoargs@{%
+ % To do this we use the property that token registers that are \the'ed
+ % within an \edef expand only once. So we are going to place all argument
+ % values into respective token registers.
+ %
+ % First we save the token context, and initialize argument numbering.
+ \begingroup
+ \paramno0\relax
+ % Then, for each argument number #N, we place the corresponding argument
+ % value into a new token list register \toks#N
+ \expandafter\putargsintokens@\saveparamlist@,;,%
+  % Then, we expand the body so that the arguments are replaced by their
+  % values. The trick that keeps the values from being expanded themselves
+  % is that they are inside token registers, and token registers expand
+  % only once in an \edef.
+ \edef\@tempc{\csname mac.\macroname .body\endcsname}%
+  % Now we restore the token stack pointer to free the token list registers
+  % which we have used, but we make sure that the expanded body is saved
+  % once the group ends.
+ \expandafter
+ \endgroup
+ \expandafter\def\expandafter\@tempa\expandafter{\@tempc}%
+ }
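+% (The underlying TeX property, in a minimal sketch:  after
+%    \toks0={\foo}  \edef\temp{\the\toks0 }
+%  \temp holds the single unexpanded token \foo, because \the applied to
+%  a token register contributes its contents to an \edef without further
+%  expansion.)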
+
+\def\macargexpandinbody@{%
+  %% Define the named-argument macros outside of this group, then close the group.
+ \expandafter
+ \endgroup
+ \macargdeflist@
+  % First replace the macro arguments in the body by their values; the
+  % result is in \@tempa.
+ \macvalstoargs@
+ % Then we point at the \norecurse or \gobble (for recursive) macro value
+ % with \@tempb .
+ \expandafter\let\expandafter\@tempb\csname mac.\macroname .recurse\endcsname
+  % Depending on whether it is recursive or not, we need a trailing
+  % \egroup.
+ \ifx\@tempb\gobble
+ \let\@tempc\relax
+ \else
+ \let\@tempc\egroup
+ \fi
+ % And now we do the real job:
+ \edef\@tempd{\noexpand\@tempb{\macroname}\noexpand\scanmacro{\@tempa}\@tempc}%
+ \@tempd
+}
+
+\def\putargsintokens@#1,{%
+ \if#1;\let\next\relax
+ \else
+ \let\next\putargsintokens@
+ % First we allocate the new token list register, and give it a temporary
+ % alias \@tempb .
+ \toksdef\@tempb\the\paramno
+ % Then we place the argument value into that token list register.
+ \expandafter\let\expandafter\@tempa\csname macarg.#1\endcsname
+ \expandafter\@tempb\expandafter{\@tempa}%
+ \advance\paramno by 1\relax
+ \fi
+ \next
+}
+
+% Save the token stack pointer into macro #1
+\def\texisavetoksstackpoint#1{\edef#1{\the\@cclvi}}
+% Restore the token stack pointer from the number in macro #1
+\def\texirestoretoksstackpoint#1{\expandafter\mathchardef\expandafter\@cclvi#1\relax}
+% A version of \newtoks that is not \outer, so it can be used inside macros.
+\def\texinonouternewtoks{\alloc@ 5\toks \toksdef \@cclvi}
+
+% Trailing missing arguments are set to empty
+\def\setemptyargvalues@{%
+ \ifx\paramlist\nilm@
+ \let\next\macargexpandinbody@
+ \else
+ \expandafter\setemptyargvaluesparser@\paramlist\endargs@
+ \let\next\setemptyargvalues@
+ \fi
+ \next
+}
+
+\def\setemptyargvaluesparser@#1,#2\endargs@{%
+ \expandafter\def\expandafter\@tempa\expandafter{%
+ \expandafter\def\csname macarg.#1\endcsname{}}%
+ \push@\@tempa\macargdeflist@
+ \def\paramlist{#2}%
+}
+
+% #1 is the element target macro
+% #2 is the list macro
+% #3,#4\endargs@ is the list value
+\def\pop@#1#2#3,#4\endargs@{%
+ \def#1{#3}%
+ \def#2{#4}%
+}
+\long\def\longpop@#1#2#3,#4\endargs@{%
+ \long\def#1{#3}%
+ \long\def#2{#4}%
+}
-% This defines the macro itself. There are six cases: recursive and
-% nonrecursive macros of zero, one, and many arguments.
+% This defines a Texinfo @macro. There are eight cases: recursive and
+% nonrecursive macros of zero, one, up to nine, and many arguments.
% Much magic with \expandafter here.
% \xdef is used so that macro definitions will survive the file
% they're defined in; @include reads the file inside a group.
+%
\def\defmacro{%
\let\hash=##% convert placeholders to macro parameter chars
\ifrecursive
@@ -5866,17 +7630,25 @@ where each line of input produces a line of output.}
\expandafter\noexpand\csname\the\macname xxx\endcsname}%
\expandafter\xdef\csname\the\macname xxx\endcsname##1{%
\egroup\noexpand\scanmacro{\temp}}%
- \else % many
- \expandafter\xdef\csname\the\macname\endcsname{%
- \bgroup\noexpand\macroargctxt
- \noexpand\csname\the\macname xx\endcsname}%
- \expandafter\xdef\csname\the\macname xx\endcsname##1{%
- \expandafter\noexpand\csname\the\macname xxx\endcsname ##1,}%
- \expandafter\expandafter
- \expandafter\xdef
- \expandafter\expandafter
- \csname\the\macname xxx\endcsname
- \paramlist{\egroup\noexpand\scanmacro{\temp}}%
+ \else
+ \ifnum\paramno<10\relax % at most 9
+ \expandafter\xdef\csname\the\macname\endcsname{%
+ \bgroup\noexpand\macroargctxt
+ \noexpand\csname\the\macname xx\endcsname}%
+ \expandafter\xdef\csname\the\macname xx\endcsname##1{%
+ \expandafter\noexpand\csname\the\macname xxx\endcsname ##1,}%
+ \expandafter\expandafter
+ \expandafter\xdef
+ \expandafter\expandafter
+ \csname\the\macname xxx\endcsname
+ \paramlist{\egroup\noexpand\scanmacro{\temp}}%
+ \else % 10 or more
+ \expandafter\xdef\csname\the\macname\endcsname{%
+ \noexpand\getargvals@{\the\macname}{\argl}%
+ }%
+ \global\expandafter\let\csname mac.\the\macname .body\endcsname\temp
+ \global\expandafter\let\csname mac.\the\macname .recurse\endcsname\gobble
+ \fi
\fi
\else
\ifcase\paramno
@@ -5893,39 +7665,51 @@ where each line of input produces a line of output.}
\egroup
\noexpand\norecurse{\the\macname}%
\noexpand\scanmacro{\temp}\egroup}%
- \else % many
- \expandafter\xdef\csname\the\macname\endcsname{%
- \bgroup\noexpand\macroargctxt
- \expandafter\noexpand\csname\the\macname xx\endcsname}%
- \expandafter\xdef\csname\the\macname xx\endcsname##1{%
- \expandafter\noexpand\csname\the\macname xxx\endcsname ##1,}%
- \expandafter\expandafter
- \expandafter\xdef
- \expandafter\expandafter
- \csname\the\macname xxx\endcsname
- \paramlist{%
- \egroup
- \noexpand\norecurse{\the\macname}%
- \noexpand\scanmacro{\temp}\egroup}%
+ \else % at most 9
+ \ifnum\paramno<10\relax
+ \expandafter\xdef\csname\the\macname\endcsname{%
+ \bgroup\noexpand\macroargctxt
+ \expandafter\noexpand\csname\the\macname xx\endcsname}%
+ \expandafter\xdef\csname\the\macname xx\endcsname##1{%
+ \expandafter\noexpand\csname\the\macname xxx\endcsname ##1,}%
+ \expandafter\expandafter
+ \expandafter\xdef
+ \expandafter\expandafter
+ \csname\the\macname xxx\endcsname
+ \paramlist{%
+ \egroup
+ \noexpand\norecurse{\the\macname}%
+ \noexpand\scanmacro{\temp}\egroup}%
+ \else % 10 or more:
+ \expandafter\xdef\csname\the\macname\endcsname{%
+ \noexpand\getargvals@{\the\macname}{\argl}%
+ }%
+ \global\expandafter\let\csname mac.\the\macname .body\endcsname\temp
+ \global\expandafter\let\csname mac.\the\macname .recurse\endcsname\norecurse
+ \fi
\fi
\fi}
+\catcode `\@\texiatcatcode\relax
+
\def\norecurse#1{\bgroup\cslet{#1}{macsave.#1}}
% \braceorline decides whether the next nonwhitespace character is a
% {. If so it reads up to the closing }, if not, it reads the whole
% line. Whatever was read is then fed to the next control sequence
-% as an argument (by \parsebrace or \parsearg)
-\def\braceorline#1{\let\next=#1\futurelet\nchar\braceorlinexxx}
+% as an argument (by \parsebrace or \parsearg).
+%
+\def\braceorline#1{\let\macnamexxx=#1\futurelet\nchar\braceorlinexxx}
\def\braceorlinexxx{%
\ifx\nchar\bgroup\else
\expandafter\parsearg
- \fi \next}
+ \fi \macnamexxx}
% @alias.
% We need some trickery to remove the optional spaces around the equal
-% sign. Just make them active and then expand them all to nothing.
+% sign. Make them active and then expand them all to nothing.
+%
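+% For example,  @alias warn = strong  makes @warn equivalent to @strong;
+% the spaces around the `=' are ignored.
+%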
\def\alias{\parseargusing\obeyspaces\aliasxxx}
\def\aliasxxx #1{\aliasyyy#1\relax}
\def\aliasyyy #1=#2\relax{%
@@ -5941,13 +7725,13 @@ where each line of input produces a line of output.}
\message{cross references,}
\newwrite\auxfile
-
\newif\ifhavexrefs % True if xref values are known.
\newif\ifwarnedxrefs % True if we warned once that they aren't known.
% @inforef is relatively simple.
\def\inforef #1{\inforefzzz #1,,,,**}
-\def\inforefzzz #1,#2,#3,#4**{\putwordSee{} \putwordInfo{} \putwordfile{} \file{\ignorespaces #3{}},
+\def\inforefzzz #1,#2,#3,#4**{%
+ \putwordSee{} \putwordInfo{} \putwordfile{} \file{\ignorespaces #3{}},
node \samp{\ignorespaces#1{}}}
% @node's only job in TeX is to define \lastnode, which is used in
@@ -5986,7 +7770,7 @@ where each line of input produces a line of output.}
% \setref{NAME}{SNT} defines a cross-reference point NAME (a node or an
% anchor), which consists of three parts:
-% 1) NAME-title - the current sectioning name taken from \thissection,
+% 1) NAME-title - the current sectioning name taken from \lastsection,
% or the anchor name.
% 2) NAME-snt - section number and type, passed as the SNT arg, or
% empty for anchors.
@@ -6005,14 +7789,35 @@ where each line of input produces a line of output.}
\write\auxfile{@xrdef{#1-% #1 of \setref, expanded by the \edef
##1}{##2}}% these are parameters of \writexrdef
}%
- \toks0 = \expandafter{\thissection}%
+ \toks0 = \expandafter{\lastsection}%
\immediate \writexrdef{title}{\the\toks0 }%
\immediate \writexrdef{snt}{\csname #2\endcsname}% \Ynumbered etc.
- \writexrdef{pg}{\folio}% will be written later, during \shipout
+ \safewhatsit{\writexrdef{pg}{\folio}}% will be written later, at \shipout
}%
\fi
}
+% @xrefautomaticsectiontitle on|off says whether @section(ing) names are used
+% automatically in xrefs, if the third arg is not explicitly specified.
+% This was provided as a "secret" @set xref-automatic-section-title
+% variable, now it's official.
+%
+\parseargdef\xrefautomaticsectiontitle{%
+ \def\temp{#1}%
+ \ifx\temp\onword
+ \expandafter\let\csname SETxref-automatic-section-title\endcsname
+ = \empty
+ \else\ifx\temp\offword
+ \expandafter\let\csname SETxref-automatic-section-title\endcsname
+ = \relax
+ \else
+ \errhelp = \EMsimple
+ \errmessage{Unknown @xrefautomaticsectiontitle value `\temp',
+ must be on|off}%
+ \fi\fi
+}
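+%
+% For example, after  @xrefautomaticsectiontitle on  a plain @xref uses
+% the section title of the referenced node (when it is known, and when no
+% printed name is given explicitly) instead of the node name for the
+% printed reference text.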
+
+%
% @xref, @pxref, and @ref generate cross-references. For \xrefX, #1 is
% the node name, #2 the name of the Info cross-reference, #3 the printed
% node name, #4 the name of the Info file, #5 the name of the printed
@@ -6021,26 +7826,41 @@ where each line of input produces a line of output.}
\def\pxref#1{\putwordsee{} \xrefX[#1,,,,,,,]}
\def\xref#1{\putwordSee{} \xrefX[#1,,,,,,,]}
\def\ref#1{\xrefX[#1,,,,,,,]}
+%
+\newbox\toprefbox
+\newbox\printedrefnamebox
+\newbox\infofilenamebox
+\newbox\printedmanualbox
+%
\def\xrefX[#1,#2,#3,#4,#5,#6]{\begingroup
\unsepspaces
- \def\printedmanual{\ignorespaces #5}%
+ %
+ % Get args without leading/trailing spaces.
\def\printedrefname{\ignorespaces #3}%
- \setbox1=\hbox{\printedmanual\unskip}%
- \setbox0=\hbox{\printedrefname\unskip}%
- \ifdim \wd0 = 0pt
+ \setbox\printedrefnamebox = \hbox{\printedrefname\unskip}%
+ %
+ \def\infofilename{\ignorespaces #4}%
+ \setbox\infofilenamebox = \hbox{\infofilename\unskip}%
+ %
+ \def\printedmanual{\ignorespaces #5}%
+ \setbox\printedmanualbox = \hbox{\printedmanual\unskip}%
+ %
+ % If the printed reference name (arg #3) was not explicitly given in
+ % the @xref, figure out what we want to use.
+ \ifdim \wd\printedrefnamebox = 0pt
% No printed node name was explicitly given.
- \expandafter\ifx\csname SETxref-automatic-section-title\endcsname\relax
- % Use the node name inside the square brackets.
+ \expandafter\ifx\csname SETxref-automatic-section-title\endcsname \relax
+ % Not auto section-title: use node name inside the square brackets.
\def\printedrefname{\ignorespaces #1}%
\else
- % Use the actual chapter/section title appear inside
- % the square brackets. Use the real section title if we have it.
- \ifdim \wd1 > 0pt
- % It is in another manual, so we don't have it.
+ % Auto section-title: use chapter/section title inside
+ % the square brackets if we have it.
+ \ifdim \wd\printedmanualbox > 0pt
+ % It is in another manual, so we don't have it; use node name.
\def\printedrefname{\ignorespaces #1}%
\else
\ifhavexrefs
- % We know the real title if we have the xref values.
+ % We (should) know the real title if we have the xref values.
\def\printedrefname{\refx{#1-title}{}}%
\else
% Otherwise just copy the Info node name.
@@ -6052,22 +7872,32 @@ where each line of input produces a line of output.}
%
% Make link in pdf output.
\ifpdf
- \leavevmode
- \getfilename{#4}%
- {\turnoffactive
- % See comments at \activebackslashdouble.
- {\activebackslashdouble \xdef\pdfxrefdest{#1}%
- \backslashparens\pdfxrefdest}%
+ {\indexnofonts
+ \turnoffactive
+ \makevalueexpandable
+ % This expands tokens, so do it after making catcode changes, so _
+ % etc. don't get their TeX definitions. This ignores all spaces in
+ % #4, including (wrongly) those in the middle of the filename.
+ \getfilename{#4}%
%
+ % This (wrongly) does not take account of leading or trailing
+ % spaces in #1, which should be ignored.
+ \edef\pdfxrefdest{#1}%
+ \ifx\pdfxrefdest\empty
+ \def\pdfxrefdest{Top}% no empty targets
+ \else
+ \txiescapepdf\pdfxrefdest % escape PDF special chars
+ \fi
+ %
+ \leavevmode
+ \startlink attr{/Border [0 0 0]}%
\ifnum\filenamelength>0
- \startlink attr{/Border [0 0 0]}%
- goto file{\the\filename.pdf} name{\pdfxrefdest}%
+ goto file{\the\filename.pdf} name{\pdfxrefdest}%
\else
- \startlink attr{/Border [0 0 0]}%
- goto name{\pdfmkpgn{\pdfxrefdest}}%
+ goto name{\pdfmkpgn{\pdfxrefdest}}%
\fi
}%
- \linkcolor
+ \setcolor{\linkcolor}%
\fi
%
% Float references are printed completely differently: "Figure 1.2"
@@ -6084,29 +7914,42 @@ where each line of input produces a line of output.}
\iffloat\Xthisreftitle
% If the user specified the print name (third arg) to the ref,
% print it instead of our usual "Figure 1.2".
- \ifdim\wd0 = 0pt
- \refx{#1-snt}%
+ \ifdim\wd\printedrefnamebox = 0pt
+ \refx{#1-snt}{}%
\else
\printedrefname
\fi
%
- % if the user also gave the printed manual name (fifth arg), append
+ % If the user also gave the printed manual name (fifth arg), append
% "in MANUALNAME".
- \ifdim \wd1 > 0pt
+ \ifdim \wd\printedmanualbox > 0pt
\space \putwordin{} \cite{\printedmanual}%
\fi
\else
% node/anchor (non-float) references.
+ %
+ % If we use \unhbox to print the node names, TeX does not insert
+ % empty discretionaries after hyphens, which means that it will not
+  % find a line break at a hyphen in node names. Since some manuals
+ % are best written with fairly long node names, containing hyphens,
+ % this is a loss. Therefore, we give the text of the node name
+ % again, so it is as if TeX is seeing it for the first time.
+ %
+ \ifdim \wd\printedmanualbox > 0pt
+ % Cross-manual reference with a printed manual name.
+ %
+ \crossmanualxref{\cite{\printedmanual\unskip}}%
+ %
+ \else\ifdim \wd\infofilenamebox > 0pt
+ % Cross-manual reference with only an info filename (arg 4), no
+ % printed manual name (arg 5). This is essentially the same as
+ % the case above; we output the filename, since we have nothing else.
+ %
+ \crossmanualxref{\code{\infofilename\unskip}}%
%
- % If we use \unhbox0 and \unhbox1 to print the node names, TeX does not
- % insert empty discretionaries after hyphens, which means that it will
- % not find a line break at a hyphen in a node names. Since some manuals
- % are best written with fairly long node names, containing hyphens, this
- % is a loss. Therefore, we give the text of the node name again, so it
- % is as if TeX is seeing it for the first time.
- \ifdim \wd1 > 0pt
- \putwordsection{} ``\printedrefname'' \putwordin{} \cite{\printedmanual}%
\else
+ % Reference within this manual.
+ %
% _ (for example) has to be the character _ for the purposes of the
% control sequence corresponding to the node, but it has to expand
% into the usual \leavevmode...\vrule stuff for purposes of
@@ -6118,7 +7961,7 @@ where each line of input produces a line of output.}
\setbox2 = \hbox{\ignorespaces \refx{#1-snt}{}}%
\ifdim \wd2 > 0pt \refx{#1-snt}\space\fi
}%
- % output the `[mynode]' via a macro so it can be overridden.
+ % output the `[mynode]' via the macro below so it can be overridden.
\xrefprintnodename\printedrefname
%
% But we always want a comma and a space:
@@ -6126,11 +7969,37 @@ where each line of input produces a line of output.}
%
% output the `page 3'.
\turnoffactive \putwordpage\tie\refx{#1-pg}{}%
- \fi
+ \fi\fi
\fi
\endlink
\endgroup}
+% Output a cross-manual xref to #1. Used just above (twice).
+%
+% Only include the text "Section ``foo'' in" if the foo is neither
+% missing or Top. Thus, @xref{,,,foo,The Foo Manual} outputs simply
+% "see The Foo Manual", the idea being to refer to the whole manual.
+%
+% But, this being TeX, we can't easily compare our node name against the
+% string "Top" while ignoring the possible spaces before and after in
+% the input. By adding the arbitrary 7sp below, we make it much less
+% likely that a real node name would have the same width as "Top" (e.g.,
+% in a monospaced font). Hopefully it will never happen in practice.
+%
+% For the same basic reason, we retypeset the "Top" at every
+% reference, since the current font is indeterminate.
+%
+\def\crossmanualxref#1{%
+ \setbox\toprefbox = \hbox{Top\kern7sp}%
+ \setbox2 = \hbox{\ignorespaces \printedrefname \unskip \kern7sp}%
+ \ifdim \wd2 > 7sp % nonempty?
+ \ifdim \wd2 = \wd\toprefbox \else % same as Top?
+ \putwordSection{} ``\printedrefname'' \putwordin{}\space
+ \fi
+ \fi
+ #1%
+}
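+%
+% Conversely, something like  @xref{Overview,,,foo,The Foo Manual}
+% produces  See Section ``Overview'' in The Foo Manual,  since the node
+% name is neither empty nor Top.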
+
% This macro is called from \xrefX for the `[nodename]' part of xref
% output. It's a separate macro only so it can be changed more easily,
% since square brackets don't work well in some documents. Particularly
@@ -6181,7 +8050,8 @@ where each line of input produces a line of output.}
\angleleft un\-de\-fined\angleright
\iflinks
\ifhavexrefs
- \message{\linenumber Undefined cross reference `#1'.}%
+ {\toks0 = {#1}% avoid expansion of possibly-complex value
+ \message{\linenumber Undefined cross reference `\the\toks0'.}}%
\else
\ifwarnedxrefs\else
\global\warnedxrefstrue
@@ -6201,10 +8071,18 @@ where each line of input produces a line of output.}
% collisions). But if this is a float type, we have more work to do.
%
\def\xrdef#1#2{%
- \expandafter\gdef\csname XR#1\endcsname{#2}% remember this xref value.
+ {% The node name might contain 8-bit characters, which in our current
+ % implementation are changed to commands like @'e. Don't let these
+ % mess up the control sequence name.
+ \indexnofonts
+ \turnoffactive
+ \xdef\safexrefname{#1}%
+ }%
+ %
+ \expandafter\gdef\csname XR\safexrefname\endcsname{#2}% remember this xref
%
% Was that xref control sequence that we just defined for a float?
- \expandafter\iffloat\csname XR#1\endcsname
+ \expandafter\iffloat\csname XR\safexrefname\endcsname
% it was a float, and we have the (safe) float type in \iffloattype.
\expandafter\let\expandafter\floatlist
\csname floatlist\iffloattype\endcsname
@@ -6219,7 +8097,8 @@ where each line of input produces a line of output.}
%
% Remember this xref in the control sequence \floatlistFLOATTYPE,
% for later use in \listoffloats.
- \expandafter\xdef\csname floatlist\iffloattype\endcsname{\the\toks0{#1}}%
+ \expandafter\xdef\csname floatlist\iffloattype\endcsname{\the\toks0
+ {\safexrefname}}%
\fi
}
@@ -6323,6 +8202,7 @@ where each line of input produces a line of output.}
\input\jobname.#1
\endgroup}
+
\message{insertions,}
% including footnotes.
@@ -6335,7 +8215,7 @@ where each line of input produces a line of output.}
% space to prevent strange expansion errors.)
\def\supereject{\par\penalty -20000\footnoteno =0 }
-% @footnotestyle is meaningful for info output only.
+% @footnotestyle is meaningful for Info output only.
\let\footnotestyle=\comment
{\catcode `\@=11
@@ -6398,6 +8278,8 @@ where each line of input produces a line of output.}
% expands into a box, it must come within the paragraph, lest it
% provide a place where TeX can split the footnote.
\footstrut
+ %
+ % Invoke rest of plain TeX footnote routine.
\futurelet\next\fo@t
}
}%end \catcode `\@=11
@@ -6405,7 +8287,7 @@ where each line of input produces a line of output.}
% In case a @footnote appears in a vbox, save the footnote text and create
% the real \insert just after the vbox finished. Otherwise, the insertion
% would be lost.
-% Similarily, if a @footnote appears inside an alignment, save the footnote
+% Similarly, if a @footnote appears inside an alignment, save the footnote
% text to a box and make the \insert when a row of the table is finished.
% And the same can be done for other insert classes. --kasal, 16nov03.
@@ -6485,7 +8367,7 @@ where each line of input produces a line of output.}
it from ftp://tug.org/tex/epsf.tex.}
%
\def\image#1{%
- \ifx\epsfbox\undefined
+ \ifx\epsfbox\thisisundefined
\ifwarnednoepsf \else
\errhelp = \noepsfhelp
\errmessage{epsf.tex not found, images will be ignored}%
@@ -6501,7 +8383,7 @@ where each line of input produces a line of output.}
% #2 is (optional) width, #3 is (optional) height.
% #4 is (ignored optional) html alt text.
% #5 is (ignored optional) extension.
-% #6 is just the usual extra ignored arg for parsing this stuff.
+% #6 is just the usual extra ignored arg for parsing stuff.
\newif\ifimagevmode
\def\imagexxx#1,#2,#3,#4,#5,#6\finish{\begingroup
\catcode`\^^M = 5 % in case we're inside an example
@@ -6509,15 +8391,30 @@ where each line of input produces a line of output.}
% If the image is by itself, center it.
\ifvmode
\imagevmodetrue
- \nobreak\bigskip
+ \else \ifx\centersub\centerV
+ % for @center @image, we need a vbox so we can have our vertical space
+ \imagevmodetrue
+    \vbox\bgroup % vbox has better behavior than vtop here
+ \fi\fi
+ %
+ \ifimagevmode
+ \nobreak\medskip
% Usually we'll have text after the image which will insert
% \parskip glue, so insert it here too to equalize the space
% above and below.
\nobreak\vskip\parskip
\nobreak
- \line\bgroup\hss
\fi
%
+ % Leave vertical mode so that indentation from an enclosing
+ % environment such as @quotation is respected.
+ % However, if we're at the top level, we don't want the
+ % normal paragraph indentation.
+ % On the other hand, if we are in the case of @center @image, we don't
+ % want to start a paragraph, which will create a hsize-width box and
+ % eradicate the centering.
+ \ifx\centersub\centerV\else \noindent \fi
+ %
% Output the image.
\ifpdf
\dopdfimage{#1}{#2}{#3}%
@@ -6528,7 +8425,10 @@ where each line of input produces a line of output.}
\epsfbox{#1.eps}%
\fi
%
- \ifimagevmode \hss \egroup \bigbreak \fi % space after the image
+ \ifimagevmode
+ \medskip % space after a standalone image
+ \fi
+ \ifx\centersub\centerV \egroup \fi
\endgroup}
@@ -6595,13 +8495,13 @@ where each line of input produces a line of output.}
\global\advance\floatno by 1
%
{%
- % This magic value for \thissection is output by \setref as the
+ % This magic value for \lastsection is output by \setref as the
% XREFLABEL-title value. \xrefX uses it to distinguish float
% labels (which have a completely different output format) from
% node and anchor labels. And \xrdef uses it to construct the
% lists of floats.
%
- \edef\thissection{\floatmagic=\safefloattype}%
+ \edef\lastsection{\floatmagic=\safefloattype}%
\setref{\floatlabel}{Yfloat}%
}%
\fi
@@ -6669,6 +8569,7 @@ where each line of input produces a line of output.}
% caption if specified, else the full caption if specified, else nothing.
{%
\atdummies
+ %
% since we read the caption text in the macro world, where ^^M
% is turned into a normal character, we have to scan it back, so
% we don't write the literal three characters "^^M" into the aux file.
@@ -6689,8 +8590,9 @@ where each line of input produces a line of output.}
%
% place the captured inserts
%
- % BEWARE: when the floats start float, we have to issue warning whenever an
- % insert appears inside a float which could possibly float. --kasal, 26may04
+ % BEWARE: when the floats start floating, we have to issue warning
+ % whenever an insert appears inside a float which could possibly
+ % float. --kasal, 26may04
%
\checkinserts
}
@@ -6734,7 +8636,7 @@ where each line of input produces a line of output.}
% #1 is the control sequence we are passed; we expand into a conditional
% which is true if #1 represents a float ref. That is, the magic
-% \thissection value which we \setref above.
+% \lastsection value which we \setref above.
%
\def\iffloat#1{\expandafter\doiffloat#1==\finish}
%
@@ -6795,39 +8697,909 @@ where each line of input produces a line of output.}
\writeentry
}}
+
\message{localization,}
-% and i18n.
-% @documentlanguage is usually given very early, just after
-% @setfilename. If done too late, it may not override everything
-% properly. Single argument is the language abbreviation.
-% It would be nice if we could set up a hyphenation file here.
+% For single-language documents, @documentlanguage is usually given very
+% early, just after @documentencoding. Single argument is the language
+% (de) or locale (de_DE) abbreviation.
%
-\parseargdef\documentlanguage{%
+{
+ \catcode`\_ = \active
+ \globaldefs=1
+\parseargdef\documentlanguage{\begingroup
+ \let_=\normalunderscore % normal _ character for filenames
\tex % read txi-??.tex file in plain TeX.
- % Read the file if it exists.
+ % Read the file by the name they passed if it exists.
\openin 1 txi-#1.tex
\ifeof 1
- \errhelp = \nolanghelp
- \errmessage{Cannot read language file txi-#1.tex}%
+ \documentlanguagetrywithoutunderscore{#1_\finish}%
\else
+ \globaldefs = 1 % everything in the txi-LL files needs to persist
\input txi-#1.tex
\fi
\closein 1
- \endgroup
+ \endgroup % end raw TeX
+\endgroup}
+%
+% If they passed de_DE, and txi-de_DE.tex doesn't exist,
+% try txi-de.tex.
+%
+\gdef\documentlanguagetrywithoutunderscore#1_#2\finish{%
+ \openin 1 txi-#1.tex
+ \ifeof 1
+ \errhelp = \nolanghelp
+ \errmessage{Cannot read language file txi-#1.tex}%
+ \else
+ \globaldefs = 1 % everything in the txi-LL files needs to persist
+ \input txi-#1.tex
+ \fi
+ \closein 1
}
+}% end of special _ catcode
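+%
+% For example,  @documentlanguage de_DE  first tries to \input
+% txi-de_DE.tex and, if that file does not exist, falls back to
+% txi-de.tex;  @documentlanguage de  reads txi-de.tex directly.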
+%
\newhelp\nolanghelp{The given language definition file cannot be found or
-is empty. Maybe you need to install it? In the current directory
-should work if nowhere else does.}
+is empty. Maybe you need to install it? Putting it in the current
+directory should work if nowhere else does.}
+% This macro is called from txi-??.tex files; the first argument is the
+% \language name to set (without the "\lang@" prefix), the second and
+% third args are \{left,right}hyphenmin.
+%
+% The language names to pass are determined when the format is built.
+% See the etex.log file created at that time, e.g.,
+% /usr/local/texlive/2008/texmf-var/web2c/pdftex/etex.log.
+%
+% With TeX Live 2008, etex now includes hyphenation patterns for all
+% available languages. This means we can support hyphenation in
+% Texinfo, at least to some extent. (This still doesn't solve the
+% accented characters problem.)
+%
+\catcode`@=11
+\def\txisetlanguage#1#2#3{%
+ % do not set the language if the name is undefined in the current TeX.
+ \expandafter\ifx\csname lang@#1\endcsname \relax
+ \message{no patterns for #1}%
+ \else
+ \global\language = \csname lang@#1\endcsname
+ \fi
+ % but there is no harm in adjusting the hyphenmin values regardless.
+ \global\lefthyphenmin = #2\relax
+ \global\righthyphenmin = #3\relax
+}
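+%
+% A txi-LL.tex file might then contain a call such as (hypothetical
+% values)  \txisetlanguage{german}{2}{2}  to select the `german'
+% hyphenation patterns and set both hyphenmin values to 2.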
-% @documentencoding should change something in TeX eventually, most
-% likely, but for now just recognize it.
-\let\documentencoding = \comment
+% Helpers for encodings.
+% Set the catcode of characters 128 through 255 to the specified number.
+%
+\def\setnonasciicharscatcode#1{%
+ \count255=128
+ \loop\ifnum\count255<256
+ \global\catcode\count255=#1\relax
+ \advance\count255 by 1
+ \repeat
+}
+\def\setnonasciicharscatcodenonglobal#1{%
+ \count255=128
+ \loop\ifnum\count255<256
+ \catcode\count255=#1\relax
+ \advance\count255 by 1
+ \repeat
+}
-% Page size parameters.
+% @documentencoding sets the definition of non-ASCII characters
+% according to the specified encoding.
%
+\parseargdef\documentencoding{%
+ % Encoding being declared for the document.
+ \def\declaredencoding{\csname #1.enc\endcsname}%
+ %
+ % Supported encodings: names converted to tokens in order to be able
+ % to compare them with \ifx.
+ \def\ascii{\csname US-ASCII.enc\endcsname}%
+ \def\latnine{\csname ISO-8859-15.enc\endcsname}%
+ \def\latone{\csname ISO-8859-1.enc\endcsname}%
+ \def\lattwo{\csname ISO-8859-2.enc\endcsname}%
+ \def\utfeight{\csname UTF-8.enc\endcsname}%
+ %
+ \ifx \declaredencoding \ascii
+ \asciichardefs
+ %
+ \else \ifx \declaredencoding \lattwo
+ \setnonasciicharscatcode\active
+ \lattwochardefs
+ %
+ \else \ifx \declaredencoding \latone
+ \setnonasciicharscatcode\active
+ \latonechardefs
+ %
+ \else \ifx \declaredencoding \latnine
+ \setnonasciicharscatcode\active
+ \latninechardefs
+ %
+ \else \ifx \declaredencoding \utfeight
+ \setnonasciicharscatcode\active
+ \utfeightchardefs
+ %
+ \else
+ \message{Unknown document encoding #1, ignoring.}%
+ %
+ \fi % utfeight
+ \fi % latnine
+ \fi % latone
+ \fi % lattwo
+ \fi % ascii
+}
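+%
+% For example, a document starting with  @documentencoding UTF-8  makes
+% the bytes 128-255 active and runs \utfeightchardefs below, so that
+% literal UTF-8 accented characters in the source can be typeset.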
+
+% A message to be logged when using a character that isn't available
+% in the default font encoding (OT1).
+%
+\def\missingcharmsg#1{\message{Character missing in OT1 encoding: #1.}}
+
+% Take account of \c (plain) vs. \, (Texinfo) difference.
+\def\cedilla#1{\ifx\c\ptexc\c{#1}\else\,{#1}\fi}
+
+% First, make active non-ASCII characters in order for them to be
+% correctly categorized when TeX reads the replacement text of
+% macros containing the character definitions.
+\setnonasciicharscatcode\active
+%
+% Latin1 (ISO-8859-1) character definitions.
+\def\latonechardefs{%
+ \gdef^^a0{\tie}
+ \gdef^^a1{\exclamdown}
+ \gdef^^a2{\missingcharmsg{CENT SIGN}}
+ \gdef^^a3{{\pounds}}
+ \gdef^^a4{\missingcharmsg{CURRENCY SIGN}}
+ \gdef^^a5{\missingcharmsg{YEN SIGN}}
+ \gdef^^a6{\missingcharmsg{BROKEN BAR}}
+ \gdef^^a7{\S}
+ \gdef^^a8{\"{}}
+ \gdef^^a9{\copyright}
+ \gdef^^aa{\ordf}
+ \gdef^^ab{\guillemetleft}
+ \gdef^^ac{$\lnot$}
+ \gdef^^ad{\-}
+ \gdef^^ae{\registeredsymbol}
+ \gdef^^af{\={}}
+ %
+ \gdef^^b0{\textdegree}
+ \gdef^^b1{$\pm$}
+ \gdef^^b2{$^2$}
+ \gdef^^b3{$^3$}
+ \gdef^^b4{\'{}}
+ \gdef^^b5{$\mu$}
+ \gdef^^b6{\P}
+ %
+ \gdef^^b7{$^.$}
+ \gdef^^b8{\cedilla\ }
+ \gdef^^b9{$^1$}
+ \gdef^^ba{\ordm}
+ %
+ \gdef^^bb{\guillemetright}
+ \gdef^^bc{$1\over4$}
+ \gdef^^bd{$1\over2$}
+ \gdef^^be{$3\over4$}
+ \gdef^^bf{\questiondown}
+ %
+ \gdef^^c0{\`A}
+ \gdef^^c1{\'A}
+ \gdef^^c2{\^A}
+ \gdef^^c3{\~A}
+ \gdef^^c4{\"A}
+ \gdef^^c5{\ringaccent A}
+ \gdef^^c6{\AE}
+ \gdef^^c7{\cedilla C}
+ \gdef^^c8{\`E}
+ \gdef^^c9{\'E}
+ \gdef^^ca{\^E}
+ \gdef^^cb{\"E}
+ \gdef^^cc{\`I}
+ \gdef^^cd{\'I}
+ \gdef^^ce{\^I}
+ \gdef^^cf{\"I}
+ %
+ \gdef^^d0{\DH}
+ \gdef^^d1{\~N}
+ \gdef^^d2{\`O}
+ \gdef^^d3{\'O}
+ \gdef^^d4{\^O}
+ \gdef^^d5{\~O}
+ \gdef^^d6{\"O}
+ \gdef^^d7{$\times$}
+ \gdef^^d8{\O}
+ \gdef^^d9{\`U}
+ \gdef^^da{\'U}
+ \gdef^^db{\^U}
+ \gdef^^dc{\"U}
+ \gdef^^dd{\'Y}
+ \gdef^^de{\TH}
+ \gdef^^df{\ss}
+ %
+ \gdef^^e0{\`a}
+ \gdef^^e1{\'a}
+ \gdef^^e2{\^a}
+ \gdef^^e3{\~a}
+ \gdef^^e4{\"a}
+ \gdef^^e5{\ringaccent a}
+ \gdef^^e6{\ae}
+ \gdef^^e7{\cedilla c}
+ \gdef^^e8{\`e}
+ \gdef^^e9{\'e}
+ \gdef^^ea{\^e}
+ \gdef^^eb{\"e}
+ \gdef^^ec{\`{\dotless i}}
+ \gdef^^ed{\'{\dotless i}}
+ \gdef^^ee{\^{\dotless i}}
+ \gdef^^ef{\"{\dotless i}}
+ %
+ \gdef^^f0{\dh}
+ \gdef^^f1{\~n}
+ \gdef^^f2{\`o}
+ \gdef^^f3{\'o}
+ \gdef^^f4{\^o}
+ \gdef^^f5{\~o}
+ \gdef^^f6{\"o}
+ \gdef^^f7{$\div$}
+ \gdef^^f8{\o}
+ \gdef^^f9{\`u}
+ \gdef^^fa{\'u}
+ \gdef^^fb{\^u}
+ \gdef^^fc{\"u}
+ \gdef^^fd{\'y}
+ \gdef^^fe{\th}
+ \gdef^^ff{\"y}
+}
+
+% Latin9 (ISO-8859-15) encoding character definitions.
+\def\latninechardefs{%
+ % Encoding is almost identical to Latin1.
+ \latonechardefs
+ %
+ \gdef^^a4{\euro}
+ \gdef^^a6{\v S}
+ \gdef^^a8{\v s}
+ \gdef^^b4{\v Z}
+ \gdef^^b8{\v z}
+ \gdef^^bc{\OE}
+ \gdef^^bd{\oe}
+ \gdef^^be{\"Y}
+}
+
+% Latin2 (ISO-8859-2) character definitions.
+\def\lattwochardefs{%
+ \gdef^^a0{\tie}
+ \gdef^^a1{\ogonek{A}}
+ \gdef^^a2{\u{}}
+ \gdef^^a3{\L}
+ \gdef^^a4{\missingcharmsg{CURRENCY SIGN}}
+ \gdef^^a5{\v L}
+ \gdef^^a6{\'S}
+ \gdef^^a7{\S}
+ \gdef^^a8{\"{}}
+ \gdef^^a9{\v S}
+ \gdef^^aa{\cedilla S}
+ \gdef^^ab{\v T}
+ \gdef^^ac{\'Z}
+ \gdef^^ad{\-}
+ \gdef^^ae{\v Z}
+ \gdef^^af{\dotaccent Z}
+ %
+ \gdef^^b0{\textdegree}
+ \gdef^^b1{\ogonek{a}}
+ \gdef^^b2{\ogonek{ }}
+ \gdef^^b3{\l}
+ \gdef^^b4{\'{}}
+ \gdef^^b5{\v l}
+ \gdef^^b6{\'s}
+ \gdef^^b7{\v{}}
+ \gdef^^b8{\cedilla\ }
+ \gdef^^b9{\v s}
+ \gdef^^ba{\cedilla s}
+ \gdef^^bb{\v t}
+ \gdef^^bc{\'z}
+ \gdef^^bd{\H{}}
+ \gdef^^be{\v z}
+ \gdef^^bf{\dotaccent z}
+ %
+ \gdef^^c0{\'R}
+ \gdef^^c1{\'A}
+ \gdef^^c2{\^A}
+ \gdef^^c3{\u A}
+ \gdef^^c4{\"A}
+ \gdef^^c5{\'L}
+ \gdef^^c6{\'C}
+ \gdef^^c7{\cedilla C}
+ \gdef^^c8{\v C}
+ \gdef^^c9{\'E}
+ \gdef^^ca{\ogonek{E}}
+ \gdef^^cb{\"E}
+ \gdef^^cc{\v E}
+ \gdef^^cd{\'I}
+ \gdef^^ce{\^I}
+ \gdef^^cf{\v D}
+ %
+ \gdef^^d0{\DH}
+ \gdef^^d1{\'N}
+ \gdef^^d2{\v N}
+ \gdef^^d3{\'O}
+ \gdef^^d4{\^O}
+ \gdef^^d5{\H O}
+ \gdef^^d6{\"O}
+ \gdef^^d7{$\times$}
+ \gdef^^d8{\v R}
+ \gdef^^d9{\ringaccent U}
+ \gdef^^da{\'U}
+ \gdef^^db{\H U}
+ \gdef^^dc{\"U}
+ \gdef^^dd{\'Y}
+ \gdef^^de{\cedilla T}
+ \gdef^^df{\ss}
+ %
+ \gdef^^e0{\'r}
+ \gdef^^e1{\'a}
+ \gdef^^e2{\^a}
+ \gdef^^e3{\u a}
+ \gdef^^e4{\"a}
+ \gdef^^e5{\'l}
+ \gdef^^e6{\'c}
+ \gdef^^e7{\cedilla c}
+ \gdef^^e8{\v c}
+ \gdef^^e9{\'e}
+ \gdef^^ea{\ogonek{e}}
+ \gdef^^eb{\"e}
+ \gdef^^ec{\v e}
+ \gdef^^ed{\'{\dotless{i}}}
+ \gdef^^ee{\^{\dotless{i}}}
+ \gdef^^ef{\v d}
+ %
+ \gdef^^f0{\dh}
+ \gdef^^f1{\'n}
+ \gdef^^f2{\v n}
+ \gdef^^f3{\'o}
+ \gdef^^f4{\^o}
+ \gdef^^f5{\H o}
+ \gdef^^f6{\"o}
+ \gdef^^f7{$\div$}
+ \gdef^^f8{\v r}
+ \gdef^^f9{\ringaccent u}
+ \gdef^^fa{\'u}
+ \gdef^^fb{\H u}
+ \gdef^^fc{\"u}
+ \gdef^^fd{\'y}
+ \gdef^^fe{\cedilla t}
+ \gdef^^ff{\dotaccent{}}
+}
+
+% UTF-8 character definitions.
+%
+% This code to support UTF-8 is based on LaTeX's utf8.def, with some
+% changes for Texinfo conventions. It is included here under the GPL by
+% permission from Frank Mittelbach and the LaTeX team.
+%
+\newcount\countUTFx
+\newcount\countUTFy
+\newcount\countUTFz
+
+\gdef\UTFviiiTwoOctets#1#2{\expandafter
+ \UTFviiiDefined\csname u8:#1\string #2\endcsname}
+%
+\gdef\UTFviiiThreeOctets#1#2#3{\expandafter
+ \UTFviiiDefined\csname u8:#1\string #2\string #3\endcsname}
+%
+\gdef\UTFviiiFourOctets#1#2#3#4{\expandafter
+ \UTFviiiDefined\csname u8:#1\string #2\string #3\string #4\endcsname}
+
+\gdef\UTFviiiDefined#1{%
+ \ifx #1\relax
+ \message{\linenumber Unicode char \string #1 not defined for Texinfo}%
+ \else
+ \expandafter #1%
+ \fi
+}
+
+\begingroup
+ \catcode`\~13
+ \catcode`\"12
+
+ \def\UTFviiiLoop{%
+ \global\catcode\countUTFx\active
+ \uccode`\~\countUTFx
+ \uppercase\expandafter{\UTFviiiTmp}%
+ \advance\countUTFx by 1
+ \ifnum\countUTFx < \countUTFy
+ \expandafter\UTFviiiLoop
+ \fi}
+
+ \countUTFx = "C2
+ \countUTFy = "E0
+ \def\UTFviiiTmp{%
+ \xdef~{\noexpand\UTFviiiTwoOctets\string~}}
+ \UTFviiiLoop
+
+ \countUTFx = "E0
+ \countUTFy = "F0
+ \def\UTFviiiTmp{%
+ \xdef~{\noexpand\UTFviiiThreeOctets\string~}}
+ \UTFviiiLoop
+
+ \countUTFx = "F0
+ \countUTFy = "F4
+ \def\UTFviiiTmp{%
+ \xdef~{\noexpand\UTFviiiFourOctets\string~}}
+ \UTFviiiLoop
+\endgroup
+
+\begingroup
+ \catcode`\"=12
+ \catcode`\<=12
+ \catcode`\.=12
+ \catcode`\,=12
+ \catcode`\;=12
+ \catcode`\!=12
+ \catcode`\~=13
+
+ \gdef\DeclareUnicodeCharacter#1#2{%
+ \countUTFz = "#1\relax
+ %\wlog{\space\space defining Unicode char U+#1 (decimal \the\countUTFz)}%
+ \begingroup
+ \parseXMLCharref
+ \def\UTFviiiTwoOctets##1##2{%
+ \csname u8:##1\string ##2\endcsname}%
+ \def\UTFviiiThreeOctets##1##2##3{%
+ \csname u8:##1\string ##2\string ##3\endcsname}%
+ \def\UTFviiiFourOctets##1##2##3##4{%
+ \csname u8:##1\string ##2\string ##3\string ##4\endcsname}%
+ \expandafter\expandafter\expandafter\expandafter
+ \expandafter\expandafter\expandafter
+ \gdef\UTFviiiTmp{#2}%
+ \endgroup}
+
+ \gdef\parseXMLCharref{%
+ \ifnum\countUTFz < "A0\relax
+ \errhelp = \EMsimple
+ \errmessage{Cannot define Unicode char value < 00A0}%
+ \else\ifnum\countUTFz < "800\relax
+ \parseUTFviiiA,%
+ \parseUTFviiiB C\UTFviiiTwoOctets.,%
+ \else\ifnum\countUTFz < "10000\relax
+ \parseUTFviiiA;%
+ \parseUTFviiiA,%
+ \parseUTFviiiB E\UTFviiiThreeOctets.{,;}%
+ \else
+ \parseUTFviiiA;%
+ \parseUTFviiiA,%
+ \parseUTFviiiA!%
+ \parseUTFviiiB F\UTFviiiFourOctets.{!,;}%
+ \fi\fi\fi
+ }
+
+ \gdef\parseUTFviiiA#1{%
+ \countUTFx = \countUTFz
+ \divide\countUTFz by 64
+ \countUTFy = \countUTFz
+ \multiply\countUTFz by 64
+ \advance\countUTFx by -\countUTFz
+ \advance\countUTFx by 128
+ \uccode `#1\countUTFx
+ \countUTFz = \countUTFy}
+
+ \gdef\parseUTFviiiB#1#2#3#4{%
+ \advance\countUTFz by "#10\relax
+ \uccode `#3\countUTFz
+ \uppercase{\gdef\UTFviiiTmp{#2#3#4}}}
+\endgroup
+
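
Note: the \parseXMLCharref / \parseUTFviiiA / \parseUTFviiiB machinery above is ordinary UTF-8 encoding done with TeX arithmetic: repeated division by 64 peels off the 6-bit groups, adding 128 marks each continuation byte, the "C0 / "E0 / "F0 offsets supplied through \parseUTFviiiB form the lead byte, and values below U+00A0 are rejected. A rough Python paraphrase, with an illustrative helper name, is:

    def utf8_octets(cp):
        # Same range tests as \parseXMLCharref; same divide-by-64 / +128
        # arithmetic as \parseUTFviiiA for the 10xxxxxx continuation bytes.
        if cp < 0xA0:
            raise ValueError("chars below U+00A0 are not declared this way")
        if cp < 0x800:
            return [0xC0 | (cp >> 6), 0x80 | (cp & 0x3F)]
        if cp < 0x10000:
            return [0xE0 | (cp >> 12),
                    0x80 | ((cp >> 6) & 0x3F),
                    0x80 | (cp & 0x3F)]
        return [0xF0 | (cp >> 18),
                0x80 | ((cp >> 12) & 0x3F),
                0x80 | ((cp >> 6) & 0x3F),
                0x80 | (cp & 0x3F)]

For example, U+20AC (the euro sign) comes out as the three octets E2 82 AC.
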
+\def\utfeightchardefs{%
+ \DeclareUnicodeCharacter{00A0}{\tie}
+ \DeclareUnicodeCharacter{00A1}{\exclamdown}
+ \DeclareUnicodeCharacter{00A3}{\pounds}
+ \DeclareUnicodeCharacter{00A8}{\"{ }}
+ \DeclareUnicodeCharacter{00A9}{\copyright}
+ \DeclareUnicodeCharacter{00AA}{\ordf}
+ \DeclareUnicodeCharacter{00AB}{\guillemetleft}
+ \DeclareUnicodeCharacter{00AD}{\-}
+ \DeclareUnicodeCharacter{00AE}{\registeredsymbol}
+ \DeclareUnicodeCharacter{00AF}{\={ }}
+
+ \DeclareUnicodeCharacter{00B0}{\ringaccent{ }}
+ \DeclareUnicodeCharacter{00B4}{\'{ }}
+ \DeclareUnicodeCharacter{00B8}{\cedilla{ }}
+ \DeclareUnicodeCharacter{00BA}{\ordm}
+ \DeclareUnicodeCharacter{00BB}{\guillemetright}
+ \DeclareUnicodeCharacter{00BF}{\questiondown}
+
+ \DeclareUnicodeCharacter{00C0}{\`A}
+ \DeclareUnicodeCharacter{00C1}{\'A}
+ \DeclareUnicodeCharacter{00C2}{\^A}
+ \DeclareUnicodeCharacter{00C3}{\~A}
+ \DeclareUnicodeCharacter{00C4}{\"A}
+ \DeclareUnicodeCharacter{00C5}{\AA}
+ \DeclareUnicodeCharacter{00C6}{\AE}
+ \DeclareUnicodeCharacter{00C7}{\cedilla{C}}
+ \DeclareUnicodeCharacter{00C8}{\`E}
+ \DeclareUnicodeCharacter{00C9}{\'E}
+ \DeclareUnicodeCharacter{00CA}{\^E}
+ \DeclareUnicodeCharacter{00CB}{\"E}
+ \DeclareUnicodeCharacter{00CC}{\`I}
+ \DeclareUnicodeCharacter{00CD}{\'I}
+ \DeclareUnicodeCharacter{00CE}{\^I}
+ \DeclareUnicodeCharacter{00CF}{\"I}
+
+ \DeclareUnicodeCharacter{00D0}{\DH}
+ \DeclareUnicodeCharacter{00D1}{\~N}
+ \DeclareUnicodeCharacter{00D2}{\`O}
+ \DeclareUnicodeCharacter{00D3}{\'O}
+ \DeclareUnicodeCharacter{00D4}{\^O}
+ \DeclareUnicodeCharacter{00D5}{\~O}
+ \DeclareUnicodeCharacter{00D6}{\"O}
+ \DeclareUnicodeCharacter{00D8}{\O}
+ \DeclareUnicodeCharacter{00D9}{\`U}
+ \DeclareUnicodeCharacter{00DA}{\'U}
+ \DeclareUnicodeCharacter{00DB}{\^U}
+ \DeclareUnicodeCharacter{00DC}{\"U}
+ \DeclareUnicodeCharacter{00DD}{\'Y}
+ \DeclareUnicodeCharacter{00DE}{\TH}
+ \DeclareUnicodeCharacter{00DF}{\ss}
+
+ \DeclareUnicodeCharacter{00E0}{\`a}
+ \DeclareUnicodeCharacter{00E1}{\'a}
+ \DeclareUnicodeCharacter{00E2}{\^a}
+ \DeclareUnicodeCharacter{00E3}{\~a}
+ \DeclareUnicodeCharacter{00E4}{\"a}
+ \DeclareUnicodeCharacter{00E5}{\aa}
+ \DeclareUnicodeCharacter{00E6}{\ae}
+ \DeclareUnicodeCharacter{00E7}{\cedilla{c}}
+ \DeclareUnicodeCharacter{00E8}{\`e}
+ \DeclareUnicodeCharacter{00E9}{\'e}
+ \DeclareUnicodeCharacter{00EA}{\^e}
+ \DeclareUnicodeCharacter{00EB}{\"e}
+ \DeclareUnicodeCharacter{00EC}{\`{\dotless{i}}}
+ \DeclareUnicodeCharacter{00ED}{\'{\dotless{i}}}
+ \DeclareUnicodeCharacter{00EE}{\^{\dotless{i}}}
+ \DeclareUnicodeCharacter{00EF}{\"{\dotless{i}}}
+
+ \DeclareUnicodeCharacter{00F0}{\dh}
+ \DeclareUnicodeCharacter{00F1}{\~n}
+ \DeclareUnicodeCharacter{00F2}{\`o}
+ \DeclareUnicodeCharacter{00F3}{\'o}
+ \DeclareUnicodeCharacter{00F4}{\^o}
+ \DeclareUnicodeCharacter{00F5}{\~o}
+ \DeclareUnicodeCharacter{00F6}{\"o}
+ \DeclareUnicodeCharacter{00F8}{\o}
+ \DeclareUnicodeCharacter{00F9}{\`u}
+ \DeclareUnicodeCharacter{00FA}{\'u}
+ \DeclareUnicodeCharacter{00FB}{\^u}
+ \DeclareUnicodeCharacter{00FC}{\"u}
+ \DeclareUnicodeCharacter{00FD}{\'y}
+ \DeclareUnicodeCharacter{00FE}{\th}
+ \DeclareUnicodeCharacter{00FF}{\"y}
+
+ \DeclareUnicodeCharacter{0100}{\=A}
+ \DeclareUnicodeCharacter{0101}{\=a}
+ \DeclareUnicodeCharacter{0102}{\u{A}}
+ \DeclareUnicodeCharacter{0103}{\u{a}}
+ \DeclareUnicodeCharacter{0104}{\ogonek{A}}
+ \DeclareUnicodeCharacter{0105}{\ogonek{a}}
+ \DeclareUnicodeCharacter{0106}{\'C}
+ \DeclareUnicodeCharacter{0107}{\'c}
+ \DeclareUnicodeCharacter{0108}{\^C}
+ \DeclareUnicodeCharacter{0109}{\^c}
+ \DeclareUnicodeCharacter{0118}{\ogonek{E}}
+ \DeclareUnicodeCharacter{0119}{\ogonek{e}}
+ \DeclareUnicodeCharacter{010A}{\dotaccent{C}}
+ \DeclareUnicodeCharacter{010B}{\dotaccent{c}}
+ \DeclareUnicodeCharacter{010C}{\v{C}}
+ \DeclareUnicodeCharacter{010D}{\v{c}}
+ \DeclareUnicodeCharacter{010E}{\v{D}}
+
+ \DeclareUnicodeCharacter{0112}{\=E}
+ \DeclareUnicodeCharacter{0113}{\=e}
+ \DeclareUnicodeCharacter{0114}{\u{E}}
+ \DeclareUnicodeCharacter{0115}{\u{e}}
+ \DeclareUnicodeCharacter{0116}{\dotaccent{E}}
+ \DeclareUnicodeCharacter{0117}{\dotaccent{e}}
+ \DeclareUnicodeCharacter{011A}{\v{E}}
+ \DeclareUnicodeCharacter{011B}{\v{e}}
+ \DeclareUnicodeCharacter{011C}{\^G}
+ \DeclareUnicodeCharacter{011D}{\^g}
+ \DeclareUnicodeCharacter{011E}{\u{G}}
+ \DeclareUnicodeCharacter{011F}{\u{g}}
+
+ \DeclareUnicodeCharacter{0120}{\dotaccent{G}}
+ \DeclareUnicodeCharacter{0121}{\dotaccent{g}}
+ \DeclareUnicodeCharacter{0124}{\^H}
+ \DeclareUnicodeCharacter{0125}{\^h}
+ \DeclareUnicodeCharacter{0128}{\~I}
+ \DeclareUnicodeCharacter{0129}{\~{\dotless{i}}}
+ \DeclareUnicodeCharacter{012A}{\=I}
+ \DeclareUnicodeCharacter{012B}{\={\dotless{i}}}
+ \DeclareUnicodeCharacter{012C}{\u{I}}
+ \DeclareUnicodeCharacter{012D}{\u{\dotless{i}}}
+
+ \DeclareUnicodeCharacter{0130}{\dotaccent{I}}
+ \DeclareUnicodeCharacter{0131}{\dotless{i}}
+ \DeclareUnicodeCharacter{0132}{IJ}
+ \DeclareUnicodeCharacter{0133}{ij}
+ \DeclareUnicodeCharacter{0134}{\^J}
+ \DeclareUnicodeCharacter{0135}{\^{\dotless{j}}}
+ \DeclareUnicodeCharacter{0139}{\'L}
+ \DeclareUnicodeCharacter{013A}{\'l}
+
+ \DeclareUnicodeCharacter{0141}{\L}
+ \DeclareUnicodeCharacter{0142}{\l}
+ \DeclareUnicodeCharacter{0143}{\'N}
+ \DeclareUnicodeCharacter{0144}{\'n}
+ \DeclareUnicodeCharacter{0147}{\v{N}}
+ \DeclareUnicodeCharacter{0148}{\v{n}}
+ \DeclareUnicodeCharacter{014C}{\=O}
+ \DeclareUnicodeCharacter{014D}{\=o}
+ \DeclareUnicodeCharacter{014E}{\u{O}}
+ \DeclareUnicodeCharacter{014F}{\u{o}}
+
+ \DeclareUnicodeCharacter{0150}{\H{O}}
+ \DeclareUnicodeCharacter{0151}{\H{o}}
+ \DeclareUnicodeCharacter{0152}{\OE}
+ \DeclareUnicodeCharacter{0153}{\oe}
+ \DeclareUnicodeCharacter{0154}{\'R}
+ \DeclareUnicodeCharacter{0155}{\'r}
+ \DeclareUnicodeCharacter{0158}{\v{R}}
+ \DeclareUnicodeCharacter{0159}{\v{r}}
+ \DeclareUnicodeCharacter{015A}{\'S}
+ \DeclareUnicodeCharacter{015B}{\'s}
+ \DeclareUnicodeCharacter{015C}{\^S}
+ \DeclareUnicodeCharacter{015D}{\^s}
+ \DeclareUnicodeCharacter{015E}{\cedilla{S}}
+ \DeclareUnicodeCharacter{015F}{\cedilla{s}}
+
+ \DeclareUnicodeCharacter{0160}{\v{S}}
+ \DeclareUnicodeCharacter{0161}{\v{s}}
+ \DeclareUnicodeCharacter{0162}{\cedilla{T}}
+ \DeclareUnicodeCharacter{0163}{\cedilla{t}}
+ \DeclareUnicodeCharacter{0164}{\v{T}}
+
+ \DeclareUnicodeCharacter{0168}{\~U}
+ \DeclareUnicodeCharacter{0169}{\~u}
+ \DeclareUnicodeCharacter{016A}{\=U}
+ \DeclareUnicodeCharacter{016B}{\=u}
+ \DeclareUnicodeCharacter{016C}{\u{U}}
+ \DeclareUnicodeCharacter{016D}{\u{u}}
+ \DeclareUnicodeCharacter{016E}{\ringaccent{U}}
+ \DeclareUnicodeCharacter{016F}{\ringaccent{u}}
+
+ \DeclareUnicodeCharacter{0170}{\H{U}}
+ \DeclareUnicodeCharacter{0171}{\H{u}}
+ \DeclareUnicodeCharacter{0174}{\^W}
+ \DeclareUnicodeCharacter{0175}{\^w}
+ \DeclareUnicodeCharacter{0176}{\^Y}
+ \DeclareUnicodeCharacter{0177}{\^y}
+ \DeclareUnicodeCharacter{0178}{\"Y}
+ \DeclareUnicodeCharacter{0179}{\'Z}
+ \DeclareUnicodeCharacter{017A}{\'z}
+ \DeclareUnicodeCharacter{017B}{\dotaccent{Z}}
+ \DeclareUnicodeCharacter{017C}{\dotaccent{z}}
+ \DeclareUnicodeCharacter{017D}{\v{Z}}
+ \DeclareUnicodeCharacter{017E}{\v{z}}
+
+ \DeclareUnicodeCharacter{01C4}{D\v{Z}}
+ \DeclareUnicodeCharacter{01C5}{D\v{z}}
+ \DeclareUnicodeCharacter{01C6}{d\v{z}}
+ \DeclareUnicodeCharacter{01C7}{LJ}
+ \DeclareUnicodeCharacter{01C8}{Lj}
+ \DeclareUnicodeCharacter{01C9}{lj}
+ \DeclareUnicodeCharacter{01CA}{NJ}
+ \DeclareUnicodeCharacter{01CB}{Nj}
+ \DeclareUnicodeCharacter{01CC}{nj}
+ \DeclareUnicodeCharacter{01CD}{\v{A}}
+ \DeclareUnicodeCharacter{01CE}{\v{a}}
+ \DeclareUnicodeCharacter{01CF}{\v{I}}
+
+ \DeclareUnicodeCharacter{01D0}{\v{\dotless{i}}}
+ \DeclareUnicodeCharacter{01D1}{\v{O}}
+ \DeclareUnicodeCharacter{01D2}{\v{o}}
+ \DeclareUnicodeCharacter{01D3}{\v{U}}
+ \DeclareUnicodeCharacter{01D4}{\v{u}}
+
+ \DeclareUnicodeCharacter{01E2}{\={\AE}}
+ \DeclareUnicodeCharacter{01E3}{\={\ae}}
+ \DeclareUnicodeCharacter{01E6}{\v{G}}
+ \DeclareUnicodeCharacter{01E7}{\v{g}}
+ \DeclareUnicodeCharacter{01E8}{\v{K}}
+ \DeclareUnicodeCharacter{01E9}{\v{k}}
+
+ \DeclareUnicodeCharacter{01F0}{\v{\dotless{j}}}
+ \DeclareUnicodeCharacter{01F1}{DZ}
+ \DeclareUnicodeCharacter{01F2}{Dz}
+ \DeclareUnicodeCharacter{01F3}{dz}
+ \DeclareUnicodeCharacter{01F4}{\'G}
+ \DeclareUnicodeCharacter{01F5}{\'g}
+ \DeclareUnicodeCharacter{01F8}{\`N}
+ \DeclareUnicodeCharacter{01F9}{\`n}
+ \DeclareUnicodeCharacter{01FC}{\'{\AE}}
+ \DeclareUnicodeCharacter{01FD}{\'{\ae}}
+ \DeclareUnicodeCharacter{01FE}{\'{\O}}
+ \DeclareUnicodeCharacter{01FF}{\'{\o}}
+
+ \DeclareUnicodeCharacter{021E}{\v{H}}
+ \DeclareUnicodeCharacter{021F}{\v{h}}
+
+ \DeclareUnicodeCharacter{0226}{\dotaccent{A}}
+ \DeclareUnicodeCharacter{0227}{\dotaccent{a}}
+ \DeclareUnicodeCharacter{0228}{\cedilla{E}}
+ \DeclareUnicodeCharacter{0229}{\cedilla{e}}
+ \DeclareUnicodeCharacter{022E}{\dotaccent{O}}
+ \DeclareUnicodeCharacter{022F}{\dotaccent{o}}
+
+ \DeclareUnicodeCharacter{0232}{\=Y}
+ \DeclareUnicodeCharacter{0233}{\=y}
+ \DeclareUnicodeCharacter{0237}{\dotless{j}}
+
+ \DeclareUnicodeCharacter{02DB}{\ogonek{ }}
+
+ \DeclareUnicodeCharacter{1E02}{\dotaccent{B}}
+ \DeclareUnicodeCharacter{1E03}{\dotaccent{b}}
+ \DeclareUnicodeCharacter{1E04}{\udotaccent{B}}
+ \DeclareUnicodeCharacter{1E05}{\udotaccent{b}}
+ \DeclareUnicodeCharacter{1E06}{\ubaraccent{B}}
+ \DeclareUnicodeCharacter{1E07}{\ubaraccent{b}}
+ \DeclareUnicodeCharacter{1E0A}{\dotaccent{D}}
+ \DeclareUnicodeCharacter{1E0B}{\dotaccent{d}}
+ \DeclareUnicodeCharacter{1E0C}{\udotaccent{D}}
+ \DeclareUnicodeCharacter{1E0D}{\udotaccent{d}}
+ \DeclareUnicodeCharacter{1E0E}{\ubaraccent{D}}
+ \DeclareUnicodeCharacter{1E0F}{\ubaraccent{d}}
+
+ \DeclareUnicodeCharacter{1E1E}{\dotaccent{F}}
+ \DeclareUnicodeCharacter{1E1F}{\dotaccent{f}}
+
+ \DeclareUnicodeCharacter{1E20}{\=G}
+ \DeclareUnicodeCharacter{1E21}{\=g}
+ \DeclareUnicodeCharacter{1E22}{\dotaccent{H}}
+ \DeclareUnicodeCharacter{1E23}{\dotaccent{h}}
+ \DeclareUnicodeCharacter{1E24}{\udotaccent{H}}
+ \DeclareUnicodeCharacter{1E25}{\udotaccent{h}}
+ \DeclareUnicodeCharacter{1E26}{\"H}
+ \DeclareUnicodeCharacter{1E27}{\"h}
+
+ \DeclareUnicodeCharacter{1E30}{\'K}
+ \DeclareUnicodeCharacter{1E31}{\'k}
+ \DeclareUnicodeCharacter{1E32}{\udotaccent{K}}
+ \DeclareUnicodeCharacter{1E33}{\udotaccent{k}}
+ \DeclareUnicodeCharacter{1E34}{\ubaraccent{K}}
+ \DeclareUnicodeCharacter{1E35}{\ubaraccent{k}}
+ \DeclareUnicodeCharacter{1E36}{\udotaccent{L}}
+ \DeclareUnicodeCharacter{1E37}{\udotaccent{l}}
+ \DeclareUnicodeCharacter{1E3A}{\ubaraccent{L}}
+ \DeclareUnicodeCharacter{1E3B}{\ubaraccent{l}}
+ \DeclareUnicodeCharacter{1E3E}{\'M}
+ \DeclareUnicodeCharacter{1E3F}{\'m}
+
+ \DeclareUnicodeCharacter{1E40}{\dotaccent{M}}
+ \DeclareUnicodeCharacter{1E41}{\dotaccent{m}}
+ \DeclareUnicodeCharacter{1E42}{\udotaccent{M}}
+ \DeclareUnicodeCharacter{1E43}{\udotaccent{m}}
+ \DeclareUnicodeCharacter{1E44}{\dotaccent{N}}
+ \DeclareUnicodeCharacter{1E45}{\dotaccent{n}}
+ \DeclareUnicodeCharacter{1E46}{\udotaccent{N}}
+ \DeclareUnicodeCharacter{1E47}{\udotaccent{n}}
+ \DeclareUnicodeCharacter{1E48}{\ubaraccent{N}}
+ \DeclareUnicodeCharacter{1E49}{\ubaraccent{n}}
+
+ \DeclareUnicodeCharacter{1E54}{\'P}
+ \DeclareUnicodeCharacter{1E55}{\'p}
+ \DeclareUnicodeCharacter{1E56}{\dotaccent{P}}
+ \DeclareUnicodeCharacter{1E57}{\dotaccent{p}}
+ \DeclareUnicodeCharacter{1E58}{\dotaccent{R}}
+ \DeclareUnicodeCharacter{1E59}{\dotaccent{r}}
+ \DeclareUnicodeCharacter{1E5A}{\udotaccent{R}}
+ \DeclareUnicodeCharacter{1E5B}{\udotaccent{r}}
+ \DeclareUnicodeCharacter{1E5E}{\ubaraccent{R}}
+ \DeclareUnicodeCharacter{1E5F}{\ubaraccent{r}}
+
+ \DeclareUnicodeCharacter{1E60}{\dotaccent{S}}
+ \DeclareUnicodeCharacter{1E61}{\dotaccent{s}}
+ \DeclareUnicodeCharacter{1E62}{\udotaccent{S}}
+ \DeclareUnicodeCharacter{1E63}{\udotaccent{s}}
+ \DeclareUnicodeCharacter{1E6A}{\dotaccent{T}}
+ \DeclareUnicodeCharacter{1E6B}{\dotaccent{t}}
+ \DeclareUnicodeCharacter{1E6C}{\udotaccent{T}}
+ \DeclareUnicodeCharacter{1E6D}{\udotaccent{t}}
+ \DeclareUnicodeCharacter{1E6E}{\ubaraccent{T}}
+ \DeclareUnicodeCharacter{1E6F}{\ubaraccent{t}}
+
+ \DeclareUnicodeCharacter{1E7C}{\~V}
+ \DeclareUnicodeCharacter{1E7D}{\~v}
+ \DeclareUnicodeCharacter{1E7E}{\udotaccent{V}}
+ \DeclareUnicodeCharacter{1E7F}{\udotaccent{v}}
+
+ \DeclareUnicodeCharacter{1E80}{\`W}
+ \DeclareUnicodeCharacter{1E81}{\`w}
+ \DeclareUnicodeCharacter{1E82}{\'W}
+ \DeclareUnicodeCharacter{1E83}{\'w}
+ \DeclareUnicodeCharacter{1E84}{\"W}
+ \DeclareUnicodeCharacter{1E85}{\"w}
+ \DeclareUnicodeCharacter{1E86}{\dotaccent{W}}
+ \DeclareUnicodeCharacter{1E87}{\dotaccent{w}}
+ \DeclareUnicodeCharacter{1E88}{\udotaccent{W}}
+ \DeclareUnicodeCharacter{1E89}{\udotaccent{w}}
+ \DeclareUnicodeCharacter{1E8A}{\dotaccent{X}}
+ \DeclareUnicodeCharacter{1E8B}{\dotaccent{x}}
+ \DeclareUnicodeCharacter{1E8C}{\"X}
+ \DeclareUnicodeCharacter{1E8D}{\"x}
+ \DeclareUnicodeCharacter{1E8E}{\dotaccent{Y}}
+ \DeclareUnicodeCharacter{1E8F}{\dotaccent{y}}
+
+ \DeclareUnicodeCharacter{1E90}{\^Z}
+ \DeclareUnicodeCharacter{1E91}{\^z}
+ \DeclareUnicodeCharacter{1E92}{\udotaccent{Z}}
+ \DeclareUnicodeCharacter{1E93}{\udotaccent{z}}
+ \DeclareUnicodeCharacter{1E94}{\ubaraccent{Z}}
+ \DeclareUnicodeCharacter{1E95}{\ubaraccent{z}}
+ \DeclareUnicodeCharacter{1E96}{\ubaraccent{h}}
+ \DeclareUnicodeCharacter{1E97}{\"t}
+ \DeclareUnicodeCharacter{1E98}{\ringaccent{w}}
+ \DeclareUnicodeCharacter{1E99}{\ringaccent{y}}
+
+ \DeclareUnicodeCharacter{1EA0}{\udotaccent{A}}
+ \DeclareUnicodeCharacter{1EA1}{\udotaccent{a}}
+
+ \DeclareUnicodeCharacter{1EB8}{\udotaccent{E}}
+ \DeclareUnicodeCharacter{1EB9}{\udotaccent{e}}
+ \DeclareUnicodeCharacter{1EBC}{\~E}
+ \DeclareUnicodeCharacter{1EBD}{\~e}
+
+ \DeclareUnicodeCharacter{1ECA}{\udotaccent{I}}
+ \DeclareUnicodeCharacter{1ECB}{\udotaccent{i}}
+ \DeclareUnicodeCharacter{1ECC}{\udotaccent{O}}
+ \DeclareUnicodeCharacter{1ECD}{\udotaccent{o}}
+
+ \DeclareUnicodeCharacter{1EE4}{\udotaccent{U}}
+ \DeclareUnicodeCharacter{1EE5}{\udotaccent{u}}
+
+ \DeclareUnicodeCharacter{1EF2}{\`Y}
+ \DeclareUnicodeCharacter{1EF3}{\`y}
+ \DeclareUnicodeCharacter{1EF4}{\udotaccent{Y}}
+
+ \DeclareUnicodeCharacter{1EF8}{\~Y}
+ \DeclareUnicodeCharacter{1EF9}{\~y}
+
+ \DeclareUnicodeCharacter{2013}{--}
+ \DeclareUnicodeCharacter{2014}{---}
+ \DeclareUnicodeCharacter{2018}{\quoteleft}
+ \DeclareUnicodeCharacter{2019}{\quoteright}
+ \DeclareUnicodeCharacter{201A}{\quotesinglbase}
+ \DeclareUnicodeCharacter{201C}{\quotedblleft}
+ \DeclareUnicodeCharacter{201D}{\quotedblright}
+ \DeclareUnicodeCharacter{201E}{\quotedblbase}
+ \DeclareUnicodeCharacter{2022}{\bullet}
+ \DeclareUnicodeCharacter{2026}{\dots}
+ \DeclareUnicodeCharacter{2039}{\guilsinglleft}
+ \DeclareUnicodeCharacter{203A}{\guilsinglright}
+ \DeclareUnicodeCharacter{20AC}{\euro}
+
+ \DeclareUnicodeCharacter{2192}{\expansion}
+ \DeclareUnicodeCharacter{21D2}{\result}
+
+ \DeclareUnicodeCharacter{2212}{\minus}
+ \DeclareUnicodeCharacter{2217}{\point}
+ \DeclareUnicodeCharacter{2261}{\equiv}
+}% end of \utfeightchardefs
+
+
+% US-ASCII character definitions.
+\def\asciichardefs{% nothing need be done
+ \relax
+}
+
+% Make non-ASCII characters printable again for compatibility with
+% existing Texinfo documents that may use them, even without declaring a
+% document encoding.
+%
+\setnonasciicharscatcode \other
+
+
+\message{formatting,}
+
\newdimen\defaultparindent \defaultparindent = 15pt
\chapheadingskip = 15pt plus 4pt minus 2pt
@@ -6837,10 +9609,10 @@ should work if nowhere else does.}
% Prevent underfull vbox error messages.
\vbadness = 10000
-% Don't be so finicky about underfull hboxes, either.
-\hbadness = 2000
+% Don't be very finicky about underfull hboxes, either.
+\hbadness = 6666
-% Following George Bush, just get rid of widows and orphans.
+% Following George Bush, get rid of widows and orphans.
\widowpenalty=10000
\clubpenalty=10000
@@ -6887,6 +9659,10 @@ should work if nowhere else does.}
\ifpdf
\pdfpageheight #7\relax
\pdfpagewidth #8\relax
+ % if we don't reset these, they will remain at "1 true in" of
+ % whatever layout pdftex was dumped with.
+ \pdfhorigin = 1 true in
+ \pdfvorigin = 1 true in
\fi
%
\setleading{\textleading}
@@ -6901,7 +9677,7 @@ should work if nowhere else does.}
\textleading = 13.2pt
%
% If page is nothing but text, make it come out even.
- \internalpagesizes{46\baselineskip}{6in}%
+ \internalpagesizes{607.2pt}{6in}% that's 46 lines
{\voffset}{.25in}%
{\bindingoffset}{36pt}%
{11in}{8.5in}%
@@ -6913,7 +9689,7 @@ should work if nowhere else does.}
\textleading = 12pt
%
\internalpagesizes{7.5in}{5in}%
- {\voffset}{.25in}%
+ {-.2in}{0in}%
{\bindingoffset}{16pt}%
{9.25in}{7in}%
%
@@ -6957,7 +9733,7 @@ should work if nowhere else does.}
% \global\normaloffset = -6mm
% \global\bindingoffset = 10mm
% @end tex
- \internalpagesizes{51\baselineskip}{160mm}
+ \internalpagesizes{673.2pt}{160mm}% that's 51 lines
{\voffset}{\hoffset}%
{\bindingoffset}{44pt}%
{297mm}{210mm}%
@@ -7022,7 +9798,7 @@ should work if nowhere else does.}
\parskip = 3pt plus 2pt minus 1pt
\setleading{\textleading}%
%
- \dimen0 = #1
+ \dimen0 = #1\relax
\advance\dimen0 by \voffset
%
\dimen2 = \hsize
@@ -7041,25 +9817,21 @@ should work if nowhere else does.}
\message{and turning on texinfo input format.}
+\def^^L{\par} % remove \outer, so ^L can appear in an @comment
+
+% DEL is a comment character, in case @c does not suffice.
+\catcode`\^^? = 14
+
% Define macros to output various characters with catcode for normal text.
-\catcode`\"=\other
-\catcode`\~=\other
-\catcode`\^=\other
-\catcode`\_=\other
-\catcode`\|=\other
-\catcode`\<=\other
-\catcode`\>=\other
-\catcode`\+=\other
-\catcode`\$=\other
-\def\normaldoublequote{"}
-\def\normaltilde{~}
-\def\normalcaret{^}
-\def\normalunderscore{_}
-\def\normalverticalbar{|}
-\def\normalless{<}
-\def\normalgreater{>}
-\def\normalplus{+}
-\def\normaldollar{$}%$ font-lock fix
+\catcode`\"=\other \def\normaldoublequote{"}
+\catcode`\$=\other \def\normaldollar{$}%$ font-lock fix
+\catcode`\+=\other \def\normalplus{+}
+\catcode`\<=\other \def\normalless{<}
+\catcode`\>=\other \def\normalgreater{>}
+\catcode`\^=\other \def\normalcaret{^}
+\catcode`\_=\other \def\normalunderscore{_}
+\catcode`\|=\other \def\normalverticalbar{|}
+\catcode`\~=\other \def\normaltilde{~}
% This macro is used to make a character print one way in \tt
% (where it can probably be output as-is), and another way in other fonts,
@@ -7117,6 +9889,13 @@ should work if nowhere else does.}
% \otherifyactive is called near the end of this file.
\def\otherifyactive{\catcode`+=\other \catcode`\_=\other}
+% Used sometimes to turn off (effectively) the active characters even after
+% parsing them.
+\def\turnoffactive{%
+ \normalturnoffactive
+ \otherbackslash
+}
+
\catcode`\@=0
% \backslashcurfont outputs one backslash character in current font,
@@ -7124,45 +9903,52 @@ should work if nowhere else does.}
\global\chardef\backslashcurfont=`\\
\global\let\rawbackslashxx=\backslashcurfont % let existing .??s files work
-% \rawbackslash defines an active \ to do \backslashcurfont.
-% \otherbackslash defines an active \ to be a literal `\' character with
-% catcode other.
-{\catcode`\\=\active
- @gdef@rawbackslash{@let\=@backslashcurfont}
- @gdef@otherbackslash{@let\=@realbackslash}
-}
-
% \realbackslash is an actual character `\' with catcode other, and
% \doublebackslash is two of them (for the pdf outlines).
{\catcode`\\=\other @gdef@realbackslash{\} @gdef@doublebackslash{\\}}
-% \normalbackslash outputs one backslash in fixed width font.
-\def\normalbackslash{{\tt\backslashcurfont}}
-
-\catcode`\\=\active
+% In texinfo, backslash is an active character; it prints the backslash
+% in fixed width font.
+\catcode`\\=\active % @ for escape char from now on.
+
+% The story here is that in math mode, the \char of \backslashcurfont
+% ends up printing the roman \ from the math symbol font (because \char
+% in math mode uses the \mathcode, and plain.tex sets
+% \mathcode`\\="026E). It seems better for @backslashchar{} to always
+% print a typewriter backslash, hence we use an explicit \mathchar,
+% which is the decimal equivalent of "715c (class 7, e.g., use \fam;
+% ignored family value; char position "5C). We can't use " for the
+% usual hex value because it has already been made active.
+@def@normalbackslash{{@tt @ifmmode @mathchar29020 @else @backslashcurfont @fi}}
+@let@backslashchar = @normalbackslash % @backslashchar{} is for user documents.
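
(For reference, "715C is 7*4096 + 1*256 + 5*16 + 12 = 29020, i.e. class 7, family 1 (ignored), character position "5C; that is why the decimal \mathchar29020 above selects the typewriter backslash slot.)
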
+
+% On startup, @fixbackslash assigns:
+% @let \ = @normalbackslash
+% \rawbackslash defines an active \ to do \backslashcurfont.
+% \otherbackslash defines an active \ to be a literal `\' character with
+% catcode other. We switch back and forth between these.
+@gdef@rawbackslash{@let\=@backslashcurfont}
+@gdef@otherbackslash{@let\=@realbackslash}
-% Used sometimes to turn off (effectively) the active characters
-% even after parsing them.
-@def@turnoffactive{%
+% Same as @turnoffactive except outputs \ as {\tt\char`\\} instead of
+% the literal character `\'.
+%
+@def@normalturnoffactive{%
@let"=@normaldoublequote
- @let\=@realbackslash
- @let~=@normaltilde
+ @let$=@normaldollar %$ font-lock fix
+ @let+=@normalplus
+ @let<=@normalless
+ @let>=@normalgreater
+ @let\=@normalbackslash
@let^=@normalcaret
@let_=@normalunderscore
@let|=@normalverticalbar
- @let<=@normalless
- @let>=@normalgreater
- @let+=@normalplus
- @let$=@normaldollar %$ font-lock fix
+ @let~=@normaltilde
+ @markupsetuplqdefault
+ @markupsetuprqdefault
@unsepspaces
}
-% Same as @turnoffactive except outputs \ as {\tt\char`\\} instead of
-% the literal character `\'. (Thus, \ is not expandable when this is in
-% effect.)
-%
-@def@normalturnoffactive{@turnoffactive @let\=@normalbackslash}
-
% Make _ and + \other characters, temporarily.
% This is canceled by @fixbackslash.
@otherifyactive
@@ -7175,7 +9961,7 @@ should work if nowhere else does.}
@global@let\ = @eatinput
% On the other hand, perhaps the file did not have a `\input texinfo'. Then
-% the first `\{ in the file would cause an error. This macro tries to fix
+% the first `\' in the file would cause an error. This macro tries to fix
% that, assuming it is called before the first `\' could plausibly occur.
% Also turn back on active characters that might appear in the input
% file name, in case not using a pre-dumped format.
@@ -7189,11 +9975,28 @@ should work if nowhere else does.}
% Say @foo, not \foo, in error messages.
@escapechar = `@@
-% These look ok in all fonts, so just make them not special.
-@catcode`@& = @other
-@catcode`@# = @other
-@catcode`@% = @other
+% These (along with & and #) are made active for url-breaking, so need
+% active definitions as the normal characters.
+@def@normaldot{.}
+@def@normalquest{?}
+@def@normalslash{/}
+% These look ok in all fonts, so just make them not special.
+% @hashchar{} gets its own user-level command, because of #line.
+@catcode`@& = @other @def@normalamp{&}
+@catcode`@# = @other @def@normalhash{#}
+@catcode`@% = @other @def@normalpercent{%}
+
+@let @hashchar = @normalhash
+
+@c Finally, make ` and ' active, so that txicodequoteundirected and
+@c txicodequotebacktick work right in, e.g., @w{@code{`foo'}}. If we
+@c don't make ` and ' active, @code will not get them as active chars.
+@c Do this last of all since we use ` in the previous @catcode assignments.
+@catcode`@'=@active
+@catcode`@`=@active
+@markupsetuplqdefault
+@markupsetuprqdefault
@c Local variables:
@c eval: (add-hook 'write-file-hooks 'time-stamp)
diff --git a/Modules/_ctypes/libffi_osx/x86/darwin64.S b/Modules/_ctypes/libffi_osx/x86/darwin64.S
index eba451e..165d469 100644
--- a/Modules/_ctypes/libffi_osx/x86/darwin64.S
+++ b/Modules/_ctypes/libffi_osx/x86/darwin64.S
@@ -45,6 +45,7 @@
_ffi_call_unix64:
LUW0:
movq (%rsp), %r10 /* Load return address. */
+ movq %rdi, %r12 /* Save a copy of the register area. */
leaq (%rdi, %rsi), %rax /* Find local stack base. */
movq %rdx, (%rax) /* Save flags. */
movq %rcx, 8(%rax) /* Save raddr. */
@@ -52,7 +53,8 @@ LUW0:
movq %r10, 24(%rax) /* Relocate return address. */
movq %rax, %rbp /* Finalize local stack frame. */
LUW1:
- movq %rdi, %r10 /* Save a copy of the register area. */
+ /* movq %rdi, %r10 // Save a copy of the register area. */
+ movq %r12, %r10
movq %r8, %r11 /* Save a copy of the target fn. */
movl %r9d, %eax /* Set number of SSE registers. */
@@ -255,7 +257,7 @@ Lld_void:
ret
.align 3
Lld_int8:
- movzbl -24(%rsp), %eax
+ movzbl -24(%rsp), %eax
ret
.align 3
Lld_int16:
diff --git a/Modules/_ctypes/libffi_osx/x86/x86-darwin.S b/Modules/_ctypes/libffi_osx/x86/x86-darwin.S
index 6c85ea6..925a841 100644
--- a/Modules/_ctypes/libffi_osx/x86/x86-darwin.S
+++ b/Modules/_ctypes/libffi_osx/x86/x86-darwin.S
@@ -198,8 +198,12 @@ LCFI7:
je Lcls_retldouble
cmpl $FFI_TYPE_SINT64, %eax
je Lcls_retllong
+ cmpl $FFI_TYPE_UINT8, %eax
+ je Lcls_retstruct1
cmpl $FFI_TYPE_SINT8, %eax
je Lcls_retstruct1
+ cmpl $FFI_TYPE_UINT16, %eax
+ je Lcls_retstruct2
cmpl $FFI_TYPE_SINT16, %eax
je Lcls_retstruct2
cmpl $FFI_TYPE_STRUCT, %eax
diff --git a/Modules/_ctypes/libffi_osx/x86/x86-ffi64.c b/Modules/_ctypes/libffi_osx/x86/x86-ffi64.c
index d4a5cc1..06feaf2 100644
--- a/Modules/_ctypes/libffi_osx/x86/x86-ffi64.c
+++ b/Modules/_ctypes/libffi_osx/x86/x86-ffi64.c
@@ -152,12 +152,42 @@ classify_argument(
case FFI_TYPE_UINT64:
case FFI_TYPE_SINT64:
case FFI_TYPE_POINTER:
+#if 0
if (byte_offset + type->size <= 4)
classes[0] = X86_64_INTEGERSI_CLASS;
else
classes[0] = X86_64_INTEGER_CLASS;
return 1;
+#else
+ {
+ int size = byte_offset + type->size;
+
+ if (size <= 4)
+ {
+ classes[0] = X86_64_INTEGERSI_CLASS;
+ return 1;
+ }
+ else if (size <= 8)
+ {
+ classes[0] = X86_64_INTEGER_CLASS;
+ return 1;
+ }
+ else if (size <= 12)
+ {
+ classes[0] = X86_64_INTEGER_CLASS;
+ classes[1] = X86_64_INTEGERSI_CLASS;
+ return 2;
+ }
+ else if (size <= 16)
+ {
+ classes[0] = classes[1] = X86_64_INTEGERSI_CLASS;
+ return 2;
+ }
+ else
+ FFI_ASSERT (0);
+ }
+#endif
case FFI_TYPE_FLOAT:
if (byte_offset == 0)
@@ -213,6 +243,21 @@ classify_argument(
byte_offset += (*ptr)->size;
}
+ if (words > 2)
+ {
+ /* When size > 16 bytes, if the first class isn't
+ X86_64_SSE_CLASS or any of the remaining classes isn't
+ X86_64_SSEUP_CLASS, everything should be passed in
+ memory. */
+ if (classes[0] != X86_64_SSE_CLASS)
+ return 0;
+
+ for (i = 1; i < words; i++)
+ if (classes[i] != X86_64_SSEUP_CLASS)
+ return 0;
+ }
+
+
/* Final merger cleanup. */
for (i = 0; i < words; i++)
{
@@ -224,13 +269,20 @@ classify_argument(
/* The X86_64_SSEUP_CLASS should be always preceded by
X86_64_SSE_CLASS. */
if (classes[i] == X86_64_SSEUP_CLASS
- && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
+ && classes[i - 1] != X86_64_SSE_CLASS
+ && classes[i - 1] != X86_64_SSEUP_CLASS)
+ {
+ FFI_ASSERT(i != 0);
classes[i] = X86_64_SSE_CLASS;
+ }
/* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
if (classes[i] == X86_64_X87UP_CLASS
- && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
+ && classes[i - 1] != X86_64_X87_CLASS)
+ {
+ FFI_ASSERT(i != 0);
classes[i] = X86_64_SSE_CLASS;
+ }
}
return words;
@@ -369,6 +421,7 @@ ffi_prep_cif_machdep(
cif->flags = flags;
cif->bytes = bytes;
+ cif->bytes = ALIGN(bytes,8);
return FFI_OK;
}
@@ -449,7 +502,61 @@ ffi_call(
case X86_64_INTEGER_CLASS:
case X86_64_INTEGERSI_CLASS:
reg_args->gpr[gprcount] = 0;
- memcpy (&reg_args->gpr[gprcount], a, size < 8 ? size : 8);
+ switch (arg_types[i]->type) {
+ case FFI_TYPE_SINT8:
+ {
+ int8_t shortval = *(int8_t*)a;
+ int64_t actval = (int64_t)shortval;
+ reg_args->gpr[gprcount] = actval;
+ /*memcpy (&reg_args->gpr[gprcount], &actval, 8);*/
+ break;
+ }
+
+ case FFI_TYPE_SINT16:
+ {
+ int16_t shortval = *(int16_t*)a;
+ int64_t actval = (int64_t)shortval;
+ memcpy (&reg_args->gpr[gprcount], &actval, 8);
+ break;
+ }
+
+ case FFI_TYPE_SINT32:
+ {
+ int32_t shortval = *(int32_t*)a;
+ int64_t actval = (int64_t)shortval;
+ memcpy (&reg_args->gpr[gprcount], &actval, 8);
+ break;
+ }
+
+ case FFI_TYPE_UINT8:
+ {
+ u_int8_t shortval = *(u_int8_t*)a;
+ u_int64_t actval = (u_int64_t)shortval;
+ /*memcpy (&reg_args->gpr[gprcount], &actval, 8);*/
+ reg_args->gpr[gprcount] = actval;
+ break;
+ }
+
+ case FFI_TYPE_UINT16:
+ {
+ u_int16_t shortval = *(u_int16_t*)a;
+ u_int64_t actval = (u_int64_t)shortval;
+ memcpy (&reg_args->gpr[gprcount], &actval, 8);
+ break;
+ }
+
+ case FFI_TYPE_UINT32:
+ {
+ u_int32_t shortval = *(u_int32_t*)a;
+ u_int64_t actval = (u_int64_t)shortval;
+ memcpy (&reg_args->gpr[gprcount], &actval, 8);
+ break;
+ }
+
+ default:
+ //memcpy (&reg_args->gpr[gprcount], a, size < 8 ? size : 8);
+ reg_args->gpr[gprcount] = *(int64_t*)a;
+ }
gprcount++;
break;
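
Note: the per-type switch above widens sub-register integer arguments into the full 64-bit GPR slot instead of memcpy'ing only their low bytes; in effect it is ordinary sign (or zero) extension. A minimal Python illustration of the signed case (the helper name is ours, not part of libffi):

    def sign_extend(value, bits):
        # Interpret the low `bits` bits of value as signed, e.g.
        # sign_extend(0xFF, 8) == -1, matching the int8_t -> int64_t cast.
        mask = 1 << (bits - 1)
        value &= (1 << bits) - 1
        return (value ^ mask) - mask
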
@@ -505,12 +612,15 @@ ffi_prep_closure(
return FFI_OK;
}
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wmissing-prototypes"
int
ffi_closure_unix64_inner(
ffi_closure* closure,
void* rvalue,
RegisterArgs* reg_args,
char* argp)
+#pragma clang diagnostic pop
{
ffi_cif* cif = closure->cif;
void** avalue = alloca(cif->nargs * sizeof(void *));
@@ -621,4 +731,4 @@ ffi_closure_unix64_inner(
return ret;
}
-#endif /* __x86_64__ */ \ No newline at end of file
+#endif /* __x86_64__ */
diff --git a/Modules/_ctypes/libffi_osx/x86/x86-ffi_darwin.c b/Modules/_ctypes/libffi_osx/x86/x86-ffi_darwin.c
index c9a306e..706ea0f 100644
--- a/Modules/_ctypes/libffi_osx/x86/x86-ffi_darwin.c
+++ b/Modules/_ctypes/libffi_osx/x86/x86-ffi_darwin.c
@@ -35,6 +35,8 @@
/* ffi_prep_args is called by the assembly routine once stack space
has been allocated for the function's arguments */
+void ffi_prep_args(char *stack, extended_cif *ecif);
+
void ffi_prep_args(char *stack, extended_cif *ecif)
{
register unsigned int i;
@@ -433,4 +435,4 @@ ffi_raw_call(ffi_cif *cif, void (*fn)(), void *rvalue, ffi_raw *fake_avalue)
}
#endif
-#endif // __i386__ \ No newline at end of file
+#endif // __i386__
diff --git a/Modules/_ctypes/stgdict.c b/Modules/_ctypes/stgdict.c
index 773233f..95fa0f5 100644
--- a/Modules/_ctypes/stgdict.c
+++ b/Modules/_ctypes/stgdict.c
@@ -343,7 +343,7 @@ PyCStructUnionType_update_stgdict(PyObject *type, PyObject *fields, int isStruct
isPacked = PyObject_GetAttrString(type, "_pack_");
if (isPacked) {
- pack = PyInt_AsLong(isPacked);
+ pack = _PyInt_AsInt(isPacked);
if (pack < 0 || PyErr_Occurred()) {
Py_XDECREF(isPacked);
PyErr_SetString(PyExc_ValueError,
@@ -518,7 +518,12 @@ PyCStructUnionType_update_stgdict(PyObject *type, PyObject *fields, int isStruct
sprintf(buf, "%s:%s:", fieldfmt, fieldname);
ptr = stgdict->format;
- stgdict->format = _ctypes_alloc_format_string(stgdict->format, buf);
+ if (dict->shape != NULL) {
+ stgdict->format = _ctypes_alloc_format_string_with_shape(
+ dict->ndim, dict->shape, stgdict->format, buf);
+ } else {
+ stgdict->format = _ctypes_alloc_format_string(stgdict->format, buf);
+ }
PyMem_Free(ptr);
PyMem_Free(buf);
diff --git a/Modules/_curses_panel.c b/Modules/_curses_panel.c
index 04a0a28..bdd5cf0 100644
--- a/Modules/_curses_panel.c
+++ b/Modules/_curses_panel.c
@@ -293,9 +293,18 @@ PyCursesPanel_replace_panel(PyCursesPanelObject *self, PyObject *args)
static PyObject *
PyCursesPanel_set_panel_userptr(PyCursesPanelObject *self, PyObject *obj)
{
+ PyObject *oldobj;
+ int rc;
+ PyCursesInitialised;
Py_INCREF(obj);
- return PyCursesCheckERR(set_panel_userptr(self->pan, (void*)obj),
- "set_panel_userptr");
+ oldobj = (PyObject *) panel_userptr(self->pan);
+ rc = set_panel_userptr(self->pan, (void*)obj);
+ if (rc == ERR) {
+ /* In case of an ncurses error, decref the new object again */
+ Py_DECREF(obj);
+ }
+ Py_XDECREF(oldobj);
+ return PyCursesCheckERR(rc, "set_panel_userptr");
}
static PyObject *
diff --git a/Modules/_cursesmodule.c b/Modules/_cursesmodule.c
index 5f26c7f..b914e5f 100644
--- a/Modules/_cursesmodule.c
+++ b/Modules/_cursesmodule.c
@@ -885,7 +885,9 @@ PyCursesWindow_GetKey(PyCursesWindowObject *self, PyObject *args)
}
if (rtn == ERR) {
/* getch() returns ERR in nodelay mode */
- PyErr_SetString(PyCursesError, "no input");
+ PyErr_CheckSignals();
+ if (!PyErr_Occurred())
+ PyErr_SetString(PyCursesError, "no input");
return NULL;
} else if (rtn<=255) {
return Py_BuildValue("c", rtn);
diff --git a/Modules/_elementtree.c b/Modules/_elementtree.c
index 379aa01..3e9f9df 100644
--- a/Modules/_elementtree.c
+++ b/Modules/_elementtree.c
@@ -2338,7 +2338,10 @@ expat_start_ns_handler(XMLParserObject* self, const XML_Char* prefix,
PyObject* sprefix = NULL;
PyObject* suri = NULL;
- suri = makestring(uri, strlen(uri));
+ if (uri)
+ suri = makestring(uri, strlen(uri));
+ else
+ suri = PyString_FromStringAndSize("", 0);
if (!suri)
return;
@@ -2427,6 +2430,8 @@ expat_unknown_encoding_handler(XMLParserObject *self, const XML_Char *name,
if (PyUnicode_GET_SIZE(u) != 256) {
Py_DECREF(u);
+ PyErr_SetString(PyExc_ValueError,
+ "multi-byte encodings are not supported");
return XML_STATUS_ERROR;
}
@@ -2734,10 +2739,10 @@ xmlparser_setevents(XMLParserObject* self, PyObject* args)
target->events = events;
/* clear out existing events */
- Py_XDECREF(target->start_event_obj); target->start_event_obj = NULL;
- Py_XDECREF(target->end_event_obj); target->end_event_obj = NULL;
- Py_XDECREF(target->start_ns_event_obj); target->start_ns_event_obj = NULL;
- Py_XDECREF(target->end_ns_event_obj); target->end_ns_event_obj = NULL;
+ Py_CLEAR(target->start_event_obj);
+ Py_CLEAR(target->end_event_obj);
+ Py_CLEAR(target->start_ns_event_obj);
+ Py_CLEAR(target->end_ns_event_obj);
if (event_set == Py_None) {
/* default is "end" only */
diff --git a/Modules/_functoolsmodule.c b/Modules/_functoolsmodule.c
index aa9eaee..6397ba9 100644
--- a/Modules/_functoolsmodule.c
+++ b/Modules/_functoolsmodule.c
@@ -290,10 +290,10 @@ partial_reduce(partialobject *pto, PyObject *unused)
}
PyObject *
-partial_setstate(partialobject *pto, PyObject *args)
+partial_setstate(partialobject *pto, PyObject *state)
{
PyObject *fn, *fnargs, *kw, *dict;
- if (!PyArg_ParseTuple(args, "(OOOO):__setstate__",
+ if (!PyArg_ParseTuple(state, "OOOO",
&fn, &fnargs, &kw, &dict))
return NULL;
Py_XDECREF(pto->fn);
@@ -317,7 +317,7 @@ partial_setstate(partialobject *pto, PyObject *args)
static PyMethodDef partial_methods[] = {
{"__reduce__", (PyCFunction)partial_reduce, METH_NOARGS},
- {"__setstate__", (PyCFunction)partial_setstate, METH_VARARGS},
+ {"__setstate__", (PyCFunction)partial_setstate, METH_O},
{NULL, NULL} /* sentinel */
};
diff --git a/Modules/_hashopenssl.c b/Modules/_hashopenssl.c
index aa9dd4e..7116eeb 100644
--- a/Modules/_hashopenssl.c
+++ b/Modules/_hashopenssl.c
@@ -37,6 +37,8 @@
/* EVP is the preferred interface to hashing in OpenSSL */
#include <openssl/evp.h>
+#include <openssl/hmac.h>
+#include <openssl/err.h>
#define MUNCH_SIZE INT_MAX
@@ -67,7 +69,7 @@ static PyTypeObject EVPtype;
#define DEFINE_CONSTS_FOR_NEW(Name) \
- static PyObject *CONST_ ## Name ## _name_obj; \
+ static PyObject *CONST_ ## Name ## _name_obj = NULL; \
static EVP_MD_CTX CONST_new_ ## Name ## _ctx; \
static EVP_MD_CTX *CONST_new_ ## Name ## _ctx_p = NULL;
@@ -477,6 +479,7 @@ EVP_new(PyObject *self, PyObject *args, PyObject *kwdict)
}
if (!PyArg_Parse(name_obj, "s", &name)) {
+ PyBuffer_Release(&view);
PyErr_SetString(PyExc_TypeError, "name must be a string");
return NULL;
}
@@ -490,6 +493,225 @@ EVP_new(PyObject *self, PyObject *args, PyObject *kwdict)
return ret_obj;
}
+
+#if (OPENSSL_VERSION_NUMBER >= 0x10000000 && !defined(OPENSSL_NO_HMAC) \
+ && !defined(OPENSSL_NO_SHA))
+
+#define PY_PBKDF2_HMAC 1
+
+/* Improved implementation of PKCS5_PBKDF2_HMAC()
+ *
+ * PKCS5_PBKDF2_HMAC_fast() hashes the password exactly one time instead of
+ * `iter` times. Today (2013) the iteration count is typically 100,000 or
+ * more. The improved algorithm is not subject to a Denial-of-Service
+ * vulnerability with overly large passwords.
+ *
+ * Also, OpenSSL < 1.0 doesn't provide PKCS5_PBKDF2_HMAC(), only
+ * PKCS5_PBKDF2_SHA1.
+ */
+static int
+PKCS5_PBKDF2_HMAC_fast(const char *pass, int passlen,
+ const unsigned char *salt, int saltlen,
+ int iter, const EVP_MD *digest,
+ int keylen, unsigned char *out)
+{
+ unsigned char digtmp[EVP_MAX_MD_SIZE], *p, itmp[4];
+ int cplen, j, k, tkeylen, mdlen;
+ unsigned long i = 1;
+ HMAC_CTX hctx_tpl, hctx;
+
+ mdlen = EVP_MD_size(digest);
+ if (mdlen < 0)
+ return 0;
+
+ HMAC_CTX_init(&hctx_tpl);
+ HMAC_CTX_init(&hctx);
+ p = out;
+ tkeylen = keylen;
+ if (!HMAC_Init_ex(&hctx_tpl, pass, passlen, digest, NULL)) {
+ HMAC_CTX_cleanup(&hctx_tpl);
+ return 0;
+ }
+ while(tkeylen) {
+ if(tkeylen > mdlen)
+ cplen = mdlen;
+ else
+ cplen = tkeylen;
+ /* We are unlikely to ever use more than 256 blocks (5120 bits!)
+ * but just in case...
+ */
+ itmp[0] = (unsigned char)((i >> 24) & 0xff);
+ itmp[1] = (unsigned char)((i >> 16) & 0xff);
+ itmp[2] = (unsigned char)((i >> 8) & 0xff);
+ itmp[3] = (unsigned char)(i & 0xff);
+ if (!HMAC_CTX_copy(&hctx, &hctx_tpl)) {
+ HMAC_CTX_cleanup(&hctx_tpl);
+ return 0;
+ }
+ if (!HMAC_Update(&hctx, salt, saltlen)
+ || !HMAC_Update(&hctx, itmp, 4)
+ || !HMAC_Final(&hctx, digtmp, NULL)) {
+ HMAC_CTX_cleanup(&hctx_tpl);
+ HMAC_CTX_cleanup(&hctx);
+ return 0;
+ }
+ HMAC_CTX_cleanup(&hctx);
+ memcpy(p, digtmp, cplen);
+ for (j = 1; j < iter; j++) {
+ if (!HMAC_CTX_copy(&hctx, &hctx_tpl)) {
+ HMAC_CTX_cleanup(&hctx_tpl);
+ return 0;
+ }
+ if (!HMAC_Update(&hctx, digtmp, mdlen)
+ || !HMAC_Final(&hctx, digtmp, NULL)) {
+ HMAC_CTX_cleanup(&hctx_tpl);
+ HMAC_CTX_cleanup(&hctx);
+ return 0;
+ }
+ HMAC_CTX_cleanup(&hctx);
+ for (k = 0; k < cplen; k++) {
+ p[k] ^= digtmp[k];
+ }
+ }
+ tkeylen-= cplen;
+ i++;
+ p+= cplen;
+ }
+ HMAC_CTX_cleanup(&hctx_tpl);
+ return 1;
+}
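
Note: what PKCS5_PBKDF2_HMAC_fast() buys is the hctx_tpl template context: the HMAC is keyed with the password exactly once and then copied for every block and iteration, instead of being re-initialized with the password each time. A rough Python sketch of the same structure (names are illustrative; the module's actual code path stays in C):

    import hashlib, hmac, struct

    def pbkdf2_sketch(hash_name, password, salt, iterations, dklen):
        # Key the HMAC with the password once; all later work copies it.
        tpl = hmac.new(password, digestmod=getattr(hashlib, hash_name))
        out, block = b"", 1
        while len(out) < dklen:
            h = tpl.copy()
            h.update(salt + struct.pack(">I", block))   # U1 = PRF(P, S || i)
            u = h.digest()
            acc = bytearray(u)
            for _ in range(iterations - 1):             # Uj = PRF(P, Uj-1)
                h = tpl.copy()
                h.update(u)
                u = h.digest()
                for i in range(len(u)):
                    acc[i] ^= ord(u[i:i+1])             # T = U1 xor ... xor Uc
            out += bytes(acc)
            block += 1
        return out[:dklen]
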
+
+/* LCOV_EXCL_START */
+static PyObject *
+_setException(PyObject *exc)
+{
+ unsigned long errcode;
+ const char *lib, *func, *reason;
+
+ errcode = ERR_peek_last_error();
+ if (!errcode) {
+ PyErr_SetString(exc, "unknown reasons");
+ return NULL;
+ }
+ ERR_clear_error();
+
+ lib = ERR_lib_error_string(errcode);
+ func = ERR_func_error_string(errcode);
+ reason = ERR_reason_error_string(errcode);
+
+ if (lib && func) {
+ PyErr_Format(exc, "[%s: %s] %s", lib, func, reason);
+ }
+ else if (lib) {
+ PyErr_Format(exc, "[%s] %s", lib, reason);
+ }
+ else {
+ PyErr_SetString(exc, reason);
+ }
+ return NULL;
+}
+/* LCOV_EXCL_STOP */
+
+PyDoc_STRVAR(pbkdf2_hmac__doc__,
+"pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None) -> key\n\
+\n\
+Password based key derivation function 2 (PKCS #5 v2.0) with HMAC as\n\
+pseudorandom function.");
+
+static PyObject *
+pbkdf2_hmac(PyObject *self, PyObject *args, PyObject *kwdict)
+{
+ static char *kwlist[] = {"hash_name", "password", "salt", "iterations",
+ "dklen", NULL};
+ PyObject *key_obj = NULL, *dklen_obj = Py_None;
+ char *name, *key;
+ Py_buffer password, salt;
+ long iterations, dklen;
+ int retval;
+ const EVP_MD *digest;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwdict, "ss*s*l|O:pbkdf2_hmac",
+ kwlist, &name, &password, &salt,
+ &iterations, &dklen_obj)) {
+ return NULL;
+ }
+
+ digest = EVP_get_digestbyname(name);
+ if (digest == NULL) {
+ PyErr_SetString(PyExc_ValueError, "unsupported hash type");
+ goto end;
+ }
+
+ if (password.len > INT_MAX) {
+ PyErr_SetString(PyExc_OverflowError,
+ "password is too long.");
+ goto end;
+ }
+
+ if (salt.len > INT_MAX) {
+ PyErr_SetString(PyExc_OverflowError,
+ "salt is too long.");
+ goto end;
+ }
+
+ if (iterations < 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "iteration value must be greater than 0.");
+ goto end;
+ }
+ if (iterations > INT_MAX) {
+ PyErr_SetString(PyExc_OverflowError,
+ "iteration value is too great.");
+ goto end;
+ }
+
+ if (dklen_obj == Py_None) {
+ dklen = EVP_MD_size(digest);
+ } else {
+ dklen = PyLong_AsLong(dklen_obj);
+ if ((dklen == -1) && PyErr_Occurred()) {
+ goto end;
+ }
+ }
+ if (dklen < 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "key length must be greater than 0.");
+ goto end;
+ }
+ if (dklen > INT_MAX) {
+ /* INT_MAX is always smaller than dkLen max (2^32 - 1) * hLen */
+ PyErr_SetString(PyExc_OverflowError,
+ "key length is too great.");
+ goto end;
+ }
+
+ key_obj = PyBytes_FromStringAndSize(NULL, dklen);
+ if (key_obj == NULL) {
+ goto end;
+ }
+ key = PyBytes_AS_STRING(key_obj);
+
+ Py_BEGIN_ALLOW_THREADS
+ retval = PKCS5_PBKDF2_HMAC_fast((char*)password.buf, (int)password.len,
+ (unsigned char *)salt.buf, (int)salt.len,
+ iterations, digest, dklen,
+ (unsigned char *)key);
+ Py_END_ALLOW_THREADS
+
+ if (!retval) {
+ Py_CLEAR(key_obj);
+ _setException(PyExc_ValueError);
+ goto end;
+ }
+
+ end:
+ PyBuffer_Release(&password);
+ PyBuffer_Release(&salt);
+ return key_obj;
+}
+
+#endif
+
/*
* This macro generates constructor function definitions for specific
* hash algorithms. These constructors are much faster than calling
@@ -524,12 +746,15 @@ EVP_new(PyObject *self, PyObject *args, PyObject *kwdict)
" hash object; optionally initialized with a string") \
}
-/* used in the init function to setup a constructor */
+/* used in the init function to setup a constructor: initialize OpenSSL
+ constructor constants if they haven't been initialized already. */
#define INIT_CONSTRUCTOR_CONSTANTS(NAME) do { \
+ if (CONST_ ## NAME ## _name_obj == NULL) { \
CONST_ ## NAME ## _name_obj = PyString_FromString(#NAME); \
- if (EVP_get_digestbyname(#NAME)) { \
- CONST_new_ ## NAME ## _ctx_p = &CONST_new_ ## NAME ## _ctx; \
- EVP_DigestInit(CONST_new_ ## NAME ## _ctx_p, EVP_get_digestbyname(#NAME)); \
+ if (EVP_get_digestbyname(#NAME)) { \
+ CONST_new_ ## NAME ## _ctx_p = &CONST_new_ ## NAME ## _ctx; \
+ EVP_DigestInit(CONST_new_ ## NAME ## _ctx_p, EVP_get_digestbyname(#NAME)); \
+ } \
} \
} while (0);
@@ -554,6 +779,10 @@ static struct PyMethodDef EVP_functions[] = {
CONSTRUCTOR_METH_DEF(sha384),
CONSTRUCTOR_METH_DEF(sha512),
#endif
+#ifdef PY_PBKDF2_HMAC
+ {"pbkdf2_hmac", (PyCFunction)pbkdf2_hmac, METH_VARARGS|METH_KEYWORDS,
+ pbkdf2_hmac__doc__},
+#endif
{NULL, NULL} /* Sentinel */
};
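
Note: per the docstring and the "ss*s*l|O:pbkdf2_hmac" argument spec above, the new entry point is pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None), with dklen defaulting to the digest size of the chosen hash. On builds where PY_PBKDF2_HMAC is defined (OpenSSL >= 1.0 with HMAC and SHA available) it is reachable from Python, in this release via the hashlib wrapper, roughly as:

    import hashlib

    # 32-byte key: dklen is omitted, so it falls back to sha256's digest size
    key = hashlib.pbkdf2_hmac("sha256", b"secret password", b"per-user salt", 100000)
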
diff --git a/Modules/_heapqmodule.c b/Modules/_heapqmodule.c
index 495114b..30cedb9 100644
--- a/Modules/_heapqmodule.c
+++ b/Modules/_heapqmodule.c
@@ -35,12 +35,14 @@ cmp_lt(PyObject *x, PyObject *y)
static int
_siftdown(PyListObject *heap, Py_ssize_t startpos, Py_ssize_t pos)
{
- PyObject *newitem, *parent;
+ PyObject *newitem, *parent, *olditem;
int cmp;
Py_ssize_t parentpos;
+ Py_ssize_t size;
assert(PyList_Check(heap));
- if (pos >= PyList_GET_SIZE(heap)) {
+ size = PyList_GET_SIZE(heap);
+ if (pos >= size) {
PyErr_SetString(PyExc_IndexError, "index out of range");
return -1;
}
@@ -57,12 +59,24 @@ _siftdown(PyListObject *heap, Py_ssize_t startpos, Py_ssize_t pos)
Py_DECREF(newitem);
return -1;
}
+ if (size != PyList_GET_SIZE(heap)) {
+ Py_DECREF(newitem);
+ PyErr_SetString(PyExc_RuntimeError,
+ "list changed size during iteration");
+ return -1;
+ }
if (cmp == 0)
break;
Py_INCREF(parent);
- Py_DECREF(PyList_GET_ITEM(heap, pos));
+ olditem = PyList_GET_ITEM(heap, pos);
PyList_SET_ITEM(heap, pos, parent);
+ Py_DECREF(olditem);
pos = parentpos;
+ if (size != PyList_GET_SIZE(heap)) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "list changed size during iteration");
+ return -1;
+ }
}
Py_DECREF(PyList_GET_ITEM(heap, pos));
PyList_SET_ITEM(heap, pos, newitem);
@@ -72,12 +86,14 @@ _siftdown(PyListObject *heap, Py_ssize_t startpos, Py_ssize_t pos)
static int
_siftup(PyListObject *heap, Py_ssize_t pos)
{
- Py_ssize_t startpos, endpos, childpos, rightpos;
+ Py_ssize_t startpos, endpos, childpos, rightpos, limit;
int cmp;
- PyObject *newitem, *tmp;
+ PyObject *newitem, *tmp, *olditem;
+ Py_ssize_t size;
assert(PyList_Check(heap));
- endpos = PyList_GET_SIZE(heap);
+ size = PyList_GET_SIZE(heap);
+ endpos = size;
startpos = pos;
if (pos >= endpos) {
PyErr_SetString(PyExc_IndexError, "index out of range");
@@ -87,9 +103,10 @@ _siftup(PyListObject *heap, Py_ssize_t pos)
Py_INCREF(newitem);
/* Bubble up the smaller child until hitting a leaf. */
- childpos = 2*pos + 1; /* leftmost child position */
- while (childpos < endpos) {
+ limit = endpos / 2; /* smallest pos that has no child */
+ while (pos < limit) {
/* Set childpos to index of smaller child. */
+ childpos = 2*pos + 1; /* leftmost child position */
rightpos = childpos + 1;
if (rightpos < endpos) {
cmp = cmp_lt(
@@ -102,16 +119,27 @@ _siftup(PyListObject *heap, Py_ssize_t pos)
if (cmp == 0)
childpos = rightpos;
}
+ if (size != PyList_GET_SIZE(heap)) {
+ Py_DECREF(newitem);
+ PyErr_SetString(PyExc_RuntimeError,
+ "list changed size during iteration");
+ return -1;
+ }
/* Move the smaller child up. */
tmp = PyList_GET_ITEM(heap, childpos);
Py_INCREF(tmp);
- Py_DECREF(PyList_GET_ITEM(heap, pos));
+ olditem = PyList_GET_ITEM(heap, pos);
PyList_SET_ITEM(heap, pos, tmp);
+ Py_DECREF(olditem);
pos = childpos;
- childpos = 2*pos + 1;
+ if (size != PyList_GET_SIZE(heap)) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "list changed size during iteration");
+ return -1;
+ }
}
- /* The leaf at pos is empty now. Put newitem there, and and bubble
+ /* The leaf at pos is empty now. Put newitem there, and bubble
it up to its final resting place (by sifting its parents down). */
Py_DECREF(PyList_GET_ITEM(heap, pos));
PyList_SET_ITEM(heap, pos, newitem);
@@ -141,7 +169,7 @@ heappush(PyObject *self, PyObject *args)
}
PyDoc_STRVAR(heappush_doc,
-"Push item onto heap, maintaining the heap invariant.");
+"heappush(heap, item) -> None. Push item onto heap, maintaining the heap invariant.");
static PyObject *
heappop(PyObject *self, PyObject *heap)
@@ -209,7 +237,7 @@ heapreplace(PyObject *self, PyObject *args)
}
PyDoc_STRVAR(heapreplace_doc,
-"Pop and return the current smallest value, and add the new item.\n\
+"heapreplace(heap, item) -> value. Pop and return the current smallest value, and add the new item.\n\
\n\
This is more efficient than heappop() followed by heappush(), and can be\n\
more appropriate when using a fixed-size heap. Note that the value\n\
@@ -256,7 +284,7 @@ heappushpop(PyObject *self, PyObject *args)
}
PyDoc_STRVAR(heappushpop_doc,
-"Push item on the heap, then pop and return the smallest item\n\
+"heappushpop(heap, item) -> value. Push item on the heap, then pop and return the smallest item\n\
from the heap. The combined action runs more efficiently than\n\
heappush() followed by a separate call to heappop().");
@@ -411,7 +439,7 @@ _siftdownmax(PyListObject *heap, Py_ssize_t startpos, Py_ssize_t pos)
static int
_siftupmax(PyListObject *heap, Py_ssize_t pos)
{
- Py_ssize_t startpos, endpos, childpos, rightpos;
+ Py_ssize_t startpos, endpos, childpos, rightpos, limit;
int cmp;
PyObject *newitem, *tmp;
@@ -426,9 +454,10 @@ _siftupmax(PyListObject *heap, Py_ssize_t pos)
Py_INCREF(newitem);
/* Bubble up the smaller child until hitting a leaf. */
- childpos = 2*pos + 1; /* leftmost child position */
- while (childpos < endpos) {
+ limit = endpos / 2; /* smallest pos that has no child */
+ while (pos < limit) {
/* Set childpos to index of smaller child. */
+ childpos = 2*pos + 1; /* leftmost child position */
rightpos = childpos + 1;
if (rightpos < endpos) {
cmp = cmp_lt(
@@ -447,10 +476,9 @@ _siftupmax(PyListObject *heap, Py_ssize_t pos)
Py_DECREF(PyList_GET_ITEM(heap, pos));
PyList_SET_ITEM(heap, pos, tmp);
pos = childpos;
- childpos = 2*pos + 1;
}
- /* The leaf at pos is empty now. Put newitem there, and and bubble
+ /* The leaf at pos is empty now. Put newitem there, and bubble
it up to its final resting place (by sifting its parents down). */
Py_DECREF(PyList_GET_ITEM(heap, pos));
PyList_SET_ITEM(heap, pos, newitem);
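
Note: the reworked _siftup (and its max-heap twin _siftupmax) changes the loop bound: it iterates while pos still has a child (pos < size/2, the "smallest pos that has no child"), recomputes childpos at the top of each pass, and in the min-heap variant raises RuntimeError if the list changes size mid-loop. Stripped of refcounting and the size checks, the control flow corresponds to this pure-Python sketch (illustrative only):

    def _siftup_sketch(heap, pos):
        endpos = len(heap)
        startpos = pos
        newitem = heap[pos]
        limit = endpos // 2              # smallest pos that has no child
        while pos < limit:
            childpos = 2 * pos + 1       # leftmost child position
            rightpos = childpos + 1
            if rightpos < endpos and not heap[childpos] < heap[rightpos]:
                childpos = rightpos
            heap[pos] = heap[childpos]   # move the smaller child up
            pos = childpos
        # The leaf at pos is empty now; put newitem there and bubble it
        # up toward the root (the _siftdown step).
        while pos > startpos:
            parentpos = (pos - 1) >> 1
            if newitem < heap[parentpos]:
                heap[pos] = heap[parentpos]
                pos = parentpos
            else:
                break
        heap[pos] = newitem
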
diff --git a/Modules/_io/_iomodule.c b/Modules/_io/_iomodule.c
index 6d7121c..40acdf5 100644
--- a/Modules/_io/_iomodule.c
+++ b/Modules/_io/_iomodule.c
@@ -58,8 +58,8 @@ PyDoc_STRVAR(module_doc,
"\n"
"At the top of the I/O hierarchy is the abstract base class IOBase. It\n"
"defines the basic interface to a stream. Note, however, that there is no\n"
-"seperation between reading and writing to streams; implementations are\n"
-"allowed to throw an IOError if they do not support a given operation.\n"
+"separation between reading and writing to streams; implementations are\n"
+"allowed to raise an IOError if they do not support a given operation.\n"
"\n"
"Extending IOBase is RawIOBase which deals simply with the reading and\n"
"writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide\n"
@@ -300,9 +300,10 @@ io_open(PyObject *self, PyObject *args, PyObject *kwds)
int text = 0, binary = 0, universal = 0;
char rawmode[5], *m;
- int line_buffering, isatty;
+ int line_buffering;
+ long isatty;
- PyObject *raw, *modeobj = NULL, *buffer = NULL, *wrapper = NULL;
+ PyObject *raw, *modeobj = NULL, *buffer, *wrapper, *result = NULL;
if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|sizzzi:open", kwlist,
&file, &mode, &buffering,
@@ -415,6 +416,7 @@ io_open(PyObject *self, PyObject *args, PyObject *kwds)
"Osi", file, rawmode, closefd);
if (raw == NULL)
return NULL;
+ result = raw;
modeobj = PyUnicode_FromString(mode);
if (modeobj == NULL)
@@ -443,12 +445,12 @@ io_open(PyObject *self, PyObject *args, PyObject *kwds)
#ifdef HAVE_STRUCT_STAT_ST_BLKSIZE
{
struct stat st;
- long fileno;
+ int fileno;
PyObject *res = PyObject_CallMethod(raw, "fileno", NULL);
if (res == NULL)
goto error;
- fileno = PyInt_AsLong(res);
+ fileno = _PyInt_AsInt(res);
Py_DECREF(res);
if (fileno == -1 && PyErr_Occurred())
goto error;
@@ -473,7 +475,7 @@ io_open(PyObject *self, PyObject *args, PyObject *kwds)
}
Py_DECREF(modeobj);
- return raw;
+ return result;
}
/* wraps into a buffered file */
@@ -494,15 +496,16 @@ io_open(PyObject *self, PyObject *args, PyObject *kwds)
buffer = PyObject_CallFunction(Buffered_class, "Oi", raw, buffering);
}
- Py_CLEAR(raw);
if (buffer == NULL)
goto error;
+ result = buffer;
+ Py_DECREF(raw);
/* if binary, returns the buffered file */
if (binary) {
Py_DECREF(modeobj);
- return buffer;
+ return result;
}
/* wraps into a TextIOWrapper */
@@ -511,20 +514,30 @@ io_open(PyObject *self, PyObject *args, PyObject *kwds)
buffer,
encoding, errors, newline,
line_buffering);
- Py_CLEAR(buffer);
if (wrapper == NULL)
goto error;
+ result = wrapper;
+ Py_DECREF(buffer);
if (PyObject_SetAttrString(wrapper, "mode", modeobj) < 0)
goto error;
Py_DECREF(modeobj);
- return wrapper;
+ return result;
error:
- Py_XDECREF(raw);
+ if (result != NULL) {
+ PyObject *exc, *val, *tb;
+ PyErr_Fetch(&exc, &val, &tb);
+ if (PyObject_CallMethod(result, "close", NULL) != NULL)
+ PyErr_Restore(exc, val, tb);
+ else {
+ Py_XDECREF(exc);
+ Py_XDECREF(val);
+ Py_XDECREF(tb);
+ }
+ Py_DECREF(result);
+ }
Py_XDECREF(modeobj);
- Py_XDECREF(buffer);
- Py_XDECREF(wrapper);
return NULL;
}
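
Note: the io_open() rework keeps a single result pointer on the outermost object built so far (raw, then buffer, then wrapper) and, if any later step fails, closes that object before re-raising the original error, instead of merely dropping references. The pattern, reduced to a runnable Python sketch (names are ours):

    def layered_open(make_raw, *wrappers):
        # Build raw -> buffered -> text; if a later layer fails, close the
        # outermost layer already built, then re-raise the original error.
        result = make_raw()
        try:
            for wrap in wrappers:
                result = wrap(result)
            return result
        except Exception:
            result.close()
            raise
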
diff --git a/Modules/_io/_iomodule.h b/Modules/_io/_iomodule.h
index 0fa5391..c282e61 100644
--- a/Modules/_io/_iomodule.h
+++ b/Modules/_io/_iomodule.h
@@ -57,6 +57,11 @@ extern Py_ssize_t _PyIO_find_line_ending(
int translated, int universal, PyObject *readnl,
Py_UNICODE *start, Py_UNICODE *end, Py_ssize_t *consumed);
+/* Return 1 if an EnvironmentError with errno == EINTR is set (and then
+ clears the error indicator), 0 otherwise.
+ Should only be called when PyErr_Occurred() is true.
+*/
+extern int _PyIO_trap_eintr(void);
#define DEFAULT_BUFFER_SIZE (8 * 1024) /* bytes */
@@ -72,7 +77,7 @@ typedef struct {
PyObject *filename; /* Not used, but part of the IOError object */
Py_ssize_t written;
} PyBlockingIOErrorObject;
-PyAPI_DATA(PyObject *) PyExc_BlockingIOError;
+extern PyObject *PyExc_BlockingIOError;
/*
* Offset type for positioning.
diff --git a/Modules/_io/bufferedio.c b/Modules/_io/bufferedio.c
index 9b403d9..758edf4 100644
--- a/Modules/_io/bufferedio.c
+++ b/Modules/_io/bufferedio.c
@@ -386,6 +386,17 @@ buffered_dealloc(buffered *self)
Py_TYPE(self)->tp_free((PyObject *)self);
}
+static PyObject *
+buffered_sizeof(buffered *self, void *unused)
+{
+ Py_ssize_t res;
+
+ res = sizeof(buffered);
+ if (self->buffer)
+ res += self->buffer_size;
+ return PyLong_FromSsize_t(res);
+}
+
static int
buffered_traverse(buffered *self, visitproc visit, void *arg)
{
@@ -444,7 +455,7 @@ buffered_closed_get(buffered *self, void *context)
static PyObject *
buffered_close(buffered *self, PyObject *args)
{
- PyObject *res = NULL;
+ PyObject *res = NULL, *exc = NULL, *val, *tb;
int r;
CHECK_INITIALIZED(self)
@@ -464,13 +475,25 @@ buffered_close(buffered *self, PyObject *args)
res = PyObject_CallMethodObjArgs((PyObject *)self, _PyIO_str_flush, NULL);
if (!ENTER_BUFFERED(self))
return NULL;
- if (res == NULL) {
- goto end;
- }
- Py_XDECREF(res);
+ if (res == NULL)
+ PyErr_Fetch(&exc, &val, &tb);
+ else
+ Py_DECREF(res);
res = PyObject_CallMethodObjArgs(self->raw, _PyIO_str_close, NULL);
+ if (exc != NULL) {
+ if (res != NULL) {
+ Py_CLEAR(res);
+ PyErr_Restore(exc, val, tb);
+ }
+ else {
+ Py_DECREF(exc);
+ Py_XDECREF(val);
+ Py_XDECREF(tb);
+ }
+ }
+
end:
LEAVE_BUFFERED(self)
return res;
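
Note: the buffered_close() change makes flush-then-close behave like a try/finally: the raw stream is always closed, a flush error is re-raised afterwards if the close itself succeeded, and a close error takes precedence when both fail. Roughly, at the Python level (attribute names follow the C fields):

    def close_sketch(buffered):
        try:
            buffered.flush()
        finally:
            buffered.raw.close()
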
@@ -699,8 +722,8 @@ _buffered_init(buffered *self)
clears the error indicator), 0 otherwise.
Should only be called when PyErr_Occurred() is true.
*/
-static int
-_trap_eintr(void)
+int
+_PyIO_trap_eintr(void)
{
static PyObject *eintr_int = NULL;
PyObject *typ, *val, *tb;
@@ -1285,7 +1308,7 @@ _bufferedreader_raw_read(buffered *self, char *start, Py_ssize_t len)
*/
do {
res = PyObject_CallMethodObjArgs(self->raw, _PyIO_str_readinto, memobj, NULL);
- } while (res == NULL && _trap_eintr());
+ } while (res == NULL && _PyIO_trap_eintr());
Py_DECREF(memobj);
if (res == NULL)
return -1;
@@ -1560,6 +1583,7 @@ static PyMethodDef bufferedreader_methods[] = {
{"seek", (PyCFunction)buffered_seek, METH_VARARGS},
{"tell", (PyCFunction)buffered_tell, METH_NOARGS},
{"truncate", (PyCFunction)buffered_truncate, METH_VARARGS},
+ {"__sizeof__", (PyCFunction)buffered_sizeof, METH_NOARGS},
{NULL, NULL}
};
@@ -1659,7 +1683,7 @@ bufferedwriter_init(buffered *self, PyObject *args, PyObject *kwds)
self->ok = 0;
self->detached = 0;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|nn:BufferedReader", kwlist,
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|nn:BufferedWriter", kwlist,
&raw, &buffer_size, &max_buffer_size)) {
return -1;
}
@@ -1711,7 +1735,7 @@ _bufferedwriter_raw_write(buffered *self, char *start, Py_ssize_t len)
errno = 0;
res = PyObject_CallMethodObjArgs(self->raw, _PyIO_str_write, memobj, NULL);
errnum = errno;
- } while (res == NULL && _trap_eintr());
+ } while (res == NULL && _PyIO_trap_eintr());
Py_DECREF(memobj);
if (res == NULL)
return -1;
@@ -1952,6 +1976,7 @@ static PyMethodDef bufferedwriter_methods[] = {
{"flush", (PyCFunction)buffered_flush, METH_NOARGS},
{"seek", (PyCFunction)buffered_seek, METH_VARARGS},
{"tell", (PyCFunction)buffered_tell, METH_NOARGS},
+ {"__sizeof__", (PyCFunction)buffered_sizeof, METH_NOARGS},
{NULL, NULL}
};
@@ -2104,9 +2129,14 @@ bufferedrwpair_dealloc(rwpair *self)
static PyObject *
_forward_call(buffered *self, const char *name, PyObject *args)
{
- PyObject *func = PyObject_GetAttrString((PyObject *)self, name);
- PyObject *ret;
+ PyObject *func, *ret;
+ if (self == NULL) {
+ PyErr_SetString(PyExc_ValueError,
+ "I/O operation on uninitialized object");
+ return NULL;
+ }
+ func = PyObject_GetAttrString((PyObject *)self, name);
if (func == NULL) {
PyErr_SetString(PyExc_AttributeError, name);
return NULL;
@@ -2291,7 +2321,7 @@ bufferedrandom_init(buffered *self, PyObject *args, PyObject *kwds)
self->ok = 0;
self->detached = 0;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|nn:BufferedReader", kwlist,
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|nn:BufferedRandom", kwlist,
&raw, &buffer_size, &max_buffer_size)) {
return -1;
}
@@ -2347,6 +2377,7 @@ static PyMethodDef bufferedrandom_methods[] = {
{"readline", (PyCFunction)buffered_readline, METH_VARARGS},
{"peek", (PyCFunction)buffered_peek, METH_VARARGS},
{"write", (PyCFunction)bufferedwriter_write, METH_VARARGS},
+ {"__sizeof__", (PyCFunction)buffered_sizeof, METH_NOARGS},
{NULL, NULL}
};
diff --git a/Modules/_io/bytesio.c b/Modules/_io/bytesio.c
index 51fad21..6498320 100644
--- a/Modules/_io/bytesio.c
+++ b/Modules/_io/bytesio.c
@@ -106,7 +106,7 @@ resize_buffer(bytesio *self, size_t size)
}
/* Internal routine for writing a string of bytes to the buffer of a BytesIO
- object. Returns the number of bytes wrote, or -1 on error. */
+ object. Returns the number of bytes written, or -1 on error. */
static Py_ssize_t
write_bytes(bytesio *self, const char *bytes, Py_ssize_t len)
{
@@ -156,10 +156,20 @@ bytesio_get_closed(bytesio *self)
}
}
+PyDoc_STRVAR(readable_doc,
+"readable() -> bool. Returns True if the IO object can be read.");
+
+PyDoc_STRVAR(writable_doc,
+"writable() -> bool. Returns True if the IO object can be written.");
+
+PyDoc_STRVAR(seekable_doc,
+"seekable() -> bool. Returns True if the IO object can be seeked.");
+
/* Generic getter for the writable, readable and seekable properties */
static PyObject *
-return_true(bytesio *self)
+return_not_closed(bytesio *self)
{
+ CHECK_CLOSED(self);
Py_RETURN_TRUE;
}
@@ -794,6 +804,17 @@ bytesio_init(bytesio *self, PyObject *args, PyObject *kwds)
return 0;
}
+static PyObject *
+bytesio_sizeof(bytesio *self, void *unused)
+{
+ Py_ssize_t res;
+
+ res = sizeof(bytesio);
+ if (self->buf)
+ res += self->buf_size;
+ return PyLong_FromSsize_t(res);
+}
+
static int
bytesio_traverse(bytesio *self, visitproc visit, void *arg)
{
@@ -816,9 +837,9 @@ static PyGetSetDef bytesio_getsetlist[] = {
};
static struct PyMethodDef bytesio_methods[] = {
- {"readable", (PyCFunction)return_true, METH_NOARGS, NULL},
- {"seekable", (PyCFunction)return_true, METH_NOARGS, NULL},
- {"writable", (PyCFunction)return_true, METH_NOARGS, NULL},
+ {"readable", (PyCFunction)return_not_closed, METH_NOARGS, readable_doc},
+ {"seekable", (PyCFunction)return_not_closed, METH_NOARGS, seekable_doc},
+ {"writable", (PyCFunction)return_not_closed, METH_NOARGS, writable_doc},
{"close", (PyCFunction)bytesio_close, METH_NOARGS, close_doc},
{"flush", (PyCFunction)bytesio_flush, METH_NOARGS, flush_doc},
{"isatty", (PyCFunction)bytesio_isatty, METH_NOARGS, isatty_doc},
@@ -835,6 +856,7 @@ static struct PyMethodDef bytesio_methods[] = {
{"truncate", (PyCFunction)bytesio_truncate, METH_VARARGS, truncate_doc},
{"__getstate__", (PyCFunction)bytesio_getstate, METH_NOARGS, NULL},
{"__setstate__", (PyCFunction)bytesio_setstate, METH_O, NULL},
+ {"__sizeof__", (PyCFunction)bytesio_sizeof, METH_NOARGS, NULL},
{NULL, NULL} /* sentinel */
};
diff --git a/Modules/_io/fileio.c b/Modules/_io/fileio.c
index 0048240..58b68b6 100644
--- a/Modules/_io/fileio.c
+++ b/Modules/_io/fileio.c
@@ -47,6 +47,7 @@ typedef struct {
int fd;
unsigned int readable : 1;
unsigned int writable : 1;
+ unsigned int appending : 1;
signed int seekable : 2; /* -1 means unknown */
unsigned int closefd : 1;
PyObject *weakreflist;
@@ -124,6 +125,7 @@ fileio_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
self->fd = -1;
self->readable = 0;
self->writable = 0;
+ self->appending = 0;
self->seekable = -1;
self->closefd = 1;
self->weakreflist = NULL;
@@ -137,22 +139,15 @@ fileio_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
directories, so we need a check. */
static int
-dircheck(fileio* self, const char *name)
+dircheck(fileio* self, PyObject *nameobj)
{
#if defined(HAVE_FSTAT) && defined(S_IFDIR) && defined(EISDIR)
struct stat buf;
if (self->fd < 0)
return 0;
if (fstat(self->fd, &buf) == 0 && S_ISDIR(buf.st_mode)) {
- char *msg = strerror(EISDIR);
- PyObject *exc;
- if (internal_close(self))
- return -1;
-
- exc = PyObject_CallFunction(PyExc_IOError, "(iss)",
- EISDIR, msg, name);
- PyErr_SetObject(PyExc_IOError, exc);
- Py_XDECREF(exc);
+ errno = EISDIR;
+ PyErr_SetFromErrnoWithFilenameObject(PyExc_IOError, nameobj);
return -1;
}
#endif
@@ -191,16 +186,21 @@ fileio_init(PyObject *oself, PyObject *args, PyObject *kwds)
Py_UNICODE *widename = NULL;
#endif
int ret = 0;
- int rwa = 0, plus = 0, append = 0;
+ int rwa = 0, plus = 0;
int flags = 0;
int fd = -1;
int closefd = 1;
+ int fd_is_own = 0;
assert(PyFileIO_Check(oself));
if (self->fd >= 0) {
- /* Have to close the existing file first. */
- if (internal_close(self) < 0)
- return -1;
+ if (self->closefd) {
+ /* Have to close the existing file first. */
+ if (internal_close(self) < 0)
+ return -1;
+ }
+ else
+ self->fd = -1;
}
if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|si:fileio",
@@ -213,7 +213,7 @@ fileio_init(PyObject *oself, PyObject *args, PyObject *kwds)
return -1;
}
- fd = PyLong_AsLong(nameobj);
+ fd = _PyLong_AsInt(nameobj);
if (fd < 0) {
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ValueError,
@@ -281,8 +281,8 @@ fileio_init(PyObject *oself, PyObject *args, PyObject *kwds)
goto bad_mode;
rwa = 1;
self->writable = 1;
- flags |= O_CREAT;
- append = 1;
+ self->appending = 1;
+ flags |= O_APPEND | O_CREAT;
break;
case 'b':
break;
@@ -313,11 +313,6 @@ fileio_init(PyObject *oself, PyObject *args, PyObject *kwds)
flags |= O_BINARY;
#endif
-#ifdef O_APPEND
- if (append)
- flags |= O_APPEND;
-#endif
-
if (fd >= 0) {
if (check_fd(fd))
goto error;
@@ -341,6 +336,7 @@ fileio_init(PyObject *oself, PyObject *args, PyObject *kwds)
#endif
self->fd = open(name, flags, 0666);
Py_END_ALLOW_THREADS
+ fd_is_own = 1;
if (self->fd < 0) {
#ifdef MS_WINDOWS
if (widename != NULL)
@@ -350,31 +346,29 @@ fileio_init(PyObject *oself, PyObject *args, PyObject *kwds)
PyErr_SetFromErrnoWithFilename(PyExc_IOError, name);
goto error;
}
- if(dircheck(self, name) < 0)
- goto error;
}
+ if (dircheck(self, nameobj) < 0)
+ goto error;
if (PyObject_SetAttrString((PyObject *)self, "name", nameobj) < 0)
goto error;
- if (append) {
+ if (self->appending) {
/* For consistent behaviour, we explicitly seek to the
end of file (otherwise, it might be done only on the
first write()). */
PyObject *pos = portable_lseek(self->fd, NULL, 2);
- if (pos == NULL) {
- if (closefd) {
- close(self->fd);
- self->fd = -1;
- }
+ if (pos == NULL)
goto error;
- }
Py_DECREF(pos);
}
goto done;
error:
+ if (!fd_is_own)
+ self->fd = -1;
+
ret = -1;
done:
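Append mode is now recorded on the FileIO object itself: 'a' sets self->appending and opens with O_APPEND | O_CREAT, and the constructor seeks to the end once so that tell() agrees with where the first write will land. Roughly, in plain POSIX terms (error handling omitted, names illustrative):

#include <fcntl.h>
#include <unistd.h>

int open_for_append(const char *path)
{
    int fd = open(path, O_WRONLY | O_APPEND | O_CREAT, 0666);
    if (fd >= 0)
        (void)lseek(fd, 0, SEEK_END);   /* mirror FileIO's explicit seek */
    return fd;
}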
@@ -533,7 +527,7 @@ fileio_readall(fileio *self)
{
PyObject *result;
Py_ssize_t total = 0;
- int n;
+ Py_ssize_t n;
if (self->fd < 0)
return err_closed();
@@ -555,24 +549,34 @@ fileio_readall(fileio *self)
}
if (PyBytes_GET_SIZE(result) < (Py_ssize_t)newsize) {
- if (_PyBytes_Resize(&result, newsize) < 0) {
- if (total == 0) {
- Py_DECREF(result);
- return NULL;
- }
- PyErr_Clear();
- break;
- }
+ if (_PyBytes_Resize(&result, newsize) < 0)
+ return NULL; /* result has been freed */
}
Py_BEGIN_ALLOW_THREADS
errno = 0;
+ n = newsize - total;
+#if defined(MS_WIN64) || defined(MS_WINDOWS)
+ if (n > INT_MAX)
+ n = INT_MAX;
+ n = read(self->fd,
+ PyBytes_AS_STRING(result) + total,
+ (int)n);
+#else
n = read(self->fd,
PyBytes_AS_STRING(result) + total,
- newsize - total);
+ n);
+#endif
Py_END_ALLOW_THREADS
if (n == 0)
break;
if (n < 0) {
+ if (errno == EINTR) {
+ if (PyErr_CheckSignals()) {
+ Py_DECREF(result);
+ return NULL;
+ }
+ continue;
+ }
if (total > 0)
break;
if (errno == EAGAIN) {
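fileio_readall() now retries reads that were interrupted by a signal, running any pending Python signal handlers first, and clamps each read() to INT_MAX on Windows because the CRT takes an int count. A condensed, POSIX-flavoured sketch of that inner logic (GIL release omitted; assumes Python.h, <errno.h> and <unistd.h>):

/* Read up to `want` bytes, retrying on EINTR; returns -1 with a Python
   error set on failure. */
static Py_ssize_t
read_retry_eintr(int fd, char *buf, Py_ssize_t want)
{
    Py_ssize_t n;
#if defined(MS_WIN64) || defined(MS_WINDOWS)
    if (want > INT_MAX)
        want = INT_MAX;                 /* Windows read() takes an int */
#endif
    for (;;) {
        errno = 0;
        n = read(fd, buf, (size_t)want);
        if (n >= 0)
            return n;
        if (errno != EINTR) {
            PyErr_SetFromErrno(PyExc_IOError);
            return -1;
        }
        if (PyErr_CheckSignals())
            return -1;                  /* a signal handler raised */
        /* interrupted, no exception pending: retry the read */
    }
}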
@@ -589,7 +593,6 @@ fileio_readall(fileio *self)
if (PyBytes_GET_SIZE(result) > total) {
if (_PyBytes_Resize(&result, total) < 0) {
/* This should never happen, but just in case */
- Py_DECREF(result);
return NULL;
}
}
@@ -646,10 +649,8 @@ fileio_read(fileio *self, PyObject *args)
}
if (n != size) {
- if (_PyBytes_Resize(&bytes, n) < 0) {
- Py_DECREF(bytes);
+ if (_PyBytes_Resize(&bytes, n) < 0)
return NULL;
- }
}
return (PyObject *) bytes;
@@ -885,7 +886,13 @@ fileio_truncate(fileio *self, PyObject *args)
static char *
mode_string(fileio *self)
{
- if (self->readable) {
+ if (self->appending) {
+ if (self->readable)
+ return "ab+";
+ else
+ return "ab";
+ }
+ else if (self->readable) {
if (self->writable)
return "rb+";
else
diff --git a/Modules/_io/iobase.c b/Modules/_io/iobase.c
index 176761d..ab6911d 100644
--- a/Modules/_io/iobase.c
+++ b/Modules/_io/iobase.c
@@ -41,8 +41,8 @@ PyDoc_STRVAR(iobase_doc,
"bytes. bytearrays are accepted too, and in some cases (such as\n"
"readinto) needed. Text I/O classes work with str data.\n"
"\n"
- "Note that calling any method (even inquiries) on a closed stream is\n"
- "undefined. Implementations may raise IOError in this case.\n"
+ "Note that calling any method (except additional calls to close(),\n"
+ "which are ignored) on a closed stream should raise a ValueError.\n"
"\n"
"IOBase (and its subclasses) support the iterator protocol, meaning\n"
"that an IOBase object can be iterated over yielding the lines in a\n"
@@ -74,7 +74,7 @@ iobase_unsupported(const char *message)
PyDoc_STRVAR(iobase_seek_doc,
"Change stream position.\n"
"\n"
- "Change the stream position to byte offset offset. offset is\n"
+ "Change the stream position to the given byte offset. The offset is\n"
"interpreted relative to the position indicated by whence. Values\n"
"for whence are:\n"
"\n"
@@ -437,7 +437,7 @@ PyDoc_STRVAR(iobase_readline_doc,
"\n"
"If limit is specified, at most limit bytes will be read.\n"
"\n"
- "The line terminator is always b'\n' for binary files; for text\n"
+ "The line terminator is always b'\\n' for binary files; for text\n"
"files, the newlines argument to open can be used to select the line\n"
"terminator(s) recognized.\n");
@@ -468,8 +468,14 @@ iobase_readline(PyObject *self, PyObject *args)
if (has_peek) {
PyObject *readahead = PyObject_CallMethod(self, "peek", "i", 1);
- if (readahead == NULL)
+ if (readahead == NULL) {
+ /* NOTE: PyErr_SetFromErrno() calls PyErr_CheckSignals()
+ when EINTR occurs so we needn't do it ourselves. */
+ if (_PyIO_trap_eintr()) {
+ continue;
+ }
goto fail;
+ }
if (!PyBytes_Check(readahead)) {
PyErr_Format(PyExc_IOError,
"peek() should have returned a bytes object, "
@@ -502,8 +508,14 @@ iobase_readline(PyObject *self, PyObject *args)
}
b = PyObject_CallMethod(self, "read", "n", nreadahead);
- if (b == NULL)
+ if (b == NULL) {
+ /* NOTE: PyErr_SetFromErrno() calls PyErr_CheckSignals()
+ when EINTR occurs so we needn't do it ourselves. */
+ if (_PyIO_trap_eintr()) {
+ continue;
+ }
goto fail;
+ }
if (!PyBytes_Check(b)) {
PyErr_Format(PyExc_IOError,
"read() should have returned a bytes object, "
@@ -648,7 +660,10 @@ iobase_writelines(PyObject *self, PyObject *args)
break; /* Stop Iteration */
}
- res = PyObject_CallMethodObjArgs(self, _PyIO_str_write, line, NULL);
+ res = NULL;
+ do {
+ res = PyObject_CallMethodObjArgs(self, _PyIO_str_write, line, NULL);
+ } while (res == NULL && _PyIO_trap_eintr());
Py_DECREF(line);
if (res == NULL) {
Py_DECREF(iter);
@@ -811,6 +826,11 @@ rawiobase_readall(PyObject *self, PyObject *args)
PyObject *data = PyObject_CallMethod(self, "read",
"i", DEFAULT_BUFFER_SIZE);
if (!data) {
+ /* NOTE: PyErr_SetFromErrno() calls PyErr_CheckSignals()
+ when EINTR occurs so we needn't do it ourselves. */
+ if (_PyIO_trap_eintr()) {
+ continue;
+ }
Py_DECREF(chunks);
return NULL;
}
diff --git a/Modules/_io/stringio.c b/Modules/_io/stringio.c
index e3de751..59a3905 100644
--- a/Modules/_io/stringio.c
+++ b/Modules/_io/stringio.c
@@ -632,10 +632,21 @@ stringio_init(stringio *self, PyObject *args, PyObject *kwds)
}
/* Properties and pseudo-properties */
+
+PyDoc_STRVAR(stringio_readable_doc,
+"readable() -> bool. Returns True if the IO object can be read.");
+
+PyDoc_STRVAR(stringio_writable_doc,
+"writable() -> bool. Returns True if the IO object can be written.");
+
+PyDoc_STRVAR(stringio_seekable_doc,
+"seekable() -> bool. Returns True if the IO object can be seeked.");
+
static PyObject *
stringio_seekable(stringio *self, PyObject *args)
{
CHECK_INITIALIZED(self);
+ CHECK_CLOSED(self);
Py_RETURN_TRUE;
}
@@ -643,6 +654,7 @@ static PyObject *
stringio_readable(stringio *self, PyObject *args)
{
CHECK_INITIALIZED(self);
+ CHECK_CLOSED(self);
Py_RETURN_TRUE;
}
@@ -650,6 +662,7 @@ static PyObject *
stringio_writable(stringio *self, PyObject *args)
{
CHECK_INITIALIZED(self);
+ CHECK_CLOSED(self);
Py_RETURN_TRUE;
}
@@ -817,9 +830,9 @@ static struct PyMethodDef stringio_methods[] = {
{"seek", (PyCFunction)stringio_seek, METH_VARARGS, stringio_seek_doc},
{"write", (PyCFunction)stringio_write, METH_O, stringio_write_doc},
- {"seekable", (PyCFunction)stringio_seekable, METH_NOARGS},
- {"readable", (PyCFunction)stringio_readable, METH_NOARGS},
- {"writable", (PyCFunction)stringio_writable, METH_NOARGS},
+ {"seekable", (PyCFunction)stringio_seekable, METH_NOARGS, stringio_seekable_doc},
+ {"readable", (PyCFunction)stringio_readable, METH_NOARGS, stringio_readable_doc},
+ {"writable", (PyCFunction)stringio_writable, METH_NOARGS, stringio_writable_doc},
{"__getstate__", (PyCFunction)stringio_getstate, METH_NOARGS},
{"__setstate__", (PyCFunction)stringio_setstate, METH_O},
diff --git a/Modules/_io/textio.c b/Modules/_io/textio.c
index c5d7b85..6802758 100644
--- a/Modules/_io/textio.c
+++ b/Modules/_io/textio.c
@@ -236,6 +236,21 @@ incrementalnewlinedecoder_dealloc(nldecoder_object *self)
Py_TYPE(self)->tp_free((PyObject *)self);
}
+static int
+check_decoded(PyObject *decoded)
+{
+ if (decoded == NULL)
+ return -1;
+ if (!PyUnicode_Check(decoded)) {
+ PyErr_Format(PyExc_TypeError,
+ "decoder should return a string result, not '%.200s'",
+ Py_TYPE(decoded)->tp_name);
+ Py_DECREF(decoded);
+ return -1;
+ }
+ return 0;
+}
+
#define SEEN_CR 1
#define SEEN_LF 2
#define SEEN_CRLF 4
@@ -265,15 +280,9 @@ _PyIncrementalNewlineDecoder_decode(PyObject *_self,
Py_INCREF(output);
}
- if (output == NULL)
+ if (check_decoded(output) < 0)
return NULL;
- if (!PyUnicode_Check(output)) {
- PyErr_SetString(PyExc_TypeError,
- "decoder should return a string result");
- goto error;
- }
-
output_len = PyUnicode_GET_SIZE(output);
if (self->pendingcr && (final || output_len > 0)) {
Py_UNICODE *out;
@@ -622,15 +631,22 @@ PyDoc_STRVAR(textiowrapper_doc,
"errors determines the strictness of encoding and decoding (see the\n"
"codecs.register) and defaults to \"strict\".\n"
"\n"
- "newline can be None, '', '\\n', '\\r', or '\\r\\n'. It controls the\n"
- "handling of line endings. If it is None, universal newlines is\n"
- "enabled. With this enabled, on input, the lines endings '\\n', '\\r',\n"
- "or '\\r\\n' are translated to '\\n' before being returned to the\n"
- "caller. Conversely, on output, '\\n' is translated to the system\n"
- "default line seperator, os.linesep. If newline is any other of its\n"
- "legal values, that newline becomes the newline when the file is read\n"
- "and it is returned untranslated. On output, '\\n' is converted to the\n"
- "newline.\n"
+ "newline controls how line endings are handled. It can be None, '',\n"
+ "'\\n', '\\r', and '\\r\\n'. It works as follows:\n"
+ "\n"
+ "* On input, if newline is None, universal newlines mode is\n"
+ " enabled. Lines in the input can end in '\\n', '\\r', or '\\r\\n', and\n"
+ " these are translated into '\\n' before being returned to the\n"
+ " caller. If it is '', universal newline mode is enabled, but line\n"
+ " endings are returned to the caller untranslated. If it has any of\n"
+ " the other legal values, input lines are only terminated by the given\n"
+ " string, and the line ending is returned to the caller untranslated.\n"
+ "\n"
+ "* On output, if newline is None, any '\\n' characters written are\n"
+ " translated to the system default line separator, os.linesep. If\n"
+ " newline is '', no translation takes place. If newline is any of the\n"
+ " other legal values, any '\\n' characters written are translated to\n"
+ " the given string.\n"
"\n"
"If line_buffering is True, a call to flush is implied when a call to\n"
"write contains a newline character."
@@ -1006,8 +1022,11 @@ textiowrapper_init(textio *self, PyObject *args, PyObject *kwds)
res = PyObject_CallMethod(buffer, "seekable", NULL);
if (res == NULL)
goto error;
- self->seekable = self->telling = PyObject_IsTrue(res);
+ r = PyObject_IsTrue(res);
Py_DECREF(res);
+ if (r < 0)
+ goto error;
+ self->seekable = self->telling = r;
self->encoding_start_of_stream = 0;
if (self->seekable && self->encoder) {
@@ -1203,8 +1222,11 @@ _textiowrapper_writeflush(textio *self)
Py_DECREF(pending);
if (b == NULL)
return -1;
- ret = PyObject_CallMethodObjArgs(self->buffer,
- _PyIO_str_write, b, NULL);
+ ret = NULL;
+ do {
+ ret = PyObject_CallMethodObjArgs(self->buffer,
+ _PyIO_str_write, b, NULL);
+ } while (ret == NULL && _PyIO_trap_eintr());
Py_DECREF(b);
if (ret == NULL)
return -1;
@@ -1404,7 +1426,12 @@ textiowrapper_read_chunk(textio *self)
Py_DECREF(chunk_size);
if (input_chunk == NULL)
goto fail;
- assert(PyBytes_Check(input_chunk));
+ if (!PyBytes_Check(input_chunk)) {
+ PyErr_Format(PyExc_TypeError,
+ "underlying read1() should have returned a bytes object, "
+ "not '%.200s'", Py_TYPE(input_chunk)->tp_name);
+ goto fail;
+ }
eof = (PyBytes_Size(input_chunk) == 0);
@@ -1417,8 +1444,7 @@ textiowrapper_read_chunk(textio *self)
_PyIO_str_decode, input_chunk, eof ? Py_True : Py_False, NULL);
}
- /* TODO sanity check: isinstance(decoded_chars, unicode) */
- if (decoded_chars == NULL)
+ if (check_decoded(decoded_chars) < 0)
goto fail;
textiowrapper_set_decoded_chars(self, decoded_chars);
if (PyUnicode_GET_SIZE(decoded_chars) > 0)
@@ -1431,7 +1457,14 @@ textiowrapper_read_chunk(textio *self)
PyObject *next_input = PyNumber_Add(dec_buffer, input_chunk);
if (next_input == NULL)
goto fail;
- assert (PyBytes_Check(next_input));
+ if (!PyBytes_Check(next_input)) {
+ PyErr_Format(PyExc_TypeError,
+ "decoder getstate() should have returned a bytes "
+ "object, not '%.200s'",
+ Py_TYPE(next_input)->tp_name);
+ Py_DECREF(next_input);
+ goto fail;
+ }
Py_DECREF(dec_buffer);
Py_CLEAR(self->snapshot);
self->snapshot = Py_BuildValue("NN", dec_flags, next_input);
@@ -1477,7 +1510,7 @@ textiowrapper_read(textio *self, PyObject *args)
decoded = PyObject_CallMethodObjArgs(self->decoder, _PyIO_str_decode,
bytes, Py_True, NULL);
Py_DECREF(bytes);
- if (decoded == NULL)
+ if (check_decoded(decoded) < 0)
goto fail;
result = textiowrapper_get_decoded_chars(self, -1);
@@ -1508,8 +1541,14 @@ textiowrapper_read(textio *self, PyObject *args)
/* Keep reading chunks until we have n characters to return */
while (remaining > 0) {
res = textiowrapper_read_chunk(self);
- if (res < 0)
+ if (res < 0) {
+ /* NOTE: PyErr_SetFromErrno() calls PyErr_CheckSignals()
+ when EINTR occurs so we needn't do it ourselves. */
+ if (_PyIO_trap_eintr()) {
+ continue;
+ }
goto fail;
+ }
if (res == 0) /* EOF */
break;
if (chunks == NULL) {
@@ -1668,8 +1707,14 @@ _textiowrapper_readline(textio *self, Py_ssize_t limit)
while (!self->decoded_chars ||
!PyUnicode_GET_SIZE(self->decoded_chars)) {
res = textiowrapper_read_chunk(self);
- if (res < 0)
+ if (res < 0) {
+ /* NOTE: PyErr_SetFromErrno() calls PyErr_CheckSignals()
+ when EINTR occurs so we needn't do it ourselves. */
+ if (_PyIO_trap_eintr()) {
+ continue;
+ }
goto error;
+ }
if (res == 0)
break;
}
@@ -2085,7 +2130,14 @@ textiowrapper_seek(textio *self, PyObject *args)
if (input_chunk == NULL)
goto fail;
- assert (PyBytes_Check(input_chunk));
+ if (!PyBytes_Check(input_chunk)) {
+ PyErr_Format(PyExc_TypeError,
+ "underlying read() should have returned a bytes "
+ "object, not '%.200s'",
+ Py_TYPE(input_chunk)->tp_name);
+ Py_DECREF(input_chunk);
+ goto fail;
+ }
self->snapshot = Py_BuildValue("iN", cookie.dec_flags, input_chunk);
if (self->snapshot == NULL) {
@@ -2096,7 +2148,7 @@ textiowrapper_seek(textio *self, PyObject *args)
decoded = PyObject_CallMethod(self->decoder, "decode",
"Oi", input_chunk, (int)cookie.need_eof);
- if (decoded == NULL)
+ if (check_decoded(decoded) < 0)
goto fail;
textiowrapper_set_decoded_chars(self, decoded);
@@ -2219,10 +2271,9 @@ textiowrapper_tell(textio *self, PyObject *args)
int dec_flags;
PyObject *decoded = PyObject_CallMethod(
- self->decoder, "decode", "s#", input, 1);
- if (decoded == NULL)
+ self->decoder, "decode", "s#", input, (Py_ssize_t)1);
+ if (check_decoded(decoded) < 0)
goto fail;
- assert (PyUnicode_Check(decoded));
chars_decoded += PyUnicode_GET_SIZE(decoded);
Py_DECREF(decoded);
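The (Py_ssize_t) cast above matters because the _io sources are compiled with PY_SSIZE_T_CLEAN, so the "#" length in an "s#" format is read from the varargs as a Py_ssize_t; a bare int literal has the wrong width on 64-bit platforms. Illustration (hypothetical helper):

#define PY_SSIZE_T_CLEAN
#include "Python.h"

static PyObject *
decode_one_byte(PyObject *decoder, const char *input)
{
    /* correct: the "#" length is passed as Py_ssize_t, not int */
    return PyObject_CallMethod(decoder, "decode", "s#",
                               input, (Py_ssize_t)1);
}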
@@ -2254,9 +2305,8 @@ textiowrapper_tell(textio *self, PyObject *args)
/* We didn't get enough decoded data; signal EOF to get more. */
PyObject *decoded = PyObject_CallMethod(
self->decoder, "decode", "si", "", /* final = */ 1);
- if (decoded == NULL)
+ if (check_decoded(decoded) < 0)
goto fail;
- assert (PyUnicode_Check(decoded));
chars_decoded += PyUnicode_GET_SIZE(decoded);
Py_DECREF(decoded);
cookie.need_eof = 1;
@@ -2415,19 +2465,31 @@ textiowrapper_close(textio *self, PyObject *args)
Py_DECREF(res);
if (r < 0)
return NULL;
-
+
if (r > 0) {
Py_RETURN_NONE; /* stream already closed */
}
else {
+ PyObject *exc = NULL, *val, *tb;
res = PyObject_CallMethod((PyObject *)self, "flush", NULL);
- if (res == NULL) {
- return NULL;
- }
+ if (res == NULL)
+ PyErr_Fetch(&exc, &val, &tb);
else
Py_DECREF(res);
- return PyObject_CallMethod(self->buffer, "close", NULL);
+ res = PyObject_CallMethod(self->buffer, "close", NULL);
+ if (exc != NULL) {
+ if (res != NULL) {
+ Py_CLEAR(res);
+ PyErr_Restore(exc, val, tb);
+ }
+ else {
+ Py_DECREF(exc);
+ Py_XDECREF(val);
+ Py_XDECREF(tb);
+ }
+ }
+ return res;
}
}
diff --git a/Modules/_json.c b/Modules/_json.c
index 434b83c..c913409 100644
--- a/Modules/_json.c
+++ b/Modules/_json.c
@@ -524,16 +524,10 @@ scanstring_str(PyObject *pystr, Py_ssize_t end, char *encoding, int strict, Py_s
}
#ifdef Py_UNICODE_WIDE
/* Surrogate pair */
- if ((c & 0xfc00) == 0xd800) {
+ if ((c & 0xfc00) == 0xd800 && end + 6 < len &&
+ buf[next++] == '\\' &&
+ buf[next++] == 'u') {
Py_UNICODE c2 = 0;
- if (end + 6 >= len) {
- raise_errmsg("Unpaired high surrogate", pystr, end - 5);
- goto bail;
- }
- if (buf[next++] != '\\' || buf[next++] != 'u') {
- raise_errmsg("Unpaired high surrogate", pystr, end - 5);
- goto bail;
- }
end += 6;
/* Decode 4 hex digits */
for (; next < end; next++) {
@@ -554,15 +548,10 @@ scanstring_str(PyObject *pystr, Py_ssize_t end, char *encoding, int strict, Py_s
goto bail;
}
}
- if ((c2 & 0xfc00) != 0xdc00) {
- raise_errmsg("Unpaired high surrogate", pystr, end - 5);
- goto bail;
- }
- c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00));
- }
- else if ((c & 0xfc00) == 0xdc00) {
- raise_errmsg("Unpaired low surrogate", pystr, end - 5);
- goto bail;
+ if ((c2 & 0xfc00) == 0xdc00)
+ c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00));
+ else
+ end -= 6;
}
#endif
}
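The scanner is now lenient about lone surrogates: a high surrogate is only combined with the following \uXXXX escape when that escape decodes to a low surrogate, otherwise the escape is re-scanned separately instead of raising. The combination is the usual UTF-16 formula:

/* Combine a UTF-16 surrogate pair into a code point.
   hi must be in [0xD800, 0xDBFF] and lo in [0xDC00, 0xDFFF]. */
static unsigned long
combine_surrogates(unsigned int hi, unsigned int lo)
{
    return 0x10000UL + (((unsigned long)(hi - 0xD800) << 10) | (lo - 0xDC00));
}
/* e.g. combine_surrogates(0xD83D, 0xDE00) == 0x1F600 */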
@@ -703,16 +692,9 @@ scanstring_unicode(PyObject *pystr, Py_ssize_t end, int strict, Py_ssize_t *next
}
#ifdef Py_UNICODE_WIDE
/* Surrogate pair */
- if ((c & 0xfc00) == 0xd800) {
+ if ((c & 0xfc00) == 0xd800 && end + 6 < len &&
+ buf[next++] == '\\' && buf[next++] == 'u') {
Py_UNICODE c2 = 0;
- if (end + 6 >= len) {
- raise_errmsg("Unpaired high surrogate", pystr, end - 5);
- goto bail;
- }
- if (buf[next++] != '\\' || buf[next++] != 'u') {
- raise_errmsg("Unpaired high surrogate", pystr, end - 5);
- goto bail;
- }
end += 6;
/* Decode 4 hex digits */
for (; next < end; next++) {
@@ -733,15 +715,10 @@ scanstring_unicode(PyObject *pystr, Py_ssize_t end, int strict, Py_ssize_t *next
goto bail;
}
}
- if ((c2 & 0xfc00) != 0xdc00) {
- raise_errmsg("Unpaired high surrogate", pystr, end - 5);
- goto bail;
- }
- c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00));
- }
- else if ((c & 0xfc00) == 0xdc00) {
- raise_errmsg("Unpaired low surrogate", pystr, end - 5);
- goto bail;
+ if ((c2 & 0xfc00) == 0xdc00)
+ c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00));
+ else
+ end -= 6;
}
#endif
}
@@ -1032,7 +1009,7 @@ _parse_object_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ss
while (idx <= end_idx) {
/* read key */
if (str[idx] != '"') {
- raise_errmsg("Expecting property name", pystr, idx);
+ raise_errmsg("Expecting property name enclosed in double quotes", pystr, idx);
goto bail;
}
key = scanstring_unicode(pystr, idx + 1, strict, &next_idx);
@@ -1043,7 +1020,7 @@ _parse_object_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ss
/* skip whitespace between key and : delimiter, read :, skip whitespace */
while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
if (idx > end_idx || str[idx] != ':') {
- raise_errmsg("Expecting : delimiter", pystr, idx);
+ raise_errmsg("Expecting ':' delimiter", pystr, idx);
goto bail;
}
idx++;
@@ -1075,7 +1052,7 @@ _parse_object_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ss
break;
}
else if (str[idx] != ',') {
- raise_errmsg("Expecting , delimiter", pystr, idx);
+ raise_errmsg("Expecting ',' delimiter", pystr, idx);
goto bail;
}
idx++;
@@ -1236,7 +1213,7 @@ _parse_array_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssi
break;
}
else if (str[idx] != ',') {
- raise_errmsg("Expecting , delimiter", pystr, idx);
+ raise_errmsg("Expecting ',' delimiter", pystr, idx);
goto bail;
}
idx++;
@@ -1491,6 +1468,10 @@ scan_once_str(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *n
PyObject *res;
char *str = PyString_AS_STRING(pystr);
Py_ssize_t length = PyString_GET_SIZE(pystr);
+ if (idx < 0) {
+ PyErr_SetString(PyExc_ValueError, "idx cannot be negative");
+ return NULL;
+ }
if (idx >= length) {
PyErr_SetNone(PyExc_StopIteration);
return NULL;
@@ -1578,6 +1559,10 @@ scan_once_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_
PyObject *res;
Py_UNICODE *str = PyUnicode_AS_UNICODE(pystr);
Py_ssize_t length = PyUnicode_GET_SIZE(pystr);
+ if (idx < 0) {
+ PyErr_SetString(PyExc_ValueError, "idx cannot be negative");
+ return NULL;
+ }
if (idx >= length) {
PyErr_SetNone(PyExc_StopIteration);
return NULL;
@@ -2224,8 +2209,6 @@ encoder_listencode_list(PyEncoderObject *s, PyObject *rval, PyObject *seq, Py_ss
static PyObject *empty_array = NULL;
PyObject *ident = NULL;
PyObject *s_fast = NULL;
- Py_ssize_t num_items;
- PyObject **seq_items;
Py_ssize_t i;
if (open_array == NULL || close_array == NULL || empty_array == NULL) {
@@ -2239,8 +2222,7 @@ encoder_listencode_list(PyEncoderObject *s, PyObject *rval, PyObject *seq, Py_ss
s_fast = PySequence_Fast(seq, "_iterencode_list needs a sequence");
if (s_fast == NULL)
return -1;
- num_items = PySequence_Fast_GET_SIZE(s_fast);
- if (num_items == 0) {
+ if (PySequence_Fast_GET_SIZE(s_fast) == 0) {
Py_DECREF(s_fast);
return PyList_Append(rval, empty_array);
}
@@ -2261,7 +2243,6 @@ encoder_listencode_list(PyEncoderObject *s, PyObject *rval, PyObject *seq, Py_ss
}
}
- seq_items = PySequence_Fast_ITEMS(s_fast);
if (PyList_Append(rval, open_array))
goto bail;
if (s->indent != Py_None) {
@@ -2273,8 +2254,8 @@ encoder_listencode_list(PyEncoderObject *s, PyObject *rval, PyObject *seq, Py_ss
buf += newline_indent
*/
}
- for (i = 0; i < num_items; i++) {
- PyObject *obj = seq_items[i];
+ for (i = 0; i < PySequence_Fast_GET_SIZE(s_fast); i++) {
+ PyObject *obj = PySequence_Fast_GET_ITEM(s_fast, i);
if (i) {
if (PyList_Append(rval, s->item_separator))
goto bail;
diff --git a/Modules/_math.c b/Modules/_math.c
index 2fef481..fe75a36 100644
--- a/Modules/_math.c
+++ b/Modules/_math.c
@@ -189,6 +189,27 @@ _Py_expm1(double x)
significant loss of precision that arises from direct evaluation when x is
small. */
+#ifdef HAVE_LOG1P
+
+double
+_Py_log1p(double x)
+{
+ /* Some platforms supply a log1p function but don't respect the sign of
+ zero: log1p(-0.0) gives 0.0 instead of the correct result of -0.0.
+
+ To save fiddling with configure tests and platform checks, we handle the
+ special case of zero input directly on all platforms.
+ */
+ if (x == 0.0) {
+ return x;
+ }
+ else {
+ return log1p(x);
+ }
+}
+
+#else
+
double
_Py_log1p(double x)
{
@@ -230,3 +251,5 @@ _Py_log1p(double x)
return log(1.+x);
}
}
+
+#endif /* ifdef HAVE_LOG1P */
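The substitute is now used even when the platform has log1p(), both to keep the sign of a zero argument and to retain log1p's accuracy for tiny x, where evaluating log(1+x) directly loses nearly all significant digits. A standalone demonstration (compile with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = 1e-17;
    printf("log(1+x)    = %.17g\n", log(1.0 + x)); /* 0: 1+x rounds to 1.0 */
    printf("log1p(x)    = %.17g\n", log1p(x));     /* ~1e-17, correct */
    printf("log1p(-0.0) = %g\n", log1p(-0.0));     /* should print -0;
                                                      buggy platforms print 0 */
    return 0;
}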
diff --git a/Modules/_math.h b/Modules/_math.h
index c0ceece..cf079ad 100644
--- a/Modules/_math.h
+++ b/Modules/_math.h
@@ -36,10 +36,6 @@ double _Py_log1p(double x);
#define m_expm1 _Py_expm1
#endif
-#ifdef HAVE_LOG1P
-#define m_log1p log1p
-#else
-/* if the system doesn't have log1p, use the substitute
- function defined in Modules/_math.c. */
+/* Use the substitute from _math.c on all platforms:
+ it includes workarounds for buggy handling of zeros. */
#define m_log1p _Py_log1p
-#endif
diff --git a/Modules/_multiprocessing/multiprocessing.c b/Modules/_multiprocessing/multiprocessing.c
index c831637..d192a07 100644
--- a/Modules/_multiprocessing/multiprocessing.c
+++ b/Modules/_multiprocessing/multiprocessing.c
@@ -63,7 +63,7 @@ mp_SetError(PyObject *Type, int num)
break;
default:
PyErr_Format(PyExc_RuntimeError,
- "unkown error number %d", num);
+ "unknown error number %d", num);
}
return NULL;
}
diff --git a/Modules/_multiprocessing/semaphore.c b/Modules/_multiprocessing/semaphore.c
index 5bda1ce..b9e8f34 100644
--- a/Modules/_multiprocessing/semaphore.c
+++ b/Modules/_multiprocessing/semaphore.c
@@ -197,6 +197,13 @@ semlock_release(SemLockObject *self, PyObject *args)
#define SEM_GETVALUE(sem, pval) sem_getvalue(sem, pval)
#define SEM_UNLINK(name) sem_unlink(name)
+/* OS X 10.4 defines SEM_FAILED as -1 instead of (sem_t *)-1; this gives
+ compiler warnings, and (potentially) undefined behaviour. */
+#ifdef __APPLE__
+# undef SEM_FAILED
+# define SEM_FAILED ((sem_t *)-1)
+#endif
+
#ifndef HAVE_SEM_UNLINK
# define sem_unlink(name) 0
#endif
diff --git a/Modules/_multiprocessing/socket_connection.c b/Modules/_multiprocessing/socket_connection.c
index 7ebf338..bdb0a32 100644
--- a/Modules/_multiprocessing/socket_connection.c
+++ b/Modules/_multiprocessing/socket_connection.c
@@ -8,6 +8,10 @@
#include "multiprocessing.h"
+#if defined(HAVE_POLL) && !defined(HAVE_BROKEN_POLL)
+# include "poll.h"
+#endif
+
#ifdef MS_WINDOWS
# define WRITE(h, buffer, length) send((SOCKET)h, buffer, length, 0)
# define READ(h, buffer, length) recv((SOCKET)h, buffer, length, 0)
@@ -19,6 +23,21 @@
#endif
/*
+ * Wrapper for PyErr_CheckSignals() which can be called without the GIL
+ */
+
+static int
+check_signals(void)
+{
+ PyGILState_STATE state;
+ int res;
+ state = PyGILState_Ensure();
+ res = PyErr_CheckSignals();
+ PyGILState_Release(state);
+ return res;
+}
+
+/*
* Send string to file descriptor
*/
@@ -30,8 +49,14 @@ _conn_sendall(HANDLE h, char *string, size_t length)
while (length > 0) {
res = WRITE(h, p, length);
- if (res < 0)
+ if (res < 0) {
+ if (errno == EINTR) {
+ if (check_signals() < 0)
+ return MP_EXCEPTION_HAS_BEEN_SET;
+ continue;
+ }
return MP_SOCKET_ERROR;
+ }
length -= res;
p += res;
}
@@ -52,12 +77,16 @@ _conn_recvall(HANDLE h, char *buffer, size_t length)
while (remaining > 0) {
temp = READ(h, p, remaining);
- if (temp <= 0) {
- if (temp == 0)
- return remaining == length ?
- MP_END_OF_FILE : MP_EARLY_END_OF_FILE;
- else
- return temp;
+ if (temp < 0) {
+ if (errno == EINTR) {
+ if (check_signals() < 0)
+ return MP_EXCEPTION_HAS_BEEN_SET;
+ continue;
+ }
+ return temp;
+ }
+ else if (temp == 0) {
+ return remaining == length ? MP_END_OF_FILE : MP_EARLY_END_OF_FILE;
}
remaining -= temp;
p += temp;
@@ -117,7 +146,7 @@ static Py_ssize_t
conn_recv_string(ConnectionObject *conn, char *buffer,
size_t buflength, char **newbuffer, size_t maxlength)
{
- int res;
+ Py_ssize_t res;
UINT32 ulength;
*newbuffer = NULL;
@@ -132,20 +161,23 @@ conn_recv_string(ConnectionObject *conn, char *buffer,
if (ulength > maxlength)
return MP_BAD_MESSAGE_LENGTH;
- if (ulength <= buflength) {
- Py_BEGIN_ALLOW_THREADS
- res = _conn_recvall(conn->handle, buffer, (size_t)ulength);
- Py_END_ALLOW_THREADS
- return res < 0 ? res : ulength;
- } else {
- *newbuffer = PyMem_Malloc((size_t)ulength);
- if (*newbuffer == NULL)
+ if (ulength > buflength) {
+ *newbuffer = buffer = PyMem_Malloc((size_t)ulength);
+ if (buffer == NULL)
return MP_MEMORY_ERROR;
- Py_BEGIN_ALLOW_THREADS
- res = _conn_recvall(conn->handle, *newbuffer, (size_t)ulength);
- Py_END_ALLOW_THREADS
- return res < 0 ? (Py_ssize_t)res : (Py_ssize_t)ulength;
}
+
+ Py_BEGIN_ALLOW_THREADS
+ res = _conn_recvall(conn->handle, buffer, (size_t)ulength);
+ Py_END_ALLOW_THREADS
+
+ if (res >= 0) {
+ res = (Py_ssize_t)ulength;
+ } else if (*newbuffer != NULL) {
+ PyMem_Free(*newbuffer);
+ *newbuffer = NULL;
+ }
+ return res;
}
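After this rework conn_recv_string() always reads into caller-provided storage: when the message does not fit into `buffer` it allocates a larger block, hands it back through *newbuffer (freeing it again on failure), and the caller owns it on success. A hypothetical caller, following that contract:

char stackbuf[256];
char *newbuffer = NULL;
Py_ssize_t res;

res = conn_recv_string(conn, stackbuf, sizeof(stackbuf),
                       &newbuffer, /* maxlength */ 1024 * 1024);
if (res >= 0) {
    char *data = (newbuffer != NULL) ? newbuffer : stackbuf;
    /* ... build a Python string from data[0..res) ... */
    PyMem_Free(newbuffer);          /* PyMem_Free(NULL) is a no-op */
}
/* res < 0: one of the MP_* error codes; nothing to free */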
/*
@@ -155,6 +187,41 @@ conn_recv_string(ConnectionObject *conn, char *buffer,
static int
conn_poll(ConnectionObject *conn, double timeout, PyThreadState *_save)
{
+#if defined(HAVE_POLL) && !defined(HAVE_BROKEN_POLL)
+ int res;
+ struct pollfd p;
+
+ p.fd = (int)conn->handle;
+ p.events = POLLIN | POLLPRI;
+ p.revents = 0;
+
+ if (timeout < 0) {
+ do {
+ res = poll(&p, 1, -1);
+ } while (res < 0 && errno == EINTR);
+ } else {
+ res = poll(&p, 1, (int)(timeout * 1000 + 0.5));
+ if (res < 0 && errno == EINTR) {
+ /* We were interrupted by a signal. Just indicate a
+ timeout even though we are early. */
+ return FALSE;
+ }
+ }
+
+ if (res < 0) {
+ return MP_SOCKET_ERROR;
+ } else if (p.revents & (POLLNVAL|POLLERR)) {
+ Py_BLOCK_THREADS
+ PyErr_SetString(PyExc_IOError, "poll() gave POLLNVAL or POLLERR");
+ Py_UNBLOCK_THREADS
+ return MP_EXCEPTION_HAS_BEEN_SET;
+ } else if (p.revents != 0) {
+ return TRUE;
+ } else {
+ assert(res == 0);
+ return FALSE;
+ }
+#else
int res;
fd_set rfds;
@@ -174,12 +241,19 @@ conn_poll(ConnectionObject *conn, double timeout, PyThreadState *_save)
FD_SET((SOCKET)conn->handle, &rfds);
if (timeout < 0.0) {
- res = select((int)conn->handle+1, &rfds, NULL, NULL, NULL);
+ do {
+ res = select((int)conn->handle+1, &rfds, NULL, NULL, NULL);
+ } while (res < 0 && errno == EINTR);
} else {
struct timeval tv;
tv.tv_sec = (long)timeout;
tv.tv_usec = (long)((timeout - tv.tv_sec) * 1e6 + 0.5);
res = select((int)conn->handle+1, &rfds, NULL, NULL, &tv);
+ if (res < 0 && errno == EINTR) {
+ /* We were interrupted by a signal. Just indicate a
+ timeout even though we are early. */
+ return FALSE;
+ }
}
if (res < 0) {
@@ -190,6 +264,7 @@ conn_poll(ConnectionObject *conn, double timeout, PyThreadState *_save)
assert(res == 0);
return FALSE;
}
+#endif
}
/*
diff --git a/Modules/_multiprocessing/win32_functions.c b/Modules/_multiprocessing/win32_functions.c
index 1666aa9..9425929 100644
--- a/Modules/_multiprocessing/win32_functions.c
+++ b/Modules/_multiprocessing/win32_functions.c
@@ -244,6 +244,7 @@ create_win32_namespace(void)
Py_INCREF(&Win32Type);
WIN32_CONSTANT(F_DWORD, ERROR_ALREADY_EXISTS);
+ WIN32_CONSTANT(F_DWORD, ERROR_NO_DATA);
WIN32_CONSTANT(F_DWORD, ERROR_PIPE_BUSY);
WIN32_CONSTANT(F_DWORD, ERROR_PIPE_CONNECTED);
WIN32_CONSTANT(F_DWORD, ERROR_SEM_TIMEOUT);
diff --git a/Modules/_randommodule.c b/Modules/_randommodule.c
index 08c2277..8bb9e37 100644
--- a/Modules/_randommodule.c
+++ b/Modules/_randommodule.c
@@ -400,7 +400,7 @@ random_jumpahead(RandomObject *self, PyObject *n)
long i, j;
PyObject *iobj;
PyObject *remobj;
- unsigned long *mt, tmp;
+ unsigned long *mt, tmp, nonzero;
if (!PyInt_Check(n) && !PyLong_Check(n)) {
PyErr_Format(PyExc_TypeError, "jumpahead requires an "
@@ -427,8 +427,23 @@ random_jumpahead(RandomObject *self, PyObject *n)
mt[j] = tmp;
}
- for (i = 0; i < N; i++)
+ nonzero = 0;
+ for (i = 1; i < N; i++) {
mt[i] += i+1;
+ mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
+ nonzero |= mt[i];
+ }
+
+ /* Ensure the state is nonzero: in the unlikely event that mt[1] through
+ mt[N-1] are all zero, set the MSB of mt[0] (see issue #14591). In the
+ normal case, we fall back to the pre-issue 14591 behaviour for mt[0]. */
+ if (nonzero) {
+ mt[0] += 1;
+ mt[0] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
+ }
+ else {
+ mt[0] = 0x80000000UL;
+ }
self->index = N;
Py_INCREF(Py_None);
diff --git a/Modules/_sqlite/connection.c b/Modules/_sqlite/connection.c
index 26678c7..7a8a5a1 100644
--- a/Modules/_sqlite/connection.c
+++ b/Modules/_sqlite/connection.c
@@ -152,7 +152,10 @@ int pysqlite_connection_init(pysqlite_Connection* self, PyObject* args, PyObject
Py_INCREF(isolation_level);
}
self->isolation_level = NULL;
- pysqlite_connection_set_isolation_level(self, isolation_level);
+ if (pysqlite_connection_set_isolation_level(self, isolation_level) < 0) {
+ Py_DECREF(isolation_level);
+ return -1;
+ }
Py_DECREF(isolation_level);
self->statement_cache = (pysqlite_Cache*)PyObject_CallFunction((PyObject*)&pysqlite_CacheType, "Oi", self, cached_statements);
@@ -366,8 +369,7 @@ PyObject* pysqlite_connection_close(pysqlite_Connection* self, PyObject* args)
if (self->apsw_connection) {
ret = PyObject_CallMethod(self->apsw_connection, "close", "");
Py_XDECREF(ret);
- Py_XDECREF(self->apsw_connection);
- self->apsw_connection = NULL;
+ Py_CLEAR(self->apsw_connection);
self->db = NULL;
} else {
Py_BEGIN_ALLOW_THREADS
@@ -538,39 +540,40 @@ error:
}
}
-void _pysqlite_set_result(sqlite3_context* context, PyObject* py_val)
+static int
+_pysqlite_set_result(sqlite3_context* context, PyObject* py_val)
{
- const char* buffer;
- Py_ssize_t buflen;
- PyObject* stringval;
-
- if ((!py_val) || PyErr_Occurred()) {
- sqlite3_result_null(context);
- } else if (py_val == Py_None) {
+ if (py_val == Py_None) {
sqlite3_result_null(context);
} else if (PyInt_Check(py_val)) {
- sqlite3_result_int64(context, (sqlite3_int64)PyInt_AsLong(py_val));
+ sqlite3_result_int64(context, (sqlite_int64)PyInt_AsLong(py_val));
} else if (PyLong_Check(py_val)) {
- sqlite3_result_int64(context, PyLong_AsLongLong(py_val));
+ sqlite_int64 value = _pysqlite_long_as_int64(py_val);
+ if (value == -1 && PyErr_Occurred())
+ return -1;
+ sqlite3_result_int64(context, value);
} else if (PyFloat_Check(py_val)) {
sqlite3_result_double(context, PyFloat_AsDouble(py_val));
} else if (PyBuffer_Check(py_val)) {
+ const char* buffer;
+ Py_ssize_t buflen;
if (PyObject_AsCharBuffer(py_val, &buffer, &buflen) != 0) {
PyErr_SetString(PyExc_ValueError, "could not convert BLOB to buffer");
- } else {
- sqlite3_result_blob(context, buffer, buflen, SQLITE_TRANSIENT);
+ return -1;
}
+ sqlite3_result_blob(context, buffer, buflen, SQLITE_TRANSIENT);
} else if (PyString_Check(py_val)) {
sqlite3_result_text(context, PyString_AsString(py_val), -1, SQLITE_TRANSIENT);
} else if (PyUnicode_Check(py_val)) {
- stringval = PyUnicode_AsUTF8String(py_val);
- if (stringval) {
- sqlite3_result_text(context, PyString_AsString(stringval), -1, SQLITE_TRANSIENT);
- Py_DECREF(stringval);
- }
+ PyObject * stringval = PyUnicode_AsUTF8String(py_val);
+ if (!stringval)
+ return -1;
+ sqlite3_result_text(context, PyString_AsString(stringval), -1, SQLITE_TRANSIENT);
+ Py_DECREF(stringval);
} else {
- /* TODO: raise error */
+ return -1;
}
+ return 0;
}
PyObject* _pysqlite_build_py_params(sqlite3_context *context, int argc, sqlite3_value** argv)
@@ -580,7 +583,6 @@ PyObject* _pysqlite_build_py_params(sqlite3_context *context, int argc, sqlite3_
sqlite3_value* cur_value;
PyObject* cur_py_value;
const char* val_str;
- sqlite3_int64 val_int;
Py_ssize_t buflen;
void* raw_buffer;
@@ -593,11 +595,7 @@ PyObject* _pysqlite_build_py_params(sqlite3_context *context, int argc, sqlite3_
cur_value = argv[i];
switch (sqlite3_value_type(argv[i])) {
case SQLITE_INTEGER:
- val_int = sqlite3_value_int64(cur_value);
- if(val_int < LONG_MIN || val_int > LONG_MAX)
- cur_py_value = PyLong_FromLongLong(val_int);
- else
- cur_py_value = PyInt_FromLong((long)val_int);
+ cur_py_value = _pysqlite_long_from_int64(sqlite3_value_int64(cur_value));
break;
case SQLITE_FLOAT:
cur_py_value = PyFloat_FromDouble(sqlite3_value_double(cur_value));
@@ -648,6 +646,7 @@ void _pysqlite_func_callback(sqlite3_context* context, int argc, sqlite3_value**
PyObject* args;
PyObject* py_func;
PyObject* py_retval = NULL;
+ int ok;
#ifdef WITH_THREAD
PyGILState_STATE threadstate;
@@ -663,10 +662,12 @@ void _pysqlite_func_callback(sqlite3_context* context, int argc, sqlite3_value**
Py_DECREF(args);
}
+ ok = 0;
if (py_retval) {
- _pysqlite_set_result(context, py_retval);
+ ok = _pysqlite_set_result(context, py_retval) == 0;
Py_DECREF(py_retval);
- } else {
+ }
+ if (!ok) {
if (_enable_callback_tracebacks) {
PyErr_Print();
} else {
@@ -746,8 +747,9 @@ error:
void _pysqlite_final_callback(sqlite3_context* context)
{
- PyObject* function_result = NULL;
+ PyObject* function_result;
PyObject** aggregate_instance;
+ int ok;
#ifdef WITH_THREAD
PyGILState_STATE threadstate;
@@ -764,21 +766,23 @@ void _pysqlite_final_callback(sqlite3_context* context)
}
function_result = PyObject_CallMethod(*aggregate_instance, "finalize", "");
- if (!function_result) {
+ Py_DECREF(*aggregate_instance);
+
+ ok = 0;
+ if (function_result) {
+ ok = _pysqlite_set_result(context, function_result) == 0;
+ Py_DECREF(function_result);
+ }
+ if (!ok) {
if (_enable_callback_tracebacks) {
PyErr_Print();
} else {
PyErr_Clear();
}
_sqlite3_result_error(context, "user-defined aggregate's 'finalize' method raised error", -1);
- } else {
- _pysqlite_set_result(context, function_result);
}
error:
- Py_XDECREF(*aggregate_instance);
- Py_XDECREF(function_result);
-
#ifdef WITH_THREAD
PyGILState_Release(threadstate);
#endif
@@ -935,7 +939,9 @@ static int _authorizer_callback(void* user_arg, int action, const char* arg1, co
rc = SQLITE_DENY;
} else {
if (PyInt_Check(ret)) {
- rc = (int)PyInt_AsLong(ret);
+ rc = _PyInt_AsInt(ret);
+ if (rc == -1 && PyErr_Occurred())
+ rc = SQLITE_DENY;
} else {
rc = SQLITE_DENY;
}
@@ -967,7 +973,7 @@ static int _progress_handler(void* user_arg)
}
/* abort query if error occurred */
- rc = 1;
+ rc = 1;
} else {
rc = (int)PyObject_IsTrue(ret);
Py_DECREF(ret);
@@ -1337,6 +1343,7 @@ pysqlite_collation_callback(
PyGILState_STATE gilstate;
#endif
PyObject* retval = NULL;
+ long longval;
int result = 0;
#ifdef WITH_THREAD
gilstate = PyGILState_Ensure();
@@ -1360,10 +1367,17 @@ pysqlite_collation_callback(
goto finally;
}
- result = PyInt_AsLong(retval);
- if (PyErr_Occurred()) {
+ longval = PyLong_AsLongAndOverflow(retval, &result);
+ if (longval == -1 && PyErr_Occurred()) {
+ PyErr_Clear();
result = 0;
}
+ else if (!result) {
+ if (longval > 0)
+ result = 1;
+ else if (longval < 0)
+ result = -1;
+ }
finally:
Py_XDECREF(string1);
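The collation callback now goes through PyLong_AsLongAndOverflow() and collapses the result to -1/0/1, so values outside the range of a C long still select the right ordering instead of being truncated. The conversion idiom, as a stand-alone helper (name illustrative):

/* Turn an arbitrary Python integer into a -1/0/1 comparison result,
   treating overflow as "already has the right sign". */
static int
normalize_compare_result(PyObject *retval)
{
    int overflow;
    long v = PyLong_AsLongAndOverflow(retval, &overflow);

    if (v == -1 && PyErr_Occurred()) {
        PyErr_Clear();                  /* not an integer: treat as equal */
        return 0;
    }
    if (overflow)
        return overflow;                /* +1 or -1, matching the sign */
    return (v > 0) - (v < 0);
}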
diff --git a/Modules/_sqlite/cursor.c b/Modules/_sqlite/cursor.c
index e1cd536..3b84484 100644
--- a/Modules/_sqlite/cursor.c
+++ b/Modules/_sqlite/cursor.c
@@ -26,14 +26,6 @@
#include "util.h"
#include "sqlitecompat.h"
-/* used to decide wether to call PyInt_FromLong or PyLong_FromLongLong */
-#ifndef INT32_MIN
-#define INT32_MIN (-2147483647 - 1)
-#endif
-#ifndef INT32_MAX
-#define INT32_MAX 2147483647
-#endif
-
PyObject* pysqlite_cursor_iternext(pysqlite_Cursor* self);
static char* errmsg_fetch_across_rollback = "Cursor needed to be reset because of commit/rollback and can no longer be fetched from.";
@@ -239,8 +231,7 @@ int pysqlite_build_row_cast_map(pysqlite_Cursor* self)
if (converter != Py_None) {
Py_DECREF(converter);
}
- Py_XDECREF(self->row_cast_map);
- self->row_cast_map = NULL;
+ Py_CLEAR(self->row_cast_map);
return -1;
}
@@ -307,7 +298,6 @@ PyObject* _pysqlite_fetch_one_row(pysqlite_Cursor* self)
PyObject* row;
PyObject* item = NULL;
int coltype;
- PY_LONG_LONG intval;
PyObject* converter;
PyObject* converted;
Py_ssize_t nbytes;
@@ -366,12 +356,7 @@ PyObject* _pysqlite_fetch_one_row(pysqlite_Cursor* self)
Py_INCREF(Py_None);
converted = Py_None;
} else if (coltype == SQLITE_INTEGER) {
- intval = sqlite3_column_int64(self->statement->st, i);
- if (intval < INT32_MIN || intval > INT32_MAX) {
- converted = PyLong_FromLongLong(intval);
- } else {
- converted = PyInt_FromLong((long)intval);
- }
+ converted = _pysqlite_long_from_int64(sqlite3_column_int64(self->statement->st, i));
} else if (coltype == SQLITE_FLOAT) {
converted = PyFloat_FromDouble(sqlite3_column_double(self->statement->st, i));
} else if (coltype == SQLITE_TEXT) {
@@ -466,7 +451,6 @@ PyObject* _pysqlite_query_execute(pysqlite_Cursor* self, int multiple, PyObject*
PyObject* func_args;
PyObject* result;
int numcols;
- PY_LONG_LONG lastrowid;
int statement_type;
PyObject* descriptor;
PyObject* second_argument = NULL;
@@ -483,8 +467,7 @@ PyObject* _pysqlite_query_execute(pysqlite_Cursor* self, int multiple, PyObject*
allow_8bit_chars = ((self->connection->text_factory != (PyObject*)&PyUnicode_Type) &&
(self->connection->text_factory != pysqlite_OptimizedUnicode));
- Py_XDECREF(self->next_row);
- self->next_row = NULL;
+ Py_CLEAR(self->next_row);
if (multiple) {
/* executemany() */
@@ -747,10 +730,11 @@ PyObject* _pysqlite_query_execute(pysqlite_Cursor* self, int multiple, PyObject*
Py_DECREF(self->lastrowid);
if (!multiple && statement_type == STATEMENT_INSERT) {
+ sqlite_int64 lastrowid;
Py_BEGIN_ALLOW_THREADS
lastrowid = sqlite3_last_insert_rowid(self->connection->db);
Py_END_ALLOW_THREADS
- self->lastrowid = PyInt_FromLong((long)lastrowid);
+ self->lastrowid = _pysqlite_long_from_int64(lastrowid);
} else {
Py_INCREF(Py_None);
self->lastrowid = Py_None;
@@ -910,8 +894,7 @@ PyObject* pysqlite_cursor_iternext(pysqlite_Cursor *self)
if (!self->next_row) {
if (self->statement) {
(void)pysqlite_statement_reset(self->statement);
- Py_DECREF(self->statement);
- self->statement = NULL;
+ Py_CLEAR(self->statement);
}
return NULL;
}
diff --git a/Modules/_sqlite/row.c b/Modules/_sqlite/row.c
index 480b482..ff52560 100644
--- a/Modules/_sqlite/row.c
+++ b/Modules/_sqlite/row.c
@@ -64,9 +64,16 @@ int pysqlite_row_init(pysqlite_Row* self, PyObject* args, PyObject* kwargs)
return 0;
}
+PyObject* pysqlite_row_item(pysqlite_Row* self, Py_ssize_t idx)
+{
+ PyObject* item = PyTuple_GetItem(self->data, idx);
+ Py_XINCREF(item);
+ return item;
+}
+
PyObject* pysqlite_row_subscript(pysqlite_Row* self, PyObject* idx)
{
- long _idx;
+ Py_ssize_t _idx;
char* key;
int nitems, i;
char* compare_key;
@@ -78,11 +85,17 @@ PyObject* pysqlite_row_subscript(pysqlite_Row* self, PyObject* idx)
if (PyInt_Check(idx)) {
_idx = PyInt_AsLong(idx);
+ if (_idx < 0)
+ _idx += PyTuple_GET_SIZE(self->data);
item = PyTuple_GetItem(self->data, _idx);
Py_XINCREF(item);
return item;
} else if (PyLong_Check(idx)) {
- _idx = PyLong_AsLong(idx);
+ _idx = PyNumber_AsSsize_t(idx, PyExc_IndexError);
+ if (_idx == -1 && PyErr_Occurred())
+ return NULL;
+ if (_idx < 0)
+ _idx += PyTuple_GET_SIZE(self->data);
item = PyTuple_GetItem(self->data, _idx);
Py_XINCREF(item);
return item;
@@ -199,6 +212,14 @@ PyMappingMethods pysqlite_row_as_mapping = {
/* mp_ass_subscript */ (objobjargproc)0,
};
+static PySequenceMethods pysqlite_row_as_sequence = {
+ /* sq_length */ (lenfunc)pysqlite_row_length,
+ /* sq_concat */ 0,
+ /* sq_repeat */ 0,
+ /* sq_item */ (ssizeargfunc)pysqlite_row_item,
+};
+
+
static PyMethodDef pysqlite_row_methods[] = {
{"keys", (PyCFunction)pysqlite_row_keys, METH_NOARGS,
PyDoc_STR("Returns the keys of the row.")},
@@ -252,5 +273,6 @@ extern int pysqlite_row_setup_types(void)
{
pysqlite_RowType.tp_new = PyType_GenericNew;
pysqlite_RowType.tp_as_mapping = &pysqlite_row_as_mapping;
+ pysqlite_RowType.tp_as_sequence = &pysqlite_row_as_sequence;
return PyType_Ready(&pysqlite_RowType);
}
diff --git a/Modules/_sqlite/statement.c b/Modules/_sqlite/statement.c
index c777211..7a7a60f 100644
--- a/Modules/_sqlite/statement.c
+++ b/Modules/_sqlite/statement.c
@@ -26,6 +26,7 @@
#include "connection.h"
#include "microprotocols.h"
#include "prepare_protocol.h"
+#include "util.h"
#include "sqlitecompat.h"
/* prototypes */
@@ -101,8 +102,6 @@ int pysqlite_statement_create(pysqlite_Statement* self, pysqlite_Connection* con
int pysqlite_statement_bind_parameter(pysqlite_Statement* self, int pos, PyObject* parameter, int allow_8bit_chars)
{
int rc = SQLITE_OK;
- long longval;
- PY_LONG_LONG longlongval;
const char* buffer;
char* string;
Py_ssize_t buflen;
@@ -153,15 +152,19 @@ int pysqlite_statement_bind_parameter(pysqlite_Statement* self, int pos, PyObjec
}
switch (paramtype) {
- case TYPE_INT:
- longval = PyInt_AsLong(parameter);
- rc = sqlite3_bind_int64(self->st, pos, (sqlite_int64)longval);
+ case TYPE_INT: {
+ long longval = PyInt_AsLong(parameter);
+ rc = sqlite3_bind_int64(self->st, pos, longval);
break;
- case TYPE_LONG:
- longlongval = PyLong_AsLongLong(parameter);
- /* in the overflow error case, longlongval is -1, and an exception is set */
- rc = sqlite3_bind_int64(self->st, pos, (sqlite_int64)longlongval);
+ }
+ case TYPE_LONG: {
+ sqlite_int64 value = _pysqlite_long_as_int64(parameter);
+ if (value == -1 && PyErr_Occurred())
+ rc = -1;
+ else
+ rc = sqlite3_bind_int64(self->st, pos, (sqlite_int64)value);
break;
+ }
case TYPE_FLOAT:
rc = sqlite3_bind_double(self->st, pos, PyFloat_AsDouble(parameter));
break;
@@ -198,7 +201,7 @@ static int _need_adapt(PyObject* obj)
return 1;
}
- if (PyInt_CheckExact(obj) || PyLong_CheckExact(obj)
+ if (PyInt_CheckExact(obj) || PyLong_CheckExact(obj)
|| PyFloat_CheckExact(obj) || PyString_CheckExact(obj)
|| PyUnicode_CheckExact(obj) || PyBuffer_Check(obj)) {
return 0;
diff --git a/Modules/_sqlite/util.c b/Modules/_sqlite/util.c
index 6b57b76..a24dd8c 100644
--- a/Modules/_sqlite/util.c
+++ b/Modules/_sqlite/util.c
@@ -104,3 +104,69 @@ int _pysqlite_seterror(sqlite3* db, sqlite3_stmt* st)
return errorcode;
}
+#ifdef WORDS_BIGENDIAN
+# define IS_LITTLE_ENDIAN 0
+#else
+# define IS_LITTLE_ENDIAN 1
+#endif
+
+PyObject *
+_pysqlite_long_from_int64(sqlite_int64 value)
+{
+#ifdef HAVE_LONG_LONG
+# if SIZEOF_LONG_LONG < 8
+ if (value > PY_LLONG_MAX || value < PY_LLONG_MIN) {
+ return _PyLong_FromByteArray(&value, sizeof(value),
+ IS_LITTLE_ENDIAN, 1 /* signed */);
+ }
+# endif
+# if SIZEOF_LONG < SIZEOF_LONG_LONG
+ if (value > LONG_MAX || value < LONG_MIN)
+ return PyLong_FromLongLong(value);
+# endif
+#else
+# if SIZEOF_LONG < 8
+ if (value > LONG_MAX || value < LONG_MIN) {
+ return _PyLong_FromByteArray(&value, sizeof(value),
+ IS_LITTLE_ENDIAN, 1 /* signed */);
+ }
+# endif
+#endif
+ return PyInt_FromLong(value);
+}
+
+sqlite_int64
+_pysqlite_long_as_int64(PyObject * py_val)
+{
+ int overflow;
+#ifdef HAVE_LONG_LONG
+ PY_LONG_LONG value = PyLong_AsLongLongAndOverflow(py_val, &overflow);
+#else
+ long value = PyLong_AsLongAndOverflow(py_val, &overflow);
+#endif
+ if (value == -1 && PyErr_Occurred())
+ return -1;
+ if (!overflow) {
+#ifdef HAVE_LONG_LONG
+# if SIZEOF_LONG_LONG > 8
+ if (-0x8000000000000000LL <= value && value <= 0x7FFFFFFFFFFFFFFFLL)
+# endif
+#else
+# if SIZEOF_LONG > 8
+ if (-0x8000000000000000L <= value && value <= 0x7FFFFFFFFFFFFFFFL)
+# endif
+#endif
+ return value;
+ }
+ else if (sizeof(value) < sizeof(sqlite_int64)) {
+ sqlite_int64 int64val;
+ if (_PyLong_AsByteArray((PyLongObject *)py_val,
+ (unsigned char *)&int64val, sizeof(int64val),
+ IS_LITTLE_ENDIAN, 1 /* signed */) >= 0) {
+ return int64val;
+ }
+ }
+ PyErr_SetString(PyExc_OverflowError,
+ "Python int too large to convert to SQLite INTEGER");
+ return -1;
+}
diff --git a/Modules/_sqlite/util.h b/Modules/_sqlite/util.h
index 4269003..4c3b2c7 100644
--- a/Modules/_sqlite/util.h
+++ b/Modules/_sqlite/util.h
@@ -35,4 +35,8 @@ int pysqlite_step(sqlite3_stmt* statement, pysqlite_Connection* connection);
* Returns the error code (0 means no error occurred).
*/
int _pysqlite_seterror(sqlite3* db, sqlite3_stmt* st);
+
+PyObject * _pysqlite_long_from_int64(sqlite_int64 value);
+sqlite_int64 _pysqlite_long_as_int64(PyObject * value);
+
#endif
diff --git a/Modules/_sre.c b/Modules/_sre.c
index cd95917..d88c13f 100644
--- a/Modules/_sre.c
+++ b/Modules/_sre.c
@@ -272,7 +272,7 @@ data_stack_grow(SRE_STATE* state, Py_ssize_t size)
if (cursize < minsize) {
void* stack;
cursize = minsize+minsize/4+1024;
- TRACE(("allocate/grow stack %d\n", cursize));
+ TRACE(("allocate/grow stack %" PY_FORMAT_SIZE_T "d\n", cursize));
stack = PyMem_REALLOC(state->data_stack, cursize);
if (!stack) {
data_stack_dealloc(state);
@@ -459,7 +459,7 @@ SRE_CHARSET(SRE_CODE* set, SRE_CODE ch)
}
else {
/* <CHARSET> <bitmap> (32 bits per code word) */
- if (ch < 256 && (set[ch >> 5] & (1 << (ch & 31))))
+ if (ch < 256 && (set[ch >> 5] & (1u << (ch & 31))))
return ok;
set += 8;
}
@@ -498,7 +498,7 @@ SRE_CHARSET(SRE_CODE* set, SRE_CODE ch)
block = -1;
set += 64;
if (block >=0 &&
- (set[block*8 + ((ch & 255)>>5)] & (1 << (ch & 31))))
+ (set[block*8 + ((ch & 255)>>5)] & (1u << (ch & 31))))
return ok;
set += count*8;
}
@@ -524,7 +524,7 @@ SRE_COUNT(SRE_STATE* state, SRE_CODE* pattern, Py_ssize_t maxcount)
Py_ssize_t i;
/* adjust end */
- if (maxcount < end - ptr && maxcount != 65535)
+ if (maxcount < end - ptr && maxcount != SRE_MAXREPEAT)
end = ptr + maxcount;
switch (pattern[0]) {
@@ -592,12 +592,13 @@ SRE_COUNT(SRE_STATE* state, SRE_CODE* pattern, Py_ssize_t maxcount)
if (!i)
break;
}
- TRACE(("|%p|%p|COUNT %d\n", pattern, ptr,
+ TRACE(("|%p|%p|COUNT %" PY_FORMAT_SIZE_T "d\n", pattern, ptr,
(SRE_CHAR*) state->ptr - ptr));
return (SRE_CHAR*) state->ptr - ptr;
}
- TRACE(("|%p|%p|COUNT %d\n", pattern, ptr, ptr - (SRE_CHAR*) state->ptr));
+ TRACE(("|%p|%p|COUNT %" PY_FORMAT_SIZE_T "d\n", pattern, ptr,
+ ptr - (SRE_CHAR*) state->ptr));
return ptr - (SRE_CHAR*) state->ptr;
}
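The TRACE fixes switch Py_ssize_t arguments to the PY_FORMAT_SIZE_T printf modifier, which pyport.h defines as "", "l" or "I64" depending on the platform, since a plain %d truncates these values on 64-bit builds. For example:

#include "Python.h"
#include <stdio.h>

int main(void)
{
    Py_ssize_t count = PY_SSIZE_T_MAX;
    printf("count = %" PY_FORMAT_SIZE_T "d\n", count);
    return 0;
}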
@@ -684,9 +685,10 @@ SRE_INFO(SRE_STATE* state, SRE_CODE* pattern)
#define DATA_STACK_ALLOC(state, type, ptr) \
do { \
alloc_pos = state->data_stack_base; \
- TRACE(("allocating %s in %d (%d)\n", \
+ TRACE(("allocating %s in %" PY_FORMAT_SIZE_T "d " \
+ "(%" PY_FORMAT_SIZE_T "d)\n", \
SFY(type), alloc_pos, sizeof(type))); \
- if (state->data_stack_size < alloc_pos+sizeof(type)) { \
+ if (sizeof(type) > state->data_stack_size - alloc_pos) { \
int j = data_stack_grow(state, sizeof(type)); \
if (j < 0) return j; \
if (ctx_pos != -1) \
@@ -698,15 +700,16 @@ do { \
#define DATA_STACK_LOOKUP_AT(state, type, ptr, pos) \
do { \
- TRACE(("looking up %s at %d\n", SFY(type), pos)); \
+ TRACE(("looking up %s at %" PY_FORMAT_SIZE_T "d\n", SFY(type), pos)); \
ptr = (type*)(state->data_stack+pos); \
} while (0)
#define DATA_STACK_PUSH(state, data, size) \
do { \
- TRACE(("copy data in %p to %d (%d)\n", \
+ TRACE(("copy data in %p to %" PY_FORMAT_SIZE_T "d " \
+ "(%" PY_FORMAT_SIZE_T "d)\n", \
data, state->data_stack_base, size)); \
- if (state->data_stack_size < state->data_stack_base+size) { \
+ if (size > state->data_stack_size - state->data_stack_base) { \
int j = data_stack_grow(state, size); \
if (j < 0) return j; \
if (ctx_pos != -1) \
@@ -718,7 +721,8 @@ do { \
#define DATA_STACK_POP(state, data, size, discard) \
do { \
- TRACE(("copy data to %p from %d (%d)\n", \
+ TRACE(("copy data to %p from %" PY_FORMAT_SIZE_T "d " \
+ "(%" PY_FORMAT_SIZE_T "d)\n", \
data, state->data_stack_base-size, size)); \
memcpy(data, state->data_stack+state->data_stack_base-size, size); \
if (discard) \
@@ -727,7 +731,8 @@ do { \
#define DATA_STACK_POP_DISCARD(state, size) \
do { \
- TRACE(("discard data from %d (%d)\n", \
+ TRACE(("discard data from %" PY_FORMAT_SIZE_T "d " \
+ "(%" PY_FORMAT_SIZE_T "d)\n", \
state->data_stack_base-size, size)); \
state->data_stack_base -= size; \
} while(0)
@@ -831,8 +836,9 @@ entrance:
/* optimization info block */
/* <INFO> <1=skip> <2=flags> <3=min> ... */
if (ctx->pattern[3] && (end - ctx->ptr) < ctx->pattern[3]) {
- TRACE(("reject (got %d chars, need %d)\n",
- (end - ctx->ptr), ctx->pattern[3]));
+ TRACE(("reject (got %" PY_FORMAT_SIZE_T "d chars, "
+ "need %" PY_FORMAT_SIZE_T "d)\n",
+ (end - ctx->ptr), (Py_ssize_t) ctx->pattern[3]));
RETURN_FAILURE;
}
ctx->pattern += ctx->pattern[1] + 1;
@@ -1028,7 +1034,7 @@ entrance:
TRACE(("|%p|%p|REPEAT_ONE %d %d\n", ctx->pattern, ctx->ptr,
ctx->pattern[1], ctx->pattern[2]));
- if (ctx->ptr + ctx->pattern[1] > end)
+ if ((Py_ssize_t) ctx->pattern[1] > end - ctx->ptr)
RETURN_FAILURE; /* cannot match */
state->ptr = ctx->ptr;
@@ -1111,7 +1117,7 @@ entrance:
TRACE(("|%p|%p|MIN_REPEAT_ONE %d %d\n", ctx->pattern, ctx->ptr,
ctx->pattern[1], ctx->pattern[2]));
- if (ctx->ptr + ctx->pattern[1] > end)
+ if ((Py_ssize_t) ctx->pattern[1] > end - ctx->ptr)
RETURN_FAILURE; /* cannot match */
state->ptr = ctx->ptr;
@@ -1139,7 +1145,7 @@ entrance:
} else {
/* general case */
LASTMARK_SAVE();
- while ((Py_ssize_t)ctx->pattern[2] == 65535
+ while ((Py_ssize_t)ctx->pattern[2] == SRE_MAXREPEAT
|| ctx->count <= (Py_ssize_t)ctx->pattern[2]) {
state->ptr = ctx->ptr;
DO_JUMP(JUMP_MIN_REPEAT_ONE,jump_min_repeat_one,
@@ -1207,10 +1213,10 @@ entrance:
ctx->count = ctx->u.rep->count+1;
- TRACE(("|%p|%p|MAX_UNTIL %d\n", ctx->pattern,
+ TRACE(("|%p|%p|MAX_UNTIL %" PY_FORMAT_SIZE_T "d\n", ctx->pattern,
ctx->ptr, ctx->count));
- if (ctx->count < ctx->u.rep->pattern[1]) {
+ if (ctx->count < (Py_ssize_t) ctx->u.rep->pattern[1]) {
/* not enough matches */
ctx->u.rep->count = ctx->count;
DO_JUMP(JUMP_MAX_UNTIL_1, jump_max_until_1,
@@ -1224,8 +1230,8 @@ entrance:
RETURN_FAILURE;
}
- if ((ctx->count < ctx->u.rep->pattern[2] ||
- ctx->u.rep->pattern[2] == 65535) &&
+ if ((ctx->count < (Py_ssize_t) ctx->u.rep->pattern[2] ||
+ ctx->u.rep->pattern[2] == SRE_MAXREPEAT) &&
state->ptr != ctx->u.rep->last_ptr) {
/* we may have enough matches, but if we can
match another item, do so */
@@ -1270,10 +1276,10 @@ entrance:
ctx->count = ctx->u.rep->count+1;
- TRACE(("|%p|%p|MIN_UNTIL %d %p\n", ctx->pattern,
+ TRACE(("|%p|%p|MIN_UNTIL %" PY_FORMAT_SIZE_T "d %p\n", ctx->pattern,
ctx->ptr, ctx->count, ctx->u.rep->pattern));
- if (ctx->count < ctx->u.rep->pattern[1]) {
+ if (ctx->count < (Py_ssize_t) ctx->u.rep->pattern[1]) {
/* not enough matches */
ctx->u.rep->count = ctx->count;
DO_JUMP(JUMP_MIN_UNTIL_1, jump_min_until_1,
@@ -1302,13 +1308,18 @@ entrance:
LASTMARK_RESTORE();
- if (ctx->count >= ctx->u.rep->pattern[2]
- && ctx->u.rep->pattern[2] != 65535)
+ if ((ctx->count >= (Py_ssize_t) ctx->u.rep->pattern[2]
+ && ctx->u.rep->pattern[2] != SRE_MAXREPEAT) ||
+ state->ptr == ctx->u.rep->last_ptr)
RETURN_FAILURE;
ctx->u.rep->count = ctx->count;
+ /* zero-width match protection */
+ DATA_PUSH(&ctx->u.rep->last_ptr);
+ ctx->u.rep->last_ptr = state->ptr;
DO_JUMP(JUMP_MIN_UNTIL_3,jump_min_until_3,
ctx->u.rep->pattern+3);
+ DATA_POP(&ctx->u.rep->last_ptr);
if (ret) {
RETURN_ON_ERROR(ret);
RETURN_SUCCESS;
@@ -1478,7 +1489,8 @@ exit:
TRACE(("|%p|%p|JUMP_ASSERT_NOT\n", ctx->pattern, ctx->ptr));
goto jump_assert_not;
case JUMP_NONE:
- TRACE(("|%p|%p|RETURN %d\n", ctx->pattern, ctx->ptr, ret));
+ TRACE(("|%p|%p|RETURN %" PY_FORMAT_SIZE_T "d\n", ctx->pattern,
+ ctx->ptr, ret));
break;
}
@@ -1527,7 +1539,8 @@ SRE_SEARCH(SRE_STATE* state, SRE_CODE* pattern)
pattern += 1 + pattern[1];
}
- TRACE(("prefix = %p %d %d\n", prefix, prefix_len, prefix_skip));
+ TRACE(("prefix = %p %" PY_FORMAT_SIZE_T "d %" PY_FORMAT_SIZE_T "d\n",
+ prefix, prefix_len, prefix_skip));
TRACE(("charset = %p\n", charset));
#if defined(USE_FAST_SEARCH)
@@ -1636,7 +1649,7 @@ static PyObject*pattern_scanner(PatternObject*, PyObject*);
static PyObject *
sre_codesize(PyObject* self, PyObject *unused)
{
- return Py_BuildValue("l", sizeof(SRE_CODE));
+ return PyInt_FromSize_t(sizeof(SRE_CODE));
}
static PyObject *
@@ -1862,18 +1875,62 @@ pattern_dealloc(PatternObject* self)
PyObject_DEL(self);
}
+static int
+check_args_size(const char *name, PyObject* args, PyObject* kw, int n)
+{
+ Py_ssize_t m = PyTuple_GET_SIZE(args) + (kw ? PyDict_Size(kw) : 0);
+ if (m <= n)
+ return 1;
+ PyErr_Format(PyExc_TypeError,
+ "%s() takes at most %d positional arguments (%zd given)",
+ name, n, m);
+ return 0;
+}
+
+static PyObject*
+fix_string_param(PyObject *string, PyObject *string2, const char *oldname)
+{
+ if (string2 != NULL) {
+ char buf[100];
+ if (string != NULL) {
+ PyErr_Format(PyExc_TypeError,
+ "Argument given by name ('%s') and position (1)",
+ oldname);
+ return NULL;
+ }
+ sprintf(buf, "The '%s' keyword parameter name is deprecated. "
+ "Use 'string' instead.", oldname);
+ if (PyErr_Warn(PyExc_DeprecationWarning, buf) < 0)
+ return NULL;
+ return string2;
+ }
+ if (string == NULL) {
+ PyErr_SetString(PyExc_TypeError,
+ "Required argument 'string' (pos 1) not found");
+ return NULL;
+ }
+ return string;
+}
+
static PyObject*
pattern_match(PatternObject* self, PyObject* args, PyObject* kw)
{
SRE_STATE state;
int status;
- PyObject* string;
+ PyObject *string = NULL, *string2 = NULL;
Py_ssize_t start = 0;
Py_ssize_t end = PY_SSIZE_T_MAX;
- static char* kwlist[] = { "pattern", "pos", "endpos", NULL };
- if (!PyArg_ParseTupleAndKeywords(args, kw, "O|nn:match", kwlist,
- &string, &start, &end))
+ static char* kwlist[] = { "string", "pos", "endpos", "pattern", NULL };
+ if (!check_args_size("match", args, kw, 3))
+ return NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kw, "|OnnO:match", kwlist,
+ &string, &start, &end, &string2))
+ return NULL;
+
+ string = fix_string_param(string, string2, "pattern");
+ if (!string)
return NULL;
string = state_init(&state, self, string, start, end);
@@ -1907,12 +1964,19 @@ pattern_search(PatternObject* self, PyObject* args, PyObject* kw)
SRE_STATE state;
int status;
- PyObject* string;
+ PyObject *string = NULL, *string2 = NULL;
Py_ssize_t start = 0;
Py_ssize_t end = PY_SSIZE_T_MAX;
- static char* kwlist[] = { "pattern", "pos", "endpos", NULL };
- if (!PyArg_ParseTupleAndKeywords(args, kw, "O|nn:search", kwlist,
- &string, &start, &end))
+ static char* kwlist[] = { "string", "pos", "endpos", "pattern", NULL };
+ if (!check_args_size("search", args, kw, 3))
+ return NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kw, "|OnnO:search", kwlist,
+ &string, &start, &end, &string2))
+ return NULL;
+
+ string = fix_string_param(string, string2, "pattern");
+ if (!string)
return NULL;
string = state_init(&state, self, string, start, end);
@@ -2042,12 +2106,19 @@ pattern_findall(PatternObject* self, PyObject* args, PyObject* kw)
int status;
Py_ssize_t i, b, e;
- PyObject* string;
+ PyObject *string = NULL, *string2 = NULL;
Py_ssize_t start = 0;
Py_ssize_t end = PY_SSIZE_T_MAX;
- static char* kwlist[] = { "source", "pos", "endpos", NULL };
- if (!PyArg_ParseTupleAndKeywords(args, kw, "O|nn:findall", kwlist,
- &string, &start, &end))
+ static char* kwlist[] = { "string", "pos", "endpos", "source", NULL };
+ if (!check_args_size("findall", args, kw, 3))
+ return NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kw, "|OnnO:findall", kwlist,
+ &string, &start, &end, &string2))
+ return NULL;
+
+ string = fix_string_param(string, string2, "source");
+ if (!string)
return NULL;
string = state_init(&state, self, string, start, end);
@@ -2172,11 +2243,18 @@ pattern_split(PatternObject* self, PyObject* args, PyObject* kw)
Py_ssize_t i;
void* last;
- PyObject* string;
+ PyObject *string = NULL, *string2 = NULL;
Py_ssize_t maxsplit = 0;
- static char* kwlist[] = { "source", "maxsplit", NULL };
- if (!PyArg_ParseTupleAndKeywords(args, kw, "O|n:split", kwlist,
- &string, &maxsplit))
+ static char* kwlist[] = { "string", "maxsplit", "source", NULL };
+ if (!check_args_size("split", args, kw, 2))
+ return NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kw, "|OnO:split", kwlist,
+ &string, &maxsplit, &string2))
+ return NULL;
+
+ string = fix_string_param(string, string2, "source");
+ if (!string)
return NULL;
string = state_init(&state, self, string, 0, PY_SSIZE_T_MAX);
@@ -2448,7 +2526,7 @@ next:
return NULL;
if (subn)
- return Py_BuildValue("Ni", item, n);
+ return Py_BuildValue("Nn", item, n);
return item;
@@ -2546,7 +2624,7 @@ PyDoc_STRVAR(pattern_match_doc,
PyDoc_STRVAR(pattern_search_doc,
"search(string[, pos[, endpos]]) --> match object or None.\n\
Scan through string looking for a match, and return a corresponding\n\
- MatchObject instance. Return None if no position in the string matches.");
+ match object instance. Return None if no position in the string matches.");
PyDoc_STRVAR(pattern_split_doc,
"split(string[, maxsplit = 0]) --> list.\n\
@@ -2675,6 +2753,13 @@ _compile(PyObject* self_, PyObject* args)
PyObject *o = PyList_GET_ITEM(code, i);
unsigned long value = PyInt_Check(o) ? (unsigned long)PyInt_AsLong(o)
: PyLong_AsUnsignedLong(o);
+ if (value == (unsigned long)-1 && PyErr_Occurred()) {
+ if (PyErr_ExceptionMatches(PyExc_OverflowError)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "regular expression code size limit exceeded");
+ }
+ break;
+ }
self->code[i] = (SRE_CODE) value;
if ((unsigned long) self->code[i] != value) {
PyErr_SetString(PyExc_OverflowError,
@@ -2733,8 +2818,7 @@ _compile(PyObject* self_, PyObject* args)
\_________\_____/ /
\____________/
- It also helps that SRE_CODE is always an unsigned type, either 2 bytes or 4
- bytes wide (the latter if Python is compiled for "wide" unicode support).
+ It also helps that SRE_CODE is always an unsigned type.
*/
/* Defining this one enables tracing of the validator */
@@ -2772,7 +2856,7 @@ _compile(PyObject* self_, PyObject* args)
skip = *code; \
VTRACE(("%lu (skip to %p)\n", \
(unsigned long)skip, code+skip)); \
- if (code+skip-adj < code || code+skip-adj > end)\
+ if (skip-adj > end-code) \
FAIL; \
code++; \
} while (0)
@@ -2805,7 +2889,7 @@ _validate_charset(SRE_CODE *code, SRE_CODE *end)
case SRE_OP_CHARSET:
offset = 32/sizeof(SRE_CODE); /* 32-byte bitmap */
- if (code+offset < code || code+offset > end)
+ if (offset > end-code)
FAIL;
code += offset;
break;
@@ -2813,7 +2897,7 @@ _validate_charset(SRE_CODE *code, SRE_CODE *end)
case SRE_OP_BIGCHARSET:
GET_ARG; /* Number of blocks */
offset = 256/sizeof(SRE_CODE); /* 256-byte table */
- if (code+offset < code || code+offset > end)
+ if (offset > end-code)
FAIL;
/* Make sure that each byte points to a valid block */
for (i = 0; i < 256; i++) {
@@ -2822,7 +2906,7 @@ _validate_charset(SRE_CODE *code, SRE_CODE *end)
}
code += offset;
offset = arg * 32/sizeof(SRE_CODE); /* 32-byte bitmap times arg */
- if (code+offset < code || code+offset > end)
+ if (offset > end-code)
FAIL;
code += offset;
break;
@@ -2973,11 +3057,11 @@ _validate_inner(SRE_CODE *code, SRE_CODE *end, Py_ssize_t groups)
GET_ARG; prefix_len = arg;
GET_ARG; /* prefix skip */
/* Here comes the prefix string */
- if (code+prefix_len < code || code+prefix_len > newcode)
+ if (prefix_len > newcode-code)
FAIL;
code += prefix_len;
/* And here comes the overlap table */
- if (code+prefix_len < code || code+prefix_len > newcode)
+ if (prefix_len > newcode-code)
FAIL;
/* Each overlap value should be < prefix_len */
for (i = 0; i < prefix_len; i++) {
@@ -3035,10 +3119,8 @@ _validate_inner(SRE_CODE *code, SRE_CODE *end, Py_ssize_t groups)
GET_ARG; max = arg;
if (min > max)
FAIL;
-#ifdef Py_UNICODE_WIDE
- if (max > 65535)
+ if (max > SRE_MAXREPEAT)
FAIL;
-#endif
if (!_validate_inner(code, code+skip-4, groups))
FAIL;
code += skip-4;
@@ -3056,10 +3138,8 @@ _validate_inner(SRE_CODE *code, SRE_CODE *end, Py_ssize_t groups)
GET_ARG; max = arg;
if (min > max)
FAIL;
-#ifdef Py_UNICODE_WIDE
- if (max > 65535)
+ if (max > SRE_MAXREPEAT)
FAIL;
-#endif
if (!_validate_inner(code, code+skip-3, groups))
FAIL;
code += skip-3;
@@ -3110,7 +3190,7 @@ _validate_inner(SRE_CODE *code, SRE_CODE *end, Py_ssize_t groups)
to allow arbitrary jumps anywhere in the code; so we just look
for a JUMP opcode preceding our skip target.
*/
- if (skip >= 3 && code+skip-3 >= code &&
+ if (skip >= 3 && skip-3 < end-code &&
code[skip-3] == SRE_OP_JUMP)
{
VTRACE(("both then and else parts present\n"));
@@ -3386,7 +3466,7 @@ match_start(MatchObject* self, PyObject* args)
}
/* mark is -1 if group is undefined */
- return Py_BuildValue("i", self->mark[index*2]);
+ return PyInt_FromSsize_t(self->mark[index*2]);
}
static PyObject*
@@ -3409,7 +3489,7 @@ match_end(MatchObject* self, PyObject* args)
}
/* mark is -1 if group is undefined */
- return Py_BuildValue("i", self->mark[index*2+1]);
+ return PyInt_FromSsize_t(self->mark[index*2+1]);
}
LOCAL(PyObject*)
@@ -3542,14 +3622,54 @@ match_deepcopy(MatchObject* self, PyObject* memo)
#endif
}
-static struct PyMethodDef match_methods[] = {
- {"group", (PyCFunction) match_group, METH_VARARGS},
- {"start", (PyCFunction) match_start, METH_VARARGS},
- {"end", (PyCFunction) match_end, METH_VARARGS},
- {"span", (PyCFunction) match_span, METH_VARARGS},
- {"groups", (PyCFunction) match_groups, METH_VARARGS|METH_KEYWORDS},
- {"groupdict", (PyCFunction) match_groupdict, METH_VARARGS|METH_KEYWORDS},
- {"expand", (PyCFunction) match_expand, METH_O},
+PyDoc_STRVAR(match_doc,
+"The result of re.match() and re.search().\n\
+Match objects always have a boolean value of True.");
+
+PyDoc_STRVAR(match_group_doc,
+"group([group1, ...]) -> str or tuple.\n\
+ Return subgroup(s) of the match by indices or names.\n\
+ For 0 returns the entire match.");
+
+PyDoc_STRVAR(match_start_doc,
+"start([group=0]) -> int.\n\
+ Return index of the start of the substring matched by group.");
+
+PyDoc_STRVAR(match_end_doc,
+"end([group=0]) -> int.\n\
+ Return index of the end of the substring matched by group.");
+
+PyDoc_STRVAR(match_span_doc,
+"span([group]) -> tuple.\n\
+ For MatchObject m, return the 2-tuple (m.start(group), m.end(group)).");
+
+PyDoc_STRVAR(match_groups_doc,
+"groups([default=None]) -> tuple.\n\
+ Return a tuple containing all the subgroups of the match, from 1.\n\
+ The default argument is used for groups\n\
+ that did not participate in the match");
+
+PyDoc_STRVAR(match_groupdict_doc,
+"groupdict([default=None]) -> dict.\n\
+ Return a dictionary containing all the named subgroups of the match,\n\
+ keyed by the subgroup name. The default argument is used for groups\n\
+ that did not participate in the match");
+
+PyDoc_STRVAR(match_expand_doc,
+"expand(template) -> str.\n\
+ Return the string obtained by doing backslash substitution\n\
+ on the string template, as done by the sub() method.");
+
+static PyMethodDef match_methods[] = {
+ {"group", (PyCFunction) match_group, METH_VARARGS, match_group_doc},
+ {"start", (PyCFunction) match_start, METH_VARARGS, match_start_doc},
+ {"end", (PyCFunction) match_end, METH_VARARGS, match_end_doc},
+ {"span", (PyCFunction) match_span, METH_VARARGS, match_span_doc},
+ {"groups", (PyCFunction) match_groups, METH_VARARGS|METH_KEYWORDS,
+ match_groups_doc},
+ {"groupdict", (PyCFunction) match_groupdict, METH_VARARGS|METH_KEYWORDS,
+ match_groupdict_doc},
+ {"expand", (PyCFunction) match_expand, METH_O, match_expand_doc},
{"__copy__", (PyCFunction) match_copy, METH_NOARGS},
{"__deepcopy__", (PyCFunction) match_deepcopy, METH_O},
{NULL, NULL}
@@ -3559,7 +3679,7 @@ static PyObject *
match_lastindex_get(MatchObject *self)
{
if (self->lastindex >= 0)
- return Py_BuildValue("i", self->lastindex);
+ return PyInt_FromSsize_t(self->lastindex);
Py_INCREF(Py_None);
return Py_None;
}
@@ -3629,7 +3749,7 @@ static PyTypeObject Match_Type = {
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT,
- 0, /* tp_doc */
+ match_doc, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
@@ -3895,6 +4015,12 @@ PyMODINIT_FUNC init_sre(void)
Py_DECREF(x);
}
+ x = PyLong_FromUnsignedLong(SRE_MAXREPEAT);
+ if (x) {
+ PyDict_SetItemString(d, "MAXREPEAT", x);
+ Py_DECREF(x);
+ }
+
x = PyString_FromString(copyright);
if (x) {
PyDict_SetItemString(d, "copyright", x);
diff --git a/Modules/_ssl.c b/Modules/_ssl.c
index e692b5d..752b033 100644
--- a/Modules/_ssl.c
+++ b/Modules/_ssl.c
@@ -18,6 +18,8 @@
#ifdef WITH_THREAD
#include "pythread.h"
+
+
#define PySSL_BEGIN_ALLOW_THREADS { \
PyThreadState *_save = NULL; \
if (_ssl_locks_count>0) {_save = PyEval_SaveThread();}
@@ -271,6 +273,7 @@ newPySSLObject(PySocketSockObject *Sock, char *key_file, char *cert_file,
char *errstr = NULL;
int ret;
int verification_mode;
+ long options;
self = PyObject_New(PySSLObject, &PySSL_Type); /* Create new object */
if (self == NULL)
@@ -281,6 +284,7 @@ newPySSLObject(PySocketSockObject *Sock, char *key_file, char *cert_file,
self->ssl = NULL;
self->ctx = NULL;
self->Socket = NULL;
+ self->shutdown_seen_zero = 0;
/* Make sure the SSL error state is initialized */
(void) ERR_get_state();
@@ -369,8 +373,10 @@ newPySSLObject(PySocketSockObject *Sock, char *key_file, char *cert_file,
}
/* ssl compatibility */
- SSL_CTX_set_options(self->ctx,
- SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS);
+ options = SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS;
+ if (proto_version != PY_SSL_VERSION_SSL2)
+ options |= SSL_OP_NO_SSLv2;
+ SSL_CTX_set_options(self->ctx, options);
verification_mode = SSL_VERIFY_NONE;
if (certreq == PY_SSL_CERT_OPTIONAL)
@@ -686,7 +692,7 @@ _get_peer_alt_names (X509 *certificate) {
int i, j;
PyObject *peer_alt_names = Py_None;
- PyObject *v, *t;
+ PyObject *v = NULL, *t;
X509_EXTENSION *ext = NULL;
GENERAL_NAMES *names = NULL;
GENERAL_NAME *name;
@@ -738,13 +744,16 @@ _get_peer_alt_names (X509 *certificate) {
ext->value->length));
for(j = 0; j < sk_GENERAL_NAME_num(names); j++) {
-
/* get a rendering of each name in the set of names */
+ int gntype;
+ ASN1_STRING *as = NULL;
name = sk_GENERAL_NAME_value(names, j);
- if (name->type == GEN_DIRNAME) {
-
- /* we special-case DirName as a tuple of tuples of attributes */
+ gntype = name->type;
+ switch (gntype) {
+ case GEN_DIRNAME:
+ /* we special-case DirName as a tuple of
+ tuples of attributes */
t = PyTuple_New(2);
if (t == NULL) {
@@ -764,11 +773,61 @@ _get_peer_alt_names (X509 *certificate) {
goto fail;
}
PyTuple_SET_ITEM(t, 1, v);
+ break;
- } else {
+ case GEN_EMAIL:
+ case GEN_DNS:
+ case GEN_URI:
+ /* GENERAL_NAME_print() doesn't handle NULL bytes in ASN1_string
+ correctly, CVE-2013-4238 */
+ t = PyTuple_New(2);
+ if (t == NULL)
+ goto fail;
+ switch (gntype) {
+ case GEN_EMAIL:
+ v = PyString_FromString("email");
+ as = name->d.rfc822Name;
+ break;
+ case GEN_DNS:
+ v = PyString_FromString("DNS");
+ as = name->d.dNSName;
+ break;
+ case GEN_URI:
+ v = PyString_FromString("URI");
+ as = name->d.uniformResourceIdentifier;
+ break;
+ }
+ if (v == NULL) {
+ Py_DECREF(t);
+ goto fail;
+ }
+ PyTuple_SET_ITEM(t, 0, v);
+ v = PyString_FromStringAndSize((char *)ASN1_STRING_data(as),
+ ASN1_STRING_length(as));
+ if (v == NULL) {
+ Py_DECREF(t);
+ goto fail;
+ }
+ PyTuple_SET_ITEM(t, 1, v);
+ break;
+ default:
/* for everything else, we use the OpenSSL print form */
-
+ switch (gntype) {
+ /* check for new general name type */
+ case GEN_OTHERNAME:
+ case GEN_X400:
+ case GEN_EDIPARTY:
+ case GEN_IPADD:
+ case GEN_RID:
+ break;
+ default:
+ if (PyErr_Warn(PyExc_RuntimeWarning,
+ "Unknown general name type") == -1) {
+ goto fail;
+ }
+ break;
+ }
(void) BIO_reset(biobuf);
GENERAL_NAME_print(biobuf, name);
len = BIO_gets(biobuf, buf, sizeof(buf)-1);
@@ -794,6 +853,7 @@ _get_peer_alt_names (X509 *certificate) {
goto fail;
}
PyTuple_SET_ITEM(t, 1, v);
+ break;
}
/* and add that rendering to the list */
@@ -1005,6 +1065,7 @@ PySSL_peercert(PySSLObject *self, PyObject *args)
int len;
int verification;
PyObject *binary_mode = Py_None;
+ int b;
if (!PyArg_ParseTuple(args, "|O:peer_certificate", &binary_mode))
return NULL;
@@ -1012,7 +1073,10 @@ PySSL_peercert(PySSLObject *self, PyObject *args)
if (!self->peer_cert)
Py_RETURN_NONE;
- if (PyObject_IsTrue(binary_mode)) {
+ b = PyObject_IsTrue(binary_mode);
+ if (b < 0)
+ return NULL;
+ if (b) {
/* return cert in DER-encoded format */
unsigned char *bytes_buf = NULL;
@@ -1188,6 +1252,12 @@ static PyObject *PySSL_SSLwrite(PySSLObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "s*:write", &buf))
return NULL;
+ if (buf.len > INT_MAX) {
+ PyErr_Format(PyExc_OverflowError,
+ "string longer than %d bytes", INT_MAX);
+ goto error;
+ }
+
/* just in case the blocking state of the socket has been changed */
nonblocking = (self->Socket->sock_timeout >= 0.0);
BIO_set_nbio(SSL_get_rbio(self->ssl), nonblocking);
@@ -1209,7 +1279,7 @@ static PyObject *PySSL_SSLwrite(PySSLObject *self, PyObject *args)
}
do {
PySSL_BEGIN_ALLOW_THREADS
- len = SSL_write(self->ssl, buf.buf, buf.len);
+ len = SSL_write(self->ssl, buf.buf, (int)buf.len);
err = SSL_get_error(self->ssl, len);
PySSL_END_ALLOW_THREADS
if (PyErr_CheckSignals()) {
@@ -1391,7 +1461,7 @@ static PyObject *PySSL_SSLshutdown(PySSLObject *self)
* Otherwise OpenSSL might read in too much data,
* eating clear text data that happens to be
* transmitted after the SSL shutdown.
- * Should be safe to call repeatedly everytime this
+ * Should be safe to call repeatedly every time this
* function is used and the shutdown_seen_zero != 0
* condition is met.
*/
@@ -1555,9 +1625,10 @@ PyDoc_STRVAR(PySSL_RAND_egd_doc,
\n\
Queries the entropy gather daemon (EGD) on the socket named by 'path'.\n\
Returns number of bytes read. Raises SSLError if connection to EGD\n\
-fails or if it does provide enough data to seed PRNG.");
+fails or if it does not provide enough data to seed PRNG.");
+
+#endif /* HAVE_OPENSSL_RAND */
-#endif
/* List of functions exported by this module. */
@@ -1585,9 +1656,21 @@ static PyMethodDef PySSL_methods[] = {
static PyThread_type_lock *_ssl_locks = NULL;
-static unsigned long _ssl_thread_id_function (void) {
+#if OPENSSL_VERSION_NUMBER >= 0x10000000
+/* use new CRYPTO_THREADID API. */
+static void
+_ssl_threadid_callback(CRYPTO_THREADID *id)
+{
+ CRYPTO_THREADID_set_numeric(id,
+ (unsigned long)PyThread_get_thread_ident());
+}
+#else
+/* deprecated CRYPTO_set_id_callback() API. */
+static unsigned long
+_ssl_thread_id_function (void) {
return PyThread_get_thread_ident();
}
+#endif
static void _ssl_thread_locking_function (int mode, int n, const char *file, int line) {
/* this function is needed to perform locking on shared data
@@ -1638,7 +1721,11 @@ static int _setup_ssl_threads(void) {
}
}
CRYPTO_set_locking_callback(_ssl_thread_locking_function);
+#if OPENSSL_VERSION_NUMBER >= 0x10000000
+ CRYPTO_THREADID_set_callback(_ssl_threadid_callback);
+#else
CRYPTO_set_id_callback(_ssl_thread_id_function);
+#endif
}
return 1;
}
@@ -1753,4 +1840,5 @@ init_ssl(void)
r = PyString_FromString(SSLeay_version(SSLEAY_VERSION));
if (r == NULL || PyModule_AddObject(m, "OPENSSL_VERSION", r))
return;
+
}
diff --git a/Modules/_struct.c b/Modules/_struct.c
index c158eba..f035847 100644
--- a/Modules/_struct.c
+++ b/Modules/_struct.c
@@ -1371,13 +1371,28 @@ s_init(PyObject *self, PyObject *args, PyObject *kwds)
assert(PyStruct_Check(self));
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "S:Struct", kwlist,
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O:Struct", kwlist,
&o_format))
return -1;
- Py_INCREF(o_format);
- Py_CLEAR(soself->s_format);
- soself->s_format = o_format;
+ if (PyString_Check(o_format)) {
+ Py_INCREF(o_format);
+ Py_CLEAR(soself->s_format);
+ soself->s_format = o_format;
+ }
+ else if (PyUnicode_Check(o_format)) {
+ PyObject *str = PyUnicode_AsEncodedString(o_format, "ascii", NULL);
+ if (str == NULL)
+ return -1;
+ Py_CLEAR(soself->s_format);
+ soself->s_format = str;
+ }
+ else {
+ PyErr_Format(PyExc_TypeError,
+ "Struct() argument 1 must be string, not %s",
+ Py_TYPE(o_format)->tp_name);
+ return -1;
+ }
ret = prepare_s(soself);
return ret;
@@ -1439,6 +1454,7 @@ strings.");
static PyObject *
s_unpack(PyObject *self, PyObject *inputstr)
{
+ Py_buffer buf;
char *start;
Py_ssize_t len;
PyObject *args=NULL, *result;
@@ -1454,12 +1470,17 @@ s_unpack(PyObject *self, PyObject *inputstr)
args = PyTuple_Pack(1, inputstr);
if (args == NULL)
return NULL;
- if (!PyArg_ParseTuple(args, "s#:unpack", &start, &len))
+ if (!PyArg_ParseTuple(args, "s*:unpack", &buf))
goto fail;
- if (soself->s_size != len)
+ start = buf.buf;
+ len = buf.len;
+ if (soself->s_size != len) {
+ PyBuffer_Release(&buf);
goto fail;
+ }
result = s_unpack_internal(soself, start);
Py_DECREF(args);
+ PyBuffer_Release(&buf);
return result;
fail:
@@ -1482,24 +1503,24 @@ static PyObject *
s_unpack_from(PyObject *self, PyObject *args, PyObject *kwds)
{
static char *kwlist[] = {"buffer", "offset", 0};
-#if (PY_VERSION_HEX < 0x02050000)
- static char *fmt = "z#|i:unpack_from";
-#else
- static char *fmt = "z#|n:unpack_from";
-#endif
+ static char *fmt = "z*|n:unpack_from";
+ Py_buffer buf;
Py_ssize_t buffer_len = 0, offset = 0;
char *buffer = NULL;
PyStructObject *soself = (PyStructObject *)self;
+ PyObject *result;
assert(PyStruct_Check(self));
assert(soself->s_codes != NULL);
if (!PyArg_ParseTupleAndKeywords(args, kwds, fmt, kwlist,
- &buffer, &buffer_len, &offset))
+ &buf, &offset))
return NULL;
-
+ buffer = buf.buf;
+ buffer_len = buf.len;
if (buffer == NULL) {
PyErr_Format(StructError,
"unpack_from requires a buffer argument");
+ PyBuffer_Release(&buf);
return NULL;
}
@@ -1510,9 +1531,12 @@ s_unpack_from(PyObject *self, PyObject *args, PyObject *kwds)
PyErr_Format(StructError,
"unpack_from requires a buffer of at least %zd bytes",
soself->s_size);
+ PyBuffer_Release(&buf);
return NULL;
}
- return s_unpack_internal(soself, buffer + offset);
+ result = s_unpack_internal(soself, buffer + offset);
+ PyBuffer_Release(&buf);
+ return result;
}
@@ -1603,7 +1627,7 @@ s_pack(PyObject *self, PyObject *args)
if (PyTuple_GET_SIZE(args) != soself->s_len)
{
PyErr_Format(StructError,
- "pack requires exactly %zd arguments", soself->s_len);
+ "pack expected %zd items for packing (got %zd)", soself->s_len, PyTuple_GET_SIZE(args));
return NULL;
}
@@ -1642,9 +1666,19 @@ s_pack_into(PyObject *self, PyObject *args)
assert(soself->s_codes != NULL);
if (PyTuple_GET_SIZE(args) != (soself->s_len + 2))
{
- PyErr_Format(StructError,
- "pack_into requires exactly %zd arguments",
- (soself->s_len + 2));
+ if (PyTuple_GET_SIZE(args) == 0) {
+ PyErr_Format(StructError,
+ "pack_into expected buffer argument");
+ }
+ else if (PyTuple_GET_SIZE(args) == 1) {
+ PyErr_Format(StructError,
+ "pack_into expected offset argument");
+ }
+ else {
+ PyErr_Format(StructError,
+ "pack_into expected %zd items for packing (got %zd)",
+ soself->s_len, (PyTuple_GET_SIZE(args) - 2));
+ }
return NULL;
}
@@ -1693,6 +1727,18 @@ s_get_size(PyStructObject *self, void *unused)
return PyInt_FromSsize_t(self->s_size);
}
+PyDoc_STRVAR(s_sizeof__doc__,
+"S.__sizeof__() -> size of S in memory, in bytes");
+
+static PyObject *
+s_sizeof(PyStructObject *self, void *unused)
+{
+ Py_ssize_t size;
+
+ size = sizeof(PyStructObject) + sizeof(formatcode) * (self->s_len + 1);
+ return PyLong_FromSsize_t(size);
+}
+
/* List of functions */
static struct PyMethodDef s_methods[] = {
@@ -1701,6 +1747,7 @@ static struct PyMethodDef s_methods[] = {
{"unpack", s_unpack, METH_O, s_unpack__doc__},
{"unpack_from", (PyCFunction)s_unpack_from, METH_VARARGS|METH_KEYWORDS,
s_unpack_from__doc__},
+ {"__sizeof__", (PyCFunction)s_sizeof, METH_NOARGS, s_sizeof__doc__},
{NULL, NULL} /* sentinel */
};
diff --git a/Modules/_testcapimodule.c b/Modules/_testcapimodule.c
index f1968e2..e4885d1 100644
--- a/Modules/_testcapimodule.c
+++ b/Modules/_testcapimodule.c
@@ -1118,7 +1118,7 @@ unicode_encodedecimal(PyObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "u#|s", &unicode, &length, &errors))
return NULL;
- decimal_length = length * 7; /* len('&#8364;') */
+ decimal_length = length * 10; /* len('&#1114111;') */
decimal = PyBytes_FromStringAndSize(NULL, decimal_length);
if (decimal == NULL)
return NULL;
@@ -1687,6 +1687,96 @@ sequence_delitem(PyObject *self, PyObject *args)
Py_RETURN_NONE;
}
+#ifdef WITH_THREAD
+typedef struct {
+ PyThread_type_lock start_event;
+ PyThread_type_lock exit_event;
+ PyObject *callback;
+} test_c_thread_t;
+
+static void
+temporary_c_thread(void *data)
+{
+ test_c_thread_t *test_c_thread = data;
+ PyGILState_STATE state;
+ PyObject *res;
+
+ PyThread_release_lock(test_c_thread->start_event);
+
+ /* Allocate a Python thread state for this thread */
+ state = PyGILState_Ensure();
+
+ res = PyObject_CallFunction(test_c_thread->callback, "", NULL);
+ Py_CLEAR(test_c_thread->callback);
+
+ if (res == NULL) {
+ PyErr_Print();
+ }
+ else {
+ Py_DECREF(res);
+ }
+
+ /* Destroy the Python thread state for this thread */
+ PyGILState_Release(state);
+
+ PyThread_release_lock(test_c_thread->exit_event);
+
+ PyThread_exit_thread();
+}
+
+static PyObject *
+call_in_temporary_c_thread(PyObject *self, PyObject *callback)
+{
+ PyObject *res = NULL;
+ test_c_thread_t test_c_thread;
+ long thread;
+
+ PyEval_InitThreads();
+
+ test_c_thread.start_event = PyThread_allocate_lock();
+ test_c_thread.exit_event = PyThread_allocate_lock();
+ test_c_thread.callback = NULL;
+ if (!test_c_thread.start_event || !test_c_thread.exit_event) {
+ PyErr_SetString(PyExc_RuntimeError, "could not allocate lock");
+ goto exit;
+ }
+
+ Py_INCREF(callback);
+ test_c_thread.callback = callback;
+
+ PyThread_acquire_lock(test_c_thread.start_event, 1);
+ PyThread_acquire_lock(test_c_thread.exit_event, 1);
+
+ thread = PyThread_start_new_thread(temporary_c_thread, &test_c_thread);
+ if (thread == -1) {
+ PyErr_SetString(PyExc_RuntimeError, "unable to start the thread");
+ PyThread_release_lock(test_c_thread.start_event);
+ PyThread_release_lock(test_c_thread.exit_event);
+ goto exit;
+ }
+
+ PyThread_acquire_lock(test_c_thread.start_event, 1);
+ PyThread_release_lock(test_c_thread.start_event);
+
+ Py_BEGIN_ALLOW_THREADS
+ PyThread_acquire_lock(test_c_thread.exit_event, 1);
+ PyThread_release_lock(test_c_thread.exit_event);
+ Py_END_ALLOW_THREADS
+
+ Py_INCREF(Py_None);
+ res = Py_None;
+
+exit:
+ Py_CLEAR(test_c_thread.callback);
+ if (test_c_thread.start_event)
+ PyThread_free_lock(test_c_thread.start_event);
+ if (test_c_thread.exit_event)
+ PyThread_free_lock(test_c_thread.exit_event);
+ return res;
+}
+#endif /* WITH_THREAD */
+
+
static PyMethodDef TestMethods[] = {
{"raise_exception", raise_exception, METH_VARARGS},
{"test_config", (PyCFunction)test_config, METH_NOARGS},
@@ -1745,6 +1835,10 @@ static PyMethodDef TestMethods[] = {
{"make_exception_with_doc", (PyCFunction)make_exception_with_doc,
METH_VARARGS | METH_KEYWORDS},
{"sequence_delitem", (PyCFunction)sequence_delitem, METH_VARARGS},
+#ifdef WITH_THREAD
+ {"call_in_temporary_c_thread", call_in_temporary_c_thread, METH_O,
+ PyDoc_STR("set_error_class(error_class) -> None")},
+#endif
{NULL, NULL} /* sentinel */
};
@@ -1813,7 +1907,7 @@ test_structmembers_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
;
test_structmembers *ob;
const char *s = NULL;
- Py_ssize_t string_len = 0;
+ int string_len = 0;
ob = PyObject_New(test_structmembers, type);
if (ob == NULL)
return NULL;
diff --git a/Modules/_tkinter.c b/Modules/_tkinter.c
index 40c8be2..4aa8594 100644
--- a/Modules/_tkinter.c
+++ b/Modules/_tkinter.c
@@ -47,6 +47,10 @@ Copyright (C) 1994 Steen Lumholt.
#define PyBool_FromLong PyInt_FromLong
#endif
+#define CHECK_SIZE(size, elemsize) \
+ ((size_t)(size) <= (size_t)INT_MAX && \
+ (size_t)(size) <= UINT_MAX / (size_t)(elemsize))
+
/* Starting with Tcl 8.4, many APIs offer const-correctness. Unfortunately,
making _tkinter correct for this API means to break earlier
versions. USE_COMPAT_CONST allows to make _tkinter work with both 8.4 and
@@ -378,7 +382,7 @@ Merge(PyObject *args)
char **argv = NULL;
int fvStore[ARGSZ];
int *fv = NULL;
- int argc = 0, fvc = 0, i;
+ Py_ssize_t argc = 0, fvc = 0, i;
char *res = NULL;
if (!(tmp = PyList_New(0)))
@@ -400,8 +404,12 @@ Merge(PyObject *args)
argc = PyTuple_Size(args);
if (argc > ARGSZ) {
- argv = (char **)ckalloc(argc * sizeof(char *));
- fv = (int *)ckalloc(argc * sizeof(int));
+ if (!CHECK_SIZE(argc, sizeof(char *))) {
+ PyErr_SetString(PyExc_OverflowError, "tuple is too long");
+ goto finally;
+ }
+ argv = (char **)ckalloc((size_t)argc * sizeof(char *));
+ fv = (int *)ckalloc((size_t)argc * sizeof(int));
if (argv == NULL || fv == NULL) {
PyErr_NoMemory();
goto finally;
@@ -448,6 +456,68 @@ Merge(PyObject *args)
+#ifdef Py_USING_UNICODE
+static PyObject *
+unicode_FromTclStringAndSize(const char *s, Py_ssize_t size)
+{
+ PyObject *r = PyUnicode_DecodeUTF8(s, size, NULL);
+ if (!r && PyErr_ExceptionMatches(PyExc_UnicodeDecodeError)) {
+ /* Tcl encodes null character as \xc0\x80 */
+ if (memchr(s, '\xc0', size)) {
+ char *buf, *q;
+ const char *e = s + size;
+ PyErr_Clear();
+ q = buf = (char *)PyMem_Malloc(size);
+ if (buf == NULL)
+ return NULL;
+ while (s != e) {
+ if (s + 1 != e && s[0] == '\xc0' && s[1] == '\x80') {
+ *q++ = '\0';
+ s += 2;
+ }
+ else
+ *q++ = *s++;
+ }
+ s = buf;
+ size = q - s;
+ r = PyUnicode_DecodeUTF8(s, size, NULL);
+ PyMem_Free(buf);
+ }
+ }
+ return r;
+}
+#endif
+
+static PyObject *
+fromTclStringAndSize(const char *s, Py_ssize_t size)
+{
+ PyObject *r;
+#ifdef Py_USING_UNICODE
+ Py_ssize_t i;
+ /* If Tcl string contains any bytes with the top bit set,
+ it's UTF-8 and we should decode it to Unicode */
+ for (i = 0; i < size; i++)
+ if (s[i] & 0x80)
+ break;
+ if (i != size) {
+ /* It isn't an ASCII string. */
+ r = unicode_FromTclStringAndSize(s, size);
+ if (r)
+ return r;
+ PyErr_Clear();
+ }
+#endif
+ r = PyString_FromStringAndSize(s, size);
+ return r;
+}
+
+static PyObject *
+fromTclString(const char *s)
+{
+ return fromTclStringAndSize(s, strlen(s));
+}
+
+
static PyObject *
Split(char *list)
{
@@ -547,6 +617,33 @@ SplitObj(PyObject *arg)
return Split(PyString_AsString(arg));
/* Fall through, returning arg. */
}
+ else if (PyUnicode_Check(arg)) {
+ int argc;
+ char **argv;
+ char *list;
+ PyObject *s = PyUnicode_AsUTF8String(arg);
+
+ if (s == NULL) {
+ Py_INCREF(arg);
+ return arg;
+ }
+ list = PyString_AsString(s);
+
+ if (list == NULL ||
+ Tcl_SplitList((Tcl_Interp *)NULL, list, &argc, &argv) != TCL_OK) {
+ Py_DECREF(s);
+ Py_INCREF(arg);
+ return arg;
+ }
+ Tcl_Free(FREECAST argv);
+ if (argc > 1) {
+ PyObject *v = Split(list);
+ Py_DECREF(s);
+ return v;
+ }
+ Py_DECREF(s);
+ /* Fall through, returning arg. */
+ }
Py_INCREF(arg);
return arg;
}
@@ -806,27 +903,10 @@ PyDoc_STRVAR(PyTclObject_string__doc__,
static PyObject *
PyTclObject_string(PyTclObject *self, void *ignored)
{
- char *s;
- int i, len;
if (!self->string) {
- s = Tcl_GetStringFromObj(self->value, &len);
- for (i = 0; i < len; i++)
- if (s[i] & 0x80)
- break;
-#ifdef Py_USING_UNICODE
- if (i == len)
- /* It is an ASCII string. */
- self->string = PyString_FromStringAndSize(s, len);
- else {
- self->string = PyUnicode_DecodeUTF8(s, len, "strict");
- if (!self->string) {
- PyErr_Clear();
- self->string = PyString_FromStringAndSize(s, len);
- }
- }
-#else
- self->string = PyString_FromStringAndSize(s, len);
-#endif
+ int len;
+ char *s = Tcl_GetStringFromObj(self->value, &len);
+ self->string = fromTclStringAndSize(s, len);
if (!self->string)
return NULL;
}
@@ -848,7 +928,7 @@ PyTclObject_unicode(PyTclObject *self, void *ignored)
}
/* XXX Could chache result if it is non-ASCII. */
s = Tcl_GetStringFromObj(self->value, &len);
- return PyUnicode_DecodeUTF8(s, len, "strict");
+ return unicode_FromTclStringAndSize(s, len);
}
#endif
@@ -941,6 +1021,16 @@ statichere PyTypeObject PyTclObject_Type = {
0, /*tp_is_gc*/
};
+#if PY_SIZE_MAX > INT_MAX
+#define CHECK_STRING_LENGTH(s) do { \
+ if (s != NULL && strlen(s) >= INT_MAX) { \
+ PyErr_SetString(PyExc_OverflowError, "string is too long"); \
+ return NULL; \
+ } } while(0)
+#else
+#define CHECK_STRING_LENGTH(s)
+#endif
+
static Tcl_Obj*
AsObj(PyObject *value)
{
@@ -956,12 +1046,18 @@ AsObj(PyObject *value)
else if (PyFloat_Check(value))
return Tcl_NewDoubleObj(PyFloat_AS_DOUBLE(value));
else if (PyTuple_Check(value)) {
- Tcl_Obj **argv = (Tcl_Obj**)
- ckalloc(PyTuple_Size(value)*sizeof(Tcl_Obj*));
- int i;
+ Tcl_Obj **argv;
+ Py_ssize_t size, i;
+
+ size = PyTuple_Size(value);
+ if (!CHECK_SIZE(size, sizeof(Tcl_Obj *))) {
+ PyErr_SetString(PyExc_OverflowError, "tuple is too long");
+ return NULL;
+ }
+ argv = (Tcl_Obj **) ckalloc(((size_t)size) * sizeof(Tcl_Obj *));
if(!argv)
return 0;
- for(i=0;i<PyTuple_Size(value);i++)
+ for (i = 0; i < size; i++)
argv[i] = AsObj(PyTuple_GetItem(value,i));
result = Tcl_NewListObj(PyTuple_Size(value), argv);
ckfree(FREECAST argv);
@@ -976,7 +1072,14 @@ AsObj(PyObject *value)
#if defined(Py_UNICODE_WIDE) && TCL_UTF_MAX == 3
Tcl_UniChar *outbuf = NULL;
Py_ssize_t i;
- size_t allocsize = ((size_t)size) * sizeof(Tcl_UniChar);
+ size_t allocsize;
+ if (!CHECK_SIZE(size, sizeof(Tcl_UniChar))) {
+ PyErr_SetString(PyExc_OverflowError, "string is too long");
+ return NULL;
+ }
+ if (sizeof(Py_UNICODE) == sizeof(Tcl_UniChar))
+ return Tcl_NewUnicodeObj(inbuf, size);
+ allocsize = ((size_t)size) * sizeof(Tcl_UniChar);
if (allocsize >= size)
outbuf = (Tcl_UniChar*)ckalloc(allocsize);
/* Else overflow occurred, and we take the next exit */
@@ -987,8 +1090,10 @@ AsObj(PyObject *value)
for (i = 0; i < size; i++) {
if (inbuf[i] >= 0x10000) {
/* Tcl doesn't do UTF-16, yet. */
- PyErr_SetString(PyExc_ValueError,
- "unsupported character");
+ PyErr_Format(Tkinter_TclError,
+ "character U+%x is above the range "
+ "(U+0000-U+FFFF) allowed by Tcl",
+ (int)inbuf[i]);
ckfree(FREECAST outbuf);
return NULL;
}
@@ -1025,30 +1130,7 @@ FromObj(PyObject* tkapp, Tcl_Obj *value)
TkappObject *app = (TkappObject*)tkapp;
if (value->typePtr == NULL) {
- /* If the result contains any bytes with the top bit set,
- it's UTF-8 and we should decode it to Unicode */
-#ifdef Py_USING_UNICODE
- int i;
- char *s = value->bytes;
- int len = value->length;
- for (i = 0; i < len; i++) {
- if (value->bytes[i] & 0x80)
- break;
- }
-
- if (i == value->length)
- result = PyString_FromStringAndSize(s, len);
- else {
- /* Convert UTF-8 to Unicode string */
- result = PyUnicode_DecodeUTF8(s, len, "strict");
- if (result == NULL) {
- PyErr_Clear();
- result = PyString_FromStringAndSize(s, len);
- }
- }
-#else
- result = PyString_FromStringAndSize(value->bytes, value->length);
-#endif
+ result = fromTclStringAndSize(value->bytes, value->length);
return result;
}
@@ -1169,7 +1251,7 @@ static Tcl_Obj**
Tkapp_CallArgs(PyObject *args, Tcl_Obj** objStore, int *pobjc)
{
Tcl_Obj **objv = objStore;
- int objc = 0, i;
+ Py_ssize_t objc = 0, i;
if (args == NULL)
/* do nothing */;
@@ -1184,7 +1266,11 @@ Tkapp_CallArgs(PyObject *args, Tcl_Obj** objStore, int *pobjc)
objc = PyTuple_Size(args);
if (objc > ARGSZ) {
- objv = (Tcl_Obj **)ckalloc(objc * sizeof(char *));
+ if (!CHECK_SIZE(objc, sizeof(Tcl_Obj *))) {
+ PyErr_SetString(PyExc_OverflowError, "tuple is too long");
+ return NULL;
+ }
+ objv = (Tcl_Obj **)ckalloc(((size_t)objc) * sizeof(Tcl_Obj *));
if (objv == NULL) {
PyErr_NoMemory();
objc = 0;
@@ -1221,8 +1307,8 @@ static PyObject*
Tkapp_CallResult(TkappObject *self)
{
PyObject *res = NULL;
+ Tcl_Obj *value = Tcl_GetObjResult(self->interp);
if(self->wantobjects) {
- Tcl_Obj *value = Tcl_GetObjResult(self->interp);
/* Not sure whether the IncrRef is necessary, but something
may overwrite the interpreter result while we are
converting it. */
@@ -1230,33 +1316,9 @@ Tkapp_CallResult(TkappObject *self)
res = FromObj((PyObject*)self, value);
Tcl_DecrRefCount(value);
} else {
- const char *s = Tcl_GetStringResult(self->interp);
- const char *p = s;
-
- /* If the result contains any bytes with the top bit set,
- it's UTF-8 and we should decode it to Unicode */
-#ifdef Py_USING_UNICODE
- while (*p != '\0') {
- if (*p & 0x80)
- break;
- p++;
- }
-
- if (*p == '\0')
- res = PyString_FromStringAndSize(s, (int)(p-s));
- else {
- /* Convert UTF-8 to Unicode string */
- p = strchr(p, '\0');
- res = PyUnicode_DecodeUTF8(s, (int)(p-s), "strict");
- if (res == NULL) {
- PyErr_Clear();
- res = PyString_FromStringAndSize(s, (int)(p-s));
- }
- }
-#else
- p = strchr(p, '\0');
- res = PyString_FromStringAndSize(s, (int)(p-s));
-#endif
+ int len;
+ const char *s = Tcl_GetStringFromObj(value, &len);
+ res = fromTclStringAndSize(s, len);
}
return res;
}
@@ -1434,6 +1496,7 @@ Tkapp_Eval(PyObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "s:eval", &script))
return NULL;
+ CHECK_STRING_LENGTH(script);
CHECK_TCL_APPARTMENT;
ENTER_TCL
@@ -1480,6 +1543,7 @@ Tkapp_EvalFile(PyObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "s:evalfile", &fileName))
return NULL;
+ CHECK_STRING_LENGTH(fileName);
CHECK_TCL_APPARTMENT;
ENTER_TCL
@@ -1501,9 +1565,10 @@ Tkapp_Record(PyObject *self, PyObject *args)
PyObject *res = NULL;
int err;
- if (!PyArg_ParseTuple(args, "s", &script))
+ if (!PyArg_ParseTuple(args, "s:record", &script))
return NULL;
+ CHECK_STRING_LENGTH(script);
CHECK_TCL_APPARTMENT;
ENTER_TCL
@@ -1524,6 +1589,7 @@ Tkapp_AddErrorInfo(PyObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "s:adderrorinfo", &msg))
return NULL;
+ CHECK_STRING_LENGTH(msg);
CHECK_TCL_APPARTMENT;
ENTER_TCL
@@ -1559,16 +1625,28 @@ typedef struct VarEvent {
static int
varname_converter(PyObject *in, void *_out)
{
+ char *s;
char **out = (char**)_out;
if (PyString_Check(in)) {
- *out = PyString_AsString(in);
+ if (PyString_Size(in) > INT_MAX) {
+ PyErr_SetString(PyExc_OverflowError, "string is too long");
+ return 0;
+ }
+ s = PyString_AsString(in);
+ if (strlen(s) != PyString_Size(in)) {
+ PyErr_SetString(PyExc_ValueError, "null character in string");
+ return 0;
+ }
+ *out = s;
return 1;
}
if (PyTclObject_Check(in)) {
*out = PyTclObject_TclString(in);
return 1;
}
- /* XXX: Should give diagnostics. */
+ PyErr_Format(PyExc_TypeError,
+ "must be str or Tcl_Obj, not %.50s",
+ in->ob_type->tp_name);
return 0;
}
@@ -1654,8 +1732,11 @@ SetVar(PyObject *self, PyObject *args, int flags)
PyObject *res = NULL;
Tcl_Obj *newval, *ok;
- if (PyArg_ParseTuple(args, "O&O:setvar",
- varname_converter, &name1, &newValue)) {
+ switch (PyTuple_GET_SIZE(args)) {
+ case 2:
+ if (!PyArg_ParseTuple(args, "O&O:setvar",
+ varname_converter, &name1, &newValue))
+ return NULL;
/* XXX Acquire tcl lock??? */
newval = AsObj(newValue);
if (newval == NULL)
@@ -1671,27 +1752,29 @@ SetVar(PyObject *self, PyObject *args, int flags)
Py_INCREF(res);
}
LEAVE_OVERLAP_TCL
- }
- else {
- PyErr_Clear();
- if (PyArg_ParseTuple(args, "ssO:setvar",
- &name1, &name2, &newValue)) {
- /* XXX must hold tcl lock already??? */
- newval = AsObj(newValue);
- ENTER_TCL
- ok = Tcl_SetVar2Ex(Tkapp_Interp(self), name1, name2, newval, flags);
- ENTER_OVERLAP
- if (!ok)
- Tkinter_Error(self);
- else {
- res = Py_None;
- Py_INCREF(res);
- }
- LEAVE_OVERLAP_TCL
- }
- else {
+ break;
+ case 3:
+ if (!PyArg_ParseTuple(args, "ssO:setvar",
+ &name1, &name2, &newValue))
return NULL;
+ CHECK_STRING_LENGTH(name1);
+ CHECK_STRING_LENGTH(name2);
+ /* XXX must hold tcl lock already??? */
+ newval = AsObj(newValue);
+ ENTER_TCL
+ ok = Tcl_SetVar2Ex(Tkapp_Interp(self), name1, name2, newval, flags);
+ ENTER_OVERLAP
+ if (!ok)
+ Tkinter_Error(self);
+ else {
+ res = Py_None;
+ Py_INCREF(res);
}
+ LEAVE_OVERLAP_TCL
+ break;
+ default:
+ PyErr_SetString(PyExc_TypeError, "setvar requires 2 to 3 arguments");
+ return NULL;
}
return res;
}
@@ -1721,6 +1804,7 @@ GetVar(PyObject *self, PyObject *args, int flags)
varname_converter, &name1, &name2))
return NULL;
+ CHECK_STRING_LENGTH(name2);
ENTER_TCL
tres = Tcl_GetVar2Ex(Tkapp_Interp(self), name1, name2, flags);
ENTER_OVERLAP
@@ -1731,7 +1815,9 @@ GetVar(PyObject *self, PyObject *args, int flags)
res = FromObj(self, tres);
}
else {
- res = PyString_FromString(Tcl_GetString(tres));
+ int len;
+ char *s = Tcl_GetStringFromObj(tres, &len);
+ res = PyString_FromStringAndSize(s, len);
}
}
LEAVE_OVERLAP_TCL
@@ -1762,6 +1848,8 @@ UnsetVar(PyObject *self, PyObject *args, int flags)
if (!PyArg_ParseTuple(args, "s|s:unsetvar", &name1, &name2))
return NULL;
+ CHECK_STRING_LENGTH(name1);
+ CHECK_STRING_LENGTH(name2);
ENTER_TCL
code = Tcl_UnsetVar2(Tkapp_Interp(self), name1, name2, flags);
ENTER_OVERLAP
@@ -1806,6 +1894,7 @@ Tkapp_GetInt(PyObject *self, PyObject *args)
}
if (!PyArg_ParseTuple(args, "s:getint", &s))
return NULL;
+ CHECK_STRING_LENGTH(s);
if (Tcl_GetInt(Tkapp_Interp(self), s, &v) == TCL_ERROR)
return Tkinter_Error(self);
return Py_BuildValue("i", v);
@@ -1826,6 +1915,7 @@ Tkapp_GetDouble(PyObject *self, PyObject *args)
}
if (!PyArg_ParseTuple(args, "s:getdouble", &s))
return NULL;
+ CHECK_STRING_LENGTH(s);
if (Tcl_GetDouble(Tkapp_Interp(self), s, &v) == TCL_ERROR)
return Tkinter_Error(self);
return Py_BuildValue("d", v);
@@ -1846,6 +1936,7 @@ Tkapp_GetBoolean(PyObject *self, PyObject *args)
}
if (!PyArg_ParseTuple(args, "s:getboolean", &s))
return NULL;
+ CHECK_STRING_LENGTH(s);
if (Tcl_GetBoolean(Tkapp_Interp(self), s, &v) == TCL_ERROR)
return Tkinter_Error(self);
return PyBool_FromLong(v);
@@ -1861,6 +1952,7 @@ Tkapp_ExprString(PyObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "s:exprstring", &s))
return NULL;
+ CHECK_STRING_LENGTH(s);
CHECK_TCL_APPARTMENT;
ENTER_TCL
@@ -1869,7 +1961,7 @@ Tkapp_ExprString(PyObject *self, PyObject *args)
if (retval == TCL_ERROR)
res = Tkinter_Error(self);
else
- res = Py_BuildValue("s", Tkapp_Result(self));
+ res = PyString_FromString(Tkapp_Result(self));
LEAVE_OVERLAP_TCL
return res;
}
@@ -1885,6 +1977,7 @@ Tkapp_ExprLong(PyObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "s:exprlong", &s))
return NULL;
+ CHECK_STRING_LENGTH(s);
CHECK_TCL_APPARTMENT;
ENTER_TCL
@@ -1908,6 +2001,7 @@ Tkapp_ExprDouble(PyObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "s:exprdouble", &s))
return NULL;
+ CHECK_STRING_LENGTH(s);
CHECK_TCL_APPARTMENT;
PyFPE_START_PROTECT("Tkapp_ExprDouble", return 0)
ENTER_TCL
@@ -1932,6 +2026,7 @@ Tkapp_ExprBoolean(PyObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "s:exprboolean", &s))
return NULL;
+ CHECK_STRING_LENGTH(s);
CHECK_TCL_APPARTMENT;
ENTER_TCL
retval = Tcl_ExprBoolean(Tkapp_Interp(self), s, &v);
@@ -1952,19 +2047,39 @@ Tkapp_SplitList(PyObject *self, PyObject *args)
char *list;
int argc;
char **argv;
- PyObject *v;
+ PyObject *arg, *v;
int i;
- if (PyTuple_Size(args) == 1) {
- v = PyTuple_GetItem(args, 0);
- if (PyTuple_Check(v)) {
- Py_INCREF(v);
- return v;
+ if (!PyArg_ParseTuple(args, "O:splitlist", &arg))
+ return NULL;
+ if (PyTclObject_Check(arg)) {
+ int objc;
+ Tcl_Obj **objv;
+ if (Tcl_ListObjGetElements(Tkapp_Interp(self),
+ ((PyTclObject*)arg)->value,
+ &objc, &objv) == TCL_ERROR) {
+ return Tkinter_Error(self);
}
+ if (!(v = PyTuple_New(objc)))
+ return NULL;
+ for (i = 0; i < objc; i++) {
+ PyObject *s = FromObj(self, objv[i]);
+ if (!s || PyTuple_SetItem(v, i, s)) {
+ Py_DECREF(v);
+ return NULL;
+ }
+ }
+ return v;
}
+ if (PyTuple_Check(arg)) {
+ Py_INCREF(arg);
+ return arg;
+ }
+
if (!PyArg_ParseTuple(args, "et:splitlist", "utf-8", &list))
return NULL;
+ CHECK_STRING_LENGTH(list);
if (Tcl_SplitList(Tkapp_Interp(self), list,
&argc, &argv) == TCL_ERROR) {
PyMem_Free(list);
@@ -1992,18 +2107,41 @@ Tkapp_SplitList(PyObject *self, PyObject *args)
static PyObject *
Tkapp_Split(PyObject *self, PyObject *args)
{
- PyObject *v;
+ PyObject *arg, *v;
char *list;
- if (PyTuple_Size(args) == 1) {
- PyObject* o = PyTuple_GetItem(args, 0);
- if (PyTuple_Check(o)) {
- o = SplitObj(o);
- return o;
+ if (!PyArg_ParseTuple(args, "O:split", &arg))
+ return NULL;
+ if (PyTclObject_Check(arg)) {
+ Tcl_Obj *value = ((PyTclObject*)arg)->value;
+ int objc;
+ Tcl_Obj **objv;
+ int i;
+ if (Tcl_ListObjGetElements(Tkapp_Interp(self), value,
+ &objc, &objv) == TCL_ERROR) {
+ return FromObj(self, value);
}
+ if (objc == 0)
+ return PyString_FromString("");
+ if (objc == 1)
+ return FromObj(self, objv[0]);
+ if (!(v = PyTuple_New(objc)))
+ return NULL;
+ for (i = 0; i < objc; i++) {
+ PyObject *s = FromObj(self, objv[i]);
+ if (!s || PyTuple_SetItem(v, i, s)) {
+ Py_DECREF(v);
+ return NULL;
+ }
+ }
+ return v;
}
+ if (PyTuple_Check(arg))
+ return SplitObj(arg);
+
if (!PyArg_ParseTuple(args, "et:split", "utf-8", &list))
return NULL;
+ CHECK_STRING_LENGTH(list);
v = Split(list);
PyMem_Free(list);
return v;
@@ -2065,7 +2203,7 @@ PythonCmd(ClientData clientData, Tcl_Interp *interp, int argc, char *argv[])
return PythonCmd_Error(interp);
for (i = 0; i < (argc - 1); i++) {
- PyObject *s = PyString_FromString(argv[i + 1]);
+ PyObject *s = fromTclString(argv[i + 1]);
if (!s || PyTuple_SetItem(arg, i, s)) {
Py_DECREF(arg);
return PythonCmd_Error(interp);
@@ -2149,6 +2287,7 @@ Tkapp_CreateCommand(PyObject *selfptr, PyObject *args)
if (!PyArg_ParseTuple(args, "sO:createcommand", &cmdName, &func))
return NULL;
+ CHECK_STRING_LENGTH(cmdName);
if (!PyCallable_Check(func)) {
PyErr_SetString(PyExc_TypeError, "command not callable");
return NULL;
@@ -2212,6 +2351,7 @@ Tkapp_DeleteCommand(PyObject *selfptr, PyObject *args)
if (!PyArg_ParseTuple(args, "s:deletecommand", &cmdName))
return NULL;
+ CHECK_STRING_LENGTH(cmdName);
#ifdef WITH_THREAD
if (self->threaded && self->thread_id != Tcl_GetCurrentThread()) {
@@ -2721,7 +2861,7 @@ Tkapp_InterpAddr(PyObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, ":interpaddr"))
return NULL;
- return PyInt_FromLong((long)Tkapp_Interp(self));
+ return PyLong_FromVoidPtr(Tkapp_Interp(self));
}
static PyObject *
@@ -3020,6 +3160,10 @@ Tkinter_Create(PyObject *self, PyObject *args)
&interactive, &wantobjects, &wantTk,
&sync, &use))
return NULL;
+ CHECK_STRING_LENGTH(screenName);
+ CHECK_STRING_LENGTH(baseName);
+ CHECK_STRING_LENGTH(className);
+ CHECK_STRING_LENGTH(use);
return (PyObject *) Tkapp_New(screenName, baseName, className,
interactive, wantobjects, wantTk,
diff --git a/Modules/arraymodule.c b/Modules/arraymodule.c
index a860f57..5a92862 100644
--- a/Modules/arraymodule.c
+++ b/Modules/arraymodule.c
@@ -1533,6 +1533,19 @@ array_reduce(arrayobject *array)
PyDoc_STRVAR(reduce_doc, "Return state information for pickling.");
static PyObject *
+array_sizeof(arrayobject *self, PyObject *unused)
+{
+ Py_ssize_t res;
+ res = sizeof(arrayobject) + self->allocated * self->ob_descr->itemsize;
+ return PyLong_FromSsize_t(res);
+}
+
+PyDoc_STRVAR(sizeof_doc,
+"__sizeof__() -> int\n\
+\n\
+Size of the array in memory, in bytes.");
+
+static PyObject *
array_get_typecode(arrayobject *a, void *closure)
{
char tc = a->ob_descr->typecode;
@@ -1606,6 +1619,8 @@ static PyMethodDef array_methods[] = {
#endif
{"write", (PyCFunction)array_tofile_as_write, METH_O,
tofile_doc},
+ {"__sizeof__", (PyCFunction)array_sizeof, METH_NOARGS,
+ sizeof_doc},
{NULL, NULL} /* sentinel */
};
diff --git a/Modules/audioop.c b/Modules/audioop.c
index fc79cf5..8a3a275 100644
--- a/Modules/audioop.c
+++ b/Modules/audioop.c
@@ -24,6 +24,21 @@ typedef short PyInt16;
#endif
#endif
+static const int maxvals[] = {0, 0x7F, 0x7FFF, 0x7FFFFF, 0x7FFFFFFF};
+static const int minvals[] = {0, -0x80, -0x8000, -0x800000, -0x80000000};
+static const unsigned int masks[] = {0, 0xFF, 0xFFFF, 0xFFFFFF, 0xFFFFFFFF};
+
+static int
+fbound(double val, double minval, double maxval)
+{
+ if (val > maxval)
+ val = maxval;
+ else if (val < minval + 1)
+ val = minval;
+ return val;
+}
+
+
/* Code shamelessly stolen from sox, 12.17.7, g711.c
** (c) Craig Reese, Joe Campbell and Jeff Poskanzer 1989 */
@@ -345,7 +360,7 @@ audioop_max(PyObject *self, PyObject *args)
signed char *cp;
int len, size, val = 0;
int i;
- int max = 0;
+ unsigned int absval, max = 0;
if ( !PyArg_ParseTuple(args, "s#i:max", &cp, &len, &size) )
return 0;
@@ -355,10 +370,14 @@ audioop_max(PyObject *self, PyObject *args)
if ( size == 1 ) val = (int)*CHARP(cp, i);
else if ( size == 2 ) val = (int)*SHORTP(cp, i);
else if ( size == 4 ) val = (int)*LONGP(cp, i);
- if ( val < 0 ) val = (-val);
- if ( val > max ) max = val;
+ if (val < 0) absval = (-val);
+ else absval = val;
+ if (absval > max) max = absval;
}
- return PyInt_FromLong(max);
+ if (max <= INT_MAX)
+ return PyInt_FromLong(max);
+ else
+ return PyLong_FromUnsignedLong(max);
}
static PyObject *
@@ -367,7 +386,7 @@ audioop_minmax(PyObject *self, PyObject *args)
signed char *cp;
int len, size, val = 0;
int i;
- int min = 0x7fffffff, max = -0x7fffffff;
+ int min = 0x7fffffff, max = -0x80000000;
if (!PyArg_ParseTuple(args, "s#i:minmax", &cp, &len, &size))
return NULL;
@@ -404,7 +423,7 @@ audioop_avg(PyObject *self, PyObject *args)
if ( len == 0 )
val = 0;
else
- val = (int)(avg / (double)(len/size));
+ val = (int)floor(avg / (double)(len/size));
return PyInt_FromLong(val);
}
@@ -414,6 +433,7 @@ audioop_rms(PyObject *self, PyObject *args)
signed char *cp;
int len, size, val = 0;
int i;
+ unsigned int res;
double sum_squares = 0.0;
if ( !PyArg_ParseTuple(args, "s#i:rms", &cp, &len, &size) )
@@ -427,10 +447,13 @@ audioop_rms(PyObject *self, PyObject *args)
sum_squares += (double)val*(double)val;
}
if ( len == 0 )
- val = 0;
+ res = 0;
else
- val = (int)sqrt(sum_squares / (double)(len/size));
- return PyInt_FromLong(val);
+ res = (unsigned int)sqrt(sum_squares / (double)(len/size));
+ if (res <= INT_MAX)
+ return PyInt_FromLong(res);
+ else
+ return PyLong_FromUnsignedLong(res);
}
static double _sum2(short *a, short *b, int len)
@@ -620,52 +643,49 @@ audioop_avgpp(PyObject *self, PyObject *args)
int len, size, val = 0, prevval = 0, prevextremevalid = 0,
prevextreme = 0;
int i;
- double avg = 0.0;
- int diff, prevdiff, extremediff, nextreme = 0;
+ double sum = 0.0;
+ unsigned int avg;
+ int diff, prevdiff, nextreme = 0;
if ( !PyArg_ParseTuple(args, "s#i:avgpp", &cp, &len, &size) )
return 0;
if (!audioop_check_parameters(len, size))
return NULL;
- /* Compute first delta value ahead. Also automatically makes us
- ** skip the first extreme value
- */
+ if (len <= size*2)
+ return PyInt_FromLong(0);
if ( size == 1 ) prevval = (int)*CHARP(cp, 0);
else if ( size == 2 ) prevval = (int)*SHORTP(cp, 0);
else if ( size == 4 ) prevval = (int)*LONGP(cp, 0);
- if ( size == 1 ) val = (int)*CHARP(cp, size);
- else if ( size == 2 ) val = (int)*SHORTP(cp, size);
- else if ( size == 4 ) val = (int)*LONGP(cp, size);
- prevdiff = val - prevval;
-
+ prevdiff = 17; /* Anything != 0, 1 */
for ( i=size; i<len; i+= size) {
if ( size == 1 ) val = (int)*CHARP(cp, i);
else if ( size == 2 ) val = (int)*SHORTP(cp, i);
else if ( size == 4 ) val = (int)*LONGP(cp, i);
- diff = val - prevval;
- if ( diff*prevdiff < 0 ) {
- /* Derivative changed sign. Compute difference to last
- ** extreme value and remember.
- */
- if ( prevextremevalid ) {
- extremediff = prevval - prevextreme;
- if ( extremediff < 0 )
- extremediff = -extremediff;
- avg += extremediff;
- nextreme++;
+ if (val != prevval) {
+ diff = val < prevval;
+ if (prevdiff == !diff) {
+ /* Derivative changed sign. Compute difference to last
+ ** extreme value and remember.
+ */
+ if (prevextremevalid) {
+ sum += fabs((double)prevval - (double)prevextreme);
+ nextreme++;
+ }
+ prevextremevalid = 1;
+ prevextreme = prevval;
}
- prevextremevalid = 1;
- prevextreme = prevval;
- }
- prevval = val;
- if ( diff != 0 )
+ prevval = val;
prevdiff = diff;
+ }
}
if ( nextreme == 0 )
- val = 0;
+ avg = 0;
else
- val = (int)(avg / (double)nextreme);
- return PyInt_FromLong(val);
+ avg = (unsigned int)(sum / (double)nextreme);
+ if (avg <= INT_MAX)
+ return PyInt_FromLong(avg);
+ else
+ return PyLong_FromUnsignedLong(avg);
}
static PyObject *
@@ -675,48 +695,50 @@ audioop_maxpp(PyObject *self, PyObject *args)
int len, size, val = 0, prevval = 0, prevextremevalid = 0,
prevextreme = 0;
int i;
- int max = 0;
- int diff, prevdiff, extremediff;
+ unsigned int max = 0, extremediff;
+ int diff, prevdiff;
if ( !PyArg_ParseTuple(args, "s#i:maxpp", &cp, &len, &size) )
return 0;
if (!audioop_check_parameters(len, size))
return NULL;
- /* Compute first delta value ahead. Also automatically makes us
- ** skip the first extreme value
- */
+ if (len <= size)
+ return PyInt_FromLong(0);
if ( size == 1 ) prevval = (int)*CHARP(cp, 0);
else if ( size == 2 ) prevval = (int)*SHORTP(cp, 0);
else if ( size == 4 ) prevval = (int)*LONGP(cp, 0);
- if ( size == 1 ) val = (int)*CHARP(cp, size);
- else if ( size == 2 ) val = (int)*SHORTP(cp, size);
- else if ( size == 4 ) val = (int)*LONGP(cp, size);
- prevdiff = val - prevval;
-
+ prevdiff = 17; /* Anything != 0, 1 */
for ( i=size; i<len; i+= size) {
if ( size == 1 ) val = (int)*CHARP(cp, i);
else if ( size == 2 ) val = (int)*SHORTP(cp, i);
else if ( size == 4 ) val = (int)*LONGP(cp, i);
- diff = val - prevval;
- if ( diff*prevdiff < 0 ) {
- /* Derivative changed sign. Compute difference to
- ** last extreme value and remember.
- */
- if ( prevextremevalid ) {
- extremediff = prevval - prevextreme;
- if ( extremediff < 0 )
- extremediff = -extremediff;
- if ( extremediff > max )
- max = extremediff;
+ if (val != prevval) {
+ diff = val < prevval;
+ if (prevdiff == !diff) {
+ /* Derivative changed sign. Compute difference to
+ ** last extreme value and remember.
+ */
+ if (prevextremevalid) {
+ if (prevval < prevextreme)
+ extremediff = (unsigned int)prevextreme -
+ (unsigned int)prevval;
+ else
+ extremediff = (unsigned int)prevval -
+ (unsigned int)prevextreme;
+ if ( extremediff > max )
+ max = extremediff;
+ }
+ prevextremevalid = 1;
+ prevextreme = prevval;
}
- prevextremevalid = 1;
- prevextreme = prevval;
- }
- prevval = val;
- if ( diff != 0 )
+ prevval = val;
prevdiff = diff;
+ }
}
- return PyInt_FromLong(max);
+ if (max <= INT_MAX)
+ return PyInt_FromLong(max);
+ else
+ return PyLong_FromUnsignedLong(max);
}
static PyObject *
@@ -749,7 +771,7 @@ audioop_mul(PyObject *self, PyObject *args)
{
signed char *cp, *ncp;
int len, size, val = 0;
- double factor, fval, maxval;
+ double factor, fval, maxval, minval;
PyObject *rv;
int i;
@@ -758,13 +780,8 @@ audioop_mul(PyObject *self, PyObject *args)
if (!audioop_check_parameters(len, size))
return NULL;
- if ( size == 1 ) maxval = (double) 0x7f;
- else if ( size == 2 ) maxval = (double) 0x7fff;
- else if ( size == 4 ) maxval = (double) 0x7fffffff;
- else {
- PyErr_SetString(AudioopError, "Size should be 1, 2 or 4");
- return 0;
- }
+ maxval = (double) maxvals[size];
+ minval = (double) minvals[size];
rv = PyString_FromStringAndSize(NULL, len);
if ( rv == 0 )
@@ -777,9 +794,7 @@ audioop_mul(PyObject *self, PyObject *args)
else if ( size == 2 ) val = (int)*SHORTP(cp, i);
else if ( size == 4 ) val = (int)*LONGP(cp, i);
fval = (double)val*factor;
- if ( fval > maxval ) fval = maxval;
- else if ( fval < -maxval ) fval = -maxval;
- val = (int)fval;
+ val = (int)floor(fbound(fval, minval, maxval));
if ( size == 1 ) *CHARP(ncp, i) = (signed char)val;
else if ( size == 2 ) *SHORTP(ncp, i) = (short)val;
else if ( size == 4 ) *LONGP(ncp, i) = (Py_Int32)val;
@@ -792,7 +807,7 @@ audioop_tomono(PyObject *self, PyObject *args)
{
signed char *cp, *ncp;
int len, size, val1 = 0, val2 = 0;
- double fac1, fac2, fval, maxval;
+ double fac1, fac2, fval, maxval, minval;
PyObject *rv;
int i;
@@ -806,13 +821,8 @@ audioop_tomono(PyObject *self, PyObject *args)
return NULL;
}
- if ( size == 1 ) maxval = (double) 0x7f;
- else if ( size == 2 ) maxval = (double) 0x7fff;
- else if ( size == 4 ) maxval = (double) 0x7fffffff;
- else {
- PyErr_SetString(AudioopError, "Size should be 1, 2 or 4");
- return 0;
- }
+ maxval = (double) maxvals[size];
+ minval = (double) minvals[size];
rv = PyString_FromStringAndSize(NULL, len/2);
if ( rv == 0 )
@@ -828,9 +838,7 @@ audioop_tomono(PyObject *self, PyObject *args)
else if ( size == 2 ) val2 = (int)*SHORTP(cp, i+2);
else if ( size == 4 ) val2 = (int)*LONGP(cp, i+4);
fval = (double)val1*fac1 + (double)val2*fac2;
- if ( fval > maxval ) fval = maxval;
- else if ( fval < -maxval ) fval = -maxval;
- val1 = (int)fval;
+ val1 = (int)floor(fbound(fval, minval, maxval));
if ( size == 1 ) *CHARP(ncp, i/2) = (signed char)val1;
else if ( size == 2 ) *SHORTP(ncp, i/2) = (short)val1;
else if ( size == 4 ) *LONGP(ncp, i/2)= (Py_Int32)val1;
@@ -843,7 +851,7 @@ audioop_tostereo(PyObject *self, PyObject *args)
{
signed char *cp, *ncp;
int len, size, val1, val2, val = 0;
- double fac1, fac2, fval, maxval;
+ double fac1, fac2, fval, maxval, minval;
PyObject *rv;
int i;
@@ -853,13 +861,8 @@ audioop_tostereo(PyObject *self, PyObject *args)
if (!audioop_check_parameters(len, size))
return NULL;
- if ( size == 1 ) maxval = (double) 0x7f;
- else if ( size == 2 ) maxval = (double) 0x7fff;
- else if ( size == 4 ) maxval = (double) 0x7fffffff;
- else {
- PyErr_SetString(AudioopError, "Size should be 1, 2 or 4");
- return 0;
- }
+ maxval = (double) maxvals[size];
+ minval = (double) minvals[size];
if (len > INT_MAX/2) {
PyErr_SetString(PyExc_MemoryError,
@@ -879,14 +882,10 @@ audioop_tostereo(PyObject *self, PyObject *args)
else if ( size == 4 ) val = (int)*LONGP(cp, i);
fval = (double)val*fac1;
- if ( fval > maxval ) fval = maxval;
- else if ( fval < -maxval ) fval = -maxval;
- val1 = (int)fval;
+ val1 = (int)floor(fbound(fval, minval, maxval));
fval = (double)val*fac2;
- if ( fval > maxval ) fval = maxval;
- else if ( fval < -maxval ) fval = -maxval;
- val2 = (int)fval;
+ val2 = (int)floor(fbound(fval, minval, maxval));
if ( size == 1 ) *CHARP(ncp, i*2) = (signed char)val1;
else if ( size == 2 ) *SHORTP(ncp, i*2) = (short)val1;
@@ -903,7 +902,7 @@ static PyObject *
audioop_add(PyObject *self, PyObject *args)
{
signed char *cp1, *cp2, *ncp;
- int len1, len2, size, val1 = 0, val2 = 0, maxval, newval;
+ int len1, len2, size, val1 = 0, val2 = 0, minval, maxval, newval;
PyObject *rv;
int i;
@@ -917,13 +916,8 @@ audioop_add(PyObject *self, PyObject *args)
return 0;
}
- if ( size == 1 ) maxval = 0x7f;
- else if ( size == 2 ) maxval = 0x7fff;
- else if ( size == 4 ) maxval = 0x7fffffff;
- else {
- PyErr_SetString(AudioopError, "Size should be 1, 2 or 4");
- return 0;
- }
+ maxval = maxvals[size];
+ minval = minvals[size];
rv = PyString_FromStringAndSize(NULL, len1);
if ( rv == 0 )
@@ -939,12 +933,19 @@ audioop_add(PyObject *self, PyObject *args)
else if ( size == 2 ) val2 = (int)*SHORTP(cp2, i);
else if ( size == 4 ) val2 = (int)*LONGP(cp2, i);
- newval = val1 + val2;
- /* truncate in case of overflow */
- if (newval > maxval) newval = maxval;
- else if (newval < -maxval) newval = -maxval;
- else if (size == 4 && (newval^val1) < 0 && (newval^val2) < 0)
- newval = val1 > 0 ? maxval : - maxval;
+ if (size < 4) {
+ newval = val1 + val2;
+ /* truncate in case of overflow */
+ if (newval > maxval)
+ newval = maxval;
+ else if (newval < minval)
+ newval = minval;
+ }
+ else {
+ double fval = (double)val1 + (double)val2;
+ /* truncate in case of overflow */
+ newval = (int)floor(fbound(fval, minval, maxval));
+ }
if ( size == 1 ) *CHARP(ncp, i) = (signed char)newval;
else if ( size == 2 ) *SHORTP(ncp, i) = (short)newval;
@@ -957,7 +958,8 @@ static PyObject *
audioop_bias(PyObject *self, PyObject *args)
{
signed char *cp, *ncp;
- int len, size, val = 0;
+ int len, size;
+ unsigned int val = 0, mask;
PyObject *rv;
int i;
int bias;
@@ -974,15 +976,20 @@ audioop_bias(PyObject *self, PyObject *args)
return 0;
ncp = (signed char *)PyString_AsString(rv);
+ mask = masks[size];
for ( i=0; i < len; i += size ) {
- if ( size == 1 ) val = (int)*CHARP(cp, i);
- else if ( size == 2 ) val = (int)*SHORTP(cp, i);
- else if ( size == 4 ) val = (int)*LONGP(cp, i);
+ if ( size == 1 ) val = (unsigned int)(unsigned char)*CHARP(cp, i);
+ else if ( size == 2 ) val = (unsigned int)(unsigned short)*SHORTP(cp, i);
+ else if ( size == 4 ) val = (unsigned int)(Py_UInt32)*LONGP(cp, i);
+
+ val += (unsigned int)bias;
+ /* wrap around in case of overflow */
+ val &= mask;
- if ( size == 1 ) *CHARP(ncp, i) = (signed char)(val+bias);
- else if ( size == 2 ) *SHORTP(ncp, i) = (short)(val+bias);
- else if ( size == 4 ) *LONGP(ncp, i) = (Py_Int32)(val+bias);
+ if ( size == 1 ) *CHARP(ncp, i) = (signed char)(unsigned char)val;
+ else if ( size == 2 ) *SHORTP(ncp, i) = (short)(unsigned short)val;
+ else if ( size == 4 ) *LONGP(ncp, i) = (Py_Int32)(Py_UInt32)val;
}
return rv;
}
@@ -1009,15 +1016,15 @@ audioop_reverse(PyObject *self, PyObject *args)
ncp = (unsigned char *)PyString_AsString(rv);
for ( i=0; i < len; i += size ) {
- if ( size == 1 ) val = ((int)*CHARP(cp, i)) << 8;
- else if ( size == 2 ) val = (int)*SHORTP(cp, i);
- else if ( size == 4 ) val = ((int)*LONGP(cp, i)) >> 16;
+ if ( size == 1 ) val = ((int)*CHARP(cp, i)) << 24;
+ else if ( size == 2 ) val = ((int)*SHORTP(cp, i)) << 16;
+ else if ( size == 4 ) val = (int)*LONGP(cp, i);
j = len - i - size;
- if ( size == 1 ) *CHARP(ncp, j) = (signed char)(val >> 8);
- else if ( size == 2 ) *SHORTP(ncp, j) = (short)(val);
- else if ( size == 4 ) *LONGP(ncp, j) = (Py_Int32)(val<<16);
+ if ( size == 1 ) *CHARP(ncp, j) = (signed char)(val >> 24);
+ else if ( size == 2 ) *SHORTP(ncp, j) = (short)(val >> 16);
+ else if ( size == 4 ) *LONGP(ncp, j) = (Py_Int32)val;
}
return rv;
}
@@ -1051,13 +1058,13 @@ audioop_lin2lin(PyObject *self, PyObject *args)
ncp = (unsigned char *)PyString_AsString(rv);
for ( i=0, j=0; i < len; i += size, j += size2 ) {
- if ( size == 1 ) val = ((int)*CHARP(cp, i)) << 8;
- else if ( size == 2 ) val = (int)*SHORTP(cp, i);
- else if ( size == 4 ) val = ((int)*LONGP(cp, i)) >> 16;
+ if ( size == 1 ) val = ((int)*CHARP(cp, i)) << 24;
+ else if ( size == 2 ) val = ((int)*SHORTP(cp, i)) << 16;
+ else if ( size == 4 ) val = (int)*LONGP(cp, i);
- if ( size2 == 1 ) *CHARP(ncp, j) = (signed char)(val >> 8);
- else if ( size2 == 2 ) *SHORTP(ncp, j) = (short)(val);
- else if ( size2 == 4 ) *LONGP(ncp, j) = (Py_Int32)(val<<16);
+ if ( size2 == 1 ) *CHARP(ncp, j) = (signed char)(val >> 24);
+ else if ( size2 == 2 ) *SHORTP(ncp, j) = (short)(val >> 16);
+ else if ( size2 == 4 ) *LONGP(ncp, j) = (Py_Int32)val;
}
return rv;
}
@@ -1120,6 +1127,10 @@ audioop_ratecv(PyObject *self, PyObject *args)
d = gcd(inrate, outrate);
inrate /= d;
outrate /= d;
+ /* divide weightA and weightB by their greatest common divisor */
+ d = gcd(weightA, weightB);
+ weightA /= d;
+ weightB /= d;
if ((size_t)nchannels > PY_SIZE_MAX/sizeof(int)) {
PyErr_SetString(PyExc_MemoryError,
@@ -1159,7 +1170,9 @@ audioop_ratecv(PyObject *self, PyObject *args)
}
/* str <- Space for the output buffer. */
- {
+ if (len == 0)
+ str = PyString_FromStringAndSize(NULL, 0);
+ else {
/* There are len input frames, so we need (mathematically)
ceiling(len*outrate/inrate) output frames, and each frame
requires bytes_per_frame bytes. Computing this
@@ -1174,12 +1187,11 @@ audioop_ratecv(PyObject *self, PyObject *args)
else
str = PyString_FromStringAndSize(NULL,
q * outrate * bytes_per_frame);
-
- if (str == NULL) {
- PyErr_SetString(PyExc_MemoryError,
- "not enough memory for output buffer");
- goto exit;
- }
+ }
+ if (str == NULL) {
+ PyErr_SetString(PyExc_MemoryError,
+ "not enough memory for output buffer");
+ goto exit;
}
ncp = PyString_AsString(str);
@@ -1214,32 +1226,32 @@ audioop_ratecv(PyObject *self, PyObject *args)
for (chan = 0; chan < nchannels; chan++) {
prev_i[chan] = cur_i[chan];
if (size == 1)
- cur_i[chan] = ((int)*CHARP(cp, 0)) << 8;
+ cur_i[chan] = ((int)*CHARP(cp, 0)) << 24;
else if (size == 2)
- cur_i[chan] = (int)*SHORTP(cp, 0);
+ cur_i[chan] = ((int)*SHORTP(cp, 0)) << 16;
else if (size == 4)
- cur_i[chan] = ((int)*LONGP(cp, 0)) >> 16;
+ cur_i[chan] = (int)*LONGP(cp, 0);
cp += size;
/* implements a simple digital filter */
- cur_i[chan] =
- (weightA * cur_i[chan] +
- weightB * prev_i[chan]) /
- (weightA + weightB);
+ cur_i[chan] = (int)(
+ ((double)weightA * (double)cur_i[chan] +
+ (double)weightB * (double)prev_i[chan]) /
+ ((double)weightA + (double)weightB));
}
len--;
d += outrate;
}
while (d >= 0) {
for (chan = 0; chan < nchannels; chan++) {
- cur_o = (prev_i[chan] * d +
- cur_i[chan] * (outrate - d)) /
- outrate;
+ cur_o = (int)(((double)prev_i[chan] * (double)d +
+ (double)cur_i[chan] * (double)(outrate - d)) /
+ (double)outrate);
if (size == 1)
- *CHARP(ncp, 0) = (signed char)(cur_o >> 8);
+ *CHARP(ncp, 0) = (signed char)(cur_o >> 24);
else if (size == 2)
- *SHORTP(ncp, 0) = (short)(cur_o);
+ *SHORTP(ncp, 0) = (short)(cur_o >> 16);
else if (size == 4)
- *LONGP(ncp, 0) = (Py_Int32)(cur_o<<16);
+ *LONGP(ncp, 0) = (Py_Int32)(cur_o);
ncp += size;
}
d -= inrate;
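
Note: the audioop.c hunks above replace the per-function saturation ladders with a shared clamp, using maxvals[]/minvals[] tables indexed by sample width plus an fbound() helper; both are defined earlier in Modules/audioop.c and do not appear in this patch. A minimal sketch of that clamp idea, assuming a simple table layout -- this is an illustration, not the exact upstream body:

    /* Illustrative stand-ins for the maxvals[]/minvals[] tables the hunks
     * reference; the index is the sample width in bytes (1, 2 or 4). */
    static const double maxvals_sketch[] = {0.0, 0x7f, 0x7fff, 0.0, 2147483647.0};
    static const double minvals_sketch[] = {0.0, -0x80, -0x8000, 0.0, -2147483648.0};

    /* Clamp a scaled sample into range; the callers in the hunks above then
     * apply floor() before casting back to the integer sample type. */
    static double
    fbound_sketch(double val, double minval, double maxval)
    {
        if (val > maxval)
            val = maxval;
        else if (val < minval)
            val = minval;
        return val;
    }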
diff --git a/Modules/binascii.c b/Modules/binascii.c
index 8334fe5..0b492d5 100644
--- a/Modules/binascii.c
+++ b/Modules/binascii.c
@@ -320,12 +320,10 @@ binascii_b2a_uu(PyObject *self, PyObject *args)
}
*ascii_data++ = '\n'; /* Append a courtesy newline */
- if (_PyString_Resize(&rv,
+ /* rv is cleared on error */
+ (void)_PyString_Resize(&rv,
(ascii_data -
- (unsigned char *)PyString_AS_STRING(rv))) < 0) {
- Py_DECREF(rv);
- rv = NULL;
- }
+ (unsigned char *)PyString_AS_STRING(rv)));
PyBuffer_Release(&pbin);
return rv;
}
@@ -452,10 +450,8 @@ binascii_a2b_base64(PyObject *self, PyObject *args)
** string instead; _PyString_Resize() won't do this for us.
*/
if (bin_len > 0) {
- if (_PyString_Resize(&rv, bin_len) < 0) {
- Py_DECREF(rv);
- rv = NULL;
- }
+ /* rv is cleared on error */
+ (void)_PyString_Resize(&rv, bin_len);
}
else {
Py_DECREF(rv);
@@ -522,12 +518,10 @@ binascii_b2a_base64(PyObject *self, PyObject *args)
}
*ascii_data++ = '\n'; /* Append a courtesy newline */
- if (_PyString_Resize(&rv,
+ /* rv is cleared on error */
+ (void)_PyString_Resize(&rv,
(ascii_data -
- (unsigned char *)PyString_AS_STRING(rv))) < 0) {
- Py_DECREF(rv);
- rv = NULL;
- }
+ (unsigned char *)PyString_AS_STRING(rv)));
PyBuffer_Release(&pbuf);
return rv;
}
@@ -601,13 +595,10 @@ binascii_a2b_hqx(PyObject *self, PyObject *args)
Py_DECREF(rv);
return NULL;
}
+ /* rv is cleared on error */
if (_PyString_Resize(&rv,
(bin_data -
- (unsigned char *)PyString_AS_STRING(rv))) < 0) {
- Py_DECREF(rv);
- rv = NULL;
- }
- if (rv) {
+ (unsigned char *)PyString_AS_STRING(rv))) == 0) {
PyObject *rrv = Py_BuildValue("Oi", rv, done);
PyBuffer_Release(&pascii);
Py_DECREF(rv);
@@ -672,12 +663,10 @@ binascii_rlecode_hqx(PyObject *self, PyObject *args)
}
}
}
- if (_PyString_Resize(&rv,
+ /* rv is cleared on error */
+ (void)_PyString_Resize(&rv,
(out_data -
- (unsigned char *)PyString_AS_STRING(rv))) < 0) {
- Py_DECREF(rv);
- rv = NULL;
- }
+ (unsigned char *)PyString_AS_STRING(rv)));
PyBuffer_Release(&pbuf);
return rv;
}
@@ -729,12 +718,10 @@ binascii_b2a_hqx(PyObject *self, PyObject *args)
leftchar <<= (6-leftbits);
*ascii_data++ = table_b2a_hqx[leftchar & 0x3f];
}
- if (_PyString_Resize(&rv,
+ /* rv is cleared on error */
+ (void)_PyString_Resize(&rv,
(ascii_data -
- (unsigned char *)PyString_AS_STRING(rv))) < 0) {
- Py_DECREF(rv);
- rv = NULL;
- }
+ (unsigned char *)PyString_AS_STRING(rv)));
PyBuffer_Release(&pbin);
return rv;
}
@@ -796,7 +783,7 @@ binascii_rledecode_hqx(PyObject *self, PyObject *args)
if ( --out_len_left < 0 ) { \
if ( out_len > PY_SSIZE_T_MAX / 2) return PyErr_NoMemory(); \
if (_PyString_Resize(&rv, 2*out_len) < 0) \
- { Py_DECREF(rv); PyBuffer_Release(&pin); return NULL; } \
+ { PyBuffer_Release(&pin); return NULL; } \
out_data = (unsigned char *)PyString_AS_STRING(rv) \
+ out_len; \
out_len_left = out_len-1; \
@@ -846,12 +833,10 @@ binascii_rledecode_hqx(PyObject *self, PyObject *args)
OUTBYTE(in_byte);
}
}
- if (_PyString_Resize(&rv,
+ /* rv is cleared on error */
+ (void)_PyString_Resize(&rv,
(out_data -
- (unsigned char *)PyString_AS_STRING(rv))) < 0) {
- Py_DECREF(rv);
- rv = NULL;
- }
+ (unsigned char *)PyString_AS_STRING(rv)));
PyBuffer_Release(&pin);
return rv;
}
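
Note: all of the binascii.c hunks above rely on the same fact, stated in the added comments: when _PyString_Resize() fails it already clears the passed-in pointer to NULL and sets an exception, so the old "Py_DECREF(rv); rv = NULL;" branches were redundant and the return value can be discarded. A hedged sketch of the resulting calling pattern (the helper name is hypothetical):

    #include "Python.h"

    /* Hypothetical helper showing the convention the hunks above rely on:
     * on failure _PyString_Resize(&rv, n) sets rv to NULL and raises, so
     * the caller can cast the result to void and simply return rv. */
    static PyObject *
    shrink_to_written_sketch(PyObject *rv, const char *end)
    {
        /* rv is cleared on error */
        (void)_PyString_Resize(&rv, end - PyString_AS_STRING(rv));
        return rv;   /* NULL (with exception set) if the resize failed */
    }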
diff --git a/Modules/bsddb.h b/Modules/bsddb.h
index 3199e94..8405afa 100644
--- a/Modules/bsddb.h
+++ b/Modules/bsddb.h
@@ -61,7 +61,7 @@
*
* http://www.python.org/peps/pep-0291.html
*
- * This module contains 6 types:
+ * This module contains 7 types:
*
* DB (Database)
* DBCursor (Database Cursor)
@@ -69,6 +69,7 @@
* DBTxn (An explicit database transaction)
* DBLock (A lock handle)
* DBSequence (Sequence)
+ * DBSite (Site)
*
* New datatypes:
*
@@ -109,7 +110,7 @@
#error "eek! DBVER can't handle minor versions > 9"
#endif
-#define PY_BSDDB_VERSION "4.8.4.2"
+#define PY_BSDDB_VERSION "5.3.0"
/* Python object definitions */
@@ -129,6 +130,9 @@ struct DBCursorObject; /* Forward declaration */
struct DBLogCursorObject; /* Forward declaration */
struct DBTxnObject; /* Forward declaration */
struct DBSequenceObject; /* Forward declaration */
+#if (DBVER >= 52)
+struct DBSiteObject; /* Forward declaration */
+#endif
typedef struct {
PyObject_HEAD
@@ -140,6 +144,9 @@ typedef struct {
struct DBObject *children_dbs;
struct DBTxnObject *children_txns;
struct DBLogCursorObject *children_logcursors;
+#if (DBVER >= 52)
+ struct DBSiteObject *children_sites;
+#endif
PyObject *private_obj;
PyObject *rep_transport;
PyObject *in_weakreflist; /* List of weak references */
@@ -154,15 +161,14 @@ typedef struct DBObject {
struct behaviourFlags moduleFlags;
struct DBTxnObject *txn;
struct DBCursorObject *children_cursors;
-#if (DBVER >=43)
struct DBSequenceObject *children_sequences;
-#endif
struct DBObject **sibling_prev_p;
struct DBObject *sibling_next;
struct DBObject **sibling_prev_p_txn;
struct DBObject *sibling_next_txn;
PyObject* associateCallback;
PyObject* btCompareCallback;
+ PyObject* dupCompareCallback;
int primaryDBType;
PyObject *private_obj;
PyObject *in_weakreflist; /* List of weak references */
@@ -207,6 +213,16 @@ typedef struct DBLogCursorObject {
PyObject *in_weakreflist; /* List of weak references */
} DBLogCursorObject;
+#if (DBVER >= 52)
+typedef struct DBSiteObject {
+ PyObject_HEAD
+ DB_SITE *site;
+ DBEnvObject *env;
+ struct DBSiteObject **sibling_prev_p;
+ struct DBSiteObject *sibling_next;
+ PyObject *in_weakreflist; /* List of weak references */
+} DBSiteObject;
+#endif
typedef struct {
PyObject_HEAD
@@ -216,7 +232,6 @@ typedef struct {
} DBLockObject;
-#if (DBVER >= 43)
typedef struct DBSequenceObject {
PyObject_HEAD
DB_SEQUENCE* sequence;
@@ -228,7 +243,6 @@ typedef struct DBSequenceObject {
struct DBSequenceObject *sibling_next_txn;
PyObject *in_weakreflist; /* List of weak references */
} DBSequenceObject;
-#endif
/* API structure for use by C code */
@@ -236,7 +250,7 @@ typedef struct DBSequenceObject {
/* To access the structure from an external module, use code like the
following (error checking missed out for clarity):
- // If you are using Python before 3.2:
+ // If you are using Python before 2.7:
BSDDB_api* bsddb_api;
PyObject* mod;
PyObject* cobj;
@@ -249,7 +263,7 @@ typedef struct DBSequenceObject {
Py_DECREF(mod);
- // If you are using Python 3.2 or up:
+ // If you are using Python 2.7 or up: (except Python 3.0, unsupported)
BSDDB_api* bsddb_api;
// Use "bsddb3._pybsddb.api" if you're using
@@ -257,10 +271,14 @@ typedef struct DBSequenceObject {
bsddb_api = (void **)PyCapsule_Import("bsddb._bsddb.api", 1);
+ Check "api_version" number before trying to use the API.
+
The structure's members must not be changed.
*/
+#define PYBSDDB_API_VERSION 1
typedef struct {
+ unsigned int api_version;
/* Type objects */
PyTypeObject* db_type;
PyTypeObject* dbcursor_type;
@@ -268,9 +286,7 @@ typedef struct {
PyTypeObject* dbenv_type;
PyTypeObject* dbtxn_type;
PyTypeObject* dblock_type;
-#if (DBVER >= 43)
PyTypeObject* dbsequence_type;
-#endif
/* Functions */
int (*makeDBError)(int err);
@@ -289,9 +305,9 @@ typedef struct {
#define DBEnvObject_Check(v) ((v)->ob_type == bsddb_api->dbenv_type)
#define DBTxnObject_Check(v) ((v)->ob_type == bsddb_api->dbtxn_type)
#define DBLockObject_Check(v) ((v)->ob_type == bsddb_api->dblock_type)
-#if (DBVER >= 43)
-#define DBSequenceObject_Check(v) ((v)->ob_type == bsddb_api->dbsequence_type)
-#endif
+#define DBSequenceObject_Check(v) \
+ ((bsddb_api->dbsequence_type) && \
+ ((v)->ob_type == bsddb_api->dbsequence_type))
#endif /* COMPILING_BSDDB_C */
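
Note: the bsddb.h changes add an api_version field at the front of the exported API struct plus a PYBSDDB_API_VERSION constant, and the updated comment asks consumers to check the version after importing the capsule. A hedged sketch of that consumer-side check, error handling trimmed, using the capsule name from the header's own example:

    #include "Python.h"
    #include "bsddb.h"

    static BSDDB_api *bsddb_api;

    /* Import the capsule as the header suggests, then refuse to run against
     * a struct layout this module was not compiled for. */
    static int
    import_bsddb_api_sketch(void)
    {
        bsddb_api = (BSDDB_api *)PyCapsule_Import("bsddb._bsddb.api", 1);
        if (bsddb_api == NULL)
            return -1;
        if (bsddb_api->api_version != PYBSDDB_API_VERSION) {
            PyErr_SetString(PyExc_ImportError,
                            "bsddb: C API version mismatch");
            bsddb_api = NULL;
            return -1;
        }
        return 0;
    }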
diff --git a/Modules/bz2module.c b/Modules/bz2module.c
index 9c59c04..ae749ee 100644
--- a/Modules/bz2module.c
+++ b/Modules/bz2module.c
@@ -41,23 +41,8 @@ typedef fpos_t Py_off_t;
#define MODE_READ_EOF 2
#define MODE_WRITE 3
-#define BZ2FileObject_Check(v) (Py_TYPE(v) == &BZ2File_Type)
-
-#ifdef BZ_CONFIG_ERROR
-
-#if SIZEOF_LONG >= 8
-#define BZS_TOTAL_OUT(bzs) \
- (((long)bzs->total_out_hi32 << 32) + bzs->total_out_lo32)
-#elif SIZEOF_LONG_LONG >= 8
-#define BZS_TOTAL_OUT(bzs) \
- (((PY_LONG_LONG)bzs->total_out_hi32 << 32) + bzs->total_out_lo32)
-#else
-#define BZS_TOTAL_OUT(bzs) \
- bzs->total_out_lo32
-#endif
-
-#else /* ! BZ_CONFIG_ERROR */
+#ifndef BZ_CONFIG_ERROR
#define BZ2_bzRead bzRead
#define BZ2_bzReadOpen bzReadOpen
@@ -72,8 +57,6 @@ typedef fpos_t Py_off_t;
#define BZ2_bzDecompressInit bzDecompressInit
#define BZ2_bzDecompressEnd bzDecompressEnd
-#define BZS_TOTAL_OUT(bzs) bzs->total_out
-
#endif /* ! BZ_CONFIG_ERROR */
@@ -90,6 +73,8 @@ typedef fpos_t Py_off_t;
#define RELEASE_LOCK(obj)
#endif
+#define MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
+
/* Bits in f_newlinetypes */
#define NEWLINE_UNKNOWN 0 /* No newline seen, yet */
#define NEWLINE_CR 1 /* \r newline seen */
@@ -234,6 +219,20 @@ Util_NewBufferSize(size_t currentsize)
return currentsize + (currentsize >> 3) + 6;
}
+static int
+Util_GrowBuffer(PyObject **buf)
+{
+ size_t size = PyString_GET_SIZE(*buf);
+ size_t new_size = Util_NewBufferSize(size);
+ if (new_size > size) {
+ return _PyString_Resize(buf, new_size);
+ } else { /* overflow */
+ PyErr_SetString(PyExc_OverflowError,
+ "Unable to allocate buffer - output too large");
+ return -1;
+ }
+}
+
/* This is a hacked version of Python's fileobject.c:get_line(). */
static PyObject *
Util_GetLine(BZ2FileObject *f, int n)
@@ -733,7 +732,8 @@ BZ2File_readlines(BZ2FileObject *self, PyObject *args)
}
else {
/* Grow the big buffer */
- _PyString_Resize(&big_buffer, buffersize);
+ if (_PyString_Resize(&big_buffer, buffersize))
+ goto error;
buffer = PyString_AS_STRING(big_buffer);
}
continue;
@@ -1207,12 +1207,16 @@ BZ2File_close(BZ2FileObject *self)
0, NULL, NULL);
break;
}
- if (self->fp) {
- PyFile_DecUseCount((PyFileObject *)self->file);
- self->fp = NULL;
+ if (self->file) {
+ if (self->fp)
+ PyFile_DecUseCount((PyFileObject *)self->file);
+ ret = PyObject_CallMethod(self->file, "close", NULL);
+ } else {
+ Py_INCREF(Py_None);
+ ret = Py_None;
}
+ self->fp = NULL;
self->mode = MODE_CLOSED;
- ret = PyObject_CallMethod(self->file, "close", NULL);
if (bzerror != BZ_OK) {
Util_CatchBZ2Error(bzerror);
Py_XDECREF(ret);
@@ -1480,10 +1484,9 @@ BZ2File_dealloc(BZ2FileObject *self)
0, NULL, NULL);
break;
}
- if (self->fp) {
+ if (self->fp != NULL && self->file != NULL)
PyFile_DecUseCount((PyFileObject *)self->file);
- self->fp = NULL;
- }
+ self->fp = NULL;
Util_DropReadAhead(self);
Py_XDECREF(self->file);
Py_TYPE(self)->tp_free((PyObject *)self);
@@ -1608,20 +1611,16 @@ static PyObject *
BZ2Comp_compress(BZ2CompObject *self, PyObject *args)
{
Py_buffer pdata;
- char *data;
- int datasize;
- int bufsize = SMALLCHUNK;
- PY_LONG_LONG totalout;
+ size_t input_left;
+ size_t output_size = 0;
PyObject *ret = NULL;
bz_stream *bzs = &self->bzs;
int bzerror;
if (!PyArg_ParseTuple(args, "s*:compress", &pdata))
return NULL;
- data = pdata.buf;
- datasize = pdata.len;
- if (datasize == 0) {
+ if (pdata.len == 0) {
PyBuffer_Release(&pdata);
return PyString_FromString("");
}
@@ -1633,40 +1632,52 @@ BZ2Comp_compress(BZ2CompObject *self, PyObject *args)
goto error;
}
- ret = PyString_FromStringAndSize(NULL, bufsize);
+ ret = PyString_FromStringAndSize(NULL, SMALLCHUNK);
if (!ret)
goto error;
- bzs->next_in = data;
- bzs->avail_in = datasize;
- bzs->next_out = BUF(ret);
- bzs->avail_out = bufsize;
+ bzs->next_in = pdata.buf;
+ bzs->avail_in = MIN(pdata.len, UINT_MAX);
+ input_left = pdata.len - bzs->avail_in;
- totalout = BZS_TOTAL_OUT(bzs);
+ bzs->next_out = BUF(ret);
+ bzs->avail_out = PyString_GET_SIZE(ret);
for (;;) {
+ char *saved_next_out;
+
Py_BEGIN_ALLOW_THREADS
+ saved_next_out = bzs->next_out;
bzerror = BZ2_bzCompress(bzs, BZ_RUN);
+ output_size += bzs->next_out - saved_next_out;
Py_END_ALLOW_THREADS
+
if (bzerror != BZ_RUN_OK) {
Util_CatchBZ2Error(bzerror);
goto error;
}
- if (bzs->avail_in == 0)
- break; /* no more input data */
+ if (bzs->avail_in == 0) {
+ if (input_left == 0)
+ break; /* no more input data */
+ bzs->avail_in = MIN(input_left, UINT_MAX);
+ input_left -= bzs->avail_in;
+ }
if (bzs->avail_out == 0) {
- bufsize = Util_NewBufferSize(bufsize);
- if (_PyString_Resize(&ret, bufsize) < 0) {
- BZ2_bzCompressEnd(bzs);
- goto error;
+ size_t buffer_left = PyString_GET_SIZE(ret) - output_size;
+ if (buffer_left == 0) {
+ if (Util_GrowBuffer(&ret) < 0) {
+ BZ2_bzCompressEnd(bzs);
+ goto error;
+ }
+ bzs->next_out = BUF(ret) + output_size;
+ buffer_left = PyString_GET_SIZE(ret) - output_size;
}
- bzs->next_out = BUF(ret) + (BZS_TOTAL_OUT(bzs)
- - totalout);
- bzs->avail_out = bufsize - (bzs->next_out - BUF(ret));
+ bzs->avail_out = MIN(buffer_left, UINT_MAX);
}
}
- _PyString_Resize(&ret, (Py_ssize_t)(BZS_TOTAL_OUT(bzs) - totalout));
+ if (_PyString_Resize(&ret, output_size) < 0)
+ goto error;
RELEASE_LOCK(self);
PyBuffer_Release(&pdata);
@@ -1689,33 +1700,34 @@ You must not use the compressor object after calling this method.\n\
static PyObject *
BZ2Comp_flush(BZ2CompObject *self)
{
- int bufsize = SMALLCHUNK;
+ size_t output_size = 0;
PyObject *ret = NULL;
bz_stream *bzs = &self->bzs;
- PY_LONG_LONG totalout;
int bzerror;
ACQUIRE_LOCK(self);
if (!self->running) {
- PyErr_SetString(PyExc_ValueError, "object was already "
- "flushed");
+ PyErr_SetString(PyExc_ValueError, "object was already flushed");
goto error;
}
self->running = 0;
- ret = PyString_FromStringAndSize(NULL, bufsize);
+ ret = PyString_FromStringAndSize(NULL, SMALLCHUNK);
if (!ret)
goto error;
bzs->next_out = BUF(ret);
- bzs->avail_out = bufsize;
-
- totalout = BZS_TOTAL_OUT(bzs);
+ bzs->avail_out = PyString_GET_SIZE(ret);
for (;;) {
+ char *saved_next_out;
+
Py_BEGIN_ALLOW_THREADS
+ saved_next_out = bzs->next_out;
bzerror = BZ2_bzCompress(bzs, BZ_FINISH);
+ output_size += bzs->next_out - saved_next_out;
Py_END_ALLOW_THREADS
+
if (bzerror == BZ_STREAM_END) {
break;
} else if (bzerror != BZ_FINISH_OK) {
@@ -1723,18 +1735,20 @@ BZ2Comp_flush(BZ2CompObject *self)
goto error;
}
if (bzs->avail_out == 0) {
- bufsize = Util_NewBufferSize(bufsize);
- if (_PyString_Resize(&ret, bufsize) < 0)
- goto error;
- bzs->next_out = BUF(ret);
- bzs->next_out = BUF(ret) + (BZS_TOTAL_OUT(bzs)
- - totalout);
- bzs->avail_out = bufsize - (bzs->next_out - BUF(ret));
+ size_t buffer_left = PyString_GET_SIZE(ret) - output_size;
+ if (buffer_left == 0) {
+ if (Util_GrowBuffer(&ret) < 0)
+ goto error;
+ bzs->next_out = BUF(ret) + output_size;
+ buffer_left = PyString_GET_SIZE(ret) - output_size;
+ }
+ bzs->avail_out = MIN(buffer_left, UINT_MAX);
}
}
- if (bzs->avail_out != 0)
- _PyString_Resize(&ret, (Py_ssize_t)(BZS_TOTAL_OUT(bzs) - totalout));
+ if (output_size != PyString_GET_SIZE(ret))
+ if (_PyString_Resize(&ret, output_size) < 0)
+ goto error;
RELEASE_LOCK(self);
return ret;
@@ -1899,18 +1913,14 @@ static PyObject *
BZ2Decomp_decompress(BZ2DecompObject *self, PyObject *args)
{
Py_buffer pdata;
- char *data;
- int datasize;
- int bufsize = SMALLCHUNK;
- PY_LONG_LONG totalout;
+ size_t input_left;
+ size_t output_size = 0;
PyObject *ret = NULL;
bz_stream *bzs = &self->bzs;
int bzerror;
if (!PyArg_ParseTuple(args, "s*:decompress", &pdata))
return NULL;
- data = pdata.buf;
- datasize = pdata.len;
ACQUIRE_LOCK(self);
if (!self->running) {
@@ -1919,52 +1929,65 @@ BZ2Decomp_decompress(BZ2DecompObject *self, PyObject *args)
goto error;
}
- ret = PyString_FromStringAndSize(NULL, bufsize);
+ ret = PyString_FromStringAndSize(NULL, SMALLCHUNK);
if (!ret)
goto error;
- bzs->next_in = data;
- bzs->avail_in = datasize;
- bzs->next_out = BUF(ret);
- bzs->avail_out = bufsize;
+ bzs->next_in = pdata.buf;
+ bzs->avail_in = MIN(pdata.len, UINT_MAX);
+ input_left = pdata.len - bzs->avail_in;
- totalout = BZS_TOTAL_OUT(bzs);
+ bzs->next_out = BUF(ret);
+ bzs->avail_out = PyString_GET_SIZE(ret);
for (;;) {
+ char *saved_next_out;
+
Py_BEGIN_ALLOW_THREADS
+ saved_next_out = bzs->next_out;
bzerror = BZ2_bzDecompress(bzs);
+ output_size += bzs->next_out - saved_next_out;
Py_END_ALLOW_THREADS
+
if (bzerror == BZ_STREAM_END) {
- if (bzs->avail_in != 0) {
+ self->running = 0;
+ input_left += bzs->avail_in;
+ if (input_left != 0) {
Py_DECREF(self->unused_data);
self->unused_data =
- PyString_FromStringAndSize(bzs->next_in,
- bzs->avail_in);
+ PyString_FromStringAndSize(bzs->next_in, input_left);
+ if (self->unused_data == NULL)
+ goto error;
}
- self->running = 0;
break;
}
if (bzerror != BZ_OK) {
Util_CatchBZ2Error(bzerror);
goto error;
}
- if (bzs->avail_in == 0)
- break; /* no more input data */
+ if (bzs->avail_in == 0) {
+ if (input_left == 0)
+ break; /* no more input data */
+ bzs->avail_in = MIN(input_left, UINT_MAX);
+ input_left -= bzs->avail_in;
+ }
if (bzs->avail_out == 0) {
- bufsize = Util_NewBufferSize(bufsize);
- if (_PyString_Resize(&ret, bufsize) < 0) {
- BZ2_bzDecompressEnd(bzs);
- goto error;
+ size_t buffer_left = PyString_GET_SIZE(ret) - output_size;
+ if (buffer_left == 0) {
+ if (Util_GrowBuffer(&ret) < 0) {
+ BZ2_bzDecompressEnd(bzs);
+ goto error;
+ }
+ bzs->next_out = BUF(ret) + output_size;
+ buffer_left = PyString_GET_SIZE(ret) - output_size;
}
- bzs->next_out = BUF(ret);
- bzs->next_out = BUF(ret) + (BZS_TOTAL_OUT(bzs)
- - totalout);
- bzs->avail_out = bufsize - (bzs->next_out - BUF(ret));
+ bzs->avail_out = MIN(buffer_left, UINT_MAX);
}
}
- if (bzs->avail_out != 0)
- _PyString_Resize(&ret, (Py_ssize_t)(BZS_TOTAL_OUT(bzs) - totalout));
+ if (output_size != PyString_GET_SIZE(ret))
+ if (_PyString_Resize(&ret, output_size) < 0)
+ goto error;
RELEASE_LOCK(self);
PyBuffer_Release(&pdata);
@@ -2111,10 +2134,10 @@ static PyObject *
bz2_compress(PyObject *self, PyObject *args, PyObject *kwargs)
{
int compresslevel=9;
+ int action;
Py_buffer pdata;
- char *data;
- int datasize;
- int bufsize;
+ size_t input_left;
+ size_t output_size = 0;
PyObject *ret = NULL;
bz_stream _bzs;
bz_stream *bzs = &_bzs;
@@ -2125,8 +2148,6 @@ bz2_compress(PyObject *self, PyObject *args, PyObject *kwargs)
kwlist, &pdata,
&compresslevel))
return NULL;
- data = pdata.buf;
- datasize = pdata.len;
if (compresslevel < 1 || compresslevel > 9) {
PyErr_SetString(PyExc_ValueError,
@@ -2135,11 +2156,7 @@ bz2_compress(PyObject *self, PyObject *args, PyObject *kwargs)
return NULL;
}
- /* Conforming to bz2 manual, this is large enough to fit compressed
- * data in one shot. We will check it later anyway. */
- bufsize = datasize + (datasize/100+1) + 600;
-
- ret = PyString_FromStringAndSize(NULL, bufsize);
+ ret = PyString_FromStringAndSize(NULL, SMALLCHUNK);
if (!ret) {
PyBuffer_Release(&pdata);
return NULL;
@@ -2147,10 +2164,12 @@ bz2_compress(PyObject *self, PyObject *args, PyObject *kwargs)
memset(bzs, 0, sizeof(bz_stream));
- bzs->next_in = data;
- bzs->avail_in = datasize;
+ bzs->next_in = pdata.buf;
+ bzs->avail_in = MIN(pdata.len, UINT_MAX);
+ input_left = pdata.len - bzs->avail_in;
+
bzs->next_out = BUF(ret);
- bzs->avail_out = bufsize;
+ bzs->avail_out = PyString_GET_SIZE(ret);
bzerror = BZ2_bzCompressInit(bzs, compresslevel, 0, 0);
if (bzerror != BZ_OK) {
@@ -2160,36 +2179,53 @@ bz2_compress(PyObject *self, PyObject *args, PyObject *kwargs)
return NULL;
}
+ action = input_left > 0 ? BZ_RUN : BZ_FINISH;
+
for (;;) {
+ char *saved_next_out;
+
Py_BEGIN_ALLOW_THREADS
- bzerror = BZ2_bzCompress(bzs, BZ_FINISH);
+ saved_next_out = bzs->next_out;
+ bzerror = BZ2_bzCompress(bzs, action);
+ output_size += bzs->next_out - saved_next_out;
Py_END_ALLOW_THREADS
+
if (bzerror == BZ_STREAM_END) {
break;
- } else if (bzerror != BZ_FINISH_OK) {
+ } else if (bzerror != BZ_RUN_OK && bzerror != BZ_FINISH_OK) {
BZ2_bzCompressEnd(bzs);
Util_CatchBZ2Error(bzerror);
PyBuffer_Release(&pdata);
Py_DECREF(ret);
return NULL;
}
+ if (action == BZ_RUN && bzs->avail_in == 0) {
+ if (input_left == 0) {
+ action = BZ_FINISH;
+ } else {
+ bzs->avail_in = MIN(input_left, UINT_MAX);
+ input_left -= bzs->avail_in;
+ }
+ }
if (bzs->avail_out == 0) {
- bufsize = Util_NewBufferSize(bufsize);
- if (_PyString_Resize(&ret, bufsize) < 0) {
- BZ2_bzCompressEnd(bzs);
- PyBuffer_Release(&pdata);
- Py_DECREF(ret);
- return NULL;
+ size_t buffer_left = PyString_GET_SIZE(ret) - output_size;
+ if (buffer_left == 0) {
+ if (Util_GrowBuffer(&ret) < 0) {
+ BZ2_bzCompressEnd(bzs);
+ PyBuffer_Release(&pdata);
+ return NULL;
+ }
+ bzs->next_out = BUF(ret) + output_size;
+ buffer_left = PyString_GET_SIZE(ret) - output_size;
}
- bzs->next_out = BUF(ret) + BZS_TOTAL_OUT(bzs);
- bzs->avail_out = bufsize - (bzs->next_out - BUF(ret));
+ bzs->avail_out = MIN(buffer_left, UINT_MAX);
}
}
- if (bzs->avail_out != 0)
- _PyString_Resize(&ret, (Py_ssize_t)BZS_TOTAL_OUT(bzs));
- BZ2_bzCompressEnd(bzs);
+ if (output_size != PyString_GET_SIZE(ret))
+ _PyString_Resize(&ret, output_size); /* Sets ret to NULL on failure. */
+ BZ2_bzCompressEnd(bzs);
PyBuffer_Release(&pdata);
return ret;
}
@@ -2205,9 +2241,8 @@ static PyObject *
bz2_decompress(PyObject *self, PyObject *args)
{
Py_buffer pdata;
- char *data;
- int datasize;
- int bufsize = SMALLCHUNK;
+ size_t input_left;
+ size_t output_size = 0;
PyObject *ret;
bz_stream _bzs;
bz_stream *bzs = &_bzs;
@@ -2215,15 +2250,13 @@ bz2_decompress(PyObject *self, PyObject *args)
if (!PyArg_ParseTuple(args, "s*:decompress", &pdata))
return NULL;
- data = pdata.buf;
- datasize = pdata.len;
- if (datasize == 0) {
+ if (pdata.len == 0) {
PyBuffer_Release(&pdata);
return PyString_FromString("");
}
- ret = PyString_FromStringAndSize(NULL, bufsize);
+ ret = PyString_FromStringAndSize(NULL, SMALLCHUNK);
if (!ret) {
PyBuffer_Release(&pdata);
return NULL;
@@ -2231,10 +2264,12 @@ bz2_decompress(PyObject *self, PyObject *args)
memset(bzs, 0, sizeof(bz_stream));
- bzs->next_in = data;
- bzs->avail_in = datasize;
+ bzs->next_in = pdata.buf;
+ bzs->avail_in = MIN(pdata.len, UINT_MAX);
+ input_left = pdata.len - bzs->avail_in;
+
bzs->next_out = BUF(ret);
- bzs->avail_out = bufsize;
+ bzs->avail_out = PyString_GET_SIZE(ret);
bzerror = BZ2_bzDecompressInit(bzs, 0, 0);
if (bzerror != BZ_OK) {
@@ -2245,9 +2280,14 @@ bz2_decompress(PyObject *self, PyObject *args)
}
for (;;) {
+ char *saved_next_out;
+
Py_BEGIN_ALLOW_THREADS
+ saved_next_out = bzs->next_out;
bzerror = BZ2_bzDecompress(bzs);
+ output_size += bzs->next_out - saved_next_out;
Py_END_ALLOW_THREADS
+
if (bzerror == BZ_STREAM_END) {
break;
} else if (bzerror != BZ_OK) {
@@ -2258,31 +2298,37 @@ bz2_decompress(PyObject *self, PyObject *args)
return NULL;
}
if (bzs->avail_in == 0) {
- BZ2_bzDecompressEnd(bzs);
- PyErr_SetString(PyExc_ValueError,
- "couldn't find end of stream");
- PyBuffer_Release(&pdata);
- Py_DECREF(ret);
- return NULL;
- }
- if (bzs->avail_out == 0) {
- bufsize = Util_NewBufferSize(bufsize);
- if (_PyString_Resize(&ret, bufsize) < 0) {
+ if (input_left == 0) {
BZ2_bzDecompressEnd(bzs);
+ PyErr_SetString(PyExc_ValueError,
+ "couldn't find end of stream");
PyBuffer_Release(&pdata);
Py_DECREF(ret);
return NULL;
}
- bzs->next_out = BUF(ret) + BZS_TOTAL_OUT(bzs);
- bzs->avail_out = bufsize - (bzs->next_out - BUF(ret));
+ bzs->avail_in = MIN(input_left, UINT_MAX);
+ input_left -= bzs->avail_in;
+ }
+ if (bzs->avail_out == 0) {
+ size_t buffer_left = PyString_GET_SIZE(ret) - output_size;
+ if (buffer_left == 0) {
+ if (Util_GrowBuffer(&ret) < 0) {
+ BZ2_bzDecompressEnd(bzs);
+ PyBuffer_Release(&pdata);
+ return NULL;
+ }
+ bzs->next_out = BUF(ret) + output_size;
+ buffer_left = PyString_GET_SIZE(ret) - output_size;
+ }
+ bzs->avail_out = MIN(buffer_left, UINT_MAX);
}
}
- if (bzs->avail_out != 0)
- _PyString_Resize(&ret, (Py_ssize_t)BZS_TOTAL_OUT(bzs));
+ if (output_size != PyString_GET_SIZE(ret))
+ _PyString_Resize(&ret, output_size); /* Sets ret to NULL on failure. */
+
BZ2_bzDecompressEnd(bzs);
PyBuffer_Release(&pdata);
-
return ret;
}
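
Note: a recurring pattern in the bz2module.c hunks above: bz_stream's avail_in/avail_out fields are unsigned ints, so buffers whose Py_ssize_t length exceeds UINT_MAX are now fed through the stream in UINT_MAX-sized slices, with input_left tracking what has not yet been handed to libbzip2. A small sketch of that refill step in isolation (names are illustrative):

    #include <limits.h>
    #include <stddef.h>

    #define MIN(X, Y) (((X) < (Y)) ? (X) : (Y))

    /* When the stream has consumed its current slice, hand it the next
     * chunk, never more than UINT_MAX bytes at a time. */
    static void
    refill_avail_in_sketch(unsigned int *avail_in, size_t *input_left)
    {
        if (*avail_in == 0 && *input_left != 0) {
            *avail_in = (unsigned int)MIN(*input_left, (size_t)UINT_MAX);
            *input_left -= *avail_in;
        }
    }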
diff --git a/Modules/cPickle.c b/Modules/cPickle.c
index 48b5075..10199eb 100644
--- a/Modules/cPickle.c
+++ b/Modules/cPickle.c
@@ -139,15 +139,15 @@ static PyObject *__class___str, *__getinitargs___str, *__dict___str,
typedef struct {
PyObject_HEAD
- int length; /* number of initial slots in data currently used */
- int size; /* number of slots in data allocated */
+ Py_ssize_t length; /* number of initial slots in data currently used */
+ Py_ssize_t size; /* number of slots in data allocated */
PyObject **data;
} Pdata;
static void
Pdata_dealloc(Pdata *self)
{
- int i;
+ Py_ssize_t i;
PyObject **p;
for (i = self->length, p = self->data; --i >= 0; p++) {
@@ -193,9 +193,9 @@ stackUnderflow(void)
* number of items, this is a (non-erroneous) NOP.
*/
static int
-Pdata_clear(Pdata *self, int clearto)
+Pdata_clear(Pdata *self, Py_ssize_t clearto)
{
- int i;
+ Py_ssize_t i;
PyObject **p;
if (clearto < 0) return stackUnderflow();
@@ -214,18 +214,17 @@ Pdata_clear(Pdata *self, int clearto)
static int
Pdata_grow(Pdata *self)
{
- int bigger;
- size_t nbytes;
+ Py_ssize_t bigger;
+ Py_ssize_t nbytes;
+
PyObject **tmp;
- bigger = self->size << 1;
- if (bigger <= 0) /* was 0, or new value overflows */
+ if (self->size > (PY_SSIZE_T_MAX >> 1))
goto nomemory;
- if ((int)(size_t)bigger != bigger)
- goto nomemory;
- nbytes = (size_t)bigger * sizeof(PyObject *);
- if (nbytes / sizeof(PyObject *) != (size_t)bigger)
+ bigger = self->size << 1;
+ if (bigger > (PY_SSIZE_T_MAX / sizeof(PyObject *)))
goto nomemory;
+ nbytes = bigger * sizeof(PyObject *);
tmp = realloc(self->data, nbytes);
if (tmp == NULL)
goto nomemory;
@@ -280,10 +279,10 @@ Pdata_grow(Pdata *self)
static PyObject *
-Pdata_popTuple(Pdata *self, int start)
+Pdata_popTuple(Pdata *self, Py_ssize_t start)
{
PyObject *r;
- int i, j, l;
+ Py_ssize_t i, j, l;
l = self->length-start;
r = PyTuple_New(l);
@@ -297,10 +296,10 @@ Pdata_popTuple(Pdata *self, int start)
}
static PyObject *
-Pdata_popList(Pdata *self, int start)
+Pdata_popList(Pdata *self, Py_ssize_t start)
{
PyObject *r;
- int i, j, l;
+ Py_ssize_t i, j, l;
l=self->length-start;
if (!( r=PyList_New(l))) return NULL;
@@ -325,8 +324,7 @@ Pdata_popList(Pdata *self, int start)
#define FREE_ARG_TUP(self) { \
if (Py_REFCNT(self->arg) > 1) { \
- Py_DECREF(self->arg); \
- self->arg=NULL; \
+ Py_CLEAR(self->arg); \
} \
}
@@ -347,9 +345,9 @@ typedef struct Picklerobject {
int bin;
int fast; /* Fast mode doesn't save in memo, don't use if circ ref */
- int (*write_func)(struct Picklerobject *, const char *, Py_ssize_t);
+ Py_ssize_t (*write_func)(struct Picklerobject *, const char *, Py_ssize_t);
char *write_buf;
- int buf_size;
+ Py_ssize_t buf_size;
PyObject *dispatch_table;
int fast_container; /* count nested container dumps */
PyObject *fast_memo;
@@ -373,12 +371,12 @@ typedef struct Unpicklerobject {
PyObject *mark;
PyObject *pers_func;
PyObject *last_string;
- int *marks;
- int num_marks;
- int marks_size;
+ Py_ssize_t *marks;
+ Py_ssize_t num_marks;
+ Py_ssize_t marks_size;
Py_ssize_t (*read_func)(struct Unpicklerobject *, char **, Py_ssize_t);
Py_ssize_t (*readline_func)(struct Unpicklerobject *, char **);
- int buf_size;
+ Py_ssize_t buf_size;
char *buf;
PyObject *find_class;
} Unpicklerobject;
@@ -424,7 +422,7 @@ cPickle_ErrFormat(PyObject *ErrType, char *stringformat, char *format, ...)
return NULL;
}
-static int
+static Py_ssize_t
write_file(Picklerobject *self, const char *s, Py_ssize_t n)
{
size_t nbyteswritten;
@@ -433,11 +431,6 @@ write_file(Picklerobject *self, const char *s, Py_ssize_t n)
return 0;
}
- if (n > INT_MAX) {
- /* String too large */
- return -1;
- }
-
PyFile_IncUseCount((PyFileObject *)self->file);
Py_BEGIN_ALLOW_THREADS
nbyteswritten = fwrite(s, sizeof(char), n, self->fp);
@@ -448,40 +441,44 @@ write_file(Picklerobject *self, const char *s, Py_ssize_t n)
return -1;
}
- return (int)n;
+ return n;
}
-static int
+static Py_ssize_t
write_cStringIO(Picklerobject *self, const char *s, Py_ssize_t n)
{
+ Py_ssize_t len = n;
+
if (s == NULL) {
return 0;
}
+ while (n > INT_MAX) {
+ if (PycStringIO->cwrite((PyObject *)self->file, s, INT_MAX) != INT_MAX) {
+ return -1;
+ }
+ n -= INT_MAX;
+ }
+
if (PycStringIO->cwrite((PyObject *)self->file, s, n) != n) {
return -1;
}
- return (int)n;
+ return len;
}
-static int
+static Py_ssize_t
write_none(Picklerobject *self, const char *s, Py_ssize_t n)
{
if (s == NULL) return 0;
- if (n > INT_MAX) return -1;
- return (int)n;
+ return n;
}
-static int
-write_other(Picklerobject *self, const char *s, Py_ssize_t _n)
+static Py_ssize_t
+write_other(Picklerobject *self, const char *s, Py_ssize_t n)
{
PyObject *py_str = 0, *junk = 0;
- int n;
- if (_n > INT_MAX)
- return -1;
- n = (int)_n;
if (s == NULL) {
if (!( self->buf_size )) return 0;
py_str = PyString_FromStringAndSize(self->write_buf,
@@ -490,7 +487,7 @@ write_other(Picklerobject *self, const char *s, Py_ssize_t _n)
return -1;
}
else {
- if (self->buf_size && (n + self->buf_size) > WRITE_BUF_SIZE) {
+ if (self->buf_size && n > WRITE_BUF_SIZE - self->buf_size) {
if (write_other(self, NULL, 0) < 0)
return -1;
}
@@ -531,7 +528,7 @@ read_file(Unpicklerobject *self, char **s, Py_ssize_t n)
size_t nbytesread;
if (self->buf_size == 0) {
- int size;
+ Py_ssize_t size;
size = ((n < 32) ? 32 : n);
if (!( self->buf = (char *)malloc(size))) {
@@ -575,7 +572,7 @@ read_file(Unpicklerobject *self, char **s, Py_ssize_t n)
static Py_ssize_t
readline_file(Unpicklerobject *self, char **s)
{
- int i;
+ Py_ssize_t i;
if (self->buf_size == 0) {
if (!( self->buf = (char *)malloc(40))) {
@@ -587,7 +584,7 @@ readline_file(Unpicklerobject *self, char **s)
i = 0;
while (1) {
- int bigger;
+ Py_ssize_t bigger;
char *newbuf;
for (; i < (self->buf_size - 1); i++) {
if (feof(self->fp) ||
@@ -597,13 +594,13 @@ readline_file(Unpicklerobject *self, char **s)
return i + 1;
}
}
- bigger = self->buf_size << 1;
- if (bigger <= 0) { /* overflow */
+ if (self->buf_size > (PY_SSIZE_T_MAX >> 1)) {
PyErr_NoMemory();
return -1;
}
+ bigger = self->buf_size << 1;
newbuf = (char *)realloc(self->buf, bigger);
- if (!newbuf) {
+ if (newbuf == NULL) {
PyErr_NoMemory();
return -1;
}
@@ -616,30 +613,63 @@ readline_file(Unpicklerobject *self, char **s)
static Py_ssize_t
read_cStringIO(Unpicklerobject *self, char **s, Py_ssize_t n)
{
- char *ptr;
+ Py_ssize_t len = n;
+ char *start, *end = NULL;
- if (PycStringIO->cread((PyObject *)self->file, &ptr, n) != n) {
- PyErr_SetNone(PyExc_EOFError);
- return -1;
+ while (1) {
+ int k;
+ char *ptr;
+ if (n > INT_MAX)
+ k = INT_MAX;
+ else
+ k = (int)n;
+ if (PycStringIO->cread((PyObject *)self->file, &ptr, k) != k) {
+ PyErr_SetNone(PyExc_EOFError);
+ return -1;
+ }
+ if (end == NULL)
+ start = ptr;
+ else if (ptr != end) {
+ /* non-continuous area */
+ return -1;
+ }
+ if (n <= INT_MAX)
+ break;
+ end = ptr + INT_MAX;
+ n -= INT_MAX;
}
- *s = ptr;
+ *s = start;
- return n;
+ return len;
}
static Py_ssize_t
readline_cStringIO(Unpicklerobject *self, char **s)
{
- Py_ssize_t n;
- char *ptr;
+ Py_ssize_t n = 0;
+ char *start = NULL, *end = NULL;
- if ((n = PycStringIO->creadline((PyObject *)self->file, &ptr)) < 0) {
- return -1;
+ while (1) {
+ int k;
+ char *ptr;
+ if ((k = PycStringIO->creadline((PyObject *)self->file, &ptr)) < 0) {
+ return -1;
+ }
+ n += k;
+ if (end == NULL)
+ start = ptr;
+ else if (ptr != end) {
+ /* non-continuous area */
+ return -1;
+ }
+ if (k == 0 || ptr[k - 1] == '\n')
+ break;
+ end = ptr + k;
}
- *s = ptr;
+ *s = start;
return n;
}
@@ -700,7 +730,7 @@ readline_other(Unpicklerobject *self, char **s)
* The caller is responsible for free()'ing the return value.
*/
static char *
-pystrndup(const char *s, int n)
+pystrndup(const char *s, Py_ssize_t n)
{
char *r = (char *)malloc(n+1);
if (r == NULL)
@@ -715,7 +745,7 @@ static int
get(Picklerobject *self, PyObject *id)
{
PyObject *value, *mv;
- long c_value;
+ Py_ssize_t c_value;
char s[30];
size_t len;
@@ -735,7 +765,8 @@ get(Picklerobject *self, PyObject *id)
if (!self->bin) {
s[0] = GET;
- PyOS_snprintf(s + 1, sizeof(s) - 1, "%ld\n", c_value);
+ PyOS_snprintf(s + 1, sizeof(s) - 1,
+ "%" PY_FORMAT_SIZE_T "d\n", c_value);
len = strlen(s);
}
else if (Pdata_Check(self->file)) {
@@ -780,8 +811,7 @@ static int
put2(Picklerobject *self, PyObject *ob)
{
char c_str[30];
- int p;
- size_t len;
+ Py_ssize_t len, p;
int res = -1;
PyObject *py_ob_id = 0, *memo_len = 0, *t = 0;
@@ -818,7 +848,8 @@ put2(Picklerobject *self, PyObject *ob)
if (!self->bin) {
c_str[0] = PUT;
- PyOS_snprintf(c_str + 1, sizeof(c_str) - 1, "%d\n", p);
+ PyOS_snprintf(c_str + 1, sizeof(c_str) - 1,
+ "%" PY_FORMAT_SIZE_T "d\n", p);
len = strlen(c_str);
}
else if (Pdata_Check(self->file)) {
@@ -994,7 +1025,7 @@ save_int(Picklerobject *self, PyObject *args)
{
char c_str[32];
long l = PyInt_AS_LONG((PyIntObject *)args);
- int len = 0;
+ Py_ssize_t len = 0;
if (!self->bin
#if SIZEOF_LONG > 4
@@ -1201,7 +1232,7 @@ done:
static int
save_string(Picklerobject *self, PyObject *args, int doput)
{
- int size, len;
+ Py_ssize_t size, len;
PyObject *repr=0;
if ((size = PyString_Size(args)) < 0)
@@ -1448,7 +1479,7 @@ save_unicode(Picklerobject *self, PyObject *args, int doput)
static int
store_tuple_elements(Picklerobject *self, PyObject *t, int len)
{
- int i;
+ Py_ssize_t i;
int res = -1; /* guilty until proved innocent */
assert(PyTuple_Size(t) == len);
@@ -1477,7 +1508,7 @@ static int
save_tuple(Picklerobject *self, PyObject *args)
{
PyObject *py_tuple_id = NULL;
- int len, i;
+ Py_ssize_t len, i;
int res = -1;
static char tuple = TUPLE;
@@ -1690,7 +1721,7 @@ save_list(Picklerobject *self, PyObject *args)
{
int res = -1;
char s[3];
- int len;
+ Py_ssize_t len;
PyObject *iter;
if (self->fast && !fast_save_enter(self, args))
@@ -1943,7 +1974,7 @@ save_dict(Picklerobject *self, PyObject *args)
{
int res = -1;
char s[3];
- int len;
+ Py_ssize_t len;
if (self->fast && !fast_save_enter(self, args))
goto finally;
@@ -2027,7 +2058,7 @@ save_inst(Picklerobject *self, PyObject *args)
if ((getinitargs_func = PyObject_GetAttr(args, __getinitargs___str))) {
PyObject *element = 0;
- int i, len;
+ Py_ssize_t i, len;
if (!( class_args =
PyObject_Call(getinitargs_func, empty_tuple, NULL)))
@@ -2289,7 +2320,8 @@ static int
save_pers(Picklerobject *self, PyObject *args, PyObject *f)
{
PyObject *pid = 0;
- int size, res = -1;
+ Py_ssize_t size;
+ int res = -1;
static char persid = PERSID, binpersid = BINPERSID;
@@ -2431,7 +2463,7 @@ save_reduce(Picklerobject *self, PyObject *args, PyObject *fn, PyObject *ob)
if (use_newobj) {
PyObject *cls;
PyObject *newargtup;
- int n, i;
+ Py_ssize_t n, i;
/* Sanity checks. */
n = PyTuple_Size(argtup);
@@ -2815,7 +2847,7 @@ Pickle_clear_memo(Picklerobject *self, PyObject *args)
static PyObject *
Pickle_getvalue(Picklerobject *self, PyObject *args)
{
- int l, i, rsize, ssize, clear=1, lm;
+ Py_ssize_t l, i, rsize, ssize, clear=1, lm;
long ik;
PyObject *k, *r;
char *s, *p, *have_get;
@@ -3314,7 +3346,7 @@ find_class(PyObject *py_module_name, PyObject *py_global_name, PyObject *fc)
return global;
}
-static int
+static Py_ssize_t
marker(Unpicklerobject *self)
{
if (self->num_marks < 1) {
@@ -3345,7 +3377,8 @@ load_int(Unpicklerobject *self)
{
PyObject *py_int = 0;
char *endptr, *s;
- int len, res = -1;
+ Py_ssize_t len;
+ int res = -1;
long l;
if ((len = self->readline_func(self, &s)) < 0) return -1;
@@ -3477,7 +3510,8 @@ load_long(Unpicklerobject *self)
{
PyObject *l = 0;
char *end, *s;
- int len, res = -1;
+ Py_ssize_t len;
+ int res = -1;
if ((len = self->readline_func(self, &s)) < 0) return -1;
if (len < 2) return bad_readline();
@@ -3541,7 +3575,8 @@ load_float(Unpicklerobject *self)
{
PyObject *py_float = 0;
char *endptr, *s;
- int len, res = -1;
+ Py_ssize_t len;
+ int res = -1;
double d;
if ((len = self->readline_func(self, &s)) < 0) return -1;
@@ -3597,7 +3632,8 @@ static int
load_string(Unpicklerobject *self)
{
PyObject *str = 0;
- int len, res = -1;
+ Py_ssize_t len;
+ int res = -1;
char *s, *p;
if ((len = self->readline_func(self, &s)) < 0) return -1;
@@ -3606,17 +3642,19 @@ load_string(Unpicklerobject *self)
/* Strip outermost quotes */
- while (s[len-1] <= ' ')
+ while (len > 0 && s[len-1] <= ' ')
len--;
- if(s[0]=='"' && s[len-1]=='"'){
+ if (len > 1 && s[0]=='"' && s[len-1]=='"') {
s[len-1] = '\0';
p = s + 1 ;
len -= 2;
- } else if(s[0]=='\'' && s[len-1]=='\''){
+ }
+ else if (len > 1 && s[0]=='\'' && s[len-1]=='\'') {
s[len-1] = '\0';
p = s + 1 ;
len -= 2;
- } else
+ }
+ else
goto insecure;
/********************************************/
@@ -3639,7 +3677,7 @@ static int
load_binstring(Unpicklerobject *self)
{
PyObject *py_string = 0;
- long l;
+ Py_ssize_t l;
char *s;
if (self->read_func(self, &s, 4) < 0) return -1;
@@ -3691,20 +3729,17 @@ static int
load_unicode(Unpicklerobject *self)
{
PyObject *str = 0;
- int len, res = -1;
+ Py_ssize_t len;
char *s;
if ((len = self->readline_func(self, &s)) < 0) return -1;
if (len < 1) return bad_readline();
if (!( str = PyUnicode_DecodeRawUnicodeEscape(s, len - 1, NULL)))
- goto finally;
+ return -1;
PDATA_PUSH(self->stack, str, -1);
return 0;
-
- finally:
- return res;
}
#endif
@@ -3714,7 +3749,7 @@ static int
load_binunicode(Unpicklerobject *self)
{
PyObject *unicode;
- long l;
+ Py_ssize_t l;
char *s;
if (self->read_func(self, &s, 4) < 0) return -1;
@@ -3745,7 +3780,7 @@ static int
load_tuple(Unpicklerobject *self)
{
PyObject *tup;
- int i;
+ Py_ssize_t i;
if ((i = marker(self)) < 0) return -1;
if (!( tup=Pdata_popTuple(self->stack, i))) return -1;
@@ -3798,7 +3833,7 @@ static int
load_list(Unpicklerobject *self)
{
PyObject *list = 0;
- int i;
+ Py_ssize_t i;
if ((i = marker(self)) < 0) return -1;
if (!( list=Pdata_popList(self->stack, i))) return -1;
@@ -3810,7 +3845,7 @@ static int
load_dict(Unpicklerobject *self)
{
PyObject *dict, *key, *value;
- int i, j, k;
+ Py_ssize_t i, j, k;
if ((i = marker(self)) < 0) return -1;
j=self->stack->length;
@@ -3886,7 +3921,7 @@ static int
load_obj(Unpicklerobject *self)
{
PyObject *class, *tup, *obj=0;
- int i;
+ Py_ssize_t i;
if ((i = marker(self)) < 0) return -1;
if (!( tup=Pdata_popTuple(self->stack, i+1))) return -1;
@@ -3907,7 +3942,7 @@ static int
load_inst(Unpicklerobject *self)
{
PyObject *tup, *class=0, *obj=0, *module_name, *class_name;
- int i, len;
+ Py_ssize_t i, len;
char *s;
if ((i = marker(self)) < 0) return -1;
@@ -3993,7 +4028,7 @@ static int
load_global(Unpicklerobject *self)
{
PyObject *class = 0, *module_name = 0, *class_name = 0;
- int len;
+ Py_ssize_t len;
char *s;
if ((len = self->readline_func(self, &s)) < 0) return -1;
@@ -4024,7 +4059,7 @@ static int
load_persid(Unpicklerobject *self)
{
PyObject *pid = 0;
- int len;
+ Py_ssize_t len;
char *s;
if (self->pers_func) {
@@ -4102,7 +4137,7 @@ load_binpersid(Unpicklerobject *self)
static int
load_pop(Unpicklerobject *self)
{
- int len = self->stack->length;
+ Py_ssize_t len = self->stack->length;
/* Note that we split the (pickle.py) stack into two stacks,
an object stack and a mark stack. We have to be clever and
@@ -4127,7 +4162,7 @@ load_pop(Unpicklerobject *self)
static int
load_pop_mark(Unpicklerobject *self)
{
- int i;
+ Py_ssize_t i;
if ((i = marker(self)) < 0)
return -1;
@@ -4142,7 +4177,7 @@ static int
load_dup(Unpicklerobject *self)
{
PyObject *last;
- int len;
+ Py_ssize_t len;
if ((len = self->stack->length) <= 0) return stackUnderflow();
last=self->stack->data[len-1];
@@ -4156,7 +4191,7 @@ static int
load_get(Unpicklerobject *self)
{
PyObject *py_str = 0, *value = 0;
- int len;
+ Py_ssize_t len;
char *s;
int rc;
@@ -4214,7 +4249,7 @@ load_long_binget(Unpicklerobject *self)
PyObject *py_key = 0, *value = 0;
unsigned char c;
char *s;
- long key;
+ Py_ssize_t key;
int rc;
if (self->read_func(self, &s, 4) < 0) return -1;
@@ -4317,7 +4352,7 @@ static int
load_put(Unpicklerobject *self)
{
PyObject *py_str = 0, *value = 0;
- int len, l;
+ Py_ssize_t len, l;
char *s;
if ((l = self->readline_func(self, &s)) < 0) return -1;
@@ -4337,7 +4372,7 @@ load_binput(Unpicklerobject *self)
PyObject *py_key = 0, *value = 0;
unsigned char key;
char *s;
- int len;
+ Py_ssize_t len;
if (self->read_func(self, &s, 1) < 0) return -1;
if (!( (len=self->stack->length) > 0 )) return stackUnderflow();
@@ -4356,10 +4391,10 @@ static int
load_long_binput(Unpicklerobject *self)
{
PyObject *py_key = 0, *value = 0;
- long key;
+ Py_ssize_t key;
unsigned char c;
char *s;
- int len;
+ Py_ssize_t len;
if (self->read_func(self, &s, 4) < 0) return -1;
if (!( len=self->stack->length )) return stackUnderflow();
@@ -4382,10 +4417,10 @@ load_long_binput(Unpicklerobject *self)
static int
-do_append(Unpicklerobject *self, int x)
+do_append(Unpicklerobject *self, Py_ssize_t x)
{
PyObject *value = 0, *list = 0, *append_method = 0;
- int len, i;
+ Py_ssize_t len, i;
len=self->stack->length;
if (!( len >= x && x > 0 )) return stackUnderflow();
@@ -4451,11 +4486,11 @@ load_appends(Unpicklerobject *self)
}
-static int
-do_setitems(Unpicklerobject *self, int x)
+static Py_ssize_t
+do_setitems(Unpicklerobject *self, Py_ssize_t x)
{
PyObject *value = 0, *key = 0, *dict = 0;
- int len, i, r=0;
+ Py_ssize_t len, i, r=0;
if (!( (len=self->stack->length) >= x
&& x > 0 )) return stackUnderflow();
@@ -4496,8 +4531,8 @@ load_build(Unpicklerobject *self)
PyObject *state, *inst, *slotstate;
PyObject *__setstate__;
PyObject *d_key, *d_value;
- Py_ssize_t i;
int res = -1;
+ Py_ssize_t i;
/* Stack is ... instance, state. We want to leave instance at
* the stack top, possibly mutated via instance.__setstate__(state).
@@ -4596,7 +4631,7 @@ load_build(Unpicklerobject *self)
static int
load_mark(Unpicklerobject *self)
{
- int s;
+ Py_ssize_t s;
/* Note that we split the (pickle.py) stack into two stacks, an
object stack and a mark stack. Here we push a mark onto the
@@ -4604,14 +4639,14 @@ load_mark(Unpicklerobject *self)
*/
if ((self->num_marks + 1) >= self->marks_size) {
- int *marks;
+ Py_ssize_t *marks;
s=self->marks_size+20;
if (s <= self->num_marks) s=self->num_marks + 1;
if (self->marks == NULL)
- marks=(int *)malloc(s * sizeof(int));
+ marks=(Py_ssize_t *)malloc(s * sizeof(Py_ssize_t));
else
- marks=(int *)realloc(self->marks,
- s * sizeof(int));
+ marks=(Py_ssize_t *)realloc(self->marks,
+ s * sizeof(Py_ssize_t));
if (!marks) {
PyErr_NoMemory();
return -1;
@@ -4981,7 +5016,7 @@ load(Unpicklerobject *self)
static int
noload_obj(Unpicklerobject *self)
{
- int i;
+ Py_ssize_t i;
if ((i = marker(self)) < 0) return -1;
return Pdata_clear(self->stack, i+1);
@@ -4991,7 +5026,7 @@ noload_obj(Unpicklerobject *self)
static int
noload_inst(Unpicklerobject *self)
{
- int i;
+ Py_ssize_t i;
char *s;
if ((i = marker(self)) < 0) return -1;
@@ -5068,7 +5103,7 @@ noload_append(Unpicklerobject *self)
static int
noload_appends(Unpicklerobject *self)
{
- int i;
+ Py_ssize_t i;
if ((i = marker(self)) < 0) return -1;
return Pdata_clear(self->stack, i);
}
@@ -5082,7 +5117,7 @@ noload_setitem(Unpicklerobject *self)
static int
noload_setitems(Unpicklerobject *self)
{
- int i;
+ Py_ssize_t i;
if ((i = marker(self)) < 0) return -1;
return Pdata_clear(self->stack, i);
}
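
Note: the cPickle.c hunks widen the bookkeeping fields from int to Py_ssize_t and change the buffer-doubling checks in Pdata_grow() and readline_file(): instead of shifting first and testing whether the result wrapped, they compare against PY_SSIZE_T_MAX before shifting. A condensed sketch of that check, assuming a pointer-sized element as in Pdata_grow():

    #include "Python.h"

    /* Return 1 if `size` slots of PyObject* can safely be doubled without
     * the element count or the byte count overflowing Py_ssize_t. */
    static int
    can_double_sketch(Py_ssize_t size)
    {
        if (size > (PY_SSIZE_T_MAX >> 1))
            return 0;                        /* 2*size would overflow */
        if ((size_t)(size << 1) > PY_SSIZE_T_MAX / sizeof(PyObject *))
            return 0;                        /* byte count would overflow */
        return 1;
    }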
diff --git a/Modules/cStringIO.c b/Modules/cStringIO.c
index 89f1dd6..5b78789 100644
--- a/Modules/cStringIO.c
+++ b/Modules/cStringIO.c
@@ -66,9 +66,7 @@ typedef struct { /* Subtype of IOobject */
PyObject_HEAD
char *buf;
Py_ssize_t pos, string_size;
- /* We store a reference to the object here in order to keep
- the buffer alive during the lifetime of the Iobject. */
- PyObject *pbuf;
+ Py_buffer pbuf;
} Iobject;
/* IOobject (common) methods */
@@ -127,12 +125,16 @@ IO_cgetval(PyObject *self) {
static PyObject *
IO_getval(IOobject *self, PyObject *args) {
PyObject *use_pos=Py_None;
+ int b;
Py_ssize_t s;
if (!IO__opencheck(self)) return NULL;
if (!PyArg_UnpackTuple(args,"getval", 0, 1,&use_pos)) return NULL;
- if (PyObject_IsTrue(use_pos)) {
+ b = PyObject_IsTrue(use_pos);
+ if (b < 0)
+ return NULL;
+ if (b) {
s=self->pos;
if (s > self->string_size) s=self->string_size;
}
@@ -166,10 +168,15 @@ IO_cread(PyObject *self, char **output, Py_ssize_t n) {
n = l;
if (n < 0) n=0;
}
+ if (n > INT_MAX) {
+ PyErr_SetString(PyExc_OverflowError,
+ "length too large");
+ return -1;
+ }
*output=((IOobject*)self)->buf + ((IOobject*)self)->pos;
((IOobject*)self)->pos += n;
- return n;
+ return (int)n;
}
static PyObject *
@@ -188,26 +195,30 @@ PyDoc_STRVAR(IO_readline__doc__, "readline() -- Read one line");
static int
IO_creadline(PyObject *self, char **output) {
- char *n, *s;
- Py_ssize_t l;
+ char *n, *start, *end;
+ Py_ssize_t len;
if (!IO__opencheck(IOOOBJECT(self))) return -1;
- for (n = ((IOobject*)self)->buf + ((IOobject*)self)->pos,
- s = ((IOobject*)self)->buf + ((IOobject*)self)->string_size;
- n < s && *n != '\n'; n++);
+ n = start = ((IOobject*)self)->buf + ((IOobject*)self)->pos;
+ end = ((IOobject*)self)->buf + ((IOobject*)self)->string_size;
+ while (n < end && *n != '\n')
+ n++;
- if (n < s) n++;
+ if (n < end) n++;
- *output=((IOobject*)self)->buf + ((IOobject*)self)->pos;
- l = n - ((IOobject*)self)->buf - ((IOobject*)self)->pos;
+ len = n - start;
+ if (len > INT_MAX)
+ len = INT_MAX;
+
+ *output=start;
- assert(IOOOBJECT(self)->pos <= PY_SSIZE_T_MAX - l);
+ assert(IOOOBJECT(self)->pos <= PY_SSIZE_T_MAX - len);
assert(IOOOBJECT(self)->pos >= 0);
assert(IOOOBJECT(self)->string_size >= 0);
- ((IOobject*)self)->pos += l;
- return (int)l;
+ ((IOobject*)self)->pos += len;
+ return (int)len;
}
static PyObject *
@@ -235,9 +246,9 @@ IO_readlines(IOobject *self, PyObject *args) {
int n;
char *output;
PyObject *result, *line;
- int hint = 0, length = 0;
+ Py_ssize_t hint = 0, length = 0;
- if (!PyArg_ParseTuple(args, "|i:readlines", &hint)) return NULL;
+ if (!PyArg_ParseTuple(args, "|n:readlines", &hint)) return NULL;
result = PyList_New(0);
if (!result)
@@ -373,31 +384,41 @@ PyDoc_STRVAR(O_write__doc__,
static int
-O_cwrite(PyObject *self, const char *c, Py_ssize_t l) {
- Py_ssize_t newl;
+O_cwrite(PyObject *self, const char *c, Py_ssize_t len) {
+ Py_ssize_t newpos;
Oobject *oself;
char *newbuf;
if (!IO__opencheck(IOOOBJECT(self))) return -1;
oself = (Oobject *)self;
- newl = oself->pos+l;
- if (newl >= oself->buf_size) {
- oself->buf_size *= 2;
- if (oself->buf_size <= newl) {
- assert(newl + 1 < INT_MAX);
- oself->buf_size = (int)(newl+1);
+ if (len > INT_MAX) {
+ PyErr_SetString(PyExc_OverflowError,
+ "length too large");
+ return -1;
+ }
+ assert(len >= 0);
+ if (oself->pos >= PY_SSIZE_T_MAX - len) {
+ PyErr_SetString(PyExc_OverflowError,
+ "new position too large");
+ return -1;
+ }
+ newpos = oself->pos + len;
+ if (newpos >= oself->buf_size) {
+ size_t newsize = oself->buf_size;
+ newsize *= 2;
+ if (newsize <= (size_t)newpos || newsize > PY_SSIZE_T_MAX) {
+ assert(newpos < PY_SSIZE_T_MAX - 1);
+ newsize = newpos + 1;
}
- newbuf = (char*)realloc(oself->buf, oself->buf_size);
+ newbuf = (char*)realloc(oself->buf, newsize);
if (!newbuf) {
PyErr_SetString(PyExc_MemoryError,"out of memory");
- free(oself->buf);
- oself->buf = 0;
- oself->buf_size = oself->pos = 0;
return -1;
- }
+ }
+ oself->buf_size = (Py_ssize_t)newsize;
oself->buf = newbuf;
- }
+ }
if (oself->string_size < oself->pos) {
/* In case of overseek, pad with null bytes the buffer region between
@@ -412,26 +433,27 @@ O_cwrite(PyObject *self, const char *c, Py_ssize_t l) {
(oself->pos - oself->string_size) * sizeof(char));
}
- memcpy(oself->buf+oself->pos,c,l);
+ memcpy(oself->buf + oself->pos, c, len);
- assert(oself->pos + l < INT_MAX);
- oself->pos += (int)l;
+ oself->pos = newpos;
if (oself->string_size < oself->pos) {
oself->string_size = oself->pos;
}
- return (int)l;
+ return (int)len;
}
static PyObject *
O_write(Oobject *self, PyObject *args) {
- char *c;
- int l;
+ Py_buffer buf;
+ int result;
- if (!PyArg_ParseTuple(args, "t#:write", &c, &l)) return NULL;
+ if (!PyArg_ParseTuple(args, "s*:write", &buf)) return NULL;
- if (O_cwrite((PyObject*)self,c,l) < 0) return NULL;
+ result = O_cwrite((PyObject*)self, buf.buf, buf.len);
+ PyBuffer_Release(&buf);
+ if (result < 0) return NULL;
Py_INCREF(Py_None);
return Py_None;
@@ -584,7 +606,7 @@ newOobject(int size) {
static PyObject *
I_close(Iobject *self, PyObject *unused) {
- Py_CLEAR(self->pbuf);
+ PyBuffer_Release(&self->pbuf);
self->buf = NULL;
self->pos = self->string_size = 0;
@@ -613,7 +635,7 @@ static struct PyMethodDef I_methods[] = {
static void
I_dealloc(Iobject *self) {
- Py_XDECREF(self->pbuf);
+ PyBuffer_Release(&self->pbuf);
PyObject_Del(self);
}
@@ -658,25 +680,26 @@ static PyTypeObject Itype = {
static PyObject *
newIobject(PyObject *s) {
Iobject *self;
- char *buf;
- Py_ssize_t size;
+ Py_buffer buf;
+ PyObject *args;
+ int result;
- if (PyUnicode_Check(s)) {
- if (PyObject_AsCharBuffer(s, (const char **)&buf, &size) != 0)
+ args = Py_BuildValue("(O)", s);
+ if (args == NULL)
+ return NULL;
+ result = PyArg_ParseTuple(args, "s*:StringIO", &buf);
+ Py_DECREF(args);
+ if (!result)
return NULL;
- }
- else if (PyObject_AsReadBuffer(s, (const void **)&buf, &size)) {
- PyErr_Format(PyExc_TypeError, "expected read buffer, %.200s found",
- s->ob_type->tp_name);
- return NULL;
- }
self = PyObject_New(Iobject, &Itype);
- if (!self) return NULL;
- Py_INCREF(s);
- self->buf=buf;
- self->string_size=size;
- self->pbuf=s;
+ if (!self) {
+ PyBuffer_Release(&buf);
+ return NULL;
+ }
+ self->buf=buf.buf;
+ self->string_size=buf.len;
+ self->pbuf=buf;
self->pos=0;
return (PyObject*)self;
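
Two themes run through the cStringIO.c hunks above: boolean results of PyObject_IsTrue() are now checked for the -1 error return before use, the input object is held through a Py_buffer filled by the "s*" format (so PyBuffer_Release() replaces the old Py_DECREF in close and dealloc), and O_cwrite() grows its buffer with overflow checks, keeping the old buffer intact when realloc() fails instead of freeing it and leaving the object unusable. A minimal sketch of that growth pattern, with illustrative names only (PTRDIFF_MAX stands in for PY_SSIZE_T_MAX):

    #include <stdlib.h>
    #include <stdint.h>

    /* Grow *buf to at least `needed` bytes, doubling the capacity when possible. */
    static int
    grow_buffer(char **buf, size_t *capacity, size_t needed)
    {
        size_t newsize;
        char *newbuf;

        if (needed <= *capacity)
            return 0;
        newsize = *capacity ? *capacity * 2 : 16;
        if (newsize < needed || newsize > (size_t)PTRDIFF_MAX)
            newsize = needed;                 /* doubling overflowed or fell short */
        newbuf = realloc(*buf, newsize);
        if (newbuf == NULL)
            return -1;                        /* old buffer is still valid */
        *buf = newbuf;
        *capacity = newsize;
        return 0;
    }
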
diff --git a/Modules/cdmodule.c b/Modules/cdmodule.c
index 8dfb769..9ee9b0b 100644
--- a/Modules/cdmodule.c
+++ b/Modules/cdmodule.c
@@ -535,10 +535,8 @@ CD_deleteparser(cdparserobject *self, PyObject *args)
/* no sense in keeping the callbacks, so remove them */
for (i = 0; i < NCALLBACKS; i++) {
- Py_XDECREF(self->ob_cdcallbacks[i].ob_cdcallback);
- self->ob_cdcallbacks[i].ob_cdcallback = NULL;
- Py_XDECREF(self->ob_cdcallbacks[i].ob_cdcallbackarg);
- self->ob_cdcallbacks[i].ob_cdcallbackarg = NULL;
+ Py_CLEAR(self->ob_cdcallbacks[i].ob_cdcallback);
+ Py_CLEAR(self->ob_cdcallbacks[i].ob_cdcallbackarg);
}
Py_INCREF(Py_None);
@@ -588,11 +586,9 @@ CD_removecallback(cdparserobject *self, PyObject *args)
CDremovecallback(self->ob_cdparser, (CDDATATYPES) type);
- Py_XDECREF(self->ob_cdcallbacks[type].ob_cdcallback);
- self->ob_cdcallbacks[type].ob_cdcallback = NULL;
+ Py_CLEAR(self->ob_cdcallbacks[type].ob_cdcallback);
- Py_XDECREF(self->ob_cdcallbacks[type].ob_cdcallbackarg);
- self->ob_cdcallbacks[type].ob_cdcallbackarg = NULL;
+ Py_CLEAR(self->ob_cdcallbacks[type].ob_cdcallbackarg);
Py_INCREF(Py_None);
return Py_None;
@@ -668,10 +664,8 @@ cdparser_dealloc(cdparserobject *self)
int i;
for (i = 0; i < NCALLBACKS; i++) {
- Py_XDECREF(self->ob_cdcallbacks[i].ob_cdcallback);
- self->ob_cdcallbacks[i].ob_cdcallback = NULL;
- Py_XDECREF(self->ob_cdcallbacks[i].ob_cdcallbackarg);
- self->ob_cdcallbacks[i].ob_cdcallbackarg = NULL;
+ Py_CLEAR(self->ob_cdcallbacks[i].ob_cdcallback);
+ Py_CLEAR(self->ob_cdcallbacks[i].ob_cdcallbackarg);
}
CDdeleteparser(self->ob_cdparser);
PyObject_Del(self);
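
The cdmodule.c hunks above (and several later modules in this patch) replace paired "Py_XDECREF(p); p = NULL;" statements with Py_CLEAR(p). The macro clears the slot before dropping the reference, so destructor code that runs during the decref and happens to re-enter the object can never see a dangling pointer. Roughly, Py_CLEAR expands to the pattern below (sketch only; the real definition lives in Include/object.h):

    #include <Python.h>

    #define MY_CLEAR(op)                                  \
        do {                                              \
            PyObject *_tmp = (PyObject *)(op);            \
            if (_tmp != NULL) {                           \
                (op) = NULL;      /* unlink first ...   */\
                Py_DECREF(_tmp);  /* ... then decref    */\
            }                                             \
        } while (0)
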
diff --git a/Modules/cmathmodule.c b/Modules/cmathmodule.c
index 0dc6bdb..5720678 100644
--- a/Modules/cmathmodule.c
+++ b/Modules/cmathmodule.c
@@ -1006,6 +1006,13 @@ cmath_rect(PyObject *self, PyObject *args)
else
errno = 0;
}
+ else if (phi == 0.0) {
+ /* Workaround for buggy results with phi=-0.0 on OS X 10.8. See
+ bugs.python.org/issue18513. */
+ z.real = r;
+ z.imag = r * phi;
+ errno = 0;
+ }
else {
z.real = r * cos(phi);
z.imag = r * sin(phi);
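
The cmath_rect() hunk above special-cases phi == 0.0 (which matches both +0.0 and -0.0) to work around libm builds, notably OS X 10.8, that reportedly produced buggy results for phi = -0.0; see bugs.python.org/issue18513 as cited in the diff. Computing the imaginary part as r * phi preserves the sign of the zero without calling sin() at all. A standalone illustration of the intended semantics (the values are examples only):

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        double r = 2.0, phi = -0.0;
        double re = r;          /* cos(+/-0.0) is exactly 1.0 */
        double im = r * phi;    /* multiplication keeps the zero's sign: -0.0 */
        printf("rect(%g, %g) ~ (%g, %g), signbit(im)=%d\n",
               r, phi, re, im, signbit(im) != 0);
        return 0;
    }
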
diff --git a/Modules/dbmmodule.c b/Modules/dbmmodule.c
index f9c99a8..8b16def 100644
--- a/Modules/dbmmodule.c
+++ b/Modules/dbmmodule.c
@@ -168,11 +168,13 @@ static int
dbm_contains(register dbmobject *dp, PyObject *v)
{
datum key, val;
+ char *ptr;
+ Py_ssize_t size;
- if (PyString_AsStringAndSize(v, (char **)&key.dptr,
- (Py_ssize_t *)&key.dsize)) {
+ if (PyString_AsStringAndSize(v, &ptr, &size))
return -1;
- }
+ key.dptr = ptr;
+ key.dsize = size;
/* Expand check_dbmobject_open to return -1 */
if (dp->di_dbm == NULL) {
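
The dbm_contains() hunk above stops casting the address of key.dsize to Py_ssize_t *. On platforms where the datum size field is an int, writing a full Py_ssize_t through such a cast scribbles past the field on 64-bit builds; extracting into a correctly typed char*/Py_ssize_t pair and then assigning the members converts the value instead. A tiny standalone illustration of the difference (fake_datum is invented for the example):

    #include <stdio.h>

    struct fake_datum { char *dptr; int dsize; };   /* dsize is int in many ndbm variants */

    int main(void)
    {
        struct fake_datum d = { "hello", 0 };
        long long size = 5;                 /* stands in for Py_ssize_t */
        /* Unsafe: *(long long *)&d.dsize = size;  -- writes 8 bytes into a 4-byte field */
        d.dsize = (int)size;                /* safe: plain assignment converts the value */
        printf("%s has dsize %d\n", d.dptr, d.dsize);
        return 0;
    }
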
diff --git a/Modules/errnomodule.c b/Modules/errnomodule.c
index 68c3c49..87ebab0 100644
--- a/Modules/errnomodule.c
+++ b/Modules/errnomodule.c
@@ -783,6 +783,9 @@ initerrno(void)
#ifdef WSAN
inscode(d, ds, de, "WSAN", WSAN, "Error WSAN");
#endif
+#ifdef ENOTSUP
+ inscode(d, ds, de, "ENOTSUP", ENOTSUP, "Operation not supported");
+#endif
Py_DECREF(de);
}
diff --git a/Modules/expat/COPYING b/Modules/expat/COPYING
index fc97b02..dcb4506 100644
--- a/Modules/expat/COPYING
+++ b/Modules/expat/COPYING
@@ -1,5 +1,6 @@
Copyright (c) 1998, 1999, 2000 Thai Open Source Software Center Ltd
and Clark Cooper
+Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Expat maintainers.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/Modules/expat/amigaconfig.h b/Modules/expat/amigaconfig.h
index 6781a71..86c6115 100644
--- a/Modules/expat/amigaconfig.h
+++ b/Modules/expat/amigaconfig.h
@@ -10,66 +10,12 @@
/* Define to 1 if you have the <check.h> header file. */
#undef HAVE_CHECK_H
-/* Define to 1 if you have the <dlfcn.h> header file. */
-#undef HAVE_DLFCN_H
-
-/* Define to 1 if you have the <fcntl.h> header file. */
-#define HAVE_FCNTL_H 1
-
-/* Define to 1 if you have the `getpagesize' function. */
-#undef HAVE_GETPAGESIZE
-
-/* Define to 1 if you have the <inttypes.h> header file. */
-#define HAVE_INTTYPES_H 1
-
/* Define to 1 if you have the `memmove' function. */
#define HAVE_MEMMOVE 1
-/* Define to 1 if you have the <memory.h> header file. */
-#undef HAVE_MEMORY_H
-
-/* Define to 1 if you have a working `mmap' system call. */
-#undef HAVE_MMAP
-
-/* Define to 1 if you have the <stdint.h> header file. */
-#define HAVE_STDINT_H 1
-
-/* Define to 1 if you have the <stdlib.h> header file. */
-#define HAVE_STDLIB_H 1
-
-/* Define to 1 if you have the <strings.h> header file. */
-#define HAVE_STRINGS_H 1
-
-/* Define to 1 if you have the <string.h> header file. */
-#define HAVE_STRING_H 1
-
-/* Define to 1 if you have the <sys/stat.h> header file. */
-#define HAVE_SYS_STAT_H 1
-
-/* Define to 1 if you have the <sys/types.h> header file. */
-#define HAVE_SYS_TYPES_H 1
-
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
-/* Define to the address where bug reports for this package should be sent. */
-#define PACKAGE_BUGREPORT "expat-bugs@mail.libexpat.org"
-
-/* Define to the full name of this package. */
-#define PACKAGE_NAME "expat"
-
-/* Define to the full name and version of this package. */
-#define PACKAGE_STRING "expat 1.95.8"
-
-/* Define to the one symbol short name of this package. */
-#undef PACKAGE_TARNAME
-
-/* Define to the version of this package. */
-#define PACKAGE_VERSION "1.95.8"
-
-/* Define to 1 if you have the ANSI C header files. */
-#define STDC_HEADERS 1
-
/* whether byteorder is bigendian */
#define WORDS_BIGENDIAN
@@ -83,14 +29,4 @@
/* Define to make XML Namespaces functionality available. */
#define XML_NS
-/* Define to empty if `const' does not conform to ANSI C. */
-#undef const
-
-/* Define to `long' if <sys/types.h> does not define. */
-#undef off_t
-
-/* Define to `unsigned' if <sys/types.h> does not define. */
-#undef size_t
-
-
#endif /* AMIGACONFIG_H */
diff --git a/Modules/expat/ascii.h b/Modules/expat/ascii.h
index 337e5bb..d10530b 100644
--- a/Modules/expat/ascii.h
+++ b/Modules/expat/ascii.h
@@ -83,3 +83,10 @@
#define ASCII_LSQB 0x5B
#define ASCII_RSQB 0x5D
#define ASCII_UNDERSCORE 0x5F
+#define ASCII_LPAREN 0x28
+#define ASCII_RPAREN 0x29
+#define ASCII_FF 0x0C
+#define ASCII_SLASH 0x2F
+#define ASCII_HASH 0x23
+#define ASCII_PIPE 0x7C
+#define ASCII_COMMA 0x2C
diff --git a/Modules/expat/expat.h b/Modules/expat/expat.h
index 89646d2..06b5de0 100644
--- a/Modules/expat/expat.h
+++ b/Modules/expat/expat.h
@@ -742,6 +742,29 @@ XML_GetSpecifiedAttributeCount(XML_Parser parser);
XMLPARSEAPI(int)
XML_GetIdAttributeIndex(XML_Parser parser);
+#ifdef XML_ATTR_INFO
+/* Source file byte offsets for the start and end of attribute names and values.
+ The value indices are exclusive of surrounding quotes; thus in a UTF-8 source
+ file an attribute value of "blah" will yield:
+ info->valueEnd - info->valueStart = 4 bytes.
+*/
+typedef struct {
+ XML_Index nameStart; /* Offset to beginning of the attribute name. */
+ XML_Index nameEnd; /* Offset after the attribute name's last byte. */
+ XML_Index valueStart; /* Offset to beginning of the attribute value. */
+ XML_Index valueEnd; /* Offset after the attribute value's last byte. */
+} XML_AttrInfo;
+
+/* Returns an array of XML_AttrInfo structures for the attribute/value pairs
+ passed in last call to the XML_StartElementHandler that were specified
+ in the start-tag rather than defaulted. Each attribute/value pair counts
+ as 1; thus the number of entries in the array is
+ XML_GetSpecifiedAttributeCount(parser) / 2.
+*/
+XMLPARSEAPI(const XML_AttrInfo *)
+XML_GetAttributeInfo(XML_Parser parser);
+#endif
+
/* Parses some input. Returns XML_STATUS_ERROR if a fatal error is
detected. The last call to XML_Parse must have isFinal true; len
may be zero for this call (or any other).
@@ -994,7 +1017,9 @@ enum XML_FeatureEnum {
XML_FEATURE_MIN_SIZE,
XML_FEATURE_SIZEOF_XML_CHAR,
XML_FEATURE_SIZEOF_XML_LCHAR,
- XML_FEATURE_NS
+ XML_FEATURE_NS,
+ XML_FEATURE_LARGE_SIZE,
+ XML_FEATURE_ATTR_INFO
/* Additional features must be added to the end of this enum. */
};
@@ -1014,7 +1039,7 @@ XML_GetFeatureList(void);
change to major or minor version.
*/
#define XML_MAJOR_VERSION 2
-#define XML_MINOR_VERSION 0
+#define XML_MINOR_VERSION 1
#define XML_MICRO_VERSION 0
#ifdef __cplusplus
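
The expat.h hunk above is part of the upgrade to Expat 2.1.0: it adds the XML_AttrInfo structure and XML_GetAttributeInfo(), which report source byte offsets for attribute names and values, plus the matching XML_FEATURE_LARGE_SIZE and XML_FEATURE_ATTR_INFO feature flags. A minimal usage sketch, assuming the library was compiled with XML_ATTR_INFO defined (otherwise the function is not declared) and a narrow-character build; the sample document is invented:

    #include <stdio.h>
    #include <expat.h>

    static void XMLCALL
    start(void *userData, const XML_Char *name, const XML_Char **atts)
    {
        XML_Parser p = (XML_Parser)userData;
        const XML_AttrInfo *info = XML_GetAttributeInfo(p);
        int i, n = XML_GetSpecifiedAttributeCount(p) / 2;
        (void)atts;
        for (i = 0; i < n; i++)
            printf("<%s>: name bytes [%ld,%ld), value bytes [%ld,%ld)\n", name,
                   (long)info[i].nameStart, (long)info[i].nameEnd,
                   (long)info[i].valueStart, (long)info[i].valueEnd);
    }

    int main(void)
    {
        const char doc[] = "<root a='1' b='two'/>";
        XML_Parser p = XML_ParserCreate(NULL);
        XML_UseParserAsHandlerArg(p);          /* handlers receive the parser itself */
        XML_SetStartElementHandler(p, start);
        XML_Parse(p, doc, (int)sizeof(doc) - 1, 1);
        XML_ParserFree(p);
        return 0;
    }
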
diff --git a/Modules/expat/expat_external.h b/Modules/expat/expat_external.h
index f054014..f337e1c 100644
--- a/Modules/expat/expat_external.h
+++ b/Modules/expat/expat_external.h
@@ -38,9 +38,9 @@
system headers may assume the cdecl convention.
*/
#ifndef XMLCALL
-#if defined(XML_USE_MSC_EXTENSIONS)
+#if defined(_MSC_VER)
#define XMLCALL __cdecl
-#elif defined(__GNUC__) && defined(__i386)
+#elif defined(__GNUC__) && defined(__i386) && !defined(__INTEL_COMPILER)
#define XMLCALL __attribute__((cdecl))
#else
/* For any platform which uses this definition and supports more than
diff --git a/Modules/expat/internal.h b/Modules/expat/internal.h
index ff056c6..dd54548 100644
--- a/Modules/expat/internal.h
+++ b/Modules/expat/internal.h
@@ -20,7 +20,7 @@
and therefore subject to change.
*/
-#if defined(__GNUC__) && defined(__i386__)
+#if defined(__GNUC__) && defined(__i386__) && !defined(__MINGW32__)
/* We'll use this version by default only where we know it helps.
regparm() generates warnings on Solaris boxes. See SF bug #692878.
diff --git a/Modules/expat/watcomconfig.h b/Modules/expat/watcomconfig.h
new file mode 100644
index 0000000..2f05e3f
--- /dev/null
+++ b/Modules/expat/watcomconfig.h
@@ -0,0 +1,47 @@
+/* expat_config.h for use with Open Watcom 1.5 and above. */
+
+#ifndef WATCOMCONFIG_H
+#define WATCOMCONFIG_H
+
+#ifdef __NT__
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+
+/* 1234 = LIL_ENDIAN, 4321 = BIGENDIAN */
+#define BYTEORDER 1234
+
+/* Define to 1 if you have the `memmove' function. */
+#define HAVE_MEMMOVE 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT "expat-bugs@mail.libexpat.org"
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "expat"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "expat 2.0.0"
+
+/* Define to the one symbol short name of this package. */
+#undef PACKAGE_TARNAME
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "2.0.0"
+
+/* Define to specify how much context to retain around the current parse
+ point. */
+#define XML_CONTEXT_BYTES 1024
+
+/* Define to make parameter entity parsing functionality available. */
+#define XML_DTD 1
+
+/* Define to make XML Namespaces functionality available. */
+#define XML_NS 1
+
+#endif
+
diff --git a/Modules/expat/xmlparse.c b/Modules/expat/xmlparse.c
index 46f6507..f35aa36 100644
--- a/Modules/expat/xmlparse.c
+++ b/Modules/expat/xmlparse.c
@@ -2,24 +2,27 @@
See the file COPYING for copying permission.
*/
+#include <stddef.h>
+#include <string.h> /* memset(), memcpy() */
+#include <assert.h>
+#include <limits.h> /* UINT_MAX */
+#include <time.h> /* time() */
+
#define XML_BUILDING_EXPAT 1
#ifdef COMPILED_FROM_DSP
#include "winconfig.h"
#elif defined(MACOS_CLASSIC)
#include "macconfig.h"
-#elif defined(__amigaos4__)
+#elif defined(__amigaos__)
#include "amigaconfig.h"
+#elif defined(__WATCOMC__)
+#include "watcomconfig.h"
#elif defined(HAVE_EXPAT_CONFIG_H)
#include <expat_config.h>
#endif /* ndef COMPILED_FROM_DSP */
-#include <stddef.h>
-#include <string.h> /* memset(), memcpy() */
-#include <assert.h>
-#include <limits.h> /* UINT_MAX */
-#include <time.h> /* time() */
-
+#include "ascii.h"
#include "expat.h"
#ifdef XML_UNICODE
@@ -28,7 +31,8 @@
#define XmlGetInternalEncoding XmlGetUtf16InternalEncoding
#define XmlGetInternalEncodingNS XmlGetUtf16InternalEncodingNS
#define XmlEncode XmlUtf16Encode
-#define MUST_CONVERT(enc, s) (!(enc)->isUtf16 || (((unsigned long)s) & 1))
+/* Using pointer subtraction to convert to integer type. */
+#define MUST_CONVERT(enc, s) (!(enc)->isUtf16 || (((char *)(s) - (char *)NULL) & 1))
typedef unsigned short ICHAR;
#else
#define XML_ENCODE_MAX XML_UTF8_ENCODE_MAX
@@ -325,15 +329,15 @@ processXmlDecl(XML_Parser parser, int isGeneralTextEntity,
static enum XML_Error
initializeEncoding(XML_Parser parser);
static enum XML_Error
-doProlog(XML_Parser parser, const ENCODING *enc, const char *s,
- const char *end, int tok, const char *next, const char **nextPtr,
+doProlog(XML_Parser parser, const ENCODING *enc, const char *s,
+ const char *end, int tok, const char *next, const char **nextPtr,
XML_Bool haveMore);
static enum XML_Error
-processInternalEntity(XML_Parser parser, ENTITY *entity,
+processInternalEntity(XML_Parser parser, ENTITY *entity,
XML_Bool betweenDecl);
static enum XML_Error
doContent(XML_Parser parser, int startTagLevel, const ENCODING *enc,
- const char *start, const char *end, const char **endPtr,
+ const char *start, const char *end, const char **endPtr,
XML_Bool haveMore);
static enum XML_Error
doCdataSection(XML_Parser parser, const ENCODING *, const char **startPtr,
@@ -351,7 +355,7 @@ static enum XML_Error
addBinding(XML_Parser parser, PREFIX *prefix, const ATTRIBUTE_ID *attId,
const XML_Char *uri, BINDING **bindingsPtr);
static int
-defineAttribute(ELEMENT_TYPE *type, ATTRIBUTE_ID *, XML_Bool isCdata,
+defineAttribute(ELEMENT_TYPE *type, ATTRIBUTE_ID *, XML_Bool isCdata,
XML_Bool isId, const XML_Char *dfltValue, XML_Parser parser);
static enum XML_Error
storeAttributeValue(XML_Parser parser, const ENCODING *, XML_Bool isCdata,
@@ -436,6 +440,7 @@ parserCreate(const XML_Char *encodingName,
const XML_Memory_Handling_Suite *memsuite,
const XML_Char *nameSep,
DTD *dtd);
+
static void
parserInit(XML_Parser parser, const XML_Char *encodingName);
@@ -535,6 +540,9 @@ struct XML_ParserStruct {
NS_ATT *m_nsAtts;
unsigned long m_nsAttsVersion;
unsigned char m_nsAttsPower;
+#ifdef XML_ATTR_INFO
+ XML_AttrInfo *m_attInfo;
+#endif
POSITION m_position;
STRING_POOL m_tempPool;
STRING_POOL m_temp2Pool;
@@ -643,6 +651,7 @@ struct XML_ParserStruct {
#define nsAtts (parser->m_nsAtts)
#define nsAttsVersion (parser->m_nsAttsVersion)
#define nsAttsPower (parser->m_nsAttsPower)
+#define attInfo (parser->m_attInfo)
#define tempPool (parser->m_tempPool)
#define temp2Pool (parser->m_temp2Pool)
#define groupConnector (parser->m_groupConnector)
@@ -673,10 +682,12 @@ XML_ParserCreateNS(const XML_Char *encodingName, XML_Char nsSep)
}
static const XML_Char implicitContext[] = {
- 'x', 'm', 'l', '=', 'h', 't', 't', 'p', ':', '/', '/',
- 'w', 'w', 'w', '.', 'w', '3', '.', 'o', 'r', 'g', '/',
- 'X', 'M', 'L', '/', '1', '9', '9', '8', '/',
- 'n', 'a', 'm', 'e', 's', 'p', 'a', 'c', 'e', '\0'
+ ASCII_x, ASCII_m, ASCII_l, ASCII_EQUALS, ASCII_h, ASCII_t, ASCII_t, ASCII_p,
+ ASCII_COLON, ASCII_SLASH, ASCII_SLASH, ASCII_w, ASCII_w, ASCII_w,
+ ASCII_PERIOD, ASCII_w, ASCII_3, ASCII_PERIOD, ASCII_o, ASCII_r, ASCII_g,
+ ASCII_SLASH, ASCII_X, ASCII_M, ASCII_L, ASCII_SLASH, ASCII_1, ASCII_9,
+ ASCII_9, ASCII_8, ASCII_SLASH, ASCII_n, ASCII_a, ASCII_m, ASCII_e,
+ ASCII_s, ASCII_p, ASCII_a, ASCII_c, ASCII_e, '\0'
};
static unsigned long
@@ -690,23 +701,22 @@ generate_hash_secret_salt(void)
static XML_Bool /* only valid for root parser */
startParsing(XML_Parser parser)
{
- /* hash functions must be initialized before setContext() is called */
-
- if (hash_secret_salt == 0)
- hash_secret_salt = generate_hash_secret_salt();
- if (ns) {
- /* implicit context only set for root parser, since child
- parsers (i.e. external entity parsers) will inherit it
- */
- return setContext(parser, implicitContext);
- }
- return XML_TRUE;
+ /* hash functions must be initialized before setContext() is called */
+ if (hash_secret_salt == 0)
+ hash_secret_salt = generate_hash_secret_salt();
+ if (ns) {
+ /* implicit context only set for root parser, since child
+ parsers (i.e. external entity parsers) will inherit it
+ */
+ return setContext(parser, implicitContext);
+ }
+ return XML_TRUE;
}
XML_Parser XMLCALL
XML_ParserCreate_MM(const XML_Char *encodingName,
- const XML_Memory_Handling_Suite *memsuite,
- const XML_Char *nameSep)
+ const XML_Memory_Handling_Suite *memsuite,
+ const XML_Char *nameSep)
{
return parserCreate(encodingName, memsuite, nameSep, NULL);
}
@@ -753,9 +763,20 @@ parserCreate(const XML_Char *encodingName,
FREE(parser);
return NULL;
}
+#ifdef XML_ATTR_INFO
+ attInfo = (XML_AttrInfo*)MALLOC(attsSize * sizeof(XML_AttrInfo));
+ if (attInfo == NULL) {
+ FREE(atts);
+ FREE(parser);
+ return NULL;
+ }
+#endif
dataBuf = (XML_Char *)MALLOC(INIT_DATA_BUF_SIZE * sizeof(XML_Char));
if (dataBuf == NULL) {
FREE(atts);
+#ifdef XML_ATTR_INFO
+ FREE(attInfo);
+#endif
FREE(parser);
return NULL;
}
@@ -768,6 +789,9 @@ parserCreate(const XML_Char *encodingName,
if (_dtd == NULL) {
FREE(dataBuf);
FREE(atts);
+#ifdef XML_ATTR_INFO
+ FREE(attInfo);
+#endif
FREE(parser);
return NULL;
}
@@ -783,7 +807,7 @@ parserCreate(const XML_Char *encodingName,
unknownEncodingHandler = NULL;
unknownEncodingHandlerData = NULL;
- namespaceSeparator = '!';
+ namespaceSeparator = ASCII_EXCL;
ns = XML_FALSE;
ns_triplets = XML_FALSE;
@@ -1154,6 +1178,9 @@ XML_ParserFree(XML_Parser parser)
#endif /* XML_DTD */
dtdDestroy(_dtd, (XML_Bool)!parentParser, &parser->m_mem);
FREE((void *)atts);
+#ifdef XML_ATTR_INFO
+ FREE((void *)attInfo);
+#endif
FREE(groupConnector);
FREE(buffer);
FREE(dataBuf);
@@ -1234,6 +1261,14 @@ XML_GetIdAttributeIndex(XML_Parser parser)
return idAttIndex;
}
+#ifdef XML_ATTR_INFO
+const XML_AttrInfo * XMLCALL
+XML_GetAttributeInfo(XML_Parser parser)
+{
+ return attInfo;
+}
+#endif
+
void XMLCALL
XML_SetElementHandler(XML_Parser parser,
XML_StartElementHandler start,
@@ -1499,7 +1534,7 @@ XML_Parse(XML_Parser parser, const char *s, int len, int isFinal)
XmlUpdatePosition(encoding, positionPtr, bufferPtr, &position);
positionPtr = bufferPtr;
return XML_STATUS_SUSPENDED;
- case XML_INITIALIZED:
+ case XML_INITIALIZED:
case XML_PARSING:
ps_parsing = XML_FINISHED;
/* fall through */
@@ -1555,15 +1590,11 @@ XML_Parse(XML_Parser parser, const char *s, int len, int isFinal)
: (char *)REALLOC(buffer, len * 2));
if (temp == NULL) {
errorCode = XML_ERROR_NO_MEMORY;
- return XML_STATUS_ERROR;
- }
- buffer = temp;
- if (!buffer) {
- errorCode = XML_ERROR_NO_MEMORY;
eventPtr = eventEndPtr = NULL;
processor = errorProcessor;
return XML_STATUS_ERROR;
}
+ buffer = temp;
bufferLim = buffer + len * 2;
}
memcpy(buffer, end, nLeftOver);
@@ -1629,7 +1660,7 @@ XML_ParseBuffer(XML_Parser parser, int len, int isFinal)
case XML_SUSPENDED:
result = XML_STATUS_SUSPENDED;
break;
- case XML_INITIALIZED:
+ case XML_INITIALIZED:
case XML_PARSING:
if (isFinal) {
ps_parsing = XML_FINISHED;
@@ -1719,6 +1750,8 @@ XML_GetBuffer(XML_Parser parser, int len)
bufferPtr = buffer = newBuf;
#endif /* not defined XML_CONTEXT_BYTES */
}
+ eventPtr = eventEndPtr = NULL;
+ positionPtr = NULL;
}
return bufferEnd;
}
@@ -1776,7 +1809,7 @@ XML_ResumeParser(XML_Parser parser)
case XML_SUSPENDED:
result = XML_STATUS_SUSPENDED;
break;
- case XML_INITIALIZED:
+ case XML_INITIALIZED:
case XML_PARSING:
if (ps_finalBuffer) {
ps_parsing = XML_FINISHED;
@@ -2001,6 +2034,12 @@ XML_GetFeatureList(void)
#ifdef XML_NS
{XML_FEATURE_NS, XML_L("XML_NS"), 0},
#endif
+#ifdef XML_LARGE_SIZE
+ {XML_FEATURE_LARGE_SIZE, XML_L("XML_LARGE_SIZE"), 0},
+#endif
+#ifdef XML_ATTR_INFO
+ {XML_FEATURE_ATTR_INFO, XML_L("XML_ATTR_INFO"), 0},
+#endif
{XML_FEATURE_END, NULL, 0}
};
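
The feature-list hunk above makes the new capabilities discoverable at run time through XML_GetFeatureList(). A short sketch of probing the linked library (assuming a non-XML_UNICODE build, so the feature names are plain char strings):

    #include <stdio.h>
    #include <expat.h>

    int main(void)
    {
        const XML_Feature *f;
        for (f = XML_GetFeatureList(); f->feature != XML_FEATURE_END; f++)
            printf("%s (value=%ld)\n", f->name, f->value);
        return 0;
    }
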
@@ -2063,7 +2102,7 @@ contentProcessor(XML_Parser parser,
const char *end,
const char **endPtr)
{
- enum XML_Error result = doContent(parser, 0, encoding, start, end,
+ enum XML_Error result = doContent(parser, 0, encoding, start, end,
endPtr, (XML_Bool)!ps_finalBuffer);
if (result == XML_ERROR_NONE) {
if (!storeRawNames(parser))
@@ -2145,7 +2184,7 @@ externalEntityInitProcessor3(XML_Parser parser,
if (result != XML_ERROR_NONE)
return result;
switch (ps_parsing) {
- case XML_SUSPENDED:
+ case XML_SUSPENDED:
*endPtr = next;
return XML_ERROR_NONE;
case XML_FINISHED:
@@ -2179,7 +2218,7 @@ externalEntityContentProcessor(XML_Parser parser,
const char *end,
const char **endPtr)
{
- enum XML_Error result = doContent(parser, 1, encoding, start, end,
+ enum XML_Error result = doContent(parser, 1, encoding, start, end,
endPtr, (XML_Bool)!ps_finalBuffer);
if (result == XML_ERROR_NONE) {
if (!storeRawNames(parser))
@@ -2198,7 +2237,7 @@ doContent(XML_Parser parser,
XML_Bool haveMore)
{
/* save one level of indirection */
- DTD * const dtd = _dtd;
+ DTD * const dtd = _dtd;
const char **eventPP;
const char **eventEndPP;
@@ -2229,8 +2268,8 @@ doContent(XML_Parser parser,
}
else if (defaultHandler)
reportDefault(parser, enc, s, end);
- /* We are at the end of the final buffer, should we check for
- XML_SUSPENDED, XML_FINISHED?
+ /* We are at the end of the final buffer, should we check for
+ XML_SUSPENDED, XML_FINISHED?
*/
if (startTagLevel == 0)
return XML_ERROR_NO_ELEMENTS;
@@ -2581,8 +2620,8 @@ doContent(XML_Parser parser,
}
else if (defaultHandler)
reportDefault(parser, enc, s, end);
- /* We are at the end of the final buffer, should we check for
- XML_SUSPENDED, XML_FINISHED?
+ /* We are at the end of the final buffer, should we check for
+ XML_SUSPENDED, XML_FINISHED?
*/
if (startTagLevel == 0) {
*eventPP = end;
@@ -2595,26 +2634,29 @@ doContent(XML_Parser parser,
*nextPtr = end;
return XML_ERROR_NONE;
case XML_TOK_DATA_CHARS:
- if (characterDataHandler) {
- if (MUST_CONVERT(enc, s)) {
- for (;;) {
- ICHAR *dataPtr = (ICHAR *)dataBuf;
- XmlConvert(enc, &s, next, &dataPtr, (ICHAR *)dataBufEnd);
- *eventEndPP = s;
- characterDataHandler(handlerArg, dataBuf,
- (int)(dataPtr - (ICHAR *)dataBuf));
- if (s == next)
- break;
- *eventPP = s;
+ {
+ XML_CharacterDataHandler charDataHandler = characterDataHandler;
+ if (charDataHandler) {
+ if (MUST_CONVERT(enc, s)) {
+ for (;;) {
+ ICHAR *dataPtr = (ICHAR *)dataBuf;
+ XmlConvert(enc, &s, next, &dataPtr, (ICHAR *)dataBufEnd);
+ *eventEndPP = s;
+ charDataHandler(handlerArg, dataBuf,
+ (int)(dataPtr - (ICHAR *)dataBuf));
+ if (s == next)
+ break;
+ *eventPP = s;
+ }
}
+ else
+ charDataHandler(handlerArg,
+ (XML_Char *)s,
+ (int)((XML_Char *)next - (XML_Char *)s));
}
- else
- characterDataHandler(handlerArg,
- (XML_Char *)s,
- (int)((XML_Char *)next - (XML_Char *)s));
+ else if (defaultHandler)
+ reportDefault(parser, enc, s, next);
}
- else if (defaultHandler)
- reportDefault(parser, enc, s, next);
break;
case XML_TOK_PI:
if (!reportProcessingInstruction(parser, enc, s, next))
@@ -2631,7 +2673,7 @@ doContent(XML_Parser parser,
}
*eventPP = s = next;
switch (ps_parsing) {
- case XML_SUSPENDED:
+ case XML_SUSPENDED:
*nextPtr = next;
return XML_ERROR_NONE;
case XML_FINISHED:
@@ -2690,23 +2732,44 @@ storeAtts(XML_Parser parser, const ENCODING *enc,
if (n + nDefaultAtts > attsSize) {
int oldAttsSize = attsSize;
ATTRIBUTE *temp;
+#ifdef XML_ATTR_INFO
+ XML_AttrInfo *temp2;
+#endif
attsSize = n + nDefaultAtts + INIT_ATTS_SIZE;
temp = (ATTRIBUTE *)REALLOC((void *)atts, attsSize * sizeof(ATTRIBUTE));
if (temp == NULL)
return XML_ERROR_NO_MEMORY;
atts = temp;
+#ifdef XML_ATTR_INFO
+ temp2 = (XML_AttrInfo *)REALLOC((void *)attInfo, attsSize * sizeof(XML_AttrInfo));
+ if (temp2 == NULL)
+ return XML_ERROR_NO_MEMORY;
+ attInfo = temp2;
+#endif
if (n > oldAttsSize)
XmlGetAttributes(enc, attStr, n, atts);
}
appAtts = (const XML_Char **)atts;
for (i = 0; i < n; i++) {
+ ATTRIBUTE *currAtt = &atts[i];
+#ifdef XML_ATTR_INFO
+ XML_AttrInfo *currAttInfo = &attInfo[i];
+#endif
/* add the name and value to the attribute list */
- ATTRIBUTE_ID *attId = getAttributeId(parser, enc, atts[i].name,
- atts[i].name
- + XmlNameLength(enc, atts[i].name));
+ ATTRIBUTE_ID *attId = getAttributeId(parser, enc, currAtt->name,
+ currAtt->name
+ + XmlNameLength(enc, currAtt->name));
if (!attId)
return XML_ERROR_NO_MEMORY;
+#ifdef XML_ATTR_INFO
+ currAttInfo->nameStart = parseEndByteIndex - (parseEndPtr - currAtt->name);
+ currAttInfo->nameEnd = currAttInfo->nameStart +
+ XmlNameLength(enc, currAtt->name);
+ currAttInfo->valueStart = parseEndByteIndex -
+ (parseEndPtr - currAtt->valuePtr);
+ currAttInfo->valueEnd = parseEndByteIndex - (parseEndPtr - currAtt->valueEnd);
+#endif
/* Detect duplicate attributes by their QNames. This does not work when
namespace processing is turned on and different prefixes for the same
namespace are used. For this case we have a check further down.
@@ -2848,8 +2911,6 @@ storeAtts(XML_Parser parser, const ENCODING *enc,
unsigned long uriHash = hash_secret_salt;
((XML_Char *)s)[-1] = 0; /* clear flag */
id = (ATTRIBUTE_ID *)lookup(parser, &dtd->attributeIds, s, 0);
- if (!id)
- return XML_ERROR_NO_MEMORY;
b = id->prefix->binding;
if (!b)
return XML_ERROR_UNBOUND_PREFIX;
@@ -2861,7 +2922,7 @@ storeAtts(XML_Parser parser, const ENCODING *enc,
return XML_ERROR_NO_MEMORY;
uriHash = CHAR_HASH(uriHash, c);
}
- while (*s++ != XML_T(':'))
+ while (*s++ != XML_T(ASCII_COLON))
;
do { /* copies null terminator */
const XML_Char c = *s;
@@ -2935,7 +2996,7 @@ storeAtts(XML_Parser parser, const ENCODING *enc,
if (!binding)
return XML_ERROR_UNBOUND_PREFIX;
localPart = tagNamePtr->str;
- while (*localPart++ != XML_T(':'))
+ while (*localPart++ != XML_T(ASCII_COLON))
;
}
else if (dtd->defaultPrefix.binding) {
@@ -2990,25 +3051,29 @@ addBinding(XML_Parser parser, PREFIX *prefix, const ATTRIBUTE_ID *attId,
const XML_Char *uri, BINDING **bindingsPtr)
{
static const XML_Char xmlNamespace[] = {
- 'h', 't', 't', 'p', ':', '/', '/',
- 'w', 'w', 'w', '.', 'w', '3', '.', 'o', 'r', 'g', '/',
- 'X', 'M', 'L', '/', '1', '9', '9', '8', '/',
- 'n', 'a', 'm', 'e', 's', 'p', 'a', 'c', 'e', '\0'
+ ASCII_h, ASCII_t, ASCII_t, ASCII_p, ASCII_COLON, ASCII_SLASH, ASCII_SLASH,
+ ASCII_w, ASCII_w, ASCII_w, ASCII_PERIOD, ASCII_w, ASCII_3, ASCII_PERIOD,
+ ASCII_o, ASCII_r, ASCII_g, ASCII_SLASH, ASCII_X, ASCII_M, ASCII_L,
+ ASCII_SLASH, ASCII_1, ASCII_9, ASCII_9, ASCII_8, ASCII_SLASH,
+ ASCII_n, ASCII_a, ASCII_m, ASCII_e, ASCII_s, ASCII_p, ASCII_a, ASCII_c,
+ ASCII_e, '\0'
};
- static const int xmlLen =
+ static const int xmlLen =
(int)sizeof(xmlNamespace)/sizeof(XML_Char) - 1;
static const XML_Char xmlnsNamespace[] = {
- 'h', 't', 't', 'p', ':', '/', '/',
- 'w', 'w', 'w', '.', 'w', '3', '.', 'o', 'r', 'g', '/',
- '2', '0', '0', '0', '/', 'x', 'm', 'l', 'n', 's', '/', '\0'
+ ASCII_h, ASCII_t, ASCII_t, ASCII_p, ASCII_COLON, ASCII_SLASH, ASCII_SLASH,
+ ASCII_w, ASCII_w, ASCII_w, ASCII_PERIOD, ASCII_w, ASCII_3, ASCII_PERIOD,
+ ASCII_o, ASCII_r, ASCII_g, ASCII_SLASH, ASCII_2, ASCII_0, ASCII_0,
+ ASCII_0, ASCII_SLASH, ASCII_x, ASCII_m, ASCII_l, ASCII_n, ASCII_s,
+ ASCII_SLASH, '\0'
};
- static const int xmlnsLen =
+ static const int xmlnsLen =
(int)sizeof(xmlnsNamespace)/sizeof(XML_Char) - 1;
XML_Bool mustBeXML = XML_FALSE;
XML_Bool isXML = XML_TRUE;
XML_Bool isXMLNS = XML_TRUE;
-
+
BINDING *b;
int len;
@@ -3017,13 +3082,13 @@ addBinding(XML_Parser parser, PREFIX *prefix, const ATTRIBUTE_ID *attId,
return XML_ERROR_UNDECLARING_PREFIX;
if (prefix->name
- && prefix->name[0] == XML_T('x')
- && prefix->name[1] == XML_T('m')
- && prefix->name[2] == XML_T('l')) {
+ && prefix->name[0] == XML_T(ASCII_x)
+ && prefix->name[1] == XML_T(ASCII_m)
+ && prefix->name[2] == XML_T(ASCII_l)) {
/* Not allowed to bind xmlns */
- if (prefix->name[3] == XML_T('n')
- && prefix->name[4] == XML_T('s')
+ if (prefix->name[3] == XML_T(ASCII_n)
+ && prefix->name[4] == XML_T(ASCII_s)
&& prefix->name[5] == XML_T('\0'))
return XML_ERROR_RESERVED_PREFIX_XMLNS;
@@ -3035,7 +3100,7 @@ addBinding(XML_Parser parser, PREFIX *prefix, const ATTRIBUTE_ID *attId,
if (isXML && (len > xmlLen || uri[len] != xmlNamespace[len]))
isXML = XML_FALSE;
- if (!mustBeXML && isXMLNS
+ if (!mustBeXML && isXMLNS
&& (len > xmlnsLen || uri[len] != xmlnsNamespace[len]))
isXMLNS = XML_FALSE;
}
@@ -3177,26 +3242,29 @@ doCdataSection(XML_Parser parser,
reportDefault(parser, enc, s, next);
break;
case XML_TOK_DATA_CHARS:
- if (characterDataHandler) {
- if (MUST_CONVERT(enc, s)) {
- for (;;) {
- ICHAR *dataPtr = (ICHAR *)dataBuf;
- XmlConvert(enc, &s, next, &dataPtr, (ICHAR *)dataBufEnd);
- *eventEndPP = next;
- characterDataHandler(handlerArg, dataBuf,
- (int)(dataPtr - (ICHAR *)dataBuf));
- if (s == next)
- break;
- *eventPP = s;
+ {
+ XML_CharacterDataHandler charDataHandler = characterDataHandler;
+ if (charDataHandler) {
+ if (MUST_CONVERT(enc, s)) {
+ for (;;) {
+ ICHAR *dataPtr = (ICHAR *)dataBuf;
+ XmlConvert(enc, &s, next, &dataPtr, (ICHAR *)dataBufEnd);
+ *eventEndPP = next;
+ charDataHandler(handlerArg, dataBuf,
+ (int)(dataPtr - (ICHAR *)dataBuf));
+ if (s == next)
+ break;
+ *eventPP = s;
+ }
}
+ else
+ charDataHandler(handlerArg,
+ (XML_Char *)s,
+ (int)((XML_Char *)next - (XML_Char *)s));
}
- else
- characterDataHandler(handlerArg,
- (XML_Char *)s,
- (int)((XML_Char *)next - (XML_Char *)s));
+ else if (defaultHandler)
+ reportDefault(parser, enc, s, next);
}
- else if (defaultHandler)
- reportDefault(parser, enc, s, next);
break;
case XML_TOK_INVALID:
*eventPP = next;
@@ -3243,7 +3311,7 @@ ignoreSectionProcessor(XML_Parser parser,
const char *end,
const char **endPtr)
{
- enum XML_Error result = doIgnoreSection(parser, encoding, &start, end,
+ enum XML_Error result = doIgnoreSection(parser, encoding, &start, end,
endPtr, (XML_Bool)!ps_finalBuffer);
if (result != XML_ERROR_NONE)
return result;
@@ -3525,7 +3593,7 @@ entityValueInitProcessor(XML_Parser parser,
const char *next = start;
eventPtr = start;
- for (;;) {
+ for (;;) {
tok = XmlPrologTok(encoding, start, end, &next);
eventEndPtr = next;
if (tok <= 0) {
@@ -3553,7 +3621,7 @@ entityValueInitProcessor(XML_Parser parser,
if (result != XML_ERROR_NONE)
return result;
switch (ps_parsing) {
- case XML_SUSPENDED:
+ case XML_SUSPENDED:
*nextPtr = next;
return XML_ERROR_NONE;
case XML_FINISHED:
@@ -3618,7 +3686,7 @@ externalParEntProcessor(XML_Parser parser,
}
processor = prologProcessor;
- return doProlog(parser, encoding, s, end, tok, next,
+ return doProlog(parser, encoding, s, end, tok, next,
nextPtr, (XML_Bool)!ps_finalBuffer);
}
@@ -3668,7 +3736,7 @@ prologProcessor(XML_Parser parser,
{
const char *next = s;
int tok = XmlPrologTok(encoding, s, end, &next);
- return doProlog(parser, encoding, s, end, tok, next,
+ return doProlog(parser, encoding, s, end, tok, next,
nextPtr, (XML_Bool)!ps_finalBuffer);
}
@@ -3683,26 +3751,30 @@ doProlog(XML_Parser parser,
XML_Bool haveMore)
{
#ifdef XML_DTD
- static const XML_Char externalSubsetName[] = { '#' , '\0' };
+ static const XML_Char externalSubsetName[] = { ASCII_HASH , '\0' };
#endif /* XML_DTD */
- static const XML_Char atypeCDATA[] = { 'C', 'D', 'A', 'T', 'A', '\0' };
- static const XML_Char atypeID[] = { 'I', 'D', '\0' };
- static const XML_Char atypeIDREF[] = { 'I', 'D', 'R', 'E', 'F', '\0' };
- static const XML_Char atypeIDREFS[] = { 'I', 'D', 'R', 'E', 'F', 'S', '\0' };
- static const XML_Char atypeENTITY[] = { 'E', 'N', 'T', 'I', 'T', 'Y', '\0' };
- static const XML_Char atypeENTITIES[] =
- { 'E', 'N', 'T', 'I', 'T', 'I', 'E', 'S', '\0' };
+ static const XML_Char atypeCDATA[] =
+ { ASCII_C, ASCII_D, ASCII_A, ASCII_T, ASCII_A, '\0' };
+ static const XML_Char atypeID[] = { ASCII_I, ASCII_D, '\0' };
+ static const XML_Char atypeIDREF[] =
+ { ASCII_I, ASCII_D, ASCII_R, ASCII_E, ASCII_F, '\0' };
+ static const XML_Char atypeIDREFS[] =
+ { ASCII_I, ASCII_D, ASCII_R, ASCII_E, ASCII_F, ASCII_S, '\0' };
+ static const XML_Char atypeENTITY[] =
+ { ASCII_E, ASCII_N, ASCII_T, ASCII_I, ASCII_T, ASCII_Y, '\0' };
+ static const XML_Char atypeENTITIES[] = { ASCII_E, ASCII_N,
+ ASCII_T, ASCII_I, ASCII_T, ASCII_I, ASCII_E, ASCII_S, '\0' };
static const XML_Char atypeNMTOKEN[] = {
- 'N', 'M', 'T', 'O', 'K', 'E', 'N', '\0' };
- static const XML_Char atypeNMTOKENS[] = {
- 'N', 'M', 'T', 'O', 'K', 'E', 'N', 'S', '\0' };
- static const XML_Char notationPrefix[] = {
- 'N', 'O', 'T', 'A', 'T', 'I', 'O', 'N', '(', '\0' };
- static const XML_Char enumValueSep[] = { '|', '\0' };
- static const XML_Char enumValueStart[] = { '(', '\0' };
+ ASCII_N, ASCII_M, ASCII_T, ASCII_O, ASCII_K, ASCII_E, ASCII_N, '\0' };
+ static const XML_Char atypeNMTOKENS[] = { ASCII_N, ASCII_M, ASCII_T,
+ ASCII_O, ASCII_K, ASCII_E, ASCII_N, ASCII_S, '\0' };
+ static const XML_Char notationPrefix[] = { ASCII_N, ASCII_O, ASCII_T,
+ ASCII_A, ASCII_T, ASCII_I, ASCII_O, ASCII_N, ASCII_LPAREN, '\0' };
+ static const XML_Char enumValueSep[] = { ASCII_PIPE, '\0' };
+ static const XML_Char enumValueStart[] = { ASCII_LPAREN, '\0' };
/* save one level of indirection */
- DTD * const dtd = _dtd;
+ DTD * const dtd = _dtd;
const char **eventPP;
const char **eventEndPP;
@@ -3818,15 +3890,17 @@ doProlog(XML_Parser parser,
#endif /* XML_DTD */
dtd->hasParamEntityRefs = XML_TRUE;
if (startDoctypeDeclHandler) {
+ XML_Char *pubId;
if (!XmlIsPublicId(enc, s, next, eventPP))
return XML_ERROR_PUBLICID;
- doctypePubid = poolStoreString(&tempPool, enc,
- s + enc->minBytesPerChar,
- next - enc->minBytesPerChar);
- if (!doctypePubid)
+ pubId = poolStoreString(&tempPool, enc,
+ s + enc->minBytesPerChar,
+ next - enc->minBytesPerChar);
+ if (!pubId)
return XML_ERROR_NO_MEMORY;
- normalizePublicId((XML_Char *)doctypePubid);
+ normalizePublicId(pubId);
poolFinish(&tempPool);
+ doctypePubid = pubId;
handleDefault = XML_FALSE;
goto alreadyChecked;
}
@@ -3881,8 +3955,8 @@ doProlog(XML_Parser parser,
entity->publicId))
return XML_ERROR_EXTERNAL_ENTITY_HANDLING;
if (dtd->paramEntityRead) {
- if (!dtd->standalone &&
- notStandaloneHandler &&
+ if (!dtd->standalone &&
+ notStandaloneHandler &&
!notStandaloneHandler(handlerArg))
return XML_ERROR_NOT_STANDALONE;
}
@@ -4010,11 +4084,11 @@ doProlog(XML_Parser parser,
0, parser))
return XML_ERROR_NO_MEMORY;
if (attlistDeclHandler && declAttributeType) {
- if (*declAttributeType == XML_T('(')
- || (*declAttributeType == XML_T('N')
- && declAttributeType[1] == XML_T('O'))) {
+ if (*declAttributeType == XML_T(ASCII_LPAREN)
+ || (*declAttributeType == XML_T(ASCII_N)
+ && declAttributeType[1] == XML_T(ASCII_O))) {
/* Enumerated or Notation type */
- if (!poolAppendChar(&tempPool, XML_T(')'))
+ if (!poolAppendChar(&tempPool, XML_T(ASCII_RPAREN))
|| !poolAppendChar(&tempPool, XML_T('\0')))
return XML_ERROR_NO_MEMORY;
declAttributeType = tempPool.start;
@@ -4047,11 +4121,11 @@ doProlog(XML_Parser parser,
declAttributeIsCdata, XML_FALSE, attVal, parser))
return XML_ERROR_NO_MEMORY;
if (attlistDeclHandler && declAttributeType) {
- if (*declAttributeType == XML_T('(')
- || (*declAttributeType == XML_T('N')
- && declAttributeType[1] == XML_T('O'))) {
+ if (*declAttributeType == XML_T(ASCII_LPAREN)
+ || (*declAttributeType == XML_T(ASCII_N)
+ && declAttributeType[1] == XML_T(ASCII_O))) {
/* Enumerated or Notation type */
- if (!poolAppendChar(&tempPool, XML_T(')'))
+ if (!poolAppendChar(&tempPool, XML_T(ASCII_RPAREN))
|| !poolAppendChar(&tempPool, XML_T('\0')))
return XML_ERROR_NO_MEMORY;
declAttributeType = tempPool.start;
@@ -4321,7 +4395,7 @@ doProlog(XML_Parser parser,
switch (tok) {
case XML_TOK_PARAM_ENTITY_REF:
/* PE references in internal subset are
- not allowed within declarations. */
+ not allowed within declarations. */
return XML_ERROR_PARAM_ENTITY_REF;
case XML_TOK_XML_DECL:
return XML_ERROR_MISPLACED_XML_PI;
@@ -4379,14 +4453,14 @@ doProlog(XML_Parser parser,
}
break;
case XML_ROLE_GROUP_SEQUENCE:
- if (groupConnector[prologState.level] == '|')
+ if (groupConnector[prologState.level] == ASCII_PIPE)
return XML_ERROR_SYNTAX;
- groupConnector[prologState.level] = ',';
+ groupConnector[prologState.level] = ASCII_COMMA;
if (dtd->in_eldecl && elementDeclHandler)
handleDefault = XML_FALSE;
break;
case XML_ROLE_GROUP_CHOICE:
- if (groupConnector[prologState.level] == ',')
+ if (groupConnector[prologState.level] == ASCII_COMMA)
return XML_ERROR_SYNTAX;
if (dtd->in_eldecl
&& !groupConnector[prologState.level]
@@ -4398,7 +4472,7 @@ doProlog(XML_Parser parser,
if (elementDeclHandler)
handleDefault = XML_FALSE;
}
- groupConnector[prologState.level] = '|';
+ groupConnector[prologState.level] = ASCII_PIPE;
break;
case XML_ROLE_PARAM_ENTITY_REF:
#ifdef XML_DTD
@@ -4442,7 +4516,7 @@ doProlog(XML_Parser parser,
return XML_ERROR_RECURSIVE_ENTITY_REF;
if (entity->textPtr) {
enum XML_Error result;
- XML_Bool betweenDecl =
+ XML_Bool betweenDecl =
(role == XML_ROLE_PARAM_ENTITY_REF ? XML_TRUE : XML_FALSE);
result = processInternalEntity(parser, entity, betweenDecl);
if (result != XML_ERROR_NONE)
@@ -4637,7 +4711,7 @@ doProlog(XML_Parser parser,
reportDefault(parser, enc, s, next);
switch (ps_parsing) {
- case XML_SUSPENDED:
+ case XML_SUSPENDED:
*nextPtr = next;
return XML_ERROR_NONE;
case XML_FINISHED:
@@ -4707,7 +4781,7 @@ epilogProcessor(XML_Parser parser,
}
eventPtr = s = next;
switch (ps_parsing) {
- case XML_SUSPENDED:
+ case XML_SUSPENDED:
*nextPtr = next;
return XML_ERROR_NONE;
case XML_FINISHED:
@@ -4750,12 +4824,12 @@ processInternalEntity(XML_Parser parser, ENTITY *entity,
#ifdef XML_DTD
if (entity->is_param) {
int tok = XmlPrologTok(internalEncoding, textStart, textEnd, &next);
- result = doProlog(parser, internalEncoding, textStart, textEnd, tok,
+ result = doProlog(parser, internalEncoding, textStart, textEnd, tok,
next, &next, XML_FALSE);
}
- else
+ else
#endif /* XML_DTD */
- result = doContent(parser, tagLevel, internalEncoding, textStart,
+ result = doContent(parser, tagLevel, internalEncoding, textStart,
textEnd, &next, XML_FALSE);
if (result == XML_ERROR_NONE) {
@@ -4795,13 +4869,13 @@ internalEntityProcessor(XML_Parser parser,
#ifdef XML_DTD
if (entity->is_param) {
int tok = XmlPrologTok(internalEncoding, textStart, textEnd, &next);
- result = doProlog(parser, internalEncoding, textStart, textEnd, tok,
+ result = doProlog(parser, internalEncoding, textStart, textEnd, tok,
next, &next, XML_FALSE);
}
else
#endif /* XML_DTD */
- result = doContent(parser, openEntity->startTagLevel, internalEncoding,
- textStart, textEnd, &next, XML_FALSE);
+ result = doContent(parser, openEntity->startTagLevel, internalEncoding,
+ textStart, textEnd, &next, XML_FALSE);
if (result != XML_ERROR_NONE)
return result;
@@ -4822,7 +4896,7 @@ internalEntityProcessor(XML_Parser parser,
int tok;
processor = prologProcessor;
tok = XmlPrologTok(encoding, s, end, &next);
- return doProlog(parser, encoding, s, end, tok, next, nextPtr,
+ return doProlog(parser, encoding, s, end, tok, next, nextPtr,
(XML_Bool)!ps_finalBuffer);
}
else
@@ -4831,8 +4905,8 @@ internalEntityProcessor(XML_Parser parser,
processor = contentProcessor;
/* see externalEntityContentProcessor vs contentProcessor */
return doContent(parser, parentParser ? 1 : 0, encoding, s, end,
- nextPtr, (XML_Bool)!ps_finalBuffer);
- }
+ nextPtr, (XML_Bool)!ps_finalBuffer);
+ }
}
static enum XML_Error PTRCALL
@@ -4985,7 +5059,7 @@ appendAttributeValue(XML_Parser parser, const ENCODING *enc, XML_Bool isCdata,
if (!entity->textPtr) {
if (enc == encoding)
eventPtr = ptr;
- return XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF;
+ return XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF;
}
else {
enum XML_Error result;
@@ -5328,7 +5402,7 @@ setElementTypePrefix(XML_Parser parser, ELEMENT_TYPE *elementType)
DTD * const dtd = _dtd; /* save one level of indirection */
const XML_Char *name;
for (name = elementType->name; *name; name++) {
- if (*name == XML_T(':')) {
+ if (*name == XML_T(ASCII_COLON)) {
PREFIX *prefix;
const XML_Char *s;
for (s = elementType->name; s != name; s++) {
@@ -5375,12 +5449,12 @@ getAttributeId(XML_Parser parser, const ENCODING *enc,
poolFinish(&dtd->pool);
if (!ns)
;
- else if (name[0] == XML_T('x')
- && name[1] == XML_T('m')
- && name[2] == XML_T('l')
- && name[3] == XML_T('n')
- && name[4] == XML_T('s')
- && (name[5] == XML_T('\0') || name[5] == XML_T(':'))) {
+ else if (name[0] == XML_T(ASCII_x)
+ && name[1] == XML_T(ASCII_m)
+ && name[2] == XML_T(ASCII_l)
+ && name[3] == XML_T(ASCII_n)
+ && name[4] == XML_T(ASCII_s)
+ && (name[5] == XML_T('\0') || name[5] == XML_T(ASCII_COLON))) {
if (name[5] == XML_T('\0'))
id->prefix = &dtd->defaultPrefix;
else
@@ -5391,7 +5465,7 @@ getAttributeId(XML_Parser parser, const ENCODING *enc,
int i;
for (i = 0; name[i]; i++) {
/* attributes without prefix are *not* in the default namespace */
- if (name[i] == XML_T(':')) {
+ if (name[i] == XML_T(ASCII_COLON)) {
int j;
for (j = 0; j < i; j++) {
if (!poolAppendChar(&dtd->pool, name[j]))
@@ -5401,8 +5475,6 @@ getAttributeId(XML_Parser parser, const ENCODING *enc,
return NULL;
id->prefix = (PREFIX *)lookup(parser, &dtd->prefixes, poolStart(&dtd->pool),
sizeof(PREFIX));
- if (!id->prefix)
- return NULL;
if (id->prefix->name == poolStart(&dtd->pool))
poolFinish(&dtd->pool);
else
@@ -5415,7 +5487,7 @@ getAttributeId(XML_Parser parser, const ENCODING *enc,
return id;
}
-#define CONTEXT_SEP XML_T('\f')
+#define CONTEXT_SEP XML_T(ASCII_FF)
static const XML_Char *
getContext(XML_Parser parser)
@@ -5427,7 +5499,7 @@ getContext(XML_Parser parser)
if (dtd->defaultPrefix.binding) {
int i;
int len;
- if (!poolAppendChar(&tempPool, XML_T('=')))
+ if (!poolAppendChar(&tempPool, XML_T(ASCII_EQUALS)))
return NULL;
len = dtd->defaultPrefix.binding->uriLen;
if (namespaceSeparator)
@@ -5453,7 +5525,7 @@ getContext(XML_Parser parser)
for (s = prefix->name; *s; s++)
if (!poolAppendChar(&tempPool, *s))
return NULL;
- if (!poolAppendChar(&tempPool, XML_T('=')))
+ if (!poolAppendChar(&tempPool, XML_T(ASCII_EQUALS)))
return NULL;
len = prefix->binding->uriLen;
if (namespaceSeparator)
@@ -5505,7 +5577,7 @@ setContext(XML_Parser parser, const XML_Char *context)
context = s;
poolDiscard(&tempPool);
}
- else if (*s == XML_T('=')) {
+ else if (*s == XML_T(ASCII_EQUALS)) {
PREFIX *prefix;
if (poolLength(&tempPool) == 0)
prefix = &dtd->defaultPrefix;
@@ -6162,12 +6234,13 @@ poolGrow(STRING_POOL *pool)
}
if (pool->blocks && pool->start == pool->blocks->s) {
int blockSize = (int)(pool->end - pool->start)*2;
- pool->blocks = (BLOCK *)
+ BLOCK *temp = (BLOCK *)
pool->mem->realloc_fcn(pool->blocks,
(offsetof(BLOCK, s)
+ blockSize * sizeof(XML_Char)));
- if (pool->blocks == NULL)
+ if (temp == NULL)
return XML_FALSE;
+ pool->blocks = temp;
pool->blocks->size = blockSize;
pool->ptr = pool->blocks->s + (pool->ptr - pool->start);
pool->start = pool->blocks->s;
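
The poolGrow() hunk above fixes the classic realloc() pitfall: the old code assigned the result straight back to pool->blocks, so a failed reallocation lost the only pointer to the existing block. Assigning to a temporary and checking it first keeps the original allocation reachable (the XML_Parse() buffer hunk earlier in this file tidies the same pattern). The generic shape of the fix, with illustrative names:

    #include <stdlib.h>

    static int
    grow(int **items, size_t *count)
    {
        int *tmp = realloc(*items, *count * 2 * sizeof **items);
        if (tmp == NULL)
            return -1;        /* *items is untouched and still owned by the caller */
        *items = tmp;
        *count *= 2;
        return 0;
    }
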
diff --git a/Modules/expat/xmlrole.c b/Modules/expat/xmlrole.c
index 15d4d8f..44772e2 100644
--- a/Modules/expat/xmlrole.c
+++ b/Modules/expat/xmlrole.c
@@ -2,20 +2,22 @@
See the file COPYING for copying permission.
*/
+#include <stddef.h>
+
#ifdef COMPILED_FROM_DSP
#include "winconfig.h"
#elif defined(MACOS_CLASSIC)
#include "macconfig.h"
-#elif defined(__amigaos4__)
+#elif defined(__amigaos__)
#include "amigaconfig.h"
+#elif defined(__WATCOMC__)
+#include "watcomconfig.h"
#else
#ifdef HAVE_EXPAT_CONFIG_H
#include <expat_config.h>
#endif
#endif /* ndef COMPILED_FROM_DSP */
-#include <stddef.h>
-
#include "expat_external.h"
#include "internal.h"
#include "xmlrole.h"
@@ -53,12 +55,16 @@ static const char KW_IDREF[] = {
ASCII_I, ASCII_D, ASCII_R, ASCII_E, ASCII_F, '\0' };
static const char KW_IDREFS[] = {
ASCII_I, ASCII_D, ASCII_R, ASCII_E, ASCII_F, ASCII_S, '\0' };
+#ifdef XML_DTD
static const char KW_IGNORE[] = {
ASCII_I, ASCII_G, ASCII_N, ASCII_O, ASCII_R, ASCII_E, '\0' };
+#endif
static const char KW_IMPLIED[] = {
ASCII_I, ASCII_M, ASCII_P, ASCII_L, ASCII_I, ASCII_E, ASCII_D, '\0' };
+#ifdef XML_DTD
static const char KW_INCLUDE[] = {
ASCII_I, ASCII_N, ASCII_C, ASCII_L, ASCII_U, ASCII_D, ASCII_E, '\0' };
+#endif
static const char KW_NDATA[] = {
ASCII_N, ASCII_D, ASCII_A, ASCII_T, ASCII_A, '\0' };
static const char KW_NMTOKEN[] = {
diff --git a/Modules/expat/xmltok.c b/Modules/expat/xmltok.c
index db92247..b9cd7a4 100644
--- a/Modules/expat/xmltok.c
+++ b/Modules/expat/xmltok.c
@@ -2,20 +2,22 @@
See the file COPYING for copying permission.
*/
+#include <stddef.h>
+
#ifdef COMPILED_FROM_DSP
#include "winconfig.h"
#elif defined(MACOS_CLASSIC)
#include "macconfig.h"
-#elif defined(__amigaos4__)
+#elif defined(__amigaos__)
#include "amigaconfig.h"
+#elif defined(__WATCOMC__)
+#include "watcomconfig.h"
#else
#ifdef HAVE_EXPAT_CONFIG_H
#include <expat_config.h>
#endif
#endif /* ndef COMPILED_FROM_DSP */
-#include <stddef.h>
-
#include "expat_external.h"
#include "internal.h"
#include "xmltok.h"
@@ -295,7 +297,9 @@ sb_charMatches(const ENCODING *enc, const char *p, int c)
#endif
#define PREFIX(ident) normal_ ## ident
+#define XML_TOK_IMPL_C
#include "xmltok_impl.c"
+#undef XML_TOK_IMPL_C
#undef MINBPC
#undef BYTE_TYPE
@@ -692,7 +696,9 @@ little2_isNmstrtMin(const ENCODING *enc, const char *p)
#define IS_NMSTRT_CHAR(enc, p, n) (0)
#define IS_NMSTRT_CHAR_MINBPC(enc, p) LITTLE2_IS_NMSTRT_CHAR_MINBPC(enc, p)
+#define XML_TOK_IMPL_C
#include "xmltok_impl.c"
+#undef XML_TOK_IMPL_C
#undef MINBPC
#undef BYTE_TYPE
@@ -831,7 +837,9 @@ big2_isNmstrtMin(const ENCODING *enc, const char *p)
#define IS_NMSTRT_CHAR(enc, p, n) (0)
#define IS_NMSTRT_CHAR_MINBPC(enc, p) BIG2_IS_NMSTRT_CHAR_MINBPC(enc, p)
+#define XML_TOK_IMPL_C
#include "xmltok_impl.c"
+#undef XML_TOK_IMPL_C
#undef MINBPC
#undef BYTE_TYPE
@@ -1337,7 +1345,7 @@ unknown_toUtf16(const ENCODING *enc,
ENCODING *
XmlInitUnknownEncoding(void *mem,
int *table,
- CONVERTER convert,
+ CONVERTER convert,
void *userData)
{
int i;
@@ -1576,7 +1584,7 @@ initScan(const ENCODING * const *encodingTable,
if (ptr[0] == '\0') {
/* 0 isn't a legal data character. Furthermore a document
entity can only start with ASCII characters. So the only
- way this can fail to be big-endian UTF-16 if it it's an
+ way this can fail to be big-endian UTF-16 is if it is an
external parsed general entity that's labelled as
UTF-16LE.
*/
@@ -1610,7 +1618,9 @@ initScan(const ENCODING * const *encodingTable,
#define NS(x) x
#define ns(x) x
+#define XML_TOK_NS_C
#include "xmltok_ns.c"
+#undef XML_TOK_NS_C
#undef NS
#undef ns
@@ -1619,7 +1629,9 @@ initScan(const ENCODING * const *encodingTable,
#define NS(x) x ## NS
#define ns(x) x ## _ns
+#define XML_TOK_NS_C
#include "xmltok_ns.c"
+#undef XML_TOK_NS_C
#undef NS
#undef ns
@@ -1627,7 +1639,7 @@ initScan(const ENCODING * const *encodingTable,
ENCODING *
XmlInitUnknownEncodingNS(void *mem,
int *table,
- CONVERTER convert,
+ CONVERTER convert,
void *userData)
{
ENCODING *enc = XmlInitUnknownEncoding(mem, table, convert, userData);
diff --git a/Modules/expat/xmltok_impl.c b/Modules/expat/xmltok_impl.c
index f793a6b..9c2895b 100644
--- a/Modules/expat/xmltok_impl.c
+++ b/Modules/expat/xmltok_impl.c
@@ -2,6 +2,9 @@
See the file COPYING for copying permission.
*/
+/* This file is included! */
+#ifdef XML_TOK_IMPL_C
+
#ifndef IS_INVALID_CHAR
#define IS_INVALID_CHAR(enc, ptr, n) (0)
#endif
@@ -882,7 +885,7 @@ PREFIX(scanPercent)(const ENCODING *enc, const char *ptr, const char *end,
const char **nextTokPtr)
{
if (ptr == end)
- return -XML_TOK_PERCENT;
+ return XML_TOK_PARTIAL;
switch (BYTE_TYPE(enc, ptr)) {
CHECK_NMSTRT_CASES(enc, ptr, end, nextTokPtr)
case BT_S: case BT_LF: case BT_CR: case BT_PERCNT:
@@ -1777,3 +1780,4 @@ PREFIX(updatePosition)(const ENCODING *enc,
#undef CHECK_NMSTRT_CASE
#undef CHECK_NMSTRT_CASES
+#endif /* XML_TOK_IMPL_C */
diff --git a/Modules/expat/xmltok_ns.c b/Modules/expat/xmltok_ns.c
index d2f8938..c3b88fd 100644
--- a/Modules/expat/xmltok_ns.c
+++ b/Modules/expat/xmltok_ns.c
@@ -1,3 +1,10 @@
+/* Copyright (c) 1998, 1999 Thai Open Source Software Center Ltd
+ See the file COPYING for copying permission.
+*/
+
+/* This file is included! */
+#ifdef XML_TOK_NS_C
+
const ENCODING *
NS(XmlGetUtf8InternalEncoding)(void)
{
@@ -104,3 +111,5 @@ NS(XmlParseXmlDecl)(int isGeneralTextEntity,
encoding,
standalone);
}
+
+#endif /* XML_TOK_NS_C */
diff --git a/Modules/fcntlmodule.c b/Modules/fcntlmodule.c
index 997867a..41b3cde 100644
--- a/Modules/fcntlmodule.c
+++ b/Modules/fcntlmodule.c
@@ -27,7 +27,7 @@ conv_descriptor(PyObject *object, int *target)
}
-/* fcntl(fd, opt, [arg]) */
+/* fcntl(fd, op, [arg]) */
static PyObject *
fcntl_fcntl(PyObject *self, PyObject *args)
@@ -77,11 +77,12 @@ fcntl_fcntl(PyObject *self, PyObject *args)
}
PyDoc_STRVAR(fcntl_doc,
-"fcntl(fd, opt, [arg])\n\
+"fcntl(fd, op, [arg])\n\
\n\
-Perform the requested operation on file descriptor fd. The operation\n\
-is defined by op and is operating system dependent. These constants are\n\
-available from the fcntl module. The argument arg is optional, and\n\
+Perform the operation op on file descriptor fd. The values used\n\
+for op are operating system dependent, and are available\n\
+as constants in the fcntl module, using the same names as used in\n\
+the relevant C header files. The argument arg is optional, and\n\
defaults to 0; it may be an int or a string. If arg is given as a string,\n\
the return value of fcntl is a string of that length, containing the\n\
resulting value put in the arg buffer by the operating system. The length\n\
@@ -90,7 +91,7 @@ is an integer or if none is specified, the result value is an integer\n\
corresponding to the return value of the fcntl call in the C code.");
-/* ioctl(fd, opt, [arg]) */
+/* ioctl(fd, op, [arg]) */
static PyObject *
fcntl_ioctl(PyObject *self, PyObject *args)
@@ -104,7 +105,7 @@ fcntl_ioctl(PyObject *self, PyObject *args)
whereas the system expects it to be a 32bit bit field value
regardless of it being passed as an int or unsigned long on
various platforms. See the termios.TIOCSWINSZ constant across
- platforms for an example of thise.
+ platforms for an example of this.
If any of the 64bit platforms ever decide to use more than 32bits
in their unsigned long ioctl codes this will break and need
@@ -212,11 +213,12 @@ fcntl_ioctl(PyObject *self, PyObject *args)
}
PyDoc_STRVAR(ioctl_doc,
-"ioctl(fd, opt[, arg[, mutate_flag]])\n\
+"ioctl(fd, op[, arg[, mutate_flag]])\n\
\n\
-Perform the requested operation on file descriptor fd. The operation is\n\
-defined by opt and is operating system dependent. Typically these codes are\n\
-retrieved from the fcntl or termios library modules.\n\
+Perform the operation op on file descriptor fd. The values used for op\n\
+are operating system dependent, and are available as constants in the\n\
+fcntl or termios library modules, using the same names as used in the\n\
+relevant C header files.\n\
\n\
The argument arg is optional, and defaults to 0; it may be an int or a\n\
buffer containing character data (most likely a string or an array). \n\
@@ -299,7 +301,7 @@ PyDoc_STRVAR(flock_doc,
"flock(fd, operation)\n\
\n\
Perform the lock operation op on file descriptor fd. See the Unix \n\
-manual page for flock(3) for details. (On some systems, this function is\n\
+manual page for flock(2) for details. (On some systems, this function is\n\
emulated using fcntl().)");
diff --git a/Modules/flmodule.c b/Modules/flmodule.c
index b5a78cf..0ddf600 100644
--- a/Modules/flmodule.c
+++ b/Modules/flmodule.c
@@ -97,10 +97,8 @@ forgetgeneric(genericobject *g)
{
int i, n;
- Py_XDECREF(g->ob_callback);
- g->ob_callback = NULL;
- Py_XDECREF(g->ob_callback_arg);
- g->ob_callback_arg = NULL;
+ Py_CLEAR(g->ob_callback);
+ Py_CLEAR(g->ob_callback_arg);
if (allgenerics == NULL)
return; /* No objects known yet */
n = PyList_Size(allgenerics);
@@ -132,10 +130,8 @@ releaseobjects(FL_FORM *form)
/* The object is now unreachable for
do_forms and check_forms, so
delete it from the list of known objects */
- Py_XDECREF(g->ob_callback);
- g->ob_callback = NULL;
- Py_XDECREF(g->ob_callback_arg);
- g->ob_callback_arg = NULL;
+ Py_CLEAR(g->ob_callback);
+ Py_CLEAR(g->ob_callback_arg);
PyList_SetItem(allgenerics, i, (PyObject *)NULL);
nfreeslots++;
}
diff --git a/Modules/gcmodule.c b/Modules/gcmodule.c
index 384c47d..916e481 100644
--- a/Modules/gcmodule.c
+++ b/Modules/gcmodule.c
@@ -111,6 +111,46 @@ static Py_ssize_t long_lived_pending = 0;
http://mail.python.org/pipermail/python-dev/2008-June/080579.html
*/
+/*
+ NOTE: about untracking of mutable objects.
+
+ Certain types of container cannot participate in a reference cycle, and
+ so do not need to be tracked by the garbage collector. Untracking these
+ objects reduces the cost of garbage collections. However, determining
+ which objects may be untracked is not free, and the costs must be
+ weighed against the benefits for garbage collection.
+
+ There are two possible strategies for when to untrack a container:
+
+ i) When the container is created.
+ ii) When the container is examined by the garbage collector.
+
+ Tuples containing only immutable objects (integers, strings etc, and
+ recursively, tuples of immutable objects) do not need to be tracked.
+ The interpreter creates a large number of tuples, many of which will
+ not survive until garbage collection. It is therefore not worthwhile
+ to untrack eligible tuples at creation time.
+
+ Instead, all tuples except the empty tuple are tracked when created.
+ During garbage collection it is determined whether any surviving tuples
+ can be untracked. A tuple can be untracked if all of its contents are
+ already not tracked. Tuples are examined for untracking in all garbage
+ collection cycles. It may take more than one cycle to untrack a tuple.
+
+ Dictionaries containing only immutable objects also do not need to be
+ tracked. Dictionaries are untracked when created. If a tracked item is
+ inserted into a dictionary (either as a key or value), the dictionary
+ becomes tracked. During a full garbage collection (all generations),
+ the collector will untrack any dictionaries whose contents are not
+ tracked.
+
+ The module provides the python function is_tracked(obj), which returns
+ the CURRENT tracking status of the object. Subsequent garbage
+ collections may change the tracking status of the object.
+
+ Untracking of certain containers was introduced in issue #4688, and
+ the algorithm was refined in response to issue #14775.
+*/
/* set for debugging information */
#define DEBUG_STATS (1<<0) /* print collection statistics */
@@ -436,9 +476,6 @@ move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
if (PyTuple_CheckExact(op)) {
_PyTuple_MaybeUntrack(op);
}
- else if (PyDict_CheckExact(op)) {
- _PyDict_MaybeUntrack(op);
- }
}
else {
/* This *may* be unreachable. To make progress,
@@ -478,6 +515,20 @@ has_finalizer(PyObject *op)
return 0;
}
+/* Try to untrack all currently tracked dictionaries */
+static void
+untrack_dicts(PyGC_Head *head)
+{
+ PyGC_Head *next, *gc = head->gc.gc_next;
+ while (gc != head) {
+ PyObject *op = FROM_GC(gc);
+ next = gc->gc.gc_next;
+ if (PyDict_CheckExact(op))
+ _PyDict_MaybeUntrack(op);
+ gc = next;
+ }
+}
+
/* Move the objects in unreachable with __del__ methods into `finalizers`.
* Objects moved into `finalizers` have gc_refs set to GC_REACHABLE; the
* objects remaining in unreachable are left at GC_TENTATIVELY_UNREACHABLE.
@@ -890,6 +941,9 @@ collect(int generation)
gc_list_merge(young, old);
}
else {
+ /* We only untrack dicts in full collections, to avoid quadratic
+ dict build-up. See issue #14775. */
+ untrack_dicts(young);
long_lived_pending = 0;
long_lived_total = gc_list_size(young);
}
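
The untracking rules described in the note above can be observed from Python via gc.is_tracked(); a small illustration (exact collector behaviour may vary with when collections run):

    >>> import gc
    >>> t = (1, 2, 3)
    >>> gc.is_tracked(t)        # tuples start out tracked
    True
    >>> n = gc.collect()        # a collection pass untracks tuples of atomic objects
    >>> gc.is_tracked(t)
    False
    >>> d = {"a": 1}
    >>> gc.is_tracked(d)        # dicts with only atomic contents start untracked
    False
    >>> d["b"] = []
    >>> gc.is_tracked(d)        # inserting a tracked value re-tracks the dict
    True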
diff --git a/Modules/getaddrinfo.c b/Modules/getaddrinfo.c
index 1d0bfbb..9d054d0 100644
--- a/Modules/getaddrinfo.c
+++ b/Modules/getaddrinfo.c
@@ -430,7 +430,7 @@ getaddrinfo(const char*hostname, const char*servname,
break;
#ifdef ENABLE_IPV6
case AF_INET6:
- pfx = ((struct in6_addr *)pton)->s6_addr8[0];
+ pfx = ((struct in6_addr *)pton)->s6_addr[0];
if (pfx == 0 || pfx == 0xfe || pfx == 0xff)
pai->ai_flags &= ~AI_CANONNAME;
break;
diff --git a/Modules/getnameinfo.c b/Modules/getnameinfo.c
index 7892ae9..f7985c9 100644
--- a/Modules/getnameinfo.c
+++ b/Modules/getnameinfo.c
@@ -161,7 +161,7 @@ getnameinfo(sa, salen, host, hostlen, serv, servlen, flags)
break;
#ifdef ENABLE_IPV6
case AF_INET6:
- pfx = ((struct sockaddr_in6 *)sa)->sin6_addr.s6_addr8[0];
+ pfx = ((struct sockaddr_in6 *)sa)->sin6_addr.s6_addr[0];
if (pfx == 0 || pfx == 0xfe || pfx == 0xff)
flags |= NI_NUMERICHOST;
break;
diff --git a/Modules/getpath.c b/Modules/getpath.c
index 9faafa3..de96d47 100644
--- a/Modules/getpath.c
+++ b/Modules/getpath.c
@@ -335,12 +335,27 @@ search_for_exec_prefix(char *argv0_path, char *home)
return 1;
}
- /* Check to see if argv[0] is in the build directory */
+ /* Check to see if argv[0] is in the build directory. "pybuilddir.txt"
+ is written by setup.py and contains the relative path to the location
+ of shared library modules. */
strcpy(exec_prefix, argv0_path);
- joinpath(exec_prefix, "Modules/Setup");
+ joinpath(exec_prefix, "pybuilddir.txt");
if (isfile(exec_prefix)) {
- reduce(exec_prefix);
- return -1;
+ FILE *f = fopen(exec_prefix, "r");
+ if (f == NULL)
+ errno = 0;
+ else {
+ char rel_builddir_path[MAXPATHLEN+1];
+ size_t n;
+ n = fread(rel_builddir_path, 1, MAXPATHLEN, f);
+ rel_builddir_path[n] = '\0';
+ fclose(f);
+ if (n >= 0) {
+ strcpy(exec_prefix, argv0_path);
+ joinpath(exec_prefix, rel_builddir_path);
+ return -1;
+ }
+ }
}
/* Search from argv0_path, until root is found */
diff --git a/Modules/grpmodule.c b/Modules/grpmodule.c
index 07de880..040d8b0 100644
--- a/Modules/grpmodule.c
+++ b/Modules/grpmodule.c
@@ -3,15 +3,15 @@
#include "Python.h"
#include "structseq.h"
+#include "posixmodule.h"
-#include <sys/types.h>
#include <grp.h>
static PyStructSequence_Field struct_group_type_fields[] = {
{"gr_name", "group name"},
{"gr_passwd", "password"},
- {"gr_gid", "group id"},
- {"gr_mem", "group memebers"},
+ {"gr_gid", "group id"},
+ {"gr_mem", "group members"},
{0}
};
@@ -70,7 +70,7 @@ mkgrent(struct group *p)
Py_INCREF(Py_None);
}
#endif
- SET(setIndex++, PyInt_FromLong((long) p->gr_gid));
+ SET(setIndex++, _PyInt_FromGid(p->gr_gid));
SET(setIndex++, w);
#undef SET
@@ -86,17 +86,25 @@ static PyObject *
grp_getgrgid(PyObject *self, PyObject *pyo_id)
{
PyObject *py_int_id;
- unsigned int gid;
+ gid_t gid;
struct group *p;
py_int_id = PyNumber_Int(pyo_id);
if (!py_int_id)
- return NULL;
- gid = PyInt_AS_LONG(py_int_id);
+ return NULL;
+ if (!_Py_Gid_Converter(py_int_id, &gid)) {
+ Py_DECREF(py_int_id);
+ return NULL;
+ }
Py_DECREF(py_int_id);
if ((p = getgrgid(gid)) == NULL) {
- PyErr_Format(PyExc_KeyError, "getgrgid(): gid not found: %d", gid);
+ if (gid < 0)
+ PyErr_Format(PyExc_KeyError,
+ "getgrgid(): gid not found: %ld", (long)gid);
+ else
+ PyErr_Format(PyExc_KeyError,
+ "getgrgid(): gid not found: %lu", (unsigned long)gid);
return NULL;
}
return mkgrent(p);
@@ -113,7 +121,7 @@ grp_getgrnam(PyObject *self, PyObject *pyo_name)
if (!py_str_name)
return NULL;
name = PyString_AS_STRING(py_str_name);
-
+
if ((p = getgrnam(name)) == NULL) {
PyErr_Format(PyExc_KeyError, "getgrnam(): name not found: %s", name);
Py_DECREF(py_str_name);
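
With the converter changes above, grp.getgrgid() accepts gids that do not fit in a signed C long and formats unknown ids unsigned in the KeyError message. A quick sketch (group names and valid ids are system dependent):

    >>> import grp
    >>> grp.getgrgid(0).gr_name       # typically 'root' or 'wheel'
    'root'
    >>> grp.getgrgid(3000000000)      # accepted even above LONG_MAX; unknown gids raise KeyError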
diff --git a/Modules/itertoolsmodule.c b/Modules/itertoolsmodule.c
index b51ccf9..cd45eb9 100644
--- a/Modules/itertoolsmodule.c
+++ b/Modules/itertoolsmodule.c
@@ -401,14 +401,31 @@ teedataobject_traverse(teedataobject *tdo, visitproc visit, void * arg)
return 0;
}
+static void
+teedataobject_safe_decref(PyObject *obj)
+{
+ while (obj && Py_TYPE(obj) == &teedataobject_type &&
+ Py_REFCNT(obj) == 1) {
+ PyObject *nextlink = ((teedataobject *)obj)->nextlink;
+ ((teedataobject *)obj)->nextlink = NULL;
+ Py_DECREF(obj);
+ obj = nextlink;
+ }
+ Py_XDECREF(obj);
+}
+
static int
teedataobject_clear(teedataobject *tdo)
{
int i;
+ PyObject *tmp;
+
Py_CLEAR(tdo->it);
for (i=0 ; i<tdo->numread ; i++)
Py_CLEAR(tdo->values[i]);
- Py_CLEAR(tdo->nextlink);
+ tmp = tdo->nextlink;
+ tdo->nextlink = NULL;
+ teedataobject_safe_decref(tmp);
return 0;
}
@@ -475,6 +492,8 @@ tee_next(teeobject *to)
if (to->index >= LINKCELLS) {
link = teedataobject_jumplink(to->dataobj);
+ if (link == NULL)
+ return NULL;
Py_DECREF(to->dataobj);
to->dataobj = (teedataobject *)link;
to->index = 0;
@@ -903,11 +922,13 @@ dropwhile_next(dropwhileobject *lz)
}
ok = PyObject_IsTrue(good);
Py_DECREF(good);
- if (!ok) {
+ if (ok == 0) {
lz->start = 1;
return item;
}
Py_DECREF(item);
+ if (ok < 0)
+ return NULL;
}
}
@@ -1043,10 +1064,11 @@ takewhile_next(takewhileobject *lz)
}
ok = PyObject_IsTrue(good);
Py_DECREF(good);
- if (ok)
+ if (ok > 0)
return item;
Py_DECREF(item);
- lz->stop = 1;
+ if (ok == 0)
+ lz->stop = 1;
return NULL;
}
@@ -1219,19 +1241,22 @@ islice_next(isliceobject *lz)
Py_ssize_t oldnext;
PyObject *(*iternext)(PyObject *);
+ if (it == NULL)
+ return NULL;
+
iternext = *Py_TYPE(it)->tp_iternext;
while (lz->cnt < lz->next) {
item = iternext(it);
if (item == NULL)
- return NULL;
+ goto empty;
Py_DECREF(item);
lz->cnt++;
}
if (stop != -1 && lz->cnt >= stop)
- return NULL;
+ goto empty;
item = iternext(it);
if (item == NULL)
- return NULL;
+ goto empty;
lz->cnt++;
oldnext = lz->next;
/* The (size_t) cast below avoids the danger of undefined
@@ -1240,6 +1265,10 @@ islice_next(isliceobject *lz)
if (lz->next < oldnext || (stop != -1 && lz->next > stop))
lz->next = stop;
return item;
+
+empty:
+ Py_CLEAR(lz->it);
+ return NULL;
}
PyDoc_STRVAR(islice_doc,
@@ -3001,9 +3030,11 @@ ifilter_next(ifilterobject *lz)
ok = PyObject_IsTrue(good);
Py_DECREF(good);
}
- if (ok)
+ if (ok > 0)
return item;
Py_DECREF(item);
+ if (ok < 0)
+ return NULL;
}
}
@@ -3144,9 +3175,11 @@ ifilterfalse_next(ifilterfalseobject *lz)
ok = PyObject_IsTrue(good);
Py_DECREF(good);
}
- if (!ok)
+ if (ok == 0)
return item;
Py_DECREF(item);
+ if (ok < 0)
+ return NULL;
}
}
@@ -3400,10 +3433,10 @@ PyDoc_STRVAR(count_doc,
Return a count object whose .next() method returns consecutive values.\n\
Equivalent to:\n\n\
def count(firstval=0, step=1):\n\
- x = firstval\n\
- while 1:\n\
- yield x\n\
- x += step\n");
+ x = firstval\n\
+ while 1:\n\
+ yield x\n\
+ x += step\n");
static PyTypeObject count_type = {
PyVarObject_HEAD_INIT(NULL, 0)
@@ -3650,14 +3683,17 @@ repeat_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
repeatobject *ro;
PyObject *element;
- Py_ssize_t cnt = -1;
+ Py_ssize_t cnt = -1, n_kwds = 0;
static char *kwargs[] = {"object", "times", NULL};
if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|n:repeat", kwargs,
&element, &cnt))
return NULL;
- if (PyTuple_Size(args) == 2 && cnt < 0)
+ if (kwds != NULL)
+ n_kwds = PyDict_Size(kwds);
+ /* Does user supply times argument? */
+ if ((PyTuple_Size(args) + n_kwds == 2) && cnt < 0)
cnt = 0;
ro = (repeatobject *)type->tp_alloc(type, 0);
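
The itertools changes above propagate predicate errors out of dropwhile/takewhile/ifilter, let an exhausted islice drop its source iterator, and make a negative times keyword behave like a negative positional count for repeat(). A short illustration, assuming Python 2.7.8:

    >>> from itertools import islice, repeat
    >>> list(repeat('x', times=-1))    # keyword form now clamps to zero, like repeat('x', -1)
    []
    >>> it = islice([1, 2, 3, 4], 2)
    >>> list(it)
    [1, 2]
    >>> list(it)                       # the exhausted islice has released its underlying iterator
    []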
diff --git a/Modules/main.c b/Modules/main.c
index 135ef6e..ef9b245 100644
--- a/Modules/main.c
+++ b/Modules/main.c
@@ -264,6 +264,7 @@ Py_Main(int argc, char **argv)
/* Hash randomization needed early for all string operations
(including -W and -X options). */
+ _PyOS_opterr = 0; /* prevent printing the error in 1st pass */
while ((c = _PyOS_GetOpt(argc, argv, PROGRAM_OPTS)) != EOF) {
if (c == 'm' || c == 'c') {
/* -c / -m is the last option: following arguments are
@@ -583,7 +584,7 @@ Py_Main(int argc, char **argv)
sts = PyRun_SimpleStringFlags(command, &cf) != 0;
free(command);
} else if (module) {
- sts = RunModule(module, 1);
+ sts = (RunModule(module, 1) != 0);
free(module);
}
else {
diff --git a/Modules/mathmodule.c b/Modules/mathmodule.c
index cd74b0d..845fb9b 100644
--- a/Modules/mathmodule.c
+++ b/Modules/mathmodule.c
@@ -1277,23 +1277,33 @@ loghelper(PyObject* arg, double (*func)(double), char *funcname)
{
/* If it is long, do it ourselves. */
if (PyLong_Check(arg)) {
- double x;
+ double x, result;
Py_ssize_t e;
- x = _PyLong_Frexp((PyLongObject *)arg, &e);
- if (x == -1.0 && PyErr_Occurred())
- return NULL;
- if (x <= 0.0) {
+
+ /* Negative or zero inputs give a ValueError. */
+ if (Py_SIZE(arg) <= 0) {
PyErr_SetString(PyExc_ValueError,
"math domain error");
return NULL;
}
- /* Special case for log(1), to make sure we get an
- exact result there. */
- if (e == 1 && x == 0.5)
- return PyFloat_FromDouble(0.0);
- /* Value is ~= x * 2**e, so the log ~= log(x) + log(2) * e. */
- x = func(x) + func(2.0) * e;
- return PyFloat_FromDouble(x);
+
+ x = PyLong_AsDouble(arg);
+ if (x == -1.0 && PyErr_Occurred()) {
+ if (!PyErr_ExceptionMatches(PyExc_OverflowError))
+ return NULL;
+ /* Here the conversion to double overflowed, but it's possible
+ to compute the log anyway. Clear the exception and continue. */
+ PyErr_Clear();
+ x = _PyLong_Frexp((PyLongObject *)arg, &e);
+ if (x == -1.0 && PyErr_Occurred())
+ return NULL;
+ /* Value is ~= x * 2**e, so the log ~= log(x) + log(2) * e. */
+ result = func(x) + func(2.0) * e;
+ }
+ else
+ /* Successfully converted x to a double. */
+ result = func(x);
+ return PyFloat_FromDouble(result);
}
/* Else let libm handle it by itself. */
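
With the loghelper() rewrite above, a long that converts to a double cleanly is handled directly, an overflowing value falls back to the _PyLong_Frexp() path, and zero or negative longs raise ValueError up front. For instance:

    >>> import math
    >>> round(math.log(10 ** 1000), 3)   # float(10 ** 1000) overflows, yet the log is still computed
    2302.585
    >>> math.log(0L)
    Traceback (most recent call last):
      ...
    ValueError: math domain error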
diff --git a/Modules/md5module.c b/Modules/md5module.c
index 0683ef5..103da14 100644
--- a/Modules/md5module.c
+++ b/Modules/md5module.c
@@ -51,12 +51,25 @@ static PyObject *
md5_update(md5object *self, PyObject *args)
{
Py_buffer view;
+ Py_ssize_t n;
+ unsigned char *buf;
if (!PyArg_ParseTuple(args, "s*:update", &view))
return NULL;
- md5_append(&self->md5, (unsigned char*)view.buf,
- Py_SAFE_DOWNCAST(view.len, Py_ssize_t, unsigned int));
+ n = view.len;
+ buf = (unsigned char *) view.buf;
+ while (n > 0) {
+ Py_ssize_t nbytes;
+ if (n > INT_MAX)
+ nbytes = INT_MAX;
+ else
+ nbytes = n;
+ md5_append(&self->md5, buf,
+ Py_SAFE_DOWNCAST(nbytes, Py_ssize_t, unsigned int));
+ buf += nbytes;
+ n -= nbytes;
+ }
PyBuffer_Release(&view);
Py_RETURN_NONE;
@@ -262,6 +275,8 @@ MD5_new(PyObject *self, PyObject *args)
{
md5object *md5p;
Py_buffer view = { 0 };
+ Py_ssize_t n;
+ unsigned char *buf;
if (!PyArg_ParseTuple(args, "|s*:new", &view))
return NULL;
@@ -271,9 +286,18 @@ MD5_new(PyObject *self, PyObject *args)
return NULL;
}
- if (view.len > 0) {
- md5_append(&md5p->md5, (unsigned char*)view.buf,
- Py_SAFE_DOWNCAST(view.len, Py_ssize_t, unsigned int));
+ n = view.len;
+ buf = (unsigned char *) view.buf;
+ while (n > 0) {
+ Py_ssize_t nbytes;
+ if (n > INT_MAX)
+ nbytes = INT_MAX;
+ else
+ nbytes = n;
+ md5_append(&md5p->md5, buf,
+ Py_SAFE_DOWNCAST(nbytes, Py_ssize_t, unsigned int));
+ buf += nbytes;
+ n -= nbytes;
}
PyBuffer_Release(&view);
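
The md5 changes above split oversized input into chunks of at most INT_MAX bytes before each md5_append() call. A Python sketch of the same chunking pattern (the names append and state are illustrative stand-ins, not module API):

    INT_MAX = 2 ** 31 - 1

    def append_in_chunks(append, state, data):
        # Feed `append` at most INT_MAX bytes at a time, mirroring the C loop above.
        view = memoryview(data)
        while len(view) > 0:
            chunk, view = view[:INT_MAX], view[INT_MAX:]
            append(state, chunk)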
diff --git a/Modules/mmapmodule.c b/Modules/mmapmodule.c
index a5027f5..67e4000 100644
--- a/Modules/mmapmodule.c
+++ b/Modules/mmapmodule.c
@@ -1188,19 +1188,22 @@ new_mmap_object(PyTypeObject *type, PyObject *args, PyObject *kwdict)
# endif
if (fd != -1 && fstat(fd, &st) == 0 && S_ISREG(st.st_mode)) {
if (map_size == 0) {
- off_t calc_size;
+ if (st.st_size == 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot mmap an empty file");
+ return NULL;
+ }
if (offset >= st.st_size) {
PyErr_SetString(PyExc_ValueError,
"mmap offset is greater than file size");
return NULL;
}
- calc_size = st.st_size - offset;
- map_size = calc_size;
- if (map_size != calc_size) {
+ if (st.st_size - offset > PY_SSIZE_T_MAX) {
PyErr_SetString(PyExc_ValueError,
"mmap length is too large");
- return NULL;
- }
+ return NULL;
+ }
+ map_size = (Py_ssize_t) (st.st_size - offset);
} else if (offset + (size_t)map_size > st.st_size) {
PyErr_SetString(PyExc_ValueError,
"mmap length is greater than file size");
@@ -1383,17 +1386,25 @@ new_mmap_object(PyTypeObject *type, PyObject *args, PyObject *kwdict)
}
size = (((PY_LONG_LONG) high) << 32) + low;
+ if (size == 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot mmap an empty file");
+ Py_DECREF(m_obj);
+ return NULL;
+ }
if (offset >= size) {
PyErr_SetString(PyExc_ValueError,
"mmap offset is greater than file size");
Py_DECREF(m_obj);
return NULL;
}
- if (offset - size > PY_SSIZE_T_MAX)
- /* Map area too large to fit in memory */
- m_obj->size = (Py_ssize_t) -1;
- else
- m_obj->size = (Py_ssize_t) (size - offset);
+ if (size - offset > PY_SSIZE_T_MAX) {
+ PyErr_SetString(PyExc_ValueError,
+ "mmap length is too large");
+ Py_DECREF(m_obj);
+ return NULL;
+ }
+ m_obj->size = (Py_ssize_t) (size - offset);
} else {
m_obj->size = map_size;
size = offset + map_size;
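
With the checks above, mapping a zero-length file now fails cleanly and oversized regions raise ValueError on both the POSIX and Windows paths. For example:

    >>> import mmap, tempfile
    >>> f = tempfile.TemporaryFile()
    >>> mmap.mmap(f.fileno(), 0)
    Traceback (most recent call last):
      ...
    ValueError: cannot mmap an empty file
    >>> f.write("some data"); f.flush()
    >>> m = mmap.mmap(f.fileno(), 0)     # a non-empty file maps normally
    >>> m[:4]
    'some'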
diff --git a/Modules/operator.c b/Modules/operator.c
index 274d8aa..375592c 100644
--- a/Modules/operator.c
+++ b/Modules/operator.c
@@ -235,6 +235,132 @@ op_delslice(PyObject *s, PyObject *a)
#define spam2o(OP,ALTOP,DOC) {#OP, op_##OP, METH_O, PyDoc_STR(DOC)}, \
{#ALTOP, op_##OP, METH_O, PyDoc_STR(DOC)},
+
+
+/* compare_digest **********************************************************/
+
+/*
+ * timing safe compare
+ *
+ * Returns 1 if the strings are equal.\n\
+ * In case of len(a) != len(b) the function tries to keep the timing
+ * dependent on the length of b. CPU cache locality may still alter timing\n\
+ * a bit.
+ */
+static int
+_tscmp(const unsigned char *a, const unsigned char *b,
+ Py_ssize_t len_a, Py_ssize_t len_b)
+{
+ /* The volatile type declarations make sure that the compiler has no
+ * chance to optimize and fold the code in any way that may change
+ * the timing.
+ */
+ volatile Py_ssize_t length;
+ volatile const unsigned char *left;
+ volatile const unsigned char *right;
+ Py_ssize_t i;
+ unsigned char result;
+
+ /* loop count depends on length of b */
+ length = len_b;
+ left = NULL;
+ right = b;
+
+ /* don't use else here to keep the amount of CPU instructions constant,
+ * volatile forces re-evaluation
+ * */
+ if (len_a == length) {
+ left = *((volatile const unsigned char**)&a);
+ result = 0;
+ }
+ if (len_a != length) {
+ left = b;
+ result = 1;
+ }
+
+ for (i=0; i < length; i++) {
+ result |= *left++ ^ *right++;
+ }
+
+ return (result == 0);
+}
+
+PyDoc_STRVAR(compare_digest__doc__,
+"compare_digest(a, b) -> bool\n"
+"\n"
+"Return 'a == b'. This function uses an approach designed to prevent\n"
+"timing analysis, making it appropriate for cryptography.\n"
+"a and b must both be of the same type: either str (ASCII only),\n"
+"or any type that supports the buffer protocol (e.g. bytes).\n"
+"\n"
+"Note: If a and b are of different lengths, or if an error occurs,\n"
+"a timing attack could theoretically reveal information about the\n"
+"types and lengths of a and b--but not their values.\n");
+
+static PyObject*
+compare_digest(PyObject *self, PyObject *args)
+{
+ PyObject *a, *b;
+ int rc;
+
+ if (!PyArg_ParseTuple(args, "OO:compare_digest", &a, &b)) {
+ return NULL;
+ }
+
+ /* Unicode string */
+ if (PyUnicode_Check(a) && PyUnicode_Check(b)) {
+ rc = _tscmp((const unsigned char *)PyUnicode_AS_DATA(a),
+ (const unsigned char *)PyUnicode_AS_DATA(b),
+ PyUnicode_GET_DATA_SIZE(a),
+ PyUnicode_GET_DATA_SIZE(b));
+ }
+ /* fallback to buffer interface for bytes, bytesarray and other */
+ else {
+ Py_buffer view_a;
+ Py_buffer view_b;
+
+ if (PyObject_CheckBuffer(a) == 0 && PyObject_CheckBuffer(b) == 0) {
+ PyErr_Format(PyExc_TypeError,
+ "unsupported operand types(s) or combination of types: "
+ "'%.100s' and '%.100s'",
+ Py_TYPE(a)->tp_name, Py_TYPE(b)->tp_name);
+ return NULL;
+ }
+
+ if (PyObject_GetBuffer(a, &view_a, PyBUF_SIMPLE) == -1) {
+ return NULL;
+ }
+ if (view_a.ndim > 1) {
+ PyErr_SetString(PyExc_BufferError,
+ "Buffer must be single dimension");
+ PyBuffer_Release(&view_a);
+ return NULL;
+ }
+
+ if (PyObject_GetBuffer(b, &view_b, PyBUF_SIMPLE) == -1) {
+ PyBuffer_Release(&view_a);
+ return NULL;
+ }
+ if (view_b.ndim > 1) {
+ PyErr_SetString(PyExc_BufferError,
+ "Buffer must be single dimension");
+ PyBuffer_Release(&view_a);
+ PyBuffer_Release(&view_b);
+ return NULL;
+ }
+
+ rc = _tscmp((const unsigned char*)view_a.buf,
+ (const unsigned char*)view_b.buf,
+ view_a.len,
+ view_b.len);
+
+ PyBuffer_Release(&view_a);
+ PyBuffer_Release(&view_b);
+ }
+
+ return PyBool_FromLong(rc);
+}
+
static struct PyMethodDef operator_methods[] = {
spam1o(isCallable,
@@ -318,6 +444,8 @@ spam2(ne,__ne__, "ne(a, b) -- Same as a!=b.")
spam2(gt,__gt__, "gt(a, b) -- Same as a>b.")
spam2(ge,__ge__, "ge(a, b) -- Same as a>=b.")
+ {"_compare_digest", (PyCFunction)compare_digest, METH_VARARGS,
+ compare_digest__doc__},
{NULL, NULL} /* sentinel */
};
@@ -412,8 +540,8 @@ PyDoc_STRVAR(itemgetter_doc,
"itemgetter(item, ...) --> itemgetter object\n\
\n\
Return a callable object that fetches the given item(s) from its operand.\n\
-After, f=itemgetter(2), the call f(r) returns r[2].\n\
-After, g=itemgetter(2,5,3), the call g(r) returns (r[2], r[5], r[3])");
+After f = itemgetter(2), the call f(r) returns r[2].\n\
+After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3])");
static PyTypeObject itemgetter_type = {
PyVarObject_HEAD_INIT(NULL, 0)
@@ -592,9 +720,9 @@ PyDoc_STRVAR(attrgetter_doc,
"attrgetter(attr, ...) --> attrgetter object\n\
\n\
Return a callable object that fetches the given attribute(s) from its operand.\n\
-After, f=attrgetter('name'), the call f(r) returns r.name.\n\
-After, g=attrgetter('name', 'date'), the call g(r) returns (r.name, r.date).\n\
-After, h=attrgetter('name.first', 'name.last'), the call h(r) returns\n\
+After f = attrgetter('name'), the call f(r) returns r.name.\n\
+After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date).\n\
+After h = attrgetter('name.first', 'name.last'), the call h(r) returns\n\
(r.name.first, r.name.last).");
static PyTypeObject attrgetter_type = {
@@ -724,8 +852,8 @@ PyDoc_STRVAR(methodcaller_doc,
"methodcaller(name, ...) --> methodcaller object\n\
\n\
Return a callable object that calls the given method on its operand.\n\
-After, f = methodcaller('name'), the call f(r) returns r.name().\n\
-After, g = methodcaller('name', 'date', foo=1), the call g(r) returns\n\
+After f = methodcaller('name'), the call f(r) returns r.name().\n\
+After g = methodcaller('name', 'date', foo=1), the call g(r) returns\n\
r.name('date', foo=1).");
static PyTypeObject methodcaller_type = {
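
The _compare_digest() helper above is the C backend behind the hmac.compare_digest() backport (assuming Python 2.7.7 or later); its loop length depends only on the second argument, so the comparison time does not reveal where the inputs first differ. Typical use, with a placeholder attacker-supplied value:

    >>> import hmac
    >>> expected = hmac.new("server-key", "message").hexdigest()
    >>> hmac.compare_digest(expected, "0" * 32)    # compared in time independent of the mismatch position
    False
    >>> hmac.compare_digest(expected, expected)
    True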
diff --git a/Modules/ossaudiodev.c b/Modules/ossaudiodev.c
index 647a21e..284cc61 100644
--- a/Modules/ossaudiodev.c
+++ b/Modules/ossaudiodev.c
@@ -215,7 +215,7 @@ oss_mixer_dealloc(oss_mixer_t *self)
*/
/* _do_ioctl_1() is a private helper function used for the OSS ioctls --
- SNDCTL_DSP_{SETFMT,CHANNELS,SPEED} -- that that are called from C
+ SNDCTL_DSP_{SETFMT,CHANNELS,SPEED} -- that are called from C
like this:
ioctl(fd, SNDCTL_DSP_cmd, &arg)
@@ -490,7 +490,6 @@ oss_setparameters(oss_audio_t *self, PyObject *args)
{
int wanted_fmt, wanted_channels, wanted_rate, strict=0;
int fmt, channels, rate;
- PyObject * rv; /* return tuple (fmt, channels, rate) */
if (!PyArg_ParseTuple(args, "iii|i:setparameters",
&wanted_fmt, &wanted_channels, &wanted_rate,
@@ -532,13 +531,7 @@ oss_setparameters(oss_audio_t *self, PyObject *args)
/* Construct the return value: a (fmt, channels, rate) tuple that
tells what the audio hardware was actually set to. */
- rv = PyTuple_New(3);
- if (rv == NULL)
- return NULL;
- PyTuple_SET_ITEM(rv, 0, PyInt_FromLong(fmt));
- PyTuple_SET_ITEM(rv, 1, PyInt_FromLong(channels));
- PyTuple_SET_ITEM(rv, 2, PyInt_FromLong(rate));
- return rv;
+ return Py_BuildValue("(iii)", fmt, channels, rate);
}
static int
diff --git a/Modules/parsermodule.c b/Modules/parsermodule.c
index 632475c..eb2d600 100644
--- a/Modules/parsermodule.c
+++ b/Modules/parsermodule.c
@@ -169,9 +169,33 @@ typedef struct {
static void parser_free(PyST_Object *st);
+static PyObject* parser_sizeof(PyST_Object *, void *);
static int parser_compare(PyST_Object *left, PyST_Object *right);
static PyObject *parser_getattr(PyObject *self, char *name);
+static PyObject* parser_compilest(PyST_Object *, PyObject *, PyObject *);
+static PyObject* parser_isexpr(PyST_Object *, PyObject *, PyObject *);
+static PyObject* parser_issuite(PyST_Object *, PyObject *, PyObject *);
+static PyObject* parser_st2list(PyST_Object *, PyObject *, PyObject *);
+static PyObject* parser_st2tuple(PyST_Object *, PyObject *, PyObject *);
+#define PUBLIC_METHOD_TYPE (METH_VARARGS|METH_KEYWORDS)
+
+static PyMethodDef
+parser_methods[] = {
+ {"compile", (PyCFunction)parser_compilest, PUBLIC_METHOD_TYPE,
+ PyDoc_STR("Compile this ST object into a code object.")},
+ {"isexpr", (PyCFunction)parser_isexpr, PUBLIC_METHOD_TYPE,
+ PyDoc_STR("Determines if this ST object was created from an expression.")},
+ {"issuite", (PyCFunction)parser_issuite, PUBLIC_METHOD_TYPE,
+ PyDoc_STR("Determines if this ST object was created from a suite.")},
+ {"tolist", (PyCFunction)parser_st2list, PUBLIC_METHOD_TYPE,
+ PyDoc_STR("Creates a list-tree representation of this ST.")},
+ {"totuple", (PyCFunction)parser_st2tuple, PUBLIC_METHOD_TYPE,
+ PyDoc_STR("Creates a tuple-tree representation of this ST.")},
+ {"__sizeof__", (PyCFunction)parser_sizeof, METH_NOARGS,
+ PyDoc_STR("Returns size in memory, in bytes.")},
+ {NULL, NULL, 0, NULL}
+};
static
PyTypeObject PyST_Type = {
@@ -200,7 +224,14 @@ PyTypeObject PyST_Type = {
Py_TPFLAGS_DEFAULT, /* tp_flags */
/* __doc__ */
- "Intermediate representation of a Python parse tree."
+ "Intermediate representation of a Python parse tree.",
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ parser_methods, /* tp_methods */
}; /* PyST_Type */
@@ -319,10 +350,14 @@ parser_st2tuple(PyST_Object *self, PyObject *args, PyObject *kw)
int lineno = 0;
int col_offset = 0;
if (line_option != NULL) {
- lineno = (PyObject_IsTrue(line_option) != 0) ? 1 : 0;
+ lineno = PyObject_IsTrue(line_option);
+ if (lineno < 0)
+ return NULL;
}
if (col_option != NULL) {
- col_offset = (PyObject_IsTrue(col_option) != 0) ? 1 : 0;
+ col_offset = PyObject_IsTrue(col_option);
+ if (col_offset < 0)
+ return NULL;
}
/*
* Convert ST into a tuple representation. Use Guido's function,
@@ -370,10 +405,14 @@ parser_st2list(PyST_Object *self, PyObject *args, PyObject *kw)
int lineno = 0;
int col_offset = 0;
if (line_option != 0) {
- lineno = PyObject_IsTrue(line_option) ? 1 : 0;
+ lineno = PyObject_IsTrue(line_option);
+ if (lineno < 0)
+ return NULL;
}
- if (col_option != NULL) {
- col_offset = (PyObject_IsTrue(col_option) != 0) ? 1 : 0;
+ if (col_option != 0) {
+ col_offset = PyObject_IsTrue(col_option);
+ if (col_offset < 0)
+ return NULL;
}
/*
* Convert ST into a tuple representation. Use Guido's function,
@@ -494,25 +533,6 @@ parser_issuite(PyST_Object *self, PyObject *args, PyObject *kw)
}
-#define PUBLIC_METHOD_TYPE (METH_VARARGS|METH_KEYWORDS)
-
-static PyMethodDef
-parser_methods[] = {
- {"compile", (PyCFunction)parser_compilest, PUBLIC_METHOD_TYPE,
- PyDoc_STR("Compile this ST object into a code object.")},
- {"isexpr", (PyCFunction)parser_isexpr, PUBLIC_METHOD_TYPE,
- PyDoc_STR("Determines if this ST object was created from an expression.")},
- {"issuite", (PyCFunction)parser_issuite, PUBLIC_METHOD_TYPE,
- PyDoc_STR("Determines if this ST object was created from a suite.")},
- {"tolist", (PyCFunction)parser_st2list, PUBLIC_METHOD_TYPE,
- PyDoc_STR("Creates a list-tree representation of this ST.")},
- {"totuple", (PyCFunction)parser_st2tuple, PUBLIC_METHOD_TYPE,
- PyDoc_STR("Creates a tuple-tree representation of this ST.")},
-
- {NULL, NULL, 0, NULL}
-};
-
-
static PyObject*
parser_getattr(PyObject *self, char *name)
{
@@ -678,7 +698,7 @@ parser_tuple2st(PyST_Object *self, PyObject *args, PyObject *kw)
err_string("parse tree does not use a valid start symbol");
}
}
- /* Make sure we throw an exception on all errors. We should never
+ /* Make sure we raise an exception on all errors. We should never
* get this, but we'd do well to be sure something is done.
*/
if (st == NULL && !PyErr_Occurred())
@@ -695,6 +715,15 @@ parser_tuple2ast(PyST_Object *self, PyObject *args, PyObject *kw)
return parser_tuple2st(self, args, kw);
}
+static PyObject *
+parser_sizeof(PyST_Object *st, void *unused)
+{
+ Py_ssize_t res;
+
+ res = sizeof(PyST_Object) + _PyNode_SizeOf(st->st_node);
+ return PyLong_FromSsize_t(res);
+}
+
/* node* build_node_children()
*
@@ -784,7 +813,7 @@ build_node_children(PyObject *tuple, node *root, int *line_num)
else if (!ISNONTERMINAL(type)) {
/*
* It has to be one or the other; this is an error.
- * Throw an exception.
+ * Raise an exception.
*/
PyObject *err = Py_BuildValue("os", elem, "unknown node type.");
PyErr_SetObject(parser_error, err);
@@ -834,7 +863,7 @@ build_node_tree(PyObject *tuple)
if (ISTERMINAL(num)) {
/*
* The tuple is simple, but it doesn't start with a start symbol.
- * Throw an exception now and be done with it.
+ * Raise an exception now and be done with it.
*/
tuple = Py_BuildValue("os", tuple,
"Illegal syntax-tree; cannot start with terminal symbol.");
diff --git a/Modules/posixmodule.c b/Modules/posixmodule.c
index 1bd5c1a..cd4672c 100644
--- a/Modules/posixmodule.c
+++ b/Modules/posixmodule.c
@@ -27,6 +27,9 @@
#include "Python.h"
#include "structseq.h"
+#ifndef MS_WINDOWS
+#include "posixmodule.h"
+#endif
#if defined(__VMS)
# include <unixio.h>
@@ -347,9 +350,133 @@ extern int lstat(const char *, struct stat *);
#endif
#endif
+
+#ifndef MS_WINDOWS
+PyObject *
+_PyInt_FromUid(uid_t uid)
+{
+ if (uid <= LONG_MAX)
+ return PyInt_FromLong(uid);
+ return PyLong_FromUnsignedLong(uid);
+}
+
+PyObject *
+_PyInt_FromGid(gid_t gid)
+{
+ if (gid <= LONG_MAX)
+ return PyInt_FromLong(gid);
+ return PyLong_FromUnsignedLong(gid);
+}
+
+int
+_Py_Uid_Converter(PyObject *obj, void *p)
+{
+ int overflow;
+ long result;
+ if (PyFloat_Check(obj)) {
+ PyErr_SetString(PyExc_TypeError,
+ "integer argument expected, got float");
+ return 0;
+ }
+ result = PyLong_AsLongAndOverflow(obj, &overflow);
+ if (overflow < 0)
+ goto OverflowDown;
+ if (!overflow && result == -1) {
+ /* error or -1 */
+ if (PyErr_Occurred())
+ return 0;
+ *(uid_t *)p = (uid_t)-1;
+ }
+ else {
+ /* unsigned uid_t */
+ unsigned long uresult;
+ if (overflow > 0) {
+ uresult = PyLong_AsUnsignedLong(obj);
+ if (PyErr_Occurred()) {
+ if (PyErr_ExceptionMatches(PyExc_OverflowError))
+ goto OverflowUp;
+ return 0;
+ }
+ } else {
+ if (result < 0)
+ goto OverflowDown;
+ uresult = result;
+ }
+ if (sizeof(uid_t) < sizeof(long) &&
+ (unsigned long)(uid_t)uresult != uresult)
+ goto OverflowUp;
+ *(uid_t *)p = (uid_t)uresult;
+ }
+ return 1;
+
+OverflowDown:
+ PyErr_SetString(PyExc_OverflowError,
+ "user id is less than minimum");
+ return 0;
+
+OverflowUp:
+ PyErr_SetString(PyExc_OverflowError,
+ "user id is greater than maximum");
+ return 0;
+}
+
+int
+_Py_Gid_Converter(PyObject *obj, void *p)
+{
+ int overflow;
+ long result;
+ if (PyFloat_Check(obj)) {
+ PyErr_SetString(PyExc_TypeError,
+ "integer argument expected, got float");
+ return 0;
+ }
+ result = PyLong_AsLongAndOverflow(obj, &overflow);
+ if (overflow < 0)
+ goto OverflowDown;
+ if (!overflow && result == -1) {
+ /* error or -1 */
+ if (PyErr_Occurred())
+ return 0;
+ *(gid_t *)p = (gid_t)-1;
+ }
+ else {
+ /* unsigned gid_t */
+ unsigned long uresult;
+ if (overflow > 0) {
+ uresult = PyLong_AsUnsignedLong(obj);
+ if (PyErr_Occurred()) {
+ if (PyErr_ExceptionMatches(PyExc_OverflowError))
+ goto OverflowUp;
+ return 0;
+ }
+ } else {
+ if (result < 0)
+ goto OverflowDown;
+ uresult = result;
+ }
+ if (sizeof(gid_t) < sizeof(long) &&
+ (unsigned long)(gid_t)uresult != uresult)
+ goto OverflowUp;
+ *(gid_t *)p = (gid_t)uresult;
+ }
+ return 1;
+
+OverflowDown:
+ PyErr_SetString(PyExc_OverflowError,
+ "group id is less than minimum");
+ return 0;
+
+OverflowUp:
+ PyErr_SetString(PyExc_OverflowError,
+ "group id is greater than maximum");
+ return 0;
+}
+#endif /* MS_WINDOWS */
+
+
#if defined _MSC_VER && _MSC_VER >= 1400
/* Microsoft CRT in VS2005 and higher will verify that a filehandle is
- * valid and throw an assertion if it isn't.
+ * valid and raise an assertion if it isn't.
* Normally, an invalid fd is likely to be a C program error and therefore
* an assertion can be useful, but it does contradict the POSIX standard
* which for write(2) states:
@@ -441,9 +568,10 @@ _PyVerify_fd_dup2(int fd1, int fd2)
#endif
/* Return a dictionary corresponding to the POSIX environment table */
-#ifdef WITH_NEXT_FRAMEWORK
+#if defined(WITH_NEXT_FRAMEWORK) || (defined(__APPLE__) && defined(Py_ENABLE_SHARED))
/* On Darwin/MacOSX a shared library or framework has no access to
-** environ directly, we must obtain it with _NSGetEnviron().
+** environ directly, we must obtain it with _NSGetEnviron(). See also
+** man environ(7).
*/
#include <crt_externs.h>
static char **environ;
@@ -463,7 +591,7 @@ convertenviron(void)
d = PyDict_New();
if (d == NULL)
return NULL;
-#ifdef WITH_NEXT_FRAMEWORK
+#if defined(WITH_NEXT_FRAMEWORK) || (defined(__APPLE__) && defined(Py_ENABLE_SHARED))
if (environ == NULL)
environ = *_NSGetEnviron();
#endif
@@ -1305,8 +1433,13 @@ _pystat_fromstructstat(STRUCT_STAT *st)
PyStructSequence_SET_ITEM(v, 2, PyInt_FromLong((long)st->st_dev));
#endif
PyStructSequence_SET_ITEM(v, 3, PyInt_FromLong((long)st->st_nlink));
- PyStructSequence_SET_ITEM(v, 4, PyInt_FromLong((long)st->st_uid));
- PyStructSequence_SET_ITEM(v, 5, PyInt_FromLong((long)st->st_gid));
+#if defined(MS_WINDOWS)
+ PyStructSequence_SET_ITEM(v, 4, PyInt_FromLong(0));
+ PyStructSequence_SET_ITEM(v, 5, PyInt_FromLong(0));
+#else
+ PyStructSequence_SET_ITEM(v, 4, _PyInt_FromUid(st->st_uid));
+ PyStructSequence_SET_ITEM(v, 5, _PyInt_FromGid(st->st_gid));
+#endif
#ifdef HAVE_LARGEFILE_SUPPORT
PyStructSequence_SET_ITEM(v, 6,
PyLong_FromLongLong((PY_LONG_LONG)st->st_size));
@@ -1883,14 +2016,16 @@ static PyObject *
posix_chown(PyObject *self, PyObject *args)
{
char *path = NULL;
- long uid, gid;
+ uid_t uid;
+ gid_t gid;
int res;
- if (!PyArg_ParseTuple(args, "etll:chown",
+ if (!PyArg_ParseTuple(args, "etO&O&:chown",
Py_FileSystemDefaultEncoding, &path,
- &uid, &gid))
+ _Py_Uid_Converter, &uid,
+ _Py_Gid_Converter, &gid))
return NULL;
Py_BEGIN_ALLOW_THREADS
- res = chown(path, (uid_t) uid, (gid_t) gid);
+ res = chown(path, uid, gid);
Py_END_ALLOW_THREADS
if (res < 0)
return posix_error_with_allocated_filename(path);
@@ -1910,12 +2045,15 @@ static PyObject *
posix_fchown(PyObject *self, PyObject *args)
{
int fd;
- long uid, gid;
+ uid_t uid;
+ gid_t gid;
int res;
- if (!PyArg_ParseTuple(args, "ill:chown", &fd, &uid, &gid))
+ if (!PyArg_ParseTuple(args, "iO&O&:fchown", &fd,
+ _Py_Uid_Converter, &uid,
+ _Py_Gid_Converter, &gid))
return NULL;
Py_BEGIN_ALLOW_THREADS
- res = fchown(fd, (uid_t) uid, (gid_t) gid);
+ res = fchown(fd, uid, gid);
Py_END_ALLOW_THREADS
if (res < 0)
return posix_error();
@@ -1933,14 +2071,16 @@ static PyObject *
posix_lchown(PyObject *self, PyObject *args)
{
char *path = NULL;
- long uid, gid;
+ uid_t uid;
+ gid_t gid;
int res;
- if (!PyArg_ParseTuple(args, "etll:lchown",
+ if (!PyArg_ParseTuple(args, "etO&O&:lchown",
Py_FileSystemDefaultEncoding, &path,
- &uid, &gid))
+ _Py_Uid_Converter, &uid,
+ _Py_Gid_Converter, &gid))
return NULL;
Py_BEGIN_ALLOW_THREADS
- res = lchown(path, (uid_t) uid, (gid_t) gid);
+ res = lchown(path, uid, gid);
Py_END_ALLOW_THREADS
if (res < 0)
return posix_error_with_allocated_filename(path);
@@ -1956,7 +2096,9 @@ PyDoc_STRVAR(posix_getcwd__doc__,
"getcwd() -> path\n\n\
Return a string representing the current working directory.");
-#if (defined(__sun) && defined(__SVR4)) || defined(__OpenBSD__)
+#if (defined(__sun) && defined(__SVR4)) || \
+ defined(__OpenBSD__) || \
+ defined(__NetBSD__)
/* Issue 9185: getcwd() returns NULL/ERANGE indefinitely. */
static PyObject *
posix_getcwd(PyObject *self, PyObject *noargs)
@@ -3841,7 +3983,7 @@ Return the current process's effective group id.");
static PyObject *
posix_getegid(PyObject *self, PyObject *noargs)
{
- return PyInt_FromLong((long)getegid());
+ return _PyInt_FromGid(getegid());
}
#endif
@@ -3854,7 +3996,7 @@ Return the current process's effective user id.");
static PyObject *
posix_geteuid(PyObject *self, PyObject *noargs)
{
- return PyInt_FromLong((long)geteuid());
+ return _PyInt_FromUid(geteuid());
}
#endif
@@ -3867,7 +4009,7 @@ Return the current process's group id.");
static PyObject *
posix_getgid(PyObject *self, PyObject *noargs)
{
- return PyInt_FromLong((long)getgid());
+ return _PyInt_FromGid(getgid());
}
#endif
@@ -3912,6 +4054,34 @@ posix_getgroups(PyObject *self, PyObject *noargs)
gid_t* alt_grouplist = grouplist;
int n;
+#ifdef __APPLE__
+ /* Issue #17557: As of OS X 10.8, getgroups(2) no longer raises EINVAL if
+ * there are more groups than can fit in grouplist. Therefore, on OS X
+ * always first call getgroups with length 0 to get the actual number
+ * of groups.
+ */
+ n = getgroups(0, NULL);
+ if (n < 0) {
+ return posix_error();
+ } else if (n <= MAX_GROUPS) {
+ /* groups will fit in existing array */
+ alt_grouplist = grouplist;
+ } else {
+ alt_grouplist = PyMem_Malloc(n * sizeof(gid_t));
+ if (alt_grouplist == NULL) {
+ errno = EINVAL;
+ return posix_error();
+ }
+ }
+
+ n = getgroups(n, alt_grouplist);
+ if (n == -1) {
+ if (alt_grouplist != grouplist) {
+ PyMem_Free(alt_grouplist);
+ }
+ return posix_error();
+ }
+#else
n = getgroups(MAX_GROUPS, grouplist);
if (n < 0) {
if (errno == EINVAL) {
@@ -3938,11 +4108,13 @@ posix_getgroups(PyObject *self, PyObject *noargs)
return posix_error();
}
}
+#endif
+
result = PyList_New(n);
if (result != NULL) {
int i;
for (i = 0; i < n; ++i) {
- PyObject *o = PyInt_FromLong((long)alt_grouplist[i]);
+ PyObject *o = _PyInt_FromGid(alt_grouplist[i]);
if (o == NULL) {
Py_DECREF(result);
result = NULL;
@@ -3971,12 +4143,22 @@ static PyObject *
posix_initgroups(PyObject *self, PyObject *args)
{
char *username;
- long gid;
+#ifdef __APPLE__
+ int gid;
+#else
+ gid_t gid;
+#endif
- if (!PyArg_ParseTuple(args, "sl:initgroups", &username, &gid))
+#ifdef __APPLE__
+ if (!PyArg_ParseTuple(args, "si:initgroups", &username,
+ &gid))
+#else
+ if (!PyArg_ParseTuple(args, "sO&:initgroups", &username,
+ _Py_Gid_Converter, &gid))
+#endif
return NULL;
- if (initgroups(username, (gid_t) gid) == -1)
+ if (initgroups(username, gid) == -1)
return PyErr_SetFromErrno(PyExc_OSError);
Py_INCREF(Py_None);
@@ -4090,7 +4272,7 @@ Return the current process's user id.");
static PyObject *
posix_getuid(PyObject *self, PyObject *noargs)
{
- return PyInt_FromLong((long)getuid());
+ return _PyInt_FromUid(getuid());
}
#endif
@@ -4226,6 +4408,7 @@ posix__isdir(PyObject *self, PyObject *args)
return NULL;
attributes = GetFileAttributesA(path);
+ PyMem_Free(path);
if (attributes == INVALID_FILE_ATTRIBUTES)
Py_RETURN_FALSE;
@@ -5736,15 +5919,9 @@ Set the current process's user id.");
static PyObject *
posix_setuid(PyObject *self, PyObject *args)
{
- long uid_arg;
uid_t uid;
- if (!PyArg_ParseTuple(args, "l:setuid", &uid_arg))
- return NULL;
- uid = uid_arg;
- if (uid != uid_arg) {
- PyErr_SetString(PyExc_OverflowError, "user id too big");
+ if (!PyArg_ParseTuple(args, "O&:setuid", _Py_Uid_Converter, &uid))
return NULL;
- }
if (setuid(uid) < 0)
return posix_error();
Py_INCREF(Py_None);
@@ -5761,15 +5938,9 @@ Set the current process's effective user id.");
static PyObject *
posix_seteuid (PyObject *self, PyObject *args)
{
- long euid_arg;
uid_t euid;
- if (!PyArg_ParseTuple(args, "l", &euid_arg))
- return NULL;
- euid = euid_arg;
- if (euid != euid_arg) {
- PyErr_SetString(PyExc_OverflowError, "user id too big");
+ if (!PyArg_ParseTuple(args, "O&:seteuid", _Py_Uid_Converter, &euid))
return NULL;
- }
if (seteuid(euid) < 0) {
return posix_error();
} else {
@@ -5787,15 +5958,9 @@ Set the current process's effective group id.");
static PyObject *
posix_setegid (PyObject *self, PyObject *args)
{
- long egid_arg;
gid_t egid;
- if (!PyArg_ParseTuple(args, "l", &egid_arg))
- return NULL;
- egid = egid_arg;
- if (egid != egid_arg) {
- PyErr_SetString(PyExc_OverflowError, "group id too big");
+ if (!PyArg_ParseTuple(args, "O&:setegid", _Py_Gid_Converter, &egid))
return NULL;
- }
if (setegid(egid) < 0) {
return posix_error();
} else {
@@ -5813,23 +5978,11 @@ Set the current process's real and effective user ids.");
static PyObject *
posix_setreuid (PyObject *self, PyObject *args)
{
- long ruid_arg, euid_arg;
uid_t ruid, euid;
- if (!PyArg_ParseTuple(args, "ll", &ruid_arg, &euid_arg))
- return NULL;
- if (ruid_arg == -1)
- ruid = (uid_t)-1; /* let the compiler choose how -1 fits */
- else
- ruid = ruid_arg; /* otherwise, assign from our long */
- if (euid_arg == -1)
- euid = (uid_t)-1;
- else
- euid = euid_arg;
- if ((euid_arg != -1 && euid != euid_arg) ||
- (ruid_arg != -1 && ruid != ruid_arg)) {
- PyErr_SetString(PyExc_OverflowError, "user id too big");
+ if (!PyArg_ParseTuple(args, "O&O&:setreuid",
+ _Py_Uid_Converter, &ruid,
+ _Py_Uid_Converter, &euid))
return NULL;
- }
if (setreuid(ruid, euid) < 0) {
return posix_error();
} else {
@@ -5847,23 +6000,11 @@ Set the current process's real and effective group ids.");
static PyObject *
posix_setregid (PyObject *self, PyObject *args)
{
- long rgid_arg, egid_arg;
gid_t rgid, egid;
- if (!PyArg_ParseTuple(args, "ll", &rgid_arg, &egid_arg))
- return NULL;
- if (rgid_arg == -1)
- rgid = (gid_t)-1; /* let the compiler choose how -1 fits */
- else
- rgid = rgid_arg; /* otherwise, assign from our long */
- if (egid_arg == -1)
- egid = (gid_t)-1;
- else
- egid = egid_arg;
- if ((egid_arg != -1 && egid != egid_arg) ||
- (rgid_arg != -1 && rgid != rgid_arg)) {
- PyErr_SetString(PyExc_OverflowError, "group id too big");
+ if (!PyArg_ParseTuple(args, "O&O&:setregid",
+ _Py_Gid_Converter, &rgid,
+ _Py_Gid_Converter, &egid))
return NULL;
- }
if (setregid(rgid, egid) < 0) {
return posix_error();
} else {
@@ -5881,15 +6022,9 @@ Set the current process's group id.");
static PyObject *
posix_setgid(PyObject *self, PyObject *args)
{
- long gid_arg;
gid_t gid;
- if (!PyArg_ParseTuple(args, "l:setgid", &gid_arg))
- return NULL;
- gid = gid_arg;
- if (gid != gid_arg) {
- PyErr_SetString(PyExc_OverflowError, "group id too big");
+ if (!PyArg_ParseTuple(args, "O&:setgid", _Py_Gid_Converter, &gid))
return NULL;
- }
if (setgid(gid) < 0)
return posix_error();
Py_INCREF(Py_None);
@@ -5922,35 +6057,13 @@ posix_setgroups(PyObject *self, PyObject *groups)
elem = PySequence_GetItem(groups, i);
if (!elem)
return NULL;
- if (!PyInt_Check(elem)) {
- if (!PyLong_Check(elem)) {
- PyErr_SetString(PyExc_TypeError,
- "groups must be integers");
- Py_DECREF(elem);
- return NULL;
- } else {
- unsigned long x = PyLong_AsUnsignedLong(elem);
- if (PyErr_Occurred()) {
- PyErr_SetString(PyExc_TypeError,
- "group id too big");
- Py_DECREF(elem);
- return NULL;
- }
- grouplist[i] = x;
- /* read back to see if it fits in gid_t */
- if (grouplist[i] != x) {
- PyErr_SetString(PyExc_TypeError,
- "group id too big");
- Py_DECREF(elem);
- return NULL;
- }
- }
+ if (!PyInt_Check(elem) && !PyLong_Check(elem)) {
+ PyErr_SetString(PyExc_TypeError,
+ "groups must be integers");
+ Py_DECREF(elem);
+ return NULL;
} else {
- long x = PyInt_AsLong(elem);
- grouplist[i] = x;
- if (grouplist[i] != x) {
- PyErr_SetString(PyExc_TypeError,
- "group id too big");
+ if (!_Py_Gid_Converter(elem, &grouplist[i])) {
Py_DECREF(elem);
return NULL;
}
@@ -6468,8 +6581,12 @@ PyDoc_STRVAR(posix_close__doc__,
"close(fd)\n\n\
Close a file descriptor (for low level IO).");
+/*
+The underscore at the end of the function name avoids a name clash with the libc
+function posix_close.
+*/
static PyObject *
-posix_close(PyObject *self, PyObject *args)
+posix_close_(PyObject *self, PyObject *args)
{
int fd, res;
if (!PyArg_ParseTuple(args, "i:close", &fd))
@@ -6550,7 +6667,8 @@ posix_dup2(PyObject *self, PyObject *args)
PyDoc_STRVAR(posix_lseek__doc__,
"lseek(fd, pos, how) -> newpos\n\n\
-Set the current position of a file descriptor.");
+Set the current position of a file descriptor.\n\
+Return the new cursor position in bytes, starting from the beginning.");
static PyObject *
posix_lseek(PyObject *self, PyObject *args)
@@ -6731,8 +6849,35 @@ posix_fdopen(PyObject *self, PyObject *args)
PyMem_FREE(mode);
return NULL;
}
- if (!_PyVerify_fd(fd))
+ if (!_PyVerify_fd(fd)) {
+ PyMem_FREE(mode);
return posix_error();
+ }
+#if defined(HAVE_FSTAT) && defined(S_IFDIR) && defined(EISDIR)
+ {
+ struct stat buf;
+ const char *msg;
+ PyObject *exc;
+ if (fstat(fd, &buf) == 0 && S_ISDIR(buf.st_mode)) {
+ PyMem_FREE(mode);
+ msg = strerror(EISDIR);
+ exc = PyObject_CallFunction(PyExc_IOError, "(isO)",
+ EISDIR, msg, "<fdopen>");
+ if (exc) {
+ PyErr_SetObject(PyExc_IOError, exc);
+ Py_DECREF(exc);
+ }
+ return NULL;
+ }
+ }
+#endif
+ /* The dummy filename used here must be kept in sync with the value
+ tested against in gzip.GzipFile.__init__() - see issue #13781. */
+ f = PyFile_FromFile(NULL, "<fdopen>", orgmode, fclose);
+ if (f == NULL) {
+ PyMem_FREE(mode);
+ return NULL;
+ }
Py_BEGIN_ALLOW_THREADS
#if !defined(MS_WINDOWS) && defined(HAVE_FCNTL_H)
if (mode[0] == 'a') {
@@ -6755,11 +6900,9 @@ posix_fdopen(PyObject *self, PyObject *args)
PyMem_FREE(mode);
if (fp == NULL)
return posix_error();
- /* The dummy filename used here must be kept in sync with the value
- tested against in gzip.GzipFile.__init__() - see issue #13781. */
- f = PyFile_FromFile(fp, "<fdopen>", orgmode, fclose);
- if (f != NULL)
- PyFile_SetBufSize(f, bufsize);
+ /* We now know we will succeed, so initialize the file object. */
+ ((PyFileObject *)f)->f_fp = fp;
+ PyFile_SetBufSize(f, bufsize);
return f;
}
@@ -8576,9 +8719,11 @@ Set the current process's real, effective, and saved user ids.");
static PyObject*
posix_setresuid (PyObject *self, PyObject *args)
{
- /* We assume uid_t is no larger than a long. */
- long ruid, euid, suid;
- if (!PyArg_ParseTuple(args, "lll", &ruid, &euid, &suid))
+ uid_t ruid, euid, suid;
+ if (!PyArg_ParseTuple(args, "O&O&O&:setresuid",
+ _Py_Uid_Converter, &ruid,
+ _Py_Uid_Converter, &euid,
+ _Py_Uid_Converter, &suid))
return NULL;
if (setresuid(ruid, euid, suid) < 0)
return posix_error();
@@ -8594,9 +8739,11 @@ Set the current process's real, effective, and saved group ids.");
static PyObject*
posix_setresgid (PyObject *self, PyObject *args)
{
- /* We assume uid_t is no larger than a long. */
- long rgid, egid, sgid;
- if (!PyArg_ParseTuple(args, "lll", &rgid, &egid, &sgid))
+ gid_t rgid, egid, sgid;
+ if (!PyArg_ParseTuple(args, "O&O&O&:setresgid",
+ _Py_Gid_Converter, &rgid,
+ _Py_Gid_Converter, &egid,
+ _Py_Gid_Converter, &sgid))
return NULL;
if (setresgid(rgid, egid, sgid) < 0)
return posix_error();
@@ -8613,14 +8760,11 @@ static PyObject*
posix_getresuid (PyObject *self, PyObject *noargs)
{
uid_t ruid, euid, suid;
- long l_ruid, l_euid, l_suid;
if (getresuid(&ruid, &euid, &suid) < 0)
return posix_error();
- /* Force the values into long's as we don't know the size of uid_t. */
- l_ruid = ruid;
- l_euid = euid;
- l_suid = suid;
- return Py_BuildValue("(lll)", l_ruid, l_euid, l_suid);
+ return Py_BuildValue("(NNN)", _PyInt_FromUid(ruid),
+ _PyInt_FromUid(euid),
+ _PyInt_FromUid(suid));
}
#endif
@@ -8633,14 +8777,11 @@ static PyObject*
posix_getresgid (PyObject *self, PyObject *noargs)
{
uid_t rgid, egid, sgid;
- long l_rgid, l_egid, l_sgid;
if (getresgid(&rgid, &egid, &sgid) < 0)
return posix_error();
- /* Force the values into long's as we don't know the size of uid_t. */
- l_rgid = rgid;
- l_egid = egid;
- l_sgid = sgid;
- return Py_BuildValue("(lll)", l_rgid, l_egid, l_sgid);
+ return Py_BuildValue("(NNN)", _PyInt_FromGid(rgid),
+ _PyInt_FromGid(egid),
+ _PyInt_FromGid(sgid));
}
#endif
@@ -8849,7 +8990,7 @@ static PyMethodDef posix_methods[] = {
{"tcsetpgrp", posix_tcsetpgrp, METH_VARARGS, posix_tcsetpgrp__doc__},
#endif /* HAVE_TCSETPGRP */
{"open", posix_open, METH_VARARGS, posix_open__doc__},
- {"close", posix_close, METH_VARARGS, posix_close__doc__},
+ {"close", posix_close_, METH_VARARGS, posix_close__doc__},
{"closerange", posix_closerange, METH_VARARGS, posix_closerange__doc__},
{"dup", posix_dup, METH_VARARGS, posix_dup__doc__},
{"dup2", posix_dup2, METH_VARARGS, posix_dup2__doc__},
diff --git a/Modules/posixmodule.h b/Modules/posixmodule.h
new file mode 100644
index 0000000..084e063
--- /dev/null
+++ b/Modules/posixmodule.h
@@ -0,0 +1,25 @@
+/* Declarations shared between the different POSIX-related modules */
+
+#ifndef Py_POSIXMODULE_H
+#define Py_POSIXMODULE_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifndef Py_LIMITED_API
+#ifndef MS_WINDOWS
+PyAPI_FUNC(PyObject *) _PyInt_FromUid(uid_t);
+PyAPI_FUNC(PyObject *) _PyInt_FromGid(gid_t);
+PyAPI_FUNC(int) _Py_Uid_Converter(PyObject *, void *);
+PyAPI_FUNC(int) _Py_Gid_Converter(PyObject *, void *);
+#endif /* MS_WINDOWS */
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_POSIXMODULE_H */
diff --git a/Modules/pwdmodule.c b/Modules/pwdmodule.c
index 6729c84..c2907f6 100644
--- a/Modules/pwdmodule.c
+++ b/Modules/pwdmodule.c
@@ -3,8 +3,8 @@
#include "Python.h"
#include "structseq.h"
+#include "posixmodule.h"
-#include <sys/types.h>
#include <pwd.h>
static PyStructSequence_Field struct_pwd_type_fields[] = {
@@ -73,8 +73,8 @@ mkpwent(struct passwd *p)
#else
SETS(setIndex++, p->pw_passwd);
#endif
- SETI(setIndex++, p->pw_uid);
- SETI(setIndex++, p->pw_gid);
+ PyStructSequence_SET_ITEM(v, setIndex++, _PyInt_FromUid(p->pw_uid));
+ PyStructSequence_SET_ITEM(v, setIndex++, _PyInt_FromGid(p->pw_gid));
#ifdef __VMS
SETS(setIndex++, "");
#else
@@ -103,13 +103,21 @@ See help(pwd) for more on password database entries.");
static PyObject *
pwd_getpwuid(PyObject *self, PyObject *args)
{
- unsigned int uid;
+ uid_t uid;
struct passwd *p;
- if (!PyArg_ParseTuple(args, "I:getpwuid", &uid))
+ if (!PyArg_ParseTuple(args, "O&:getpwuid", _Py_Uid_Converter, &uid)) {
+ if (PyErr_ExceptionMatches(PyExc_OverflowError))
+ PyErr_Format(PyExc_KeyError,
+ "getpwuid(): uid not found");
return NULL;
+ }
if ((p = getpwuid(uid)) == NULL) {
- PyErr_Format(PyExc_KeyError,
- "getpwuid(): uid not found: %d", uid);
+ if (uid < 0)
+ PyErr_Format(PyExc_KeyError,
+ "getpwuid(): uid not found: %ld", (long)uid);
+ else
+ PyErr_Format(PyExc_KeyError,
+ "getpwuid(): uid not found: %lu", (unsigned long)uid);
return NULL;
}
return mkpwent(p);
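
As with grp above, pwd now routes uids through the shared converter, and a uid outside the representable range surfaces as the documented KeyError rather than an OverflowError:

    >>> import pwd
    >>> pwd.getpwuid(0).pw_name        # 'root' on most systems
    'root'
    >>> pwd.getpwuid(2 ** 128)
    Traceback (most recent call last):
      ...
    KeyError: 'getpwuid(): uid not found'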
diff --git a/Modules/pyexpat.c b/Modules/pyexpat.c
index 976908c..a95c388 100644
--- a/Modules/pyexpat.c
+++ b/Modules/pyexpat.c
@@ -976,7 +976,7 @@ xmlparse_ParseFile(xmlparseobject *self, PyObject *f)
void *buf = XML_GetBuffer(self->itself, BUF_SIZE);
if (buf == NULL) {
Py_XDECREF(readmethod);
- return PyErr_NoMemory();
+ return get_parse_result(self, 0);
}
bytes_read = readinst(buf, BUF_SIZE, readmethod);
@@ -1174,13 +1174,16 @@ static PyObject *
xmlparse_UseForeignDTD(xmlparseobject *self, PyObject *args)
{
PyObject *flagobj = NULL;
- XML_Bool flag = XML_TRUE;
+ int flag = 1;
enum XML_Error rc;
- if (!PyArg_UnpackTuple(args, "UseForeignDTD", 0, 1, &flagobj))
+ if (!PyArg_ParseTuple(args, "|O:UseForeignDTD", &flagobj))
return NULL;
- if (flagobj != NULL)
- flag = PyObject_IsTrue(flagobj) ? XML_TRUE : XML_FALSE;
- rc = XML_UseForeignDTD(self->itself, flag);
+ if (flagobj != NULL) {
+ flag = PyObject_IsTrue(flagobj);
+ if (flag < 0)
+ return NULL;
+ }
+ rc = XML_UseForeignDTD(self->itself, flag ? XML_TRUE : XML_FALSE);
if (rc != XML_ERROR_NONE) {
return set_error(self, rc);
}
@@ -1249,6 +1252,13 @@ PyUnknownEncodingHandler(void *encodingHandlerData,
if (_u_string == NULL)
return result;
+ if (PyUnicode_GET_SIZE(_u_string) != 256) {
+ Py_DECREF(_u_string);
+ PyErr_SetString(PyExc_ValueError,
+ "multi-byte encodings are not supported");
+ return result;
+ }
+
for (i = 0; i < 256; i++) {
/* Stupid to access directly, but fast */
Py_UNICODE c = _u_string->str[i];
@@ -1549,7 +1559,10 @@ xmlparse_setattr(xmlparseobject *self, char *name, PyObject *v)
return -1;
}
if (strcmp(name, "buffer_text") == 0) {
- if (PyObject_IsTrue(v)) {
+ int b = PyObject_IsTrue(v);
+ if (b < 0)
+ return -1;
+ if (b) {
if (self->buffer == NULL) {
self->buffer = malloc(self->buffer_size);
if (self->buffer == NULL) {
@@ -1568,39 +1581,39 @@ xmlparse_setattr(xmlparseobject *self, char *name, PyObject *v)
return 0;
}
if (strcmp(name, "namespace_prefixes") == 0) {
- if (PyObject_IsTrue(v))
- self->ns_prefixes = 1;
- else
- self->ns_prefixes = 0;
+ int b = PyObject_IsTrue(v);
+ if (b < 0)
+ return -1;
+ self->ns_prefixes = b;
XML_SetReturnNSTriplet(self->itself, self->ns_prefixes);
return 0;
}
if (strcmp(name, "ordered_attributes") == 0) {
- if (PyObject_IsTrue(v))
- self->ordered_attributes = 1;
- else
- self->ordered_attributes = 0;
+ int b = PyObject_IsTrue(v);
+ if (b < 0)
+ return -1;
+ self->ordered_attributes = b;
return 0;
}
if (strcmp(name, "returns_unicode") == 0) {
- if (PyObject_IsTrue(v)) {
+ int b = PyObject_IsTrue(v);
+ if (b < 0)
+ return -1;
#ifndef Py_USING_UNICODE
+ if (b) {
PyErr_SetString(PyExc_ValueError,
"Unicode support not available");
return -1;
-#else
- self->returns_unicode = 1;
-#endif
}
- else
- self->returns_unicode = 0;
+#endif
+ self->returns_unicode = b;
return 0;
}
if (strcmp(name, "specified_attributes") == 0) {
- if (PyObject_IsTrue(v))
- self->specified_attributes = 1;
- else
- self->specified_attributes = 0;
+ int b = PyObject_IsTrue(v);
+ if (b < 0)
+ return -1;
+ self->specified_attributes = b;
return 0;
}
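
The pyexpat edits above run every boolean-ish attribute through PyObject_IsTrue() so an error in a custom __nonzero__ propagates, reject unknown-encoding maps that do not decode to exactly 256 characters, and evaluate the UseForeignDTD flag before handing it to expat. Typical use:

    >>> import xml.parsers.expat
    >>> p = xml.parsers.expat.ParserCreate()
    >>> p.buffer_text = True          # any object works; a failing __nonzero__ now raises here
    >>> p.UseForeignDTD(True)         # converted via PyObject_IsTrue() before XML_UseForeignDTD()
    >>> p.Parse("<doc/>", True)
    1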
diff --git a/Modules/readline.c b/Modules/readline.c
index b5e258d..f19fa0b 100644
--- a/Modules/readline.c
+++ b/Modules/readline.c
@@ -54,14 +54,16 @@ extern char **completion_matches(char *, CPFunction *);
* with the "real" readline and cannot be detected at compile-time,
* hence we use a runtime check to detect if we're using libedit
*
- * Currently there is one know API incompatibility:
+ * Currently there is one known API incompatibility:
* - 'get_history' has a 1-based index with GNU readline, and a 0-based
- * index with libedit's emulation.
+ * index with older versions of libedit's emulation.
* - Note that replace_history and remove_history use a 0-based index
- * with both implementation.
+ * with both implementations.
*/
static int using_libedit_emulation = 0;
static const char libedit_version_tag[] = "EditLine wrapper";
+
+static int libedit_history_start = 0;
#endif /* __APPLE__ */
static void
@@ -69,6 +71,10 @@ on_completion_display_matches_hook(char **matches,
int num_matches, int max_length);
+/* Memory allocated for rl_completer_word_break_characters
+ (see issue #17289 for the motivation). */
+static char *completer_word_break_characters;
+
/* Exported function to send one line to readline's init file parser */
static PyObject *
@@ -200,8 +206,7 @@ set_hook(const char *funcname, PyObject **hook_var, PyObject *args)
if (!PyArg_ParseTuple(args, buf, &function))
return NULL;
if (function == Py_None) {
- Py_XDECREF(*hook_var);
- *hook_var = NULL;
+ Py_CLEAR(*hook_var);
}
else if (PyCallable_Check(function)) {
PyObject *tmp = *hook_var;
@@ -344,12 +349,20 @@ set_completer_delims(PyObject *self, PyObject *args)
{
char *break_chars;
- if(!PyArg_ParseTuple(args, "s:set_completer_delims", &break_chars)) {
+ if (!PyArg_ParseTuple(args, "s:set_completer_delims", &break_chars)) {
return NULL;
}
- free((void*)rl_completer_word_break_characters);
- rl_completer_word_break_characters = strdup(break_chars);
- Py_RETURN_NONE;
+ /* Keep a reference to the allocated memory in the module state in case
+ some other module modifies rl_completer_word_break_characters
+ (see issue #17289). */
+ free(completer_word_break_characters);
+ completer_word_break_characters = strdup(break_chars);
+ if (completer_word_break_characters) {
+ rl_completer_word_break_characters = completer_word_break_characters;
+ Py_RETURN_NONE;
+ }
+ else
+ return PyErr_NoMemory();
}
PyDoc_STRVAR(doc_set_completer_delims,
@@ -543,21 +556,21 @@ get_history_item(PyObject *self, PyObject *args)
return NULL;
#ifdef __APPLE__
if (using_libedit_emulation) {
- /* Libedit emulation uses 0-based indexes,
- * the real one uses 1-based indexes,
- * adjust the index to ensure that Python
- * code doesn't have to worry about the
- * difference.
+ /* Older versions of libedit's readline emulation
+ * use 0-based indexes, while readline and newer
+ * versions of libedit use 1-based indexes.
*/
int length = _py_get_history_length();
- idx --;
+
+ idx = idx - 1 + libedit_history_start;
/*
* Apple's readline emulation crashes when
* the index is out of range, therefore
* test for that and fail gracefully.
*/
- if (idx < 0 || idx >= length) {
+ if (idx < (0 + libedit_history_start)
+ || idx >= (length + libedit_history_start)) {
Py_RETURN_NONE;
}
}
@@ -736,14 +749,22 @@ on_hook(PyObject *func)
}
static int
+#if defined(_RL_FUNCTION_TYPEDEF)
on_startup_hook(void)
+#else
+on_startup_hook()
+#endif
{
return on_hook(startup_hook);
}
#ifdef HAVE_RL_PRE_INPUT_HOOK
static int
+#if defined(_RL_FUNCTION_TYPEDEF)
on_pre_input_hook(void)
+#else
+on_pre_input_hook()
+#endif
{
return on_hook(pre_input_hook);
}
@@ -838,7 +859,7 @@ on_completion(const char *text, int state)
* before calling the normal completer */
static char **
-flex_complete(char *text, int start, int end)
+flex_complete(const char *text, int start, int end)
{
#ifdef HAVE_RL_COMPLETION_APPEND_CHARACTER
rl_completion_append_character ='\0';
@@ -871,6 +892,17 @@ setup_readline(void)
*/
if (using_libedit_emulation)
rl_initialize();
+
+ /* Detect if libedit's readline emulation uses 0-based
+ * indexing or 1-based indexing.
+ */
+ add_history("1");
+ if (history_get(1) == NULL) {
+ libedit_history_start = 0;
+ } else {
+ libedit_history_start = 1;
+ }
+ clear_history();
#endif /* __APPLE__ */
using_history();
@@ -886,14 +918,15 @@ setup_readline(void)
rl_bind_key_in_map ('\t', rl_complete, emacs_meta_keymap);
rl_bind_key_in_map ('\033', rl_complete, emacs_meta_keymap);
/* Set our hook functions */
- rl_startup_hook = (Function *)on_startup_hook;
+ rl_startup_hook = on_startup_hook;
#ifdef HAVE_RL_PRE_INPUT_HOOK
- rl_pre_input_hook = (Function *)on_pre_input_hook;
+ rl_pre_input_hook = on_pre_input_hook;
#endif
/* Set our completion function */
- rl_attempted_completion_function = (CPPFunction *)flex_complete;
+ rl_attempted_completion_function = flex_complete;
/* Set Python word break characters */
- rl_completer_word_break_characters =
+ completer_word_break_characters =
+ rl_completer_word_break_characters =
strdup(" \t\n`~!@#$%^&*()-=+[{]}\\|;:'\",<>/?");
/* All nonalphanums except '.' */
@@ -906,7 +939,7 @@ setup_readline(void)
*/
#ifdef __APPLE__
if (using_libedit_emulation)
- rl_read_init_file(NULL);
+ rl_read_init_file(NULL);
else
#endif /* __APPLE__ */
rl_initialize();
@@ -1077,11 +1110,8 @@ call_readline(FILE *sys_stdin, FILE *sys_stdout, char *prompt)
if (length > 0)
#ifdef __APPLE__
if (using_libedit_emulation) {
- /*
- * Libedit's emulation uses 0-based indexes,
- * the real readline uses 1-based indexes.
- */
- line = history_get(length - 1)->line;
+ /* handle older 0-based or newer 1-based indexing */
+ line = history_get(length + libedit_history_start - 1)->line;
} else
#endif /* __APPLE__ */
line = history_get(length)->line;
@@ -1137,8 +1167,6 @@ initreadline(void)
if (m == NULL)
return;
-
-
PyOS_ReadlineFunctionPointer = call_readline;
setup_readline();
}
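
The readline hunks above make history indexing consistent: setup_readline() probes whether the libedit emulation uses 0-based or 1-based indexes (by adding a dummy entry and checking history_get(1)), and get_history_item()/call_readline() then adjust with libedit_history_start, so Python code sees 1-based history on every backend. A small sketch of the resulting behaviour, assuming a Unix build of the readline module (GNU readline or libedit) and an initially empty history:

    import readline

    readline.add_history("first entry")
    readline.add_history("second entry")
    n = readline.get_current_history_length()
    print readline.get_history_item(1)   # 'first entry'  (oldest, 1-based on both backends)
    print readline.get_history_item(n)   # 'second entry' (most recent)
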
diff --git a/Modules/resource.c b/Modules/resource.c
index 9993b93..53a6c3e 100644
--- a/Modules/resource.c
+++ b/Modules/resource.c
@@ -145,10 +145,9 @@ resource_setrlimit(PyObject *self, PyObject *args)
{
struct rlimit rl;
int resource;
- PyObject *curobj, *maxobj;
+ PyObject *limits, *curobj, *maxobj;
- if (!PyArg_ParseTuple(args, "i(OO):setrlimit",
- &resource, &curobj, &maxobj))
+ if (!PyArg_ParseTuple(args, "iO:setrlimit", &resource, &limits))
return NULL;
if (resource < 0 || resource >= RLIM_NLIMITS) {
@@ -157,23 +156,36 @@ resource_setrlimit(PyObject *self, PyObject *args)
return NULL;
}
+ limits = PySequence_Tuple(limits);
+ if (!limits)
+ /* Here limits is a borrowed reference */
+ return NULL;
+
+ if (PyTuple_GET_SIZE(limits) != 2) {
+ PyErr_SetString(PyExc_ValueError,
+ "expected a tuple of 2 integers");
+ goto error;
+ }
+ curobj = PyTuple_GET_ITEM(limits, 0);
+ maxobj = PyTuple_GET_ITEM(limits, 1);
+
#if !defined(HAVE_LARGEFILE_SUPPORT)
rl.rlim_cur = PyInt_AsLong(curobj);
if (rl.rlim_cur == (rlim_t)-1 && PyErr_Occurred())
- return NULL;
+ goto error;
rl.rlim_max = PyInt_AsLong(maxobj);
if (rl.rlim_max == (rlim_t)-1 && PyErr_Occurred())
- return NULL;
+ goto error;
#else
/* The limits are probably bigger than a long */
rl.rlim_cur = PyLong_Check(curobj) ?
PyLong_AsLongLong(curobj) : PyInt_AsLong(curobj);
if (rl.rlim_cur == (rlim_t)-1 && PyErr_Occurred())
- return NULL;
+ goto error;
rl.rlim_max = PyLong_Check(maxobj) ?
PyLong_AsLongLong(maxobj) : PyInt_AsLong(maxobj);
if (rl.rlim_max == (rlim_t)-1 && PyErr_Occurred())
- return NULL;
+ goto error;
#endif
rl.rlim_cur = rl.rlim_cur & RLIM_INFINITY;
@@ -187,10 +199,15 @@ resource_setrlimit(PyObject *self, PyObject *args)
"not allowed to raise maximum limit");
else
PyErr_SetFromErrno(ResourceError);
- return NULL;
+ goto error;
}
+ Py_DECREF(limits);
Py_INCREF(Py_None);
return Py_None;
+
+ error:
+ Py_DECREF(limits);
+ return NULL;
}
static PyObject *
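
With the setrlimit() change above, the limits argument is parsed as a plain object and run through PySequence_Tuple(), so any two-item sequence is accepted, and a sequence of the wrong length raises the ValueError set in the hunk. A hedged sketch, assuming a POSIX build where the resource module is available:

    import resource

    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, [soft, hard])   # a list is accepted now
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, (soft,))    # wrong number of items
    except ValueError as e:
        print e                                                # "expected a tuple of 2 integers"
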
diff --git a/Modules/selectmodule.c b/Modules/selectmodule.c
index 5a8580c..2707b05 100644
--- a/Modules/selectmodule.c
+++ b/Modules/selectmodule.c
@@ -71,8 +71,7 @@ reap_obj(pylist fd2obj[FD_SETSIZE + 1])
{
int i;
for (i = 0; i < FD_SETSIZE + 1 && fd2obj[i].sentinel >= 0; i++) {
- Py_XDECREF(fd2obj[i].obj);
- fd2obj[i].obj = NULL;
+ Py_CLEAR(fd2obj[i].obj);
}
fd2obj[0].sentinel = -1;
}
@@ -87,7 +86,6 @@ seq2set(PyObject *seq, fd_set *set, pylist fd2obj[FD_SETSIZE + 1])
int i;
int max = -1;
int index = 0;
- int len = -1;
PyObject* fast_seq = NULL;
PyObject* o = NULL;
@@ -98,9 +96,7 @@ seq2set(PyObject *seq, fd_set *set, pylist fd2obj[FD_SETSIZE + 1])
if (!fast_seq)
return -1;
- len = PySequence_Fast_GET_SIZE(fast_seq);
-
- for (i = 0; i < len; i++) {
+ for (i = 0; i < PySequence_Fast_GET_SIZE(fast_seq); i++) {
SOCKET v;
/* any intervening fileno() calls could decr this refcnt */
@@ -321,6 +317,7 @@ typedef struct {
int ufd_uptodate;
int ufd_len;
struct pollfd *ufds;
+ int poll_running;
} pollObject;
static PyTypeObject poll_Type;
@@ -346,14 +343,35 @@ update_ufd_array(pollObject *self)
i = pos = 0;
while (PyDict_Next(self->dict, &pos, &key, &value)) {
- self->ufds[i].fd = PyInt_AsLong(key);
- self->ufds[i].events = (short)PyInt_AsLong(value);
+ assert(i < self->ufd_len);
+ /* Never overflow */
+ self->ufds[i].fd = (int)PyInt_AsLong(key);
+ self->ufds[i].events = (short)(unsigned short)PyInt_AsLong(value);
i++;
}
+ assert(i == self->ufd_len);
self->ufd_uptodate = 1;
return 1;
}
+static int
+ushort_converter(PyObject *obj, void *ptr)
+{
+ unsigned long uval;
+
+ uval = PyLong_AsUnsignedLong(obj);
+ if (uval == (unsigned long)-1 && PyErr_Occurred())
+ return 0;
+ if (uval > USHRT_MAX) {
+ PyErr_SetString(PyExc_OverflowError,
+ "Python int too large for C unsigned short");
+ return 0;
+ }
+
+ *(unsigned short *)ptr = Py_SAFE_DOWNCAST(uval, unsigned long, unsigned short);
+ return 1;
+}
+
PyDoc_STRVAR(poll_register_doc,
"register(fd [, eventmask] ) -> None\n\n\
Register a file descriptor with the polling object.\n\
@@ -365,12 +383,12 @@ static PyObject *
poll_register(pollObject *self, PyObject *args)
{
PyObject *o, *key, *value;
- int fd, events = POLLIN | POLLPRI | POLLOUT;
+ int fd;
+ unsigned short events = POLLIN | POLLPRI | POLLOUT;
int err;
- if (!PyArg_ParseTuple(args, "O|i:register", &o, &events)) {
+ if (!PyArg_ParseTuple(args, "O|O&:register", &o, ushort_converter, &events))
return NULL;
- }
fd = PyObject_AsFileDescriptor(o);
if (fd == -1) return NULL;
@@ -408,12 +426,12 @@ static PyObject *
poll_modify(pollObject *self, PyObject *args)
{
PyObject *o, *key, *value;
- int fd, events;
+ int fd;
+ unsigned short events;
int err;
- if (!PyArg_ParseTuple(args, "Oi:modify", &o, &events)) {
+ if (!PyArg_ParseTuple(args, "OO&:modify", &o, ushort_converter, &events))
return NULL;
- }
fd = PyObject_AsFileDescriptor(o);
if (fd == -1) return NULL;
@@ -506,22 +524,33 @@ poll_poll(pollObject *self, PyObject *args)
tout = PyNumber_Int(tout);
if (!tout)
return NULL;
- timeout = PyInt_AsLong(tout);
+ timeout = _PyInt_AsInt(tout);
Py_DECREF(tout);
if (timeout == -1 && PyErr_Occurred())
return NULL;
}
+ /* Avoid concurrent poll() invocation, issue 8865 */
+ if (self->poll_running) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "concurrent poll() invocation");
+ return NULL;
+ }
+
/* Ensure the ufd array is up to date */
if (!self->ufd_uptodate)
if (update_ufd_array(self) == 0)
return NULL;
+ self->poll_running = 1;
+
/* call poll() */
Py_BEGIN_ALLOW_THREADS
poll_result = poll(self->ufds, self->ufd_len, timeout);
Py_END_ALLOW_THREADS
+ self->poll_running = 0;
+
if (poll_result < 0) {
PyErr_SetFromErrno(SelectError);
return NULL;
@@ -598,6 +627,7 @@ newPollObject(void)
array pointed to by ufds matches the contents of the dictionary. */
self->ufd_uptodate = 0;
self->ufds = NULL;
+ self->poll_running = 0;
self->dict = PyDict_New();
if (self->dict == NULL) {
Py_DECREF(self);
@@ -1202,6 +1232,23 @@ static PyTypeObject kqueue_queue_Type;
# error uintptr_t does not match int, long, or long long!
#endif
+/*
+ * kevent is not standard and its members vary across BSDs.
+ */
+#if !defined(__OpenBSD__)
+# define IDENT_TYPE T_UINTPTRT
+# define IDENT_CAST Py_intptr_t
+# define DATA_TYPE T_INTPTRT
+# define DATA_FMT_UNIT INTPTRT_FMT_UNIT
+# define IDENT_AsType PyLong_AsUintptr_t
+#else
+# define IDENT_TYPE T_UINT
+# define IDENT_CAST int
+# define DATA_TYPE T_INT
+# define DATA_FMT_UNIT "i"
+# define IDENT_AsType PyLong_AsUnsignedLong
+#endif
+
/* Unfortunately, we can't store python objects in udata, because
* kevents in the kernel can be removed without warning, which would
* forever lose the refcount on the object stored with it.
@@ -1209,11 +1256,11 @@ static PyTypeObject kqueue_queue_Type;
#define KQ_OFF(x) offsetof(kqueue_event_Object, x)
static struct PyMemberDef kqueue_event_members[] = {
- {"ident", T_UINTPTRT, KQ_OFF(e.ident)},
+ {"ident", IDENT_TYPE, KQ_OFF(e.ident)},
{"filter", T_SHORT, KQ_OFF(e.filter)},
{"flags", T_USHORT, KQ_OFF(e.flags)},
{"fflags", T_UINT, KQ_OFF(e.fflags)},
- {"data", T_INTPTRT, KQ_OFF(e.data)},
+ {"data", DATA_TYPE, KQ_OFF(e.data)},
{"udata", T_UINTPTRT, KQ_OFF(e.udata)},
{NULL} /* Sentinel */
};
@@ -1239,7 +1286,7 @@ kqueue_event_init(kqueue_event_Object *self, PyObject *args, PyObject *kwds)
PyObject *pfd;
static char *kwlist[] = {"ident", "filter", "flags", "fflags",
"data", "udata", NULL};
- static char *fmt = "O|hhi" INTPTRT_FMT_UNIT UINTPTRT_FMT_UNIT ":kevent";
+ static char *fmt = "O|hHI" DATA_FMT_UNIT UINTPTRT_FMT_UNIT ":kevent";
EV_SET(&(self->e), 0, EVFILT_READ, EV_ADD, 0, 0, 0); /* defaults */
@@ -1249,8 +1296,12 @@ kqueue_event_init(kqueue_event_Object *self, PyObject *args, PyObject *kwds)
return -1;
}
- if (PyLong_Check(pfd)) {
- self->e.ident = PyLong_AsUintptr_t(pfd);
+ if (PyLong_Check(pfd)
+#if IDENT_TYPE == T_UINT
+ && PyLong_AsUnsignedLong(pfd) <= UINT_MAX
+#endif
+ ) {
+ self->e.ident = IDENT_AsType(pfd);
}
else {
self->e.ident = PyObject_AsFileDescriptor(pfd);
@@ -1278,10 +1329,10 @@ kqueue_event_richcompare(kqueue_event_Object *s, kqueue_event_Object *o,
Py_TYPE(s)->tp_name, Py_TYPE(o)->tp_name);
return NULL;
}
- if (((result = s->e.ident - o->e.ident) == 0) &&
+ if (((result = (IDENT_CAST)(s->e.ident - o->e.ident)) == 0) &&
((result = s->e.filter - o->e.filter) == 0) &&
((result = s->e.flags - o->e.flags) == 0) &&
- ((result = s->e.fflags - o->e.fflags) == 0) &&
+ ((result = (int)(s->e.fflags - o->e.fflags)) == 0) &&
((result = s->e.data - o->e.data) == 0) &&
((result = s->e.udata - o->e.udata) == 0)
) {
@@ -1736,7 +1787,7 @@ descriptors can be used.");
static PyMethodDef select_methods[] = {
{"select", select_select, METH_VARARGS, select_doc},
-#ifdef HAVE_POLL
+#if defined(HAVE_POLL) && !defined(HAVE_BROKEN_POLL)
{"poll", select_poll, METH_NOARGS, poll_doc},
#endif /* HAVE_POLL */
{0, 0}, /* sentinel */
@@ -1768,7 +1819,7 @@ initselect(void)
PyModule_AddIntConstant(m, "PIPE_BUF", PIPE_BUF);
#endif
-#if defined(HAVE_POLL)
+#if defined(HAVE_POLL) && !defined(HAVE_BROKEN_POLL)
#ifdef __APPLE__
if (select_have_broken_poll()) {
if (PyObject_DelAttrString(m, "poll") == -1) {
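
Two user-visible effects of the selectmodule changes: poll.register() and poll.modify() now convert the event mask with ushort_converter(), so values outside the C unsigned short range raise OverflowError, and the new poll_running flag makes a second concurrent poll() call fail with RuntimeError instead of racing on the shared ufds array. A sketch of the first point, assuming a platform that provides select.poll() and a 16-bit unsigned short:

    import select

    if hasattr(select, "poll"):
        p = select.poll()
        p.register(0, select.POLLIN)       # event masks must fit in a C unsigned short
        try:
            p.register(0, 65536)           # larger than USHRT_MAX on common platforms
        except OverflowError as e:
            print e                        # "Python int too large for C unsigned short"
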
diff --git a/Modules/shamodule.c b/Modules/shamodule.c
index a86e722..656208d 100644
--- a/Modules/shamodule.c
+++ b/Modules/shamodule.c
@@ -429,12 +429,25 @@ static PyObject *
SHA_update(SHAobject *self, PyObject *args)
{
Py_buffer view;
+ Py_ssize_t n;
+ unsigned char *buf;
if (!PyArg_ParseTuple(args, "s*:update", &view))
return NULL;
- sha_update(self, (unsigned char*)view.buf,
- Py_SAFE_DOWNCAST(view.len, Py_ssize_t, unsigned int));
+ n = view.len;
+ buf = (unsigned char *) view.buf;
+ while (n > 0) {
+ Py_ssize_t nbytes;
+ if (n > INT_MAX)
+ nbytes = INT_MAX;
+ else
+ nbytes = n;
+ sha_update(self, buf,
+ Py_SAFE_DOWNCAST(nbytes, Py_ssize_t, unsigned int));
+ buf += nbytes;
+ n -= nbytes;
+ }
PyBuffer_Release(&view);
Py_RETURN_NONE;
@@ -536,6 +549,8 @@ SHA_new(PyObject *self, PyObject *args, PyObject *kwdict)
static char *kwlist[] = {"string", NULL};
SHAobject *new;
Py_buffer view = { 0 };
+ Py_ssize_t n;
+ unsigned char *buf;
if (!PyArg_ParseTupleAndKeywords(args, kwdict, "|s*:new", kwlist,
&view)) {
@@ -554,10 +569,21 @@ SHA_new(PyObject *self, PyObject *args, PyObject *kwdict)
PyBuffer_Release(&view);
return NULL;
}
- if (view.len > 0) {
- sha_update(new, (unsigned char*)view.buf,
- Py_SAFE_DOWNCAST(view.len, Py_ssize_t, unsigned int));
+
+ n = view.len;
+ buf = (unsigned char *) view.buf;
+ while (n > 0) {
+ Py_ssize_t nbytes;
+ if (n > INT_MAX)
+ nbytes = INT_MAX;
+ else
+ nbytes = n;
+ sha_update(new, buf,
+ Py_SAFE_DOWNCAST(nbytes, Py_ssize_t, unsigned int));
+ buf += nbytes;
+ n -= nbytes;
}
+
PyBuffer_Release(&view);
return (PyObject *)new;
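
The SHA_update()/SHA_new() hunks feed the input to sha_update() in slices of at most INT_MAX bytes, so buffers larger than 2 GiB no longer overflow the unsigned int downcast. The same chunking idea expressed in Python; hashlib is used only for illustration and update_in_chunks is a made-up helper, not part of the module:

    import hashlib

    def update_in_chunks(h, data, chunk=2**31 - 1):
        # Feed data to the hash object in bounded slices, mirroring the C loop above.
        pos = 0
        while pos < len(data):
            h.update(data[pos:pos + chunk])
            pos += chunk

    h = hashlib.sha1()
    update_in_chunks(h, "abc" * 1000, chunk=997)    # tiny chunk size, just to exercise the loop
    print h.hexdigest() == hashlib.sha1("abc" * 1000).hexdigest()   # True
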
diff --git a/Modules/signalmodule.c b/Modules/signalmodule.c
index 908c2ee..184b3a9 100644
--- a/Modules/signalmodule.c
+++ b/Modules/signalmodule.c
@@ -321,7 +321,10 @@ signal_signal(PyObject *self, PyObject *args)
Handlers[sig_num].tripped = 0;
Py_INCREF(obj);
Handlers[sig_num].func = obj;
- return old_handler;
+ if (old_handler != NULL)
+ return old_handler;
+ else
+ Py_RETURN_NONE;
}
PyDoc_STRVAR(signal_doc,
@@ -349,8 +352,13 @@ signal_getsignal(PyObject *self, PyObject *args)
return NULL;
}
old_handler = Handlers[sig_num].func;
- Py_INCREF(old_handler);
- return old_handler;
+ if (old_handler != NULL) {
+ Py_INCREF(old_handler);
+ return old_handler;
+ }
+ else {
+ Py_RETURN_NONE;
+ }
}
PyDoc_STRVAR(getsignal_doc,
@@ -407,7 +415,7 @@ signal_set_wakeup_fd(PyObject *self, PyObject *args)
return NULL;
}
#endif
- if (fd != -1 && fstat(fd, &buf) != 0) {
+ if (fd != -1 && (!_PyVerify_fd(fd) || fstat(fd, &buf) != 0)) {
PyErr_SetString(PyExc_ValueError, "invalid fd");
return NULL;
}
@@ -972,9 +980,25 @@ PyOS_InterruptOccurred(void)
return 0;
}
+static void
+_clear_pending_signals(void)
+{
+ int i;
+ if (!is_tripped)
+ return;
+ is_tripped = 0;
+ for (i = 1; i < NSIG; ++i) {
+ Handlers[i].tripped = 0;
+ }
+}
+
void
PyOS_AfterFork(void)
{
+ /* Clear the signal flags after forking so that they aren't handled
+ * in both processes if they came in just before the fork() but before
+ * the interpreter had an opportunity to call the handlers. issue9535. */
+ _clear_pending_signals();
#ifdef WITH_THREAD
/* PyThread_ReInitTLS() must be called early, to make sure that the TLS API
* can be called safely. */
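
Besides clearing pending signal flags in PyOS_AfterFork(), the signalmodule hunks keep set_wakeup_fd() rejecting descriptors that fail fstat() (and, on Windows, ones that fail _PyVerify_fd()). A small POSIX-flavoured sketch of that error surface; the message is the one set in the hunk above:

    import os, signal

    r, w = os.pipe()
    old = signal.set_wakeup_fd(w)     # a valid, writable descriptor is accepted
    signal.set_wakeup_fd(old)         # restore the previous setting (-1 means "none")
    os.close(r); os.close(w)

    try:
        signal.set_wakeup_fd(10**6)   # almost certainly not an open descriptor
    except ValueError as e:
        print e                       # "invalid fd"
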
diff --git a/Modules/socketmodule.c b/Modules/socketmodule.c
index c521521..880f311 100644
--- a/Modules/socketmodule.c
+++ b/Modules/socketmodule.c
@@ -92,6 +92,7 @@ Local naming conventions:
#include "Python.h"
#include "structmember.h"
+#include "timefuncs.h"
#undef MAX
#define MAX(x, y) ((x) < (y) ? (y) : (x))
@@ -473,6 +474,17 @@ select_error(void)
return NULL;
}
+#ifdef MS_WINDOWS
+#ifndef WSAEAGAIN
+#define WSAEAGAIN WSAEWOULDBLOCK
+#endif
+#define CHECK_ERRNO(expected) \
+ (WSAGetLastError() == WSA ## expected)
+#else
+#define CHECK_ERRNO(expected) \
+ (errno == expected)
+#endif
+
/* Convenience function to raise an error according to errno
and return a NULL pointer from a function. */
@@ -661,7 +673,7 @@ internal_setblocking(PySocketSockObject *s, int block)
after they've reacquired the interpreter lock.
Returns 1 on timeout, -1 on error, 0 otherwise. */
static int
-internal_select(PySocketSockObject *s, int writing)
+internal_select_ex(PySocketSockObject *s, int writing, double interval)
{
int n;
@@ -673,6 +685,10 @@ internal_select(PySocketSockObject *s, int writing)
if (s->sock_fd < 0)
return 0;
+ /* Handling this condition here simplifies the select loops */
+ if (interval < 0.0)
+ return 1;
+
/* Prefer poll, if available, since you can poll() any fd
* which can't be done with select(). */
#ifdef HAVE_POLL
@@ -684,7 +700,7 @@ internal_select(PySocketSockObject *s, int writing)
pollfd.events = writing ? POLLOUT : POLLIN;
/* s->sock_timeout is in seconds, timeout in ms */
- timeout = (int)(s->sock_timeout * 1000 + 0.5);
+ timeout = (int)(interval * 1000 + 0.5);
n = poll(&pollfd, 1, timeout);
}
#else
@@ -692,8 +708,8 @@ internal_select(PySocketSockObject *s, int writing)
/* Construct the arguments to select */
fd_set fds;
struct timeval tv;
- tv.tv_sec = (int)s->sock_timeout;
- tv.tv_usec = (int)((s->sock_timeout - tv.tv_sec) * 1e6);
+ tv.tv_sec = (int)interval;
+ tv.tv_usec = (int)((interval - tv.tv_sec) * 1e6);
FD_ZERO(&fds);
FD_SET(s->sock_fd, &fds);
@@ -712,6 +728,48 @@ internal_select(PySocketSockObject *s, int writing)
return 0;
}
+static int
+internal_select(PySocketSockObject *s, int writing)
+{
+ return internal_select_ex(s, writing, s->sock_timeout);
+}
+
+/*
+ Two macros for automatic retry of select() in case of false positives
+ (for example, select() could indicate a socket is ready for reading
+ but the data is then discarded by the OS because of a wrong checksum).
+ Here is an example of use:
+
+ BEGIN_SELECT_LOOP(s)
+ Py_BEGIN_ALLOW_THREADS
+ timeout = internal_select_ex(s, 0, interval);
+ if (!timeout)
+ outlen = recv(s->sock_fd, cbuf, len, flags);
+ Py_END_ALLOW_THREADS
+ if (timeout == 1) {
+ PyErr_SetString(socket_timeout, "timed out");
+ return -1;
+ }
+ END_SELECT_LOOP(s)
+*/
+#define BEGIN_SELECT_LOOP(s) \
+ { \
+ double deadline, interval = s->sock_timeout; \
+ int has_timeout = s->sock_timeout > 0.0; \
+ if (has_timeout) { \
+ deadline = _PyTime_FloatTime() + s->sock_timeout; \
+ } \
+ while (1) { \
+ errno = 0;
+
+#define END_SELECT_LOOP(s) \
+ if (!has_timeout || \
+ (!CHECK_ERRNO(EWOULDBLOCK) && !CHECK_ERRNO(EAGAIN))) \
+ break; \
+ interval = deadline - _PyTime_FloatTime(); \
+ } \
+ }
+
/* Initialize a new socket object. */
static double defaulttimeout = -1.0; /* Default timeout for new sockets */
@@ -761,7 +819,7 @@ new_sockobject(SOCKET_T fd, int family, int type, int proto)
/* Lock to allow python interpreter to continue, but only allow one
thread to be in gethostbyname or getaddrinfo */
#if defined(USE_GETHOSTBYNAME_LOCK) || defined(USE_GETADDRINFO_LOCK)
-PyThread_type_lock netdb_lock;
+static PyThread_type_lock netdb_lock;
#endif
@@ -1310,7 +1368,7 @@ getsockaddrarg(PySocketSockObject *s, PyObject *args,
"getsockaddrarg: port must be 0-65535.");
return 0;
}
- if (flowinfo < 0 || flowinfo > 0xfffff) {
+ if (flowinfo > 0xfffff) {
PyErr_SetString(
PyExc_OverflowError,
"getsockaddrarg: flowinfo must be 0-1048575.");
@@ -1656,8 +1714,9 @@ sock_accept(PySocketSockObject *s)
if (!IS_SELECTABLE(s))
return select_error();
+ BEGIN_SELECT_LOOP(s)
Py_BEGIN_ALLOW_THREADS
- timeout = internal_select(s, 0);
+ timeout = internal_select_ex(s, 0, interval);
if (!timeout)
newfd = accept(s->sock_fd, SAS2SA(&addrbuf), &addrlen);
Py_END_ALLOW_THREADS
@@ -1666,6 +1725,7 @@ sock_accept(PySocketSockObject *s)
PyErr_SetString(socket_timeout, "timed out");
return NULL;
}
+ END_SELECT_LOOP(s)
#ifdef MS_WINDOWS
if (newfd == INVALID_SOCKET)
@@ -1713,7 +1773,7 @@ info is a pair (hostaddr, port).");
static PyObject *
sock_setblocking(PySocketSockObject *s, PyObject *arg)
{
- int block;
+ long block;
block = PyInt_AsLong(arg);
if (block == -1 && PyErr_Occurred())
@@ -2243,7 +2303,7 @@ sock_listen(PySocketSockObject *s, PyObject *arg)
int backlog;
int res;
- backlog = PyInt_AsLong(arg);
+ backlog = _PyInt_AsInt(arg);
if (backlog == -1 && PyErr_Occurred())
return NULL;
Py_BEGIN_ALLOW_THREADS
@@ -2355,8 +2415,9 @@ sock_recv_guts(PySocketSockObject *s, char* cbuf, int len, int flags)
}
#ifndef __VMS
+ BEGIN_SELECT_LOOP(s)
Py_BEGIN_ALLOW_THREADS
- timeout = internal_select(s, 0);
+ timeout = internal_select_ex(s, 0, interval);
if (!timeout)
outlen = recv(s->sock_fd, cbuf, len, flags);
Py_END_ALLOW_THREADS
@@ -2365,6 +2426,7 @@ sock_recv_guts(PySocketSockObject *s, char* cbuf, int len, int flags)
PyErr_SetString(socket_timeout, "timed out");
return -1;
}
+ END_SELECT_LOOP(s)
if (outlen < 0) {
/* Note: the call to errorhandler() ALWAYS indirectly returned
NULL, so ignore its return value */
@@ -2386,8 +2448,9 @@ sock_recv_guts(PySocketSockObject *s, char* cbuf, int len, int flags)
segment = remaining;
}
+ BEGIN_SELECT_LOOP(s)
Py_BEGIN_ALLOW_THREADS
- timeout = internal_select(s, 0);
+ timeout = internal_select_ex(s, 0, interval);
if (!timeout)
nread = recv(s->sock_fd, read_buf, segment, flags);
Py_END_ALLOW_THREADS
@@ -2396,6 +2459,8 @@ sock_recv_guts(PySocketSockObject *s, char* cbuf, int len, int flags)
PyErr_SetString(socket_timeout, "timed out");
return -1;
}
+ END_SELECT_LOOP(s)
+
if (nread < 0) {
s->errorhandler();
return -1;
@@ -2559,9 +2624,10 @@ sock_recvfrom_guts(PySocketSockObject *s, char* cbuf, int len, int flags,
return -1;
}
+ BEGIN_SELECT_LOOP(s)
Py_BEGIN_ALLOW_THREADS
memset(&addrbuf, 0, addrlen);
- timeout = internal_select(s, 0);
+ timeout = internal_select_ex(s, 0, interval);
if (!timeout) {
#ifndef MS_WINDOWS
#if defined(PYOS_OS2) && !defined(PYCC_GCC)
@@ -2582,6 +2648,7 @@ sock_recvfrom_guts(PySocketSockObject *s, char* cbuf, int len, int flags,
PyErr_SetString(socket_timeout, "timed out");
return -1;
}
+ END_SELECT_LOOP(s)
if (n < 0) {
s->errorhandler();
return -1;
@@ -2665,7 +2732,6 @@ sock_recvfrom_into(PySocketSockObject *s, PyObject *args, PyObject* kwds)
&recvlen, &flags))
return NULL;
buflen = buf.len;
- assert(buf.buf != 0 && buflen > 0);
if (recvlen < 0) {
PyErr_SetString(PyExc_ValueError,
@@ -2675,6 +2741,10 @@ sock_recvfrom_into(PySocketSockObject *s, PyObject *args, PyObject* kwds)
if (recvlen == 0) {
/* If nbytes was not specified, use the buffer's length */
recvlen = buflen;
+ } else if (recvlen > buflen) {
+ PyErr_SetString(PyExc_ValueError,
+ "nbytes is greater than the length of the buffer");
+ goto error;
}
readlen = sock_recvfrom_guts(s, buf.buf, recvlen, flags, &addr);
@@ -2719,8 +2789,9 @@ sock_send(PySocketSockObject *s, PyObject *args)
buf = pbuf.buf;
len = pbuf.len;
+ BEGIN_SELECT_LOOP(s)
Py_BEGIN_ALLOW_THREADS
- timeout = internal_select(s, 1);
+ timeout = internal_select_ex(s, 1, interval);
if (!timeout)
#ifdef __VMS
n = sendsegmented(s->sock_fd, buf, len, flags);
@@ -2728,13 +2799,14 @@ sock_send(PySocketSockObject *s, PyObject *args)
n = send(s->sock_fd, buf, len, flags);
#endif
Py_END_ALLOW_THREADS
-
- PyBuffer_Release(&pbuf);
-
if (timeout == 1) {
+ PyBuffer_Release(&pbuf);
PyErr_SetString(socket_timeout, "timed out");
return NULL;
}
+ END_SELECT_LOOP(s)
+
+ PyBuffer_Release(&pbuf);
if (n < 0)
return s->errorhandler();
return PyInt_FromLong((long)n);
@@ -2768,8 +2840,9 @@ sock_sendall(PySocketSockObject *s, PyObject *args)
}
do {
+ BEGIN_SELECT_LOOP(s)
Py_BEGIN_ALLOW_THREADS
- timeout = internal_select(s, 1);
+ timeout = internal_select_ex(s, 1, interval);
n = -1;
if (!timeout) {
#ifdef __VMS
@@ -2784,6 +2857,7 @@ sock_sendall(PySocketSockObject *s, PyObject *args)
PyErr_SetString(socket_timeout, "timed out");
return NULL;
}
+ END_SELECT_LOOP(s)
/* PyErr_CheckSignals() might change errno */
saved_errno = errno;
/* We must run our signal handlers before looping again.
@@ -2863,17 +2937,20 @@ sock_sendto(PySocketSockObject *s, PyObject *args)
return NULL;
}
+ BEGIN_SELECT_LOOP(s)
Py_BEGIN_ALLOW_THREADS
- timeout = internal_select(s, 1);
+ timeout = internal_select_ex(s, 1, interval);
if (!timeout)
n = sendto(s->sock_fd, buf, len, flags, SAS2SA(&addrbuf), addrlen);
Py_END_ALLOW_THREADS
- PyBuffer_Release(&pbuf);
if (timeout == 1) {
+ PyBuffer_Release(&pbuf);
PyErr_SetString(socket_timeout, "timed out");
return NULL;
}
+ END_SELECT_LOOP(s)
+ PyBuffer_Release(&pbuf);
if (n < 0)
return s->errorhandler();
return PyInt_FromLong((long)n);
@@ -2894,7 +2971,7 @@ sock_shutdown(PySocketSockObject *s, PyObject *arg)
int how;
int res;
- how = PyInt_AsLong(arg);
+ how = _PyInt_AsInt(arg);
if (how == -1 && PyErr_Occurred())
return NULL;
Py_BEGIN_ALLOW_THREADS
@@ -4090,17 +4167,30 @@ socket_getaddrinfo(PyObject *self, PyObject *args)
"getaddrinfo() argument 1 must be string or None");
return NULL;
}
- if (PyInt_Check(pobj)) {
- PyOS_snprintf(pbuf, sizeof(pbuf), "%ld", PyInt_AsLong(pobj));
+ if (PyInt_Check(pobj) || PyLong_Check(pobj)) {
+ long value = PyLong_AsLong(pobj);
+ if (value == -1 && PyErr_Occurred())
+ return NULL;
+ PyOS_snprintf(pbuf, sizeof(pbuf), "%ld", value);
pptr = pbuf;
} else if (PyString_Check(pobj)) {
pptr = PyString_AsString(pobj);
} else if (pobj == Py_None) {
pptr = (char *)NULL;
} else {
- PyErr_SetString(socket_error, "Int or String expected");
+ PyErr_SetString(socket_error,
+ "getaddrinfo() argument 2 must be integer or string");
goto err;
}
+#if defined(__APPLE__) && defined(AI_NUMERICSERV)
+ if ((flags & AI_NUMERICSERV) && (pptr == NULL || (pptr[0] == '0' && pptr[1] == 0))) {
+ /* On OSX up to at least OSX 10.8 getaddrinfo crashes
+ * if AI_NUMERICSERV is set and the servname is NULL or "0".
+ * This workaround avoids a segfault in libsystem.
+ */
+ pptr = "00";
+ }
+#endif
memset(&hints, 0, sizeof(hints));
hints.ai_family = family;
hints.ai_socktype = socktype;
@@ -4181,7 +4271,7 @@ socket_getnameinfo(PyObject *self, PyObject *args)
if (!PyArg_ParseTuple(sa, "si|II",
&hostp, &port, &flowinfo, &scope_id))
return NULL;
- if (flowinfo < 0 || flowinfo > 0xfffff) {
+ if (flowinfo > 0xfffff) {
PyErr_SetString(PyExc_OverflowError,
"getsockaddrarg: flowinfo must be 0-1048575.");
return NULL;
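
The BEGIN_SELECT_LOOP/END_SELECT_LOOP macros retry the select()-plus-syscall pair when the call still fails with EWOULDBLOCK/EAGAIN even though the descriptor was reported ready, and they shrink the remaining interval from a fixed deadline so a socket timeout keeps meaning "total elapsed time". The same deadline pattern written out in Python; wait_readable is a hypothetical helper, and socket.socketpair() assumes a Unix build:

    import select, socket, time

    def wait_readable(sock, timeout):
        # Retry select() until data arrives or the overall deadline has passed.
        deadline = time.time() + timeout
        while True:
            interval = deadline - time.time()
            if interval < 0:
                return False                          # treated as a timeout
            r, _, _ = select.select([sock], [], [], interval)
            if r:
                return True

    a, b = socket.socketpair()
    print wait_readable(a, 0.2)    # False: nothing was sent, the deadline expires
    b.sendall("x")
    print wait_readable(a, 0.2)    # True
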
diff --git a/Modules/sre.h b/Modules/sre.h
index d4af05c..22a2d5d 100644
--- a/Modules/sre.h
+++ b/Modules/sre.h
@@ -14,11 +14,21 @@
#include "sre_constants.h"
/* size of a code word (must be unsigned short or larger, and
- large enough to hold a Py_UNICODE character) */
-#ifdef Py_UNICODE_WIDE
-#define SRE_CODE Py_UCS4
+ large enough to hold a UCS4 character) */
+#ifdef Py_USING_UNICODE
+# define SRE_CODE Py_UCS4
+# if SIZEOF_SIZE_T > 4
+# define SRE_MAXREPEAT (~(SRE_CODE)0)
+# else
+# define SRE_MAXREPEAT ((SRE_CODE)PY_SSIZE_T_MAX)
+# endif
#else
-#define SRE_CODE unsigned short
+# define SRE_CODE unsigned int
+# if SIZEOF_SIZE_T > SIZEOF_INT
+# define SRE_MAXREPEAT (~(SRE_CODE)0)
+# else
+# define SRE_MAXREPEAT ((SRE_CODE)PY_SSIZE_T_MAX)
+# endif
#endif
typedef struct {
diff --git a/Modules/stropmodule.c b/Modules/stropmodule.c
index 4684baf..913bef8 100644
--- a/Modules/stropmodule.c
+++ b/Modules/stropmodule.c
@@ -593,7 +593,7 @@ strop_expandtabs(PyObject *self, PyObject *args)
char* e;
char* p;
char* q;
- Py_ssize_t i, j, old_j;
+ Py_ssize_t i, j;
PyObject* out;
char* string;
Py_ssize_t stringlen;
@@ -610,30 +610,29 @@ strop_expandtabs(PyObject *self, PyObject *args)
}
/* First pass: determine size of output string */
- i = j = old_j = 0; /* j: current column; i: total of previous lines */
+ i = j = 0; /* j: current column; i: total of previous lines */
e = string + stringlen;
for (p = string; p < e; p++) {
if (*p == '\t') {
- j += tabsize - (j%tabsize);
- if (old_j > j) {
- PyErr_SetString(PyExc_OverflowError,
- "new string is too long");
- return NULL;
- }
- old_j = j;
+ Py_ssize_t incr = tabsize - (j%tabsize);
+ if (j > PY_SSIZE_T_MAX - incr)
+ goto overflow;
+ j += incr;
} else {
+ if (j > PY_SSIZE_T_MAX - 1)
+ goto overflow;
j++;
if (*p == '\n') {
+ if (i > PY_SSIZE_T_MAX - j)
+ goto overflow;
i += j;
j = 0;
}
}
}
- if ((i + j) < 0) {
- PyErr_SetString(PyExc_OverflowError, "new string is too long");
- return NULL;
- }
+ if (i > PY_SSIZE_T_MAX - j)
+ goto overflow;
/* Second pass: create output string and fill it */
out = PyString_FromStringAndSize(NULL, i+j);
@@ -658,6 +657,9 @@ strop_expandtabs(PyObject *self, PyObject *args)
}
return out;
+ overflow:
+ PyErr_SetString(PyExc_OverflowError, "result is too long");
+ return NULL;
}
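
The expandtabs() rewrite replaces the after-the-fact "did the sum go negative" test with a check performed before each addition: j may only grow by incr when j <= PY_SSIZE_T_MAX - incr, which can never overflow. The same guard spelled out in Python, with an explicit limit; the PY_SSIZE_T_MAX value and the checked_add helper are assumptions made only for illustration:

    PY_SSIZE_T_MAX = 2**63 - 1          # value on a typical 64-bit build

    def checked_add(a, b, limit=PY_SSIZE_T_MAX):
        # Reject a + b > limit without ever computing an out-of-range value.
        if a > limit - b:
            raise OverflowError("result is too long")
        return a + b

    print checked_add(10, 20)                    # 30
    try:
        checked_add(PY_SSIZE_T_MAX, 1)
    except OverflowError as e:
        print e
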
diff --git a/Modules/svmodule.c b/Modules/svmodule.c
index 6e419ce..1519065 100644
--- a/Modules/svmodule.c
+++ b/Modules/svmodule.c
@@ -279,8 +279,7 @@ capture_dealloc(captureobject *self)
(void)svUnlockCaptureData(self->ob_svideo->ob_svideo,
self->ob_capture);
self->ob_capture = NULL;
- Py_DECREF(self->ob_svideo);
- self->ob_svideo = NULL;
+ Py_CLEAR(self->ob_svideo);
}
PyObject_Del(self);
}
diff --git a/Modules/symtablemodule.c b/Modules/symtablemodule.c
index 60f9ba9..ed2bcc7 100644
--- a/Modules/symtablemodule.c
+++ b/Modules/symtablemodule.c
@@ -33,7 +33,7 @@ symtable_symtable(PyObject *self, PyObject *args)
st = Py_SymtableString(str, filename, start);
if (st == NULL)
return NULL;
- t = st->st_symbols;
+ t = (PyObject *)st->st_top;
Py_INCREF(t);
PyMem_Free((void *)st->st_future);
PySymtable_Free(st);
@@ -52,6 +52,9 @@ init_symtable(void)
{
PyObject *m;
+ if (PyType_Ready(&PySTEntry_Type) < 0)
+ return;
+
m = Py_InitModule("_symtable", symtable_methods);
if (m == NULL)
return;
diff --git a/Modules/threadmodule.c b/Modules/threadmodule.c
index 4e41085..efc5d7f 100644
--- a/Modules/threadmodule.c
+++ b/Modules/threadmodule.c
@@ -56,12 +56,12 @@ lock_PyThread_acquire_lock(lockobject *self, PyObject *args)
}
PyDoc_STRVAR(acquire_doc,
-"acquire([wait]) -> None or bool\n\
+"acquire([wait]) -> bool\n\
(acquire_lock() is an obsolete synonym)\n\
\n\
Lock the lock. Without argument, this blocks if the lock is already\n\
locked (even by the same thread), waiting for another thread to release\n\
-the lock, and return None once the lock is acquired.\n\
+the lock, and return True once the lock is acquired.\n\
With an argument, this will only block if the argument is true,\n\
and the return value reflects whether the lock is acquired.\n\
The blocking operation is not interruptible.");
@@ -618,6 +618,8 @@ t_bootstrap(void *boot_raw)
PyErr_Clear();
else {
PyObject *file;
+ PyObject *exc, *value, *tb;
+ PyErr_Fetch(&exc, &value, &tb);
PySys_WriteStderr(
"Unhandled exception in thread started by ");
file = PySys_GetObject("stderr");
@@ -626,6 +628,7 @@ t_bootstrap(void *boot_raw)
else
PyObject_Print(boot->func, stderr, 0);
PySys_WriteStderr("\n");
+ PyErr_Restore(exc, value, tb);
PyErr_PrintEx(0);
}
}
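
The docstring fix above matches what lock.acquire() actually returns: a bool, True for a successful blocking acquire and False for a failed non-blocking attempt. A quick check:

    import thread

    lock = thread.allocate_lock()
    print lock.acquire()      # True: blocking acquire succeeded
    print lock.acquire(0)     # False: already held, non-blocking attempt fails
    lock.release()
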
diff --git a/Modules/timemodule.c b/Modules/timemodule.c
index 397cf8c..12c43b0 100644
--- a/Modules/timemodule.c
+++ b/Modules/timemodule.c
@@ -96,7 +96,7 @@ static int floatsleep(double);
static double floattime(void);
/* For Y2K check */
-static PyObject *moddict;
+static PyObject *moddict = NULL;
/* Exposed in timefuncs.h. */
time_t
@@ -858,6 +858,11 @@ inittime(void)
/* Accept 2-digit dates unless PYTHONY2K is set and non-empty */
p = Py_GETENV("PYTHONY2K");
PyModule_AddIntConstant(m, "accept2dyear", (long) (!p || !*p));
+ /* If an embedded interpreter is shutdown and reinitialized the old
+ moddict was not decrefed on shutdown and the next import of this
+ module leads to a leak. Conditionally decref here to prevent that.
+ */
+ Py_XDECREF(moddict);
/* Squirrel away the module's dictionary for the y2k check */
moddict = PyModule_GetDict(m);
Py_INCREF(moddict);
@@ -1051,4 +1056,9 @@ floatsleep(double secs)
return 0;
}
-
+/* export floattime to socketmodule.c */
+PyAPI_FUNC(double)
+_PyTime_FloatTime(void)
+{
+ return floattime();
+}
diff --git a/Modules/unicodedata.c b/Modules/unicodedata.c
index 1445a95..6f9c7e8 100644
--- a/Modules/unicodedata.c
+++ b/Modules/unicodedata.c
@@ -274,7 +274,7 @@ unicodedata_category(PyObject *self, PyObject *args)
PyDoc_STRVAR(unicodedata_bidirectional__doc__,
"bidirectional(unichr)\n\
\n\
-Returns the bidirectional category assigned to the Unicode character\n\
+Returns the bidirectional class assigned to the Unicode character\n\
unichr as string. If no such value is defined, an empty string is\n\
returned.");
@@ -506,7 +506,7 @@ nfd_nfkd(PyObject *self, PyObject *input, int k)
stackptr = 0;
isize = PyUnicode_GET_SIZE(input);
- /* Overallocate atmost 10 characters. */
+ /* Overallocate at most 10 characters. */
space = (isize > 10 ? 10 : isize) + isize;
result = PyUnicode_FromUnicode(NULL, space);
if (!result)
@@ -520,7 +520,7 @@ nfd_nfkd(PyObject *self, PyObject *input, int k)
while(stackptr) {
Py_UNICODE code = stack[--stackptr];
/* Hangul Decomposition adds three characters in
- a single step, so we need atleast that much room. */
+ a single step, so we need at least that much room. */
if (space < 3) {
Py_ssize_t newsize = PyString_GET_SIZE(result) + 10;
space += 10;
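
The docstring correction above uses the Unicode standard's term "bidirectional class" rather than "category", which names a different property (the general category). For example:

    import unicodedata

    print unicodedata.bidirectional(u"A")        # 'L'   (bidirectional class Left_To_Right)
    print unicodedata.bidirectional(u"\u05d0")   # 'R'   (HEBREW LETTER ALEF, Right_To_Left)
    print unicodedata.category(u"A")             # 'Lu'  (general category: uppercase letter)
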
diff --git a/Modules/zipimport.c b/Modules/zipimport.c
index b7a1b8d..7240cb4 100644
--- a/Modules/zipimport.c
+++ b/Modules/zipimport.c
@@ -709,7 +709,12 @@ read_directory(char *archive)
"'%.200s'", archive);
return NULL;
}
- fseek(fp, -22, SEEK_END);
+
+ if (fseek(fp, -22, SEEK_END) == -1) {
+ fclose(fp);
+ PyErr_Format(ZipImportError, "can't read Zip file: %s", archive);
+ return NULL;
+ }
header_position = ftell(fp);
if (fread(endof_central_dir, 1, 22, fp) != 22) {
fclose(fp);
@@ -743,11 +748,13 @@ read_directory(char *archive)
PyObject *t;
int err;
- fseek(fp, header_offset, 0); /* Start of file header */
+ if (fseek(fp, header_offset, 0) == -1) /* Start of file header */
+ goto fseek_error;
l = PyMarshal_ReadLongFromFile(fp);
if (l != 0x02014B50)
break; /* Bad: Central Dir File Header */
- fseek(fp, header_offset + 10, 0);
+ if (fseek(fp, header_offset + 10, 0) == -1)
+ goto fseek_error;
compress = PyMarshal_ReadShortFromFile(fp);
time = PyMarshal_ReadShortFromFile(fp);
date = PyMarshal_ReadShortFromFile(fp);
@@ -758,7 +765,8 @@ read_directory(char *archive)
header_size = 46 + name_size +
PyMarshal_ReadShortFromFile(fp) +
PyMarshal_ReadShortFromFile(fp);
- fseek(fp, header_offset + 42, 0);
+ if (fseek(fp, header_offset + 42, 0) == -1)
+ goto fseek_error;
file_offset = PyMarshal_ReadLongFromFile(fp) + arc_offset;
if (name_size > MAXPATHLEN)
name_size = MAXPATHLEN;
@@ -790,6 +798,11 @@ read_directory(char *archive)
PySys_WriteStderr("# zipimport: found %ld names in %s\n",
count, archive);
return files;
+fseek_error:
+ fclose(fp);
+ Py_XDECREF(files);
+ PyErr_Format(ZipImportError, "can't read Zip file: %s", archive);
+ return NULL;
error:
fclose(fp);
Py_XDECREF(files);
@@ -857,7 +870,12 @@ get_data(char *archive, PyObject *toc_entry)
}
/* Check to make sure the local file header is correct */
- fseek(fp, file_offset, 0);
+ if (fseek(fp, file_offset, 0) == -1) {
+ fclose(fp);
+ PyErr_Format(ZipImportError, "can't read Zip file: %s", archive);
+ return NULL;
+ }
+
l = PyMarshal_ReadLongFromFile(fp);
if (l != 0x04034B50) {
/* Bad: Local File Header */
@@ -867,7 +885,12 @@ get_data(char *archive, PyObject *toc_entry)
fclose(fp);
return NULL;
}
- fseek(fp, file_offset + 26, 0);
+ if (fseek(fp, file_offset + 26, 0) == -1) {
+ fclose(fp);
+ PyErr_Format(ZipImportError, "can't read Zip file: %s", archive);
+ return NULL;
+ }
+
l = 30 + PyMarshal_ReadShortFromFile(fp) +
PyMarshal_ReadShortFromFile(fp); /* local header size */
file_offset += l; /* Start of file data */
@@ -881,8 +904,13 @@ get_data(char *archive, PyObject *toc_entry)
buf = PyString_AsString(raw_data);
err = fseek(fp, file_offset, 0);
- if (err == 0)
+ if (err == 0) {
bytes_read = fread(buf, 1, data_size, fp);
+ } else {
+ fclose(fp);
+ PyErr_Format(ZipImportError, "can't read Zip file: %s", archive);
+ return NULL;
+ }
fclose(fp);
if (err || bytes_read != data_size) {
PyErr_SetString(PyExc_IOError,
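
Every fseek() call in zipimport now checks its return value and reports a ZipImportError instead of silently reading from the wrong offset. The error surface this feeds into can be seen with a file far too small to hold a zip end-of-central-directory record; the exact message may vary by platform:

    import zipimport

    with open("tiny.bin", "wb") as f:
        f.write("XX")                       # far too short to be a zip archive

    try:
        zipimport.zipimporter("tiny.bin")
    except zipimport.ZipImportError as e:
        print e                             # e.g. "can't read Zip file: tiny.bin"
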
diff --git a/Modules/zlib/deflate.c b/Modules/zlib/deflate.c
index 29ce1f6..4c6df00 100644
--- a/Modules/zlib/deflate.c
+++ b/Modules/zlib/deflate.c
@@ -163,7 +163,7 @@ struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
/* ===========================================================================
* Update a hash value with the given input byte
- * IN assertion: all calls to to UPDATE_HASH are made with consecutive
+ * IN assertion: all calls to UPDATE_HASH are made with consecutive
* input characters, so that a running hash key can be computed from the
* previous key instead of complete recalculation each time.
*/
@@ -176,7 +176,7 @@ struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
* the previous length of the hash chain.
* If this file is compiled with -DFASTEST, the compression level is forced
* to 1, and no hash chains are maintained.
- * IN assertion: all calls to to INSERT_STRING are made with consecutive
+ * IN assertion: all calls to INSERT_STRING are made with consecutive
* input characters and the first MIN_MATCH bytes of str are valid
* (except for the last MIN_MATCH-1 bytes of the input file).
*/
diff --git a/Modules/zlib/zlib.h b/Modules/zlib/zlib.h
index 0228179..fda7b19 100644
--- a/Modules/zlib/zlib.h
+++ b/Modules/zlib/zlib.h
@@ -764,7 +764,7 @@ ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm));
inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR
if no more input was provided, Z_DATA_ERROR if no flush point has been found,
or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
- case, the application may save the current current value of total_in which
+ case, the application may save the current value of total_in which
indicates where valid compressed data was found. In the error case, the
application may repeatedly call inflateSync, providing more input each time,
until success or end of the input data.
diff --git a/Modules/zlibmodule.c b/Modules/zlibmodule.c
index 035aa8e..519af94 100644
--- a/Modules/zlibmodule.c
+++ b/Modules/zlibmodule.c
@@ -101,7 +101,7 @@ zlib_error(z_stream zst, int err, char *msg)
PyDoc_STRVAR(compressobj__doc__,
"compressobj([level]) -- Return a compressor object.\n"
"\n"
-"Optional arg level is the compression level, in 1-9.");
+"Optional arg level is the compression level, in 0-9.");
PyDoc_STRVAR(decompressobj__doc__,
"decompressobj([wbits]) -- Return a decompressor object.\n"
@@ -132,7 +132,7 @@ newcompobject(PyTypeObject *type)
PyDoc_STRVAR(compress__doc__,
"compress(string[, level]) -- Returned compressed string.\n"
"\n"
-"Optional arg level is the compression level, in 1-9.");
+"Optional arg level is the compression level, in 0-9.");
static PyObject *
PyZlib_compress(PyObject *self, PyObject *args)
@@ -467,6 +467,49 @@ PyZlib_objcompress(compobject *self, PyObject *args)
return RetVal;
}
+/* Helper for objdecompress() and unflush(). Saves any unconsumed input data in
+ self->unused_data or self->unconsumed_tail, as appropriate. */
+static int
+save_unconsumed_input(compobject *self, int err)
+{
+ if (err == Z_STREAM_END) {
+ /* The end of the compressed data has been reached. Store the leftover
+ input data in self->unused_data. */
+ if (self->zst.avail_in > 0) {
+ Py_ssize_t old_size = PyString_GET_SIZE(self->unused_data);
+ Py_ssize_t new_size;
+ PyObject *new_data;
+ if (self->zst.avail_in > PY_SSIZE_T_MAX - old_size) {
+ PyErr_NoMemory();
+ return -1;
+ }
+ new_size = old_size + self->zst.avail_in;
+ new_data = PyString_FromStringAndSize(NULL, new_size);
+ if (new_data == NULL)
+ return -1;
+ Py_MEMCPY(PyString_AS_STRING(new_data),
+ PyString_AS_STRING(self->unused_data), old_size);
+ Py_MEMCPY(PyString_AS_STRING(new_data) + old_size,
+ self->zst.next_in, self->zst.avail_in);
+ Py_DECREF(self->unused_data);
+ self->unused_data = new_data;
+ self->zst.avail_in = 0;
+ }
+ }
+ if (self->zst.avail_in > 0 || PyString_GET_SIZE(self->unconsumed_tail)) {
+ /* This code handles two distinct cases:
+ 1. Output limit was reached. Save leftover input in unconsumed_tail.
+ 2. All input data was consumed. Clear unconsumed_tail. */
+ PyObject *new_data = PyString_FromStringAndSize(
+ (char *)self->zst.next_in, self->zst.avail_in);
+ if (new_data == NULL)
+ return -1;
+ Py_DECREF(self->unconsumed_tail);
+ self->unconsumed_tail = new_data;
+ }
+ return 0;
+}
+
PyDoc_STRVAR(decomp_decompress__doc__,
"decompress(data, max_length) -- Return a string containing the decompressed\n"
"version of the data.\n"
@@ -541,43 +584,20 @@ PyZlib_objdecompress(compobject *self, PyObject *args)
Py_END_ALLOW_THREADS
}
- if(max_length) {
- /* Not all of the compressed data could be accommodated in a buffer of
- the specified size. Return the unconsumed tail in an attribute. */
- Py_DECREF(self->unconsumed_tail);
- self->unconsumed_tail = PyString_FromStringAndSize((char *)self->zst.next_in,
- self->zst.avail_in);
- }
- else if (PyString_GET_SIZE(self->unconsumed_tail) > 0) {
- /* All of the compressed data was consumed. Clear unconsumed_tail. */
- Py_DECREF(self->unconsumed_tail);
- self->unconsumed_tail = PyString_FromStringAndSize("", 0);
- }
- if(!self->unconsumed_tail) {
+ if (save_unconsumed_input(self, err) < 0) {
Py_DECREF(RetVal);
RetVal = NULL;
goto error;
}
- /* The end of the compressed data has been reached, so set the
- unused_data attribute to a string containing the remainder of the
- data in the string. Note that this is also a logical place to call
- inflateEnd, but the old behaviour of only calling it on flush() is
- preserved.
- */
- if (err == Z_STREAM_END) {
- Py_XDECREF(self->unused_data); /* Free original empty string */
- self->unused_data = PyString_FromStringAndSize(
- (char *)self->zst.next_in, self->zst.avail_in);
- if (self->unused_data == NULL) {
- Py_DECREF(RetVal);
- goto error;
- }
+ /* This is the logical place to call inflateEnd, but the old behaviour of
+ only calling it on flush() is preserved. */
+
+ if (err != Z_STREAM_END && err != Z_OK && err != Z_BUF_ERROR) {
/* We will only get Z_BUF_ERROR if the output buffer was full
but there wasn't more output when we tried again, so it is
not an error condition.
*/
- } else if (err != Z_OK && err != Z_BUF_ERROR) {
zlib_error(self->zst, err, "while decompressing");
Py_DECREF(RetVal);
RetVal = NULL;
@@ -810,6 +830,8 @@ PyZlib_unflush(compobject *self, PyObject *args)
ENTER_ZLIB
start_total_out = self->zst.total_out;
+ self->zst.avail_in = PyString_GET_SIZE(self->unconsumed_tail);
+ self->zst.next_in = (Byte *)PyString_AS_STRING(self->unconsumed_tail);
self->zst.avail_out = length;
self->zst.next_out = (Byte *)PyString_AS_STRING(retval);
@@ -831,6 +853,12 @@ PyZlib_unflush(compobject *self, PyObject *args)
Py_END_ALLOW_THREADS
}
+ if (save_unconsumed_input(self, err) < 0) {
+ Py_DECREF(retval);
+ retval = NULL;
+ goto error;
+ }
+
/* If flushmode is Z_FINISH, we also have to call deflateEnd() to free
various data structures. Note we should only get Z_STREAM_END when
flushmode is Z_FINISH */
@@ -844,6 +872,7 @@ PyZlib_unflush(compobject *self, PyObject *args)
goto error;
}
}
+
_PyString_Resize(&retval, self->zst.total_out - start_total_out);
error:
@@ -1010,7 +1039,7 @@ PyDoc_STRVAR(zlib_module_documentation,
"zlib library, which is based on GNU zip.\n"
"\n"
"adler32(string[, start]) -- Compute an Adler-32 checksum.\n"
-"compress(string[, level]) -- Compress string, with compression level in 1-9.\n"
+"compress(string[, level]) -- Compress string, with compression level in 0-9.\n"
"compressobj([level]) -- Return a compressor object.\n"
"crc32(string[, start]) -- Compute a CRC-32 checksum.\n"
"decompress(string,[wbits],[bufsize]) -- Decompresses a compressed string.\n"
diff --git a/Objects/abstract.c b/Objects/abstract.c
index 81c19e1..5707eb2 100644
--- a/Objects/abstract.c
+++ b/Objects/abstract.c
@@ -126,7 +126,7 @@ _PyObject_LengthHint(PyObject *o, Py_ssize_t defaultvalue)
PyErr_Clear();
return defaultvalue;
}
- rv = PyLong_Check(ro) ? PyLong_AsSsize_t(ro) : defaultvalue;
+ rv = PyNumber_Check(ro) ? PyInt_AsSsize_t(ro) : defaultvalue;
Py_DECREF(ro);
return rv;
}
@@ -2617,10 +2617,8 @@ PyObject_CallMethod(PyObject *o, char *name, char *format, ...)
return null_error();
func = PyObject_GetAttrString(o, name);
- if (func == NULL) {
- PyErr_SetString(PyExc_AttributeError, name);
- return 0;
- }
+ if (func == NULL)
+ return NULL;
if (!PyCallable_Check(func)) {
type_error("attribute of type '%.200s' is not callable", func);
@@ -2656,10 +2654,8 @@ _PyObject_CallMethod_SizeT(PyObject *o, char *name, char *format, ...)
return null_error();
func = PyObject_GetAttrString(o, name);
- if (func == NULL) {
- PyErr_SetString(PyExc_AttributeError, name);
- return 0;
- }
+ if (func == NULL)
+ return NULL;
if (!PyCallable_Check(func)) {
type_error("attribute of type '%.200s' is not callable", func);
diff --git a/Objects/bufferobject.c b/Objects/bufferobject.c
index c52f0bc..bcfab71 100644
--- a/Objects/bufferobject.c
+++ b/Objects/bufferobject.c
@@ -88,7 +88,7 @@ get_buf(PyBufferObject *self, void **ptr, Py_ssize_t *size,
*size = count;
else
*size = self->b_size;
- if (offset + *size > count)
+ if (*size > count - offset)
*size = count - offset;
}
return 1;
@@ -802,6 +802,16 @@ buffer_getcharbuf(PyBufferObject *self, Py_ssize_t idx, const char **pp)
return size;
}
+static int buffer_getbuffer(PyBufferObject *self, Py_buffer *buf, int flags)
+{
+ void *ptr;
+ Py_ssize_t size;
+ if (!get_buf(self, &ptr, &size, ANY_BUFFER))
+ return -1;
+ return PyBuffer_FillInfo(buf, (PyObject*)self, ptr, size,
+ self->b_readonly, flags);
+}
+
static PySequenceMethods buffer_as_sequence = {
(lenfunc)buffer_length, /*sq_length*/
(binaryfunc)buffer_concat, /*sq_concat*/
@@ -823,6 +833,7 @@ static PyBufferProcs buffer_as_buffer = {
(writebufferproc)buffer_getwritebuf,
(segcountproc)buffer_getsegcount,
(charbufferproc)buffer_getcharbuf,
+ (getbufferproc)buffer_getbuffer,
};
PyTypeObject PyBuffer_Type = {
@@ -845,7 +856,7 @@ PyTypeObject PyBuffer_Type = {
PyObject_GenericGetAttr, /* tp_getattro */
0, /* tp_setattro */
&buffer_as_buffer, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GETCHARBUFFER, /* tp_flags */
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GETCHARBUFFER | Py_TPFLAGS_HAVE_NEWBUFFER, /* tp_flags */
buffer_doc, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
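
buffer_getbuffer() together with the Py_TPFLAGS_HAVE_NEWBUFFER flag lets old-style buffer objects participate in the new buffer protocol, so consumers such as memoryview() can wrap them. A hedged sketch of what that should enable on a build with this change:

    buf = buffer("abcdef", 2)     # old-style, read-only buffer starting at offset 2
    m = memoryview(buf)           # works once buffer implements bf_getbuffer
    print m.tobytes()             # 'cdef'
    print len(m), m.readonly      # 4 True
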
diff --git a/Objects/bytearrayobject.c b/Objects/bytearrayobject.c
index a40c0ab..fd0ce7c 100644
--- a/Objects/bytearrayobject.c
+++ b/Objects/bytearrayobject.c
@@ -636,8 +636,14 @@ bytearray_ass_subscript(PyByteArrayObject *self, PyObject *index, PyObject *valu
needed = 0;
}
else if (values == (PyObject *)self || !PyByteArray_Check(values)) {
- /* Make a copy and call this function recursively */
int err;
+ if (PyNumber_Check(values) || PyUnicode_Check(values)) {
+ PyErr_SetString(PyExc_TypeError,
+ "can assign only bytes, buffers, or iterables "
+ "of ints in range(0, 256)");
+ return -1;
+ }
+ /* Make a copy and call this function recursively */
values = PyByteArray_FromObject(values);
if (values == NULL)
return -1;
@@ -988,10 +994,8 @@ bytearray_repr(PyByteArrayObject *self)
*p++ = *quote_postfix++;
}
*p = '\0';
- if (_PyString_Resize(&v, (p - PyString_AS_STRING(v)))) {
- Py_DECREF(v);
- return NULL;
- }
+ /* v is cleared on error */
+ (void)_PyString_Resize(&v, (p - PyString_AS_STRING(v)));
return v;
}
}
@@ -2296,8 +2300,10 @@ bytearray_extend(PyByteArrayObject *self, PyObject *arg)
}
bytearray_obj = PyByteArray_FromStringAndSize(NULL, buf_size);
- if (bytearray_obj == NULL)
+ if (bytearray_obj == NULL) {
+ Py_DECREF(it);
return NULL;
+ }
buf = PyByteArray_AS_STRING(bytearray_obj);
while ((item = PyIter_Next(it)) != NULL) {
@@ -2330,8 +2336,10 @@ bytearray_extend(PyByteArrayObject *self, PyObject *arg)
return NULL;
}
- if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1)
+ if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1) {
+ Py_DECREF(bytearray_obj);
return NULL;
+ }
Py_DECREF(bytearray_obj);
Py_RETURN_NONE;
@@ -2645,7 +2653,7 @@ bytearray_join(PyByteArrayObject *self, PyObject *it)
}
PyDoc_STRVAR(splitlines__doc__,
-"B.splitlines([keepends]) -> list of lines\n\
+"B.splitlines(keepends=False) -> list of lines\n\
\n\
Return a list of the lines in B, breaking at line boundaries.\n\
Line breaks are not included in the resulting list unless keepends\n\
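
bytearray slice assignment previously routed bare ints and unicode strings through PyByteArray_FromObject(), so b[1:3] = 7 quietly replaced the slice with seven zero bytes; with the type check added above, numbers and unicode now raise TypeError while byte strings and iterables of small ints keep working:

    b = bytearray("hello")
    b[1:3] = "EL"                 # byte strings are still fine
    print b                       # hELlo
    try:
        b[1:3] = 7                # previously behaved like assigning bytearray(7)
    except TypeError as e:
        print e
    try:
        b[1:3] = u"xy"            # unicode is rejected as well
    except TypeError as e:
        print e
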
diff --git a/Objects/classobject.c b/Objects/classobject.c
index 161906a..2c9c216 100644
--- a/Objects/classobject.c
+++ b/Objects/classobject.c
@@ -225,10 +225,16 @@ static PyObject *
class_getattr(register PyClassObject *op, PyObject *name)
{
register PyObject *v;
- register char *sname = PyString_AsString(name);
+ register char *sname;
PyClassObject *klass;
descrgetfunc f;
+ if (!PyString_Check(name)) {
+ PyErr_SetString(PyExc_TypeError, "attribute name must be a string");
+ return NULL;
+ }
+
+ sname = PyString_AsString(name);
if (sname[0] == '_' && sname[1] == '_') {
if (strcmp(sname, "__dict__") == 0) {
if (PyEval_GetRestricted()) {
@@ -336,6 +342,10 @@ class_setattr(PyClassObject *op, PyObject *name, PyObject *v)
"classes are read-only in restricted mode");
return -1;
}
+ if (!PyString_Check(name)) {
+ PyErr_SetString(PyExc_TypeError, "attribute name must be a string");
+ return -1;
+ }
sname = PyString_AsString(name);
if (sname[0] == '_' && sname[1] == '_') {
Py_ssize_t n = PyString_Size(name);
@@ -699,7 +709,14 @@ static PyObject *
instance_getattr1(register PyInstanceObject *inst, PyObject *name)
{
register PyObject *v;
- register char *sname = PyString_AsString(name);
+ register char *sname;
+
+ if (!PyString_Check(name)) {
+ PyErr_SetString(PyExc_TypeError, "attribute name must be a string");
+ return NULL;
+ }
+
+ sname = PyString_AsString(name);
if (sname[0] == '_' && sname[1] == '_') {
if (strcmp(sname, "__dict__") == 0) {
if (PyEval_GetRestricted()) {
@@ -810,7 +827,14 @@ static int
instance_setattr(PyInstanceObject *inst, PyObject *name, PyObject *v)
{
PyObject *func, *args, *res, *tmp;
- char *sname = PyString_AsString(name);
+ char *sname;
+
+ if (!PyString_Check(name)) {
+ PyErr_SetString(PyExc_TypeError, "attribute name must be a string");
+ return -1;
+ }
+
+ sname = PyString_AsString(name);
if (sname[0] == '_' && sname[1] == '_') {
Py_ssize_t n = PyString_Size(name);
if (sname[n-1] == '_' && sname[n-2] == '_') {
diff --git a/Objects/complexobject.c b/Objects/complexobject.c
index 677ac0e..5ee0c15 100644
--- a/Objects/complexobject.c
+++ b/Objects/complexobject.c
@@ -885,7 +885,7 @@ complex_conjugate(PyObject *self)
PyDoc_STRVAR(complex_conjugate_doc,
"complex.conjugate() -> complex\n"
"\n"
-"Returns the complex conjugate of its argument. (3-4j).conjugate() == 3+4j.");
+"Return the complex conjugate of its argument. (3-4j).conjugate() == 3+4j.");
static PyObject *
complex_getnewargs(PyComplexObject *v)
@@ -897,7 +897,7 @@ complex_getnewargs(PyComplexObject *v)
PyDoc_STRVAR(complex__format__doc,
"complex.__format__() -> str\n"
"\n"
-"Converts to a string according to format_spec.");
+"Convert to a string according to format_spec.");
static PyObject *
complex__format__(PyObject* self, PyObject* args)
diff --git a/Objects/descrobject.c b/Objects/descrobject.c
index 8e7ea7d..3e9b034 100644
--- a/Objects/descrobject.c
+++ b/Objects/descrobject.c
@@ -254,14 +254,51 @@ static PyObject *
classmethoddescr_call(PyMethodDescrObject *descr, PyObject *args,
PyObject *kwds)
{
- PyObject *func, *result;
+ Py_ssize_t argc;
+ PyObject *self, *func, *result;
- func = PyCFunction_New(descr->d_method, (PyObject *)descr->d_type);
- if (func == NULL)
+ /* Make sure that the first argument is acceptable as 'self' */
+ assert(PyTuple_Check(args));
+ argc = PyTuple_GET_SIZE(args);
+ if (argc < 1) {
+ PyErr_Format(PyExc_TypeError,
+ "descriptor '%s' of '%.100s' "
+ "object needs an argument",
+ descr_name((PyDescrObject *)descr),
+ descr->d_type->tp_name);
+ return NULL;
+ }
+ self = PyTuple_GET_ITEM(args, 0);
+ if (!PyType_Check(self)) {
+ PyErr_Format(PyExc_TypeError,
+ "descriptor '%s' requires a type "
+ "but received a '%.100s'",
+ descr_name((PyDescrObject *)descr),
+ self->ob_type->tp_name);
return NULL;
+ }
+ if (!PyType_IsSubtype((PyTypeObject *)self, descr->d_type)) {
+ PyErr_Format(PyExc_TypeError,
+ "descriptor '%s' "
+ "requires a subtype of '%.100s' "
+ "but received '%.100s",
+ descr_name((PyDescrObject *)descr),
+ descr->d_type->tp_name,
+ self->ob_type->tp_name);
+ return NULL;
+ }
+ func = PyCFunction_New(descr->d_method, self);
+ if (func == NULL)
+ return NULL;
+ args = PyTuple_GetSlice(args, 1, argc);
+ if (args == NULL) {
+ Py_DECREF(func);
+ return NULL;
+ }
result = PyEval_CallObjectWithKeywords(func, args, kwds);
Py_DECREF(func);
+ Py_DECREF(args);
return result;
}
@@ -1326,21 +1363,25 @@ PyDoc_STRVAR(property_doc,
"\n"
"fget is a function to be used for getting an attribute value, and likewise\n"
"fset is a function for setting, and fdel a function for del'ing, an\n"
-"attribute. Typical use is to define a managed attribute x:\n"
+"attribute. Typical use is to define a managed attribute x:\n\n"
"class C(object):\n"
" def getx(self): return self._x\n"
" def setx(self, value): self._x = value\n"
" def delx(self): del self._x\n"
" x = property(getx, setx, delx, \"I'm the 'x' property.\")\n"
"\n"
-"Decorators make defining new properties or modifying existing ones easy:\n"
+"Decorators make defining new properties or modifying existing ones easy:\n\n"
"class C(object):\n"
" @property\n"
-" def x(self): return self._x\n"
+" def x(self):\n"
+" \"I am the 'x' property.\"\n"
+" return self._x\n"
" @x.setter\n"
-" def x(self, value): self._x = value\n"
+" def x(self, value):\n"
+" self._x = value\n"
" @x.deleter\n"
-" def x(self): del self._x\n"
+" def x(self):\n"
+" del self._x\n"
);
static int
diff --git a/Objects/dictobject.c b/Objects/dictobject.c
index ac99cfb..39e7035 100644
--- a/Objects/dictobject.c
+++ b/Objects/dictobject.c
@@ -502,27 +502,16 @@ _PyDict_MaybeUntrack(PyObject *op)
_PyObject_GC_UNTRACK(op);
}
-
/*
-Internal routine to insert a new item into the table.
-Used both by the internal resize routine and by the public insert routine.
-Eats a reference to key and one to value.
-Returns -1 if an error occurred, or 0 on success.
+Internal routine to insert a new item into the table when you already have the entry object.
+Used by insertdict.
*/
static int
-insertdict(register PyDictObject *mp, PyObject *key, long hash, PyObject *value)
+insertdict_by_entry(register PyDictObject *mp, PyObject *key, long hash,
+ PyDictEntry *ep, PyObject *value)
{
PyObject *old_value;
- register PyDictEntry *ep;
- typedef PyDictEntry *(*lookupfunc)(PyDictObject *, PyObject *, long);
- assert(mp->ma_lookup != NULL);
- ep = mp->ma_lookup(mp, key, hash);
- if (ep == NULL) {
- Py_DECREF(key);
- Py_DECREF(value);
- return -1;
- }
MAINTAIN_TRACKING(mp, key, value);
if (ep->me_value != NULL) {
old_value = ep->me_value;
@@ -545,6 +534,28 @@ insertdict(register PyDictObject *mp, PyObject *key, long hash, PyObject *value)
return 0;
}
+
+/*
+Internal routine to insert a new item into the table.
+Used both by the internal resize routine and by the public insert routine.
+Eats a reference to key and one to value.
+Returns -1 if an error occurred, or 0 on success.
+*/
+static int
+insertdict(register PyDictObject *mp, PyObject *key, long hash, PyObject *value)
+{
+ register PyDictEntry *ep;
+
+ assert(mp->ma_lookup != NULL);
+ ep = mp->ma_lookup(mp, key, hash);
+ if (ep == NULL) {
+ Py_DECREF(key);
+ Py_DECREF(value);
+ return -1;
+ }
+ return insertdict_by_entry(mp, key, hash, ep, value);
+}
+
/*
Internal routine used by dictresize() to insert an item which is
known to be absent from the dict. This routine also assumes that
@@ -738,6 +749,45 @@ PyDict_GetItem(PyObject *op, PyObject *key)
return ep->me_value;
}
+static int
+dict_set_item_by_hash_or_entry(register PyObject *op, PyObject *key,
+ long hash, PyDictEntry *ep, PyObject *value)
+{
+ register PyDictObject *mp;
+ register Py_ssize_t n_used;
+
+ mp = (PyDictObject *)op;
+ assert(mp->ma_fill <= mp->ma_mask); /* at least one empty slot */
+ n_used = mp->ma_used;
+ Py_INCREF(value);
+ Py_INCREF(key);
+ if (ep == NULL) {
+ if (insertdict(mp, key, hash, value) != 0)
+ return -1;
+ }
+ else {
+ if (insertdict_by_entry(mp, key, hash, ep, value) != 0)
+ return -1;
+ }
+ /* If we added a key, we can safely resize. Otherwise just return!
+ * If fill >= 2/3 size, adjust size. Normally, this doubles or
+ * quadruples the size, but it's also possible for the dict to shrink
+ * (if ma_fill is much larger than ma_used, meaning a lot of dict
+ * keys have been deleted).
+ *
+ * Quadrupling the size improves average dictionary sparseness
+ * (reducing collisions) at the cost of some memory and iteration
+ * speed (which loops over every possible entry). It also halves
+ * the number of expensive resize operations in a growing dictionary.
+ *
+ * Very large dictionaries (over 50K items) use doubling instead.
+ * This may help applications with severe memory constraints.
+ */
+ if (!(mp->ma_used > n_used && mp->ma_fill*3 >= (mp->ma_mask+1)*2))
+ return 0;
+ return dictresize(mp, (mp->ma_used > 50000 ? 2 : 4) * mp->ma_used);
+}
+
/* CAUTION: PyDict_SetItem() must guarantee that it won't resize the
* dictionary if it's merely replacing the value for an existing key.
* This means that it's safe to loop over a dictionary with PyDict_Next()
@@ -747,9 +797,7 @@ PyDict_GetItem(PyObject *op, PyObject *key)
int
PyDict_SetItem(register PyObject *op, PyObject *key, PyObject *value)
{
- register PyDictObject *mp;
register long hash;
- register Py_ssize_t n_used;
if (!PyDict_Check(op)) {
PyErr_BadInternalCall();
@@ -757,7 +805,6 @@ PyDict_SetItem(register PyObject *op, PyObject *key, PyObject *value)
}
assert(key);
assert(value);
- mp = (PyDictObject *)op;
if (PyString_CheckExact(key)) {
hash = ((PyStringObject *)key)->ob_shash;
if (hash == -1)
@@ -768,29 +815,7 @@ PyDict_SetItem(register PyObject *op, PyObject *key, PyObject *value)
if (hash == -1)
return -1;
}
- assert(mp->ma_fill <= mp->ma_mask); /* at least one empty slot */
- n_used = mp->ma_used;
- Py_INCREF(value);
- Py_INCREF(key);
- if (insertdict(mp, key, hash, value) != 0)
- return -1;
- /* If we added a key, we can safely resize. Otherwise just return!
- * If fill >= 2/3 size, adjust size. Normally, this doubles or
- * quaduples the size, but it's also possible for the dict to shrink
- * (if ma_fill is much larger than ma_used, meaning a lot of dict
- * keys have been * deleted).
- *
- * Quadrupling the size improves average dictionary sparseness
- * (reducing collisions) at the cost of some memory and iteration
- * speed (which loops over every possible entry). It also halves
- * the number of expensive resize operations in a growing dictionary.
- *
- * Very large dictionaries (over 50K items) use doubling instead.
- * This may help applications with severe memory constraints.
- */
- if (!(mp->ma_used > n_used && mp->ma_fill*3 >= (mp->ma_mask+1)*2))
- return 0;
- return dictresize(mp, (mp->ma_used > 50000 ? 2 : 4) * mp->ma_used);
+ return dict_set_item_by_hash_or_entry(op, key, hash, NULL, value);
}
int
@@ -1328,49 +1353,50 @@ dict_fromkeys(PyObject *cls, PyObject *args)
if (d == NULL)
return NULL;
- if (PyDict_CheckExact(d) && PyDict_CheckExact(seq)) {
- PyDictObject *mp = (PyDictObject *)d;
- PyObject *oldvalue;
- Py_ssize_t pos = 0;
- PyObject *key;
- long hash;
-
- if (dictresize(mp, Py_SIZE(seq))) {
- Py_DECREF(d);
- return NULL;
- }
+ if (PyDict_CheckExact(d) && ((PyDictObject *)d)->ma_used == 0) {
+ if (PyDict_CheckExact(seq)) {
+ PyDictObject *mp = (PyDictObject *)d;
+ PyObject *oldvalue;
+ Py_ssize_t pos = 0;
+ PyObject *key;
+ long hash;
- while (_PyDict_Next(seq, &pos, &key, &oldvalue, &hash)) {
- Py_INCREF(key);
- Py_INCREF(value);
- if (insertdict(mp, key, hash, value)) {
+ if (dictresize(mp, Py_SIZE(seq))) {
Py_DECREF(d);
return NULL;
}
- }
- return d;
- }
- if (PyDict_CheckExact(d) && PyAnySet_CheckExact(seq)) {
- PyDictObject *mp = (PyDictObject *)d;
- Py_ssize_t pos = 0;
- PyObject *key;
- long hash;
-
- if (dictresize(mp, PySet_GET_SIZE(seq))) {
- Py_DECREF(d);
- return NULL;
+ while (_PyDict_Next(seq, &pos, &key, &oldvalue, &hash)) {
+ Py_INCREF(key);
+ Py_INCREF(value);
+ if (insertdict(mp, key, hash, value)) {
+ Py_DECREF(d);
+ return NULL;
+ }
+ }
+ return d;
}
+ if (PyAnySet_CheckExact(seq)) {
+ PyDictObject *mp = (PyDictObject *)d;
+ Py_ssize_t pos = 0;
+ PyObject *key;
+ long hash;
- while (_PySet_NextEntry(seq, &pos, &key, &hash)) {
- Py_INCREF(key);
- Py_INCREF(value);
- if (insertdict(mp, key, hash, value)) {
+ if (dictresize(mp, PySet_GET_SIZE(seq))) {
Py_DECREF(d);
return NULL;
}
+
+ while (_PySet_NextEntry(seq, &pos, &key, &hash)) {
+ Py_INCREF(key);
+ Py_INCREF(value);
+ if (insertdict(mp, key, hash, value)) {
+ Py_DECREF(d);
+ return NULL;
+ }
+ }
+ return d;
}
- return d;
}
it = PyObject_GetIter(seq);
@@ -1957,9 +1983,9 @@ dict_setdefault(register PyDictObject *mp, PyObject *args)
return NULL;
val = ep->me_value;
if (val == NULL) {
- val = failobj;
- if (PyDict_SetItem((PyObject*)mp, key, failobj))
- val = NULL;
+ if (dict_set_item_by_hash_or_entry((PyObject*)mp, key, hash, ep,
+ failobj) == 0)
+ val = failobj;
}
Py_XINCREF(val);
return val;
@@ -2893,6 +2919,10 @@ dictview_repr(dictviewobject *dv)
return NULL;
seq_str = PyObject_Repr(seq);
+ if (seq_str == NULL) {
+ Py_DECREF(seq);
+ return NULL;
+ }
result = PyString_FromFormat("%s(%s)", Py_TYPE(dv)->tp_name,
PyString_AS_STRING(seq_str));
Py_DECREF(seq_str);
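
The resize policy spelled out in the comment at the top of dict_set_item_by_hash_or_entry() can be tried outside the interpreter. Below is a minimal standalone sketch of that growth rule only (plain C; the name toy_new_size and its arguments are invented for illustration, this is not CPython code):

    #include <stdio.h>

    /* Toy model of the growth rule: grow only when a key was actually added
     * and the table is at least 2/3 full counting dummy slots ("fill"),
     * quadrupling the used count for ordinary dicts and doubling it for very
     * large ones (> 50000 items).  dictresize() then rounds the request up
     * to a power of two. */
    static long toy_new_size(long used, long fill, long mask, int key_was_added)
    {
        if (!(key_was_added && fill * 3 >= (mask + 1) * 2))
            return mask + 1;                  /* keep the current table size */
        return (used > 50000 ? 2 : 4) * used; /* value passed to dictresize() */
    }

    int main(void)
    {
        /* 8-slot table, 6 slots ever occupied: 6*3 >= 8*2, so the request is
         * 4*5 = 20, which dictresize() would round up to 32 slots. */
        printf("%ld\n", toy_new_size(5, 6, 7, 1));
        return 0;
    }
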
diff --git a/Objects/exceptions.c b/Objects/exceptions.c
index 49f6d30..e165528 100644
--- a/Objects/exceptions.c
+++ b/Objects/exceptions.c
@@ -349,8 +349,7 @@ BaseException_set_message(PyBaseExceptionObject *self, PyObject *val)
if (PyDict_DelItemString(self->dict, "message") < 0)
return -1;
}
- Py_XDECREF(self->message);
- self->message = NULL;
+ Py_CLEAR(self->message);
return 0;
}
@@ -1649,6 +1648,10 @@ UnicodeEncodeError_str(PyObject *self)
PyObject *reason_str = NULL;
PyObject *encoding_str = NULL;
+ if (!uself->object)
+ /* Not properly initialized. */
+ return PyUnicode_FromString("");
+
/* Get reason and encoding as strings, which they might not be if
     they've been modified after we were constructed. */
reason_str = PyObject_Str(uself->reason);
@@ -1734,6 +1737,10 @@ UnicodeDecodeError_str(PyObject *self)
PyObject *reason_str = NULL;
PyObject *encoding_str = NULL;
+ if (!uself->object)
+ /* Not properly initialized. */
+ return PyUnicode_FromString("");
+
/* Get reason and encoding as strings, which they might not be if
     they've been modified after we were constructed. */
reason_str = PyObject_Str(uself->reason);
@@ -1831,6 +1838,10 @@ UnicodeTranslateError_str(PyObject *self)
PyObject *result = NULL;
PyObject *reason_str = NULL;
+ if (!uself->object)
+ /* Not properly initialized. */
+ return PyUnicode_FromString("");
+
/* Get reason as a string, which it might not be if it's been
     modified after we were constructed. */
reason_str = PyObject_Str(uself->reason);
diff --git a/Objects/fileobject.c b/Objects/fileobject.c
index 737ebb7..5594058 100644
--- a/Objects/fileobject.c
+++ b/Objects/fileobject.c
@@ -493,9 +493,10 @@ PyFile_FromFile(FILE *fp, char *name, char *mode, int (*close)(FILE *))
PyObject *
PyFile_FromString(char *name, char *mode)
{
+ extern int fclose(FILE *);
PyFileObject *f;
- f = (PyFileObject *)PyFile_FromFile((FILE *)NULL, name, mode, NULL);
+ f = (PyFileObject *)PyFile_FromFile((FILE *)NULL, name, mode, fclose);
if (f != NULL) {
if (open_the_file(f, name, mode) == NULL) {
Py_DECREF(f);
@@ -635,11 +636,13 @@ file_dealloc(PyFileObject *f)
static PyObject *
file_repr(PyFileObject *f)
{
+ PyObject *ret = NULL;
+ PyObject *name = NULL;
if (PyUnicode_Check(f->f_name)) {
#ifdef Py_USING_UNICODE
- PyObject *ret = NULL;
- PyObject *name = PyUnicode_AsUnicodeEscapeString(f->f_name);
- const char *name_str = name ? PyString_AsString(name) : "?";
+ const char *name_str;
+ name = PyUnicode_AsUnicodeEscapeString(f->f_name);
+ name_str = name ? PyString_AsString(name) : "?";
ret = PyString_FromFormat("<%s file u'%s', mode '%s' at %p>",
f->f_fp == NULL ? "closed" : "open",
name_str,
@@ -649,11 +652,16 @@ file_repr(PyFileObject *f)
return ret;
#endif
} else {
- return PyString_FromFormat("<%s file '%s', mode '%s' at %p>",
+ name = PyObject_Repr(f->f_name);
+ if (name == NULL)
+ return NULL;
+ ret = PyString_FromFormat("<%s file %s, mode '%s' at %p>",
f->f_fp == NULL ? "closed" : "open",
- PyString_AsString(f->f_name),
+ PyString_AsString(name),
PyString_AsString(f->f_mode),
f);
+ Py_XDECREF(name);
+ return ret;
}
}
@@ -1072,12 +1080,23 @@ file_read(PyFileObject *f, PyObject *args)
return NULL;
bytesread = 0;
for (;;) {
+ int interrupted;
FILE_BEGIN_ALLOW_THREADS(f)
errno = 0;
chunksize = Py_UniversalNewlineFread(BUF(v) + bytesread,
buffersize - bytesread, f->f_fp, (PyObject *)f);
+ interrupted = ferror(f->f_fp) && errno == EINTR;
FILE_END_ALLOW_THREADS(f)
+ if (interrupted) {
+ clearerr(f->f_fp);
+ if (PyErr_CheckSignals()) {
+ Py_DECREF(v);
+ return NULL;
+ }
+ }
if (chunksize == 0) {
+ if (interrupted)
+ continue;
if (!ferror(f->f_fp))
break;
clearerr(f->f_fp);
@@ -1092,7 +1111,7 @@ file_read(PyFileObject *f, PyObject *args)
return NULL;
}
bytesread += chunksize;
- if (bytesread < buffersize) {
+ if (bytesread < buffersize && !interrupted) {
clearerr(f->f_fp);
break;
}
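
The pattern added here (and repeated in file_readinto(), get_line() and file_readlines() below) is the standard retry-on-EINTR loop: when fread() is cut short by a signal, clear the stream error, let pending signal handlers run, and resume the read instead of raising IOError. A stripped-down sketch of the same idiom without the Python-specific parts (PyErr_CheckSignals, the GIL macros):

    #include <errno.h>
    #include <stdio.h>

    /* Read up to n bytes, resuming after reads interrupted by a signal.
     * Plain C illustration of the idiom; a real caller would run its
     * signal handlers where the comment says so. */
    static size_t read_retry_eintr(char *buf, size_t n, FILE *fp)
    {
        size_t done = 0;
        while (done < n) {
            errno = 0;
            done += fread(buf + done, 1, n - done, fp);
            if (done == n)
                break;
            if (ferror(fp) && errno == EINTR) {
                clearerr(fp);   /* swallow the interruption ...              */
                continue;       /* ... (handle signals here) and read again  */
            }
            break;              /* genuine end-of-file or a real I/O error   */
        }
        return done;
    }

    int main(void)
    {
        char buf[4096];
        size_t n = read_retry_eintr(buf, sizeof buf, stdin);
        fprintf(stderr, "read %zu bytes\n", n);
        return 0;
    }
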
@@ -1133,12 +1152,23 @@ file_readinto(PyFileObject *f, PyObject *args)
ntodo = pbuf.len;
ndone = 0;
while (ntodo > 0) {
+ int interrupted;
FILE_BEGIN_ALLOW_THREADS(f)
errno = 0;
nnow = Py_UniversalNewlineFread(ptr+ndone, ntodo, f->f_fp,
(PyObject *)f);
+ interrupted = ferror(f->f_fp) && errno == EINTR;
FILE_END_ALLOW_THREADS(f)
+ if (interrupted) {
+ clearerr(f->f_fp);
+ if (PyErr_CheckSignals()) {
+ PyBuffer_Release(&pbuf);
+ return NULL;
+ }
+ }
if (nnow == 0) {
+ if (interrupted)
+ continue;
if (!ferror(f->f_fp))
break;
PyErr_SetFromErrno(PyExc_IOError);
@@ -1426,8 +1456,25 @@ get_line(PyFileObject *f, int n)
*buf++ = c;
if (c == '\n') break;
}
- if ( c == EOF && skipnextlf )
- newlinetypes |= NEWLINE_CR;
+ if (c == EOF) {
+ if (ferror(fp) && errno == EINTR) {
+ FUNLOCKFILE(fp);
+ FILE_ABORT_ALLOW_THREADS(f)
+ f->f_newlinetypes = newlinetypes;
+ f->f_skipnextlf = skipnextlf;
+
+ if (PyErr_CheckSignals()) {
+ Py_DECREF(v);
+ return NULL;
+ }
+ /* We executed Python signal handlers and got no exception.
+ * Now back to reading the line where we left off. */
+ clearerr(fp);
+ continue;
+ }
+ if (skipnextlf)
+ newlinetypes |= NEWLINE_CR;
+ }
} else /* If not universal newlines use the normal loop */
while ((c = GETC(fp)) != EOF &&
(*buf++ = c) != '\n' &&
@@ -1441,6 +1488,16 @@ get_line(PyFileObject *f, int n)
break;
if (c == EOF) {
if (ferror(fp)) {
+ if (errno == EINTR) {
+ if (PyErr_CheckSignals()) {
+ Py_DECREF(v);
+ return NULL;
+ }
+ /* We executed Python signal handlers and got no exception.
+ * Now back to reading the line where we left off. */
+ clearerr(fp);
+ continue;
+ }
PyErr_SetFromErrno(PyExc_IOError);
clearerr(fp);
Py_DECREF(v);
@@ -1616,7 +1673,7 @@ file_readlines(PyFileObject *f, PyObject *args)
size_t totalread = 0;
char *p, *q, *end;
int err;
- int shortread = 0;
+ int shortread = 0; /* bool, did the previous read come up short? */
if (f->f_fp == NULL)
return err_closed();
@@ -1646,6 +1703,14 @@ file_readlines(PyFileObject *f, PyObject *args)
sizehint = 0;
if (!ferror(f->f_fp))
break;
+ if (errno == EINTR) {
+ if (PyErr_CheckSignals()) {
+ goto error;
+ }
+ clearerr(f->f_fp);
+ shortread = 0;
+ continue;
+ }
PyErr_SetFromErrno(PyExc_IOError);
clearerr(f->f_fp);
goto error;
@@ -1739,6 +1804,7 @@ file_write(PyFileObject *f, PyObject *args)
const char *s;
Py_ssize_t n, n2;
PyObject *encoded = NULL;
+ int err_flag = 0, err;
if (f->f_fp == NULL)
return err_closed();
@@ -1751,7 +1817,6 @@ file_write(PyFileObject *f, PyObject *args)
n = pbuf.len;
}
else {
- const char *encoding, *errors;
PyObject *text;
if (!PyArg_ParseTuple(args, "O", &text))
return NULL;
@@ -1759,7 +1824,9 @@ file_write(PyFileObject *f, PyObject *args)
if (PyString_Check(text)) {
s = PyString_AS_STRING(text);
n = PyString_GET_SIZE(text);
+#ifdef Py_USING_UNICODE
} else if (PyUnicode_Check(text)) {
+ const char *encoding, *errors;
if (f->f_encoding != Py_None)
encoding = PyString_AS_STRING(f->f_encoding);
else
@@ -1773,6 +1840,7 @@ file_write(PyFileObject *f, PyObject *args)
return NULL;
s = PyString_AS_STRING(encoded);
n = PyString_GET_SIZE(encoded);
+#endif
} else {
if (PyObject_AsCharBuffer(text, &s, &n))
return NULL;
@@ -1782,11 +1850,16 @@ file_write(PyFileObject *f, PyObject *args)
FILE_BEGIN_ALLOW_THREADS(f)
errno = 0;
n2 = fwrite(s, 1, n, f->f_fp);
+ if (n2 != n || ferror(f->f_fp)) {
+ err_flag = 1;
+ err = errno;
+ }
FILE_END_ALLOW_THREADS(f)
Py_XDECREF(encoded);
if (f->f_binary)
PyBuffer_Release(&pbuf);
- if (n2 != n) {
+ if (err_flag) {
+ errno = err;
PyErr_SetFromErrno(PyExc_IOError);
clearerr(f->f_fp);
return NULL;
@@ -1868,13 +1941,13 @@ file_writelines(PyFileObject *f, PyObject *seq)
PyObject *v = PyList_GET_ITEM(list, i);
if (!PyString_Check(v)) {
const char *buffer;
- if (((f->f_binary &&
- PyObject_AsReadBuffer(v,
- (const void**)&buffer,
- &len)) ||
- PyObject_AsCharBuffer(v,
- &buffer,
- &len))) {
+ int res;
+ if (f->f_binary) {
+ res = PyObject_AsReadBuffer(v, (const void**)&buffer, &len);
+ } else {
+ res = PyObject_AsCharBuffer(v, &buffer, &len);
+ }
+ if (res) {
PyErr_SetString(PyExc_TypeError,
"writelines() argument must be a sequence of strings");
goto error;
@@ -2592,10 +2665,10 @@ int PyObject_AsFileDescriptor(PyObject *o)
PyObject *meth;
if (PyInt_Check(o)) {
- fd = PyInt_AsLong(o);
+ fd = _PyInt_AsInt(o);
}
else if (PyLong_Check(o)) {
- fd = PyLong_AsLong(o);
+ fd = _PyLong_AsInt(o);
}
else if ((meth = PyObject_GetAttrString(o, "fileno")) != NULL)
{
@@ -2605,11 +2678,11 @@ int PyObject_AsFileDescriptor(PyObject *o)
return -1;
if (PyInt_Check(fno)) {
- fd = PyInt_AsLong(fno);
+ fd = _PyInt_AsInt(fno);
Py_DECREF(fno);
}
else if (PyLong_Check(fno)) {
- fd = PyLong_AsLong(fno);
+ fd = _PyLong_AsInt(fno);
Py_DECREF(fno);
}
else {
diff --git a/Objects/floatobject.c b/Objects/floatobject.c
index 30f7b34..2bec0fb 100644
--- a/Objects/floatobject.c
+++ b/Objects/floatobject.c
@@ -1088,6 +1088,15 @@ _Py_double_round(double x, int ndigits) {
PyObject *result = NULL;
_Py_SET_53BIT_PRECISION_HEADER;
+ /* Easy path for the common case ndigits == 0. */
+ if (ndigits == 0) {
+ rounded = round(x);
+ if (fabs(rounded - x) == 0.5)
+ /* halfway between two integers; use round-away-from-zero */
+ rounded = x + (x > 0.0 ? 0.5 : -0.5);
+ return PyFloat_FromDouble(rounded);
+ }
+
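
The new fast path rounds to the nearest integer and breaks ties away from zero, which is the behaviour Python 2.x round() documents. A tiny standalone version of just that rule (plain C, assuming a C99 round()/fabs(); not the CPython helper itself):

    #include <math.h>
    #include <stdio.h>

    /* Round to the nearest integer, halfway cases away from zero, mirroring
     * the ndigits == 0 branch above. */
    static double round_half_away(double x)
    {
        double r = round(x);                     /* nearest integer          */
        if (fabs(r - x) == 0.5)                  /* exact halfway case:      */
            r = x + (x > 0.0 ? 0.5 : -0.5);      /* force it away from zero  */
        return r;
    }

    int main(void)
    {
        printf("%g %g %g %g\n",
               round_half_away(0.5),    /* prints 1  */
               round_half_away(-0.5),   /* prints -1 */
               round_half_away(2.5),    /* prints 3  */
               round_half_away(-2.5));  /* prints -3 */
        return 0;
    }
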
/* The basic idea is very simple: convert and round the double to a
decimal string using _Py_dg_dtoa, then convert that decimal string
back to a double with _Py_dg_strtod. There's one minor difficulty:
@@ -1774,9 +1783,9 @@ error:
PyDoc_STRVAR(float_as_integer_ratio_doc,
"float.as_integer_ratio() -> (int, int)\n"
"\n"
-"Returns a pair of integers, whose ratio is exactly equal to the original\n"
+"Return a pair of integers, whose ratio is exactly equal to the original\n"
"float and with a positive denominator.\n"
-"Raises OverflowError on infinities and a ValueError on NaNs.\n"
+"Raise OverflowError on infinities and a ValueError on NaNs.\n"
"\n"
">>> (10.0).as_integer_ratio()\n"
"(10, 1)\n"
@@ -1961,7 +1970,7 @@ PyDoc_STRVAR(float_setformat_doc,
"'IEEE, big-endian' or 'IEEE, little-endian', and in addition can only be\n"
"one of the latter two if it appears to match the underlying C reality.\n"
"\n"
-"Overrides the automatic determination of C-level floating point type.\n"
+"Override the automatic determination of C-level floating point type.\n"
"This affects how floats are converted to and from binary strings.");
static PyObject *
@@ -2008,9 +2017,9 @@ PyDoc_STRVAR(float__format__doc,
static PyMethodDef float_methods[] = {
{"conjugate", (PyCFunction)float_float, METH_NOARGS,
- "Returns self, the complex conjugate of any float."},
+ "Return self, the complex conjugate of any float."},
{"__trunc__", (PyCFunction)float_trunc, METH_NOARGS,
- "Returns the Integral closest to x between 0 and x."},
+ "Return the Integral closest to x between 0 and x."},
{"as_integer_ratio", (PyCFunction)float_as_integer_ratio, METH_NOARGS,
float_as_integer_ratio_doc},
{"fromhex", (PyCFunction)float_fromhex,
@@ -2018,14 +2027,14 @@ static PyMethodDef float_methods[] = {
{"hex", (PyCFunction)float_hex,
METH_NOARGS, float_hex_doc},
{"is_integer", (PyCFunction)float_is_integer, METH_NOARGS,
- "Returns True if the float is an integer."},
+ "Return True if the float is an integer."},
#if 0
{"is_inf", (PyCFunction)float_is_inf, METH_NOARGS,
- "Returns True if the float is positive or negative infinite."},
+ "Return True if the float is positive or negative infinite."},
{"is_finite", (PyCFunction)float_is_finite, METH_NOARGS,
- "Returns True if the float is finite, neither infinite nor NaN."},
+ "Return True if the float is finite, neither infinite nor NaN."},
{"is_nan", (PyCFunction)float_is_nan, METH_NOARGS,
- "Returns True if the float is not a number (NaN)."},
+ "Return True if the float is not a number (NaN)."},
#endif
{"__getnewargs__", (PyCFunction)float_getnewargs, METH_NOARGS},
{"__getformat__", (PyCFunction)float_getformat,
diff --git a/Objects/frameobject.c b/Objects/frameobject.c
index a3476cf..f9e4a0e 100644
--- a/Objects/frameobject.c
+++ b/Objects/frameobject.c
@@ -214,6 +214,7 @@ frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno)
case SETUP_LOOP:
case SETUP_EXCEPT:
case SETUP_FINALLY:
+ case SETUP_WITH:
blockstack[blockstack_top++] = addr;
in_finally[blockstack_top-1] = 0;
break;
@@ -221,7 +222,7 @@ frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno)
case POP_BLOCK:
assert(blockstack_top > 0);
setup_op = code[blockstack[blockstack_top-1]];
- if (setup_op == SETUP_FINALLY) {
+ if (setup_op == SETUP_FINALLY || setup_op == SETUP_WITH) {
in_finally[blockstack_top-1] = 1;
}
else {
@@ -236,7 +237,7 @@ frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno)
* be seeing such an END_FINALLY.) */
if (blockstack_top > 0) {
setup_op = code[blockstack[blockstack_top-1]];
- if (setup_op == SETUP_FINALLY) {
+ if (setup_op == SETUP_FINALLY || setup_op == SETUP_WITH) {
blockstack_top--;
}
}
@@ -298,6 +299,7 @@ frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno)
case SETUP_LOOP:
case SETUP_EXCEPT:
case SETUP_FINALLY:
+ case SETUP_WITH:
delta_iblock++;
break;
diff --git a/Objects/genobject.c b/Objects/genobject.c
index af812c0..082e03c 100644
--- a/Objects/genobject.c
+++ b/Objects/genobject.c
@@ -76,6 +76,7 @@ gen_send_ex(PyGenObject *gen, PyObject *arg, int exc)
/* Generators always return to their most recent caller, not
* necessarily their creator. */
+ f->f_tstate = tstate;
Py_XINCREF(tstate->frame);
assert(f->f_back == NULL);
f->f_back = tstate->frame;
@@ -89,6 +90,8 @@ gen_send_ex(PyGenObject *gen, PyObject *arg, int exc)
* cycle. */
assert(f->f_back == tstate->frame);
Py_CLEAR(f->f_back);
+ /* Clear the borrowed reference to the thread state */
+ f->f_tstate = NULL;
/* If the generator just returned (as opposed to yielding), signal
* that the generator is exhausted. */
@@ -120,7 +123,7 @@ gen_send(PyGenObject *gen, PyObject *arg)
}
PyDoc_STRVAR(close_doc,
-"close(arg) -> raise GeneratorExit inside generator.");
+"close() -> raise GeneratorExit inside generator.");
static PyObject *
gen_close(PyGenObject *gen, PyObject *args)
diff --git a/Objects/intobject.c b/Objects/intobject.c
index e518e74..28182f9 100644
--- a/Objects/intobject.c
+++ b/Objects/intobject.c
@@ -189,6 +189,20 @@ PyInt_AsLong(register PyObject *op)
return val;
}
+int
+_PyInt_AsInt(PyObject *obj)
+{
+ long result = PyInt_AsLong(obj);
+ if (result == -1 && PyErr_Occurred())
+ return -1;
+ if (result > INT_MAX || result < INT_MIN) {
+ PyErr_SetString(PyExc_OverflowError,
+ "Python int too large to convert to C int");
+ return -1;
+ }
+ return (int)result;
+}
+
Py_ssize_t
PyInt_AsSsize_t(register PyObject *op)
{
@@ -1059,8 +1073,14 @@ int_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
if (!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi:int", kwlist,
&x, &base))
return NULL;
- if (x == NULL)
+ if (x == NULL) {
+ if (base != -909) {
+ PyErr_SetString(PyExc_TypeError,
+ "int() missing string argument");
+ return NULL;
+ }
return PyInt_FromLong(0L);
+ }
if (base == -909)
return PyNumber_Int(x);
if (PyString_Check(x)) {
@@ -1334,15 +1354,20 @@ static PyGetSetDef int_getset[] = {
};
PyDoc_STRVAR(int_doc,
-"int(x[, base]) -> integer\n\
+"int(x=0) -> int or long\n\
+int(x, base=10) -> int or long\n\
+\n\
+Convert a number or string to an integer, or return 0 if no arguments\n\
+are given. If x is floating point, the conversion truncates towards zero.\n\
+If x is outside the integer range, the function returns a long instead.\n\
\n\
-Convert a string or number to an integer, if possible. A floating point\n\
-argument will be truncated towards zero (this does not include a string\n\
-representation of a floating point number!) When converting a string, use\n\
-the optional base. It is an error to supply a base when converting a\n\
-non-string. If base is zero, the proper base is guessed based on the\n\
-string content. If the argument is outside the integer range a\n\
-long object will be returned instead.");
+If x is not a number or if base is given, then x must be a string or\n\
+Unicode object representing an integer literal in the given base. The\n\
+literal can be preceded by '+' or '-' and be surrounded by whitespace.\n\
+The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to\n\
+interpret the base from the string as an integer literal.\n\
+>>> int('0b100', base=0)\n\
+4");
static PyNumberMethods int_as_number = {
(binaryfunc)int_add, /*nb_add*/
diff --git a/Objects/listsort.txt b/Objects/listsort.txt
index 31a5445..08fef96 100644
--- a/Objects/listsort.txt
+++ b/Objects/listsort.txt
@@ -60,6 +60,7 @@ Comparison with Python's Samplesort Hybrid
/sort: ascending data
3sort: ascending, then 3 random exchanges
+sort: ascending, then 10 random at the end
+ %sort: ascending, then randomly replace 1% of elements w/ random values
~sort: many duplicates
=sort: all equal
!sort: worst case scenario
@@ -99,11 +100,13 @@ Comparison with Python's Samplesort Hybrid
The algorithms are effectively identical in these cases, except that
timsort does one less compare in \sort.
- Now for the more interesting cases. lg(n!) is the information-theoretic
- limit for the best any comparison-based sorting algorithm can do on
- average (across all permutations). When a method gets significantly
- below that, it's either astronomically lucky, or is finding exploitable
- structure in the data.
+ Now for the more interesting cases. Where lg(x) is the logarithm of x to
+ the base 2 (e.g., lg(8)=3), lg(n!) is the information-theoretic limit for
+ the best any comparison-based sorting algorithm can do on average (across
+ all permutations). When a method gets significantly below that, it's
+ either astronomically lucky, or is finding exploitable structure in the
+ data.
+
n lg(n!) *sort 3sort +sort %sort ~sort !sort
------- ------- ------ ------- ------- ------ ------- --------
@@ -250,7 +253,7 @@ Computing minrun
----------------
If N < 64, minrun is N. IOW, binary insertion sort is used for the whole
array then; it's hard to beat that given the overheads of trying something
-fancier.
+fancier (see note BINSORT).
When N is a power of 2, testing on random data showed that minrun values of
16, 32, 64 and 128 worked about equally well. At 256 the data-movement cost
@@ -378,10 +381,10 @@ with wildly unbalanced run lengths.
Merge Memory
------------
-Merging adjacent runs of lengths A and B in-place is very difficult.
-Theoretical constructions are known that can do it, but they're too difficult
-and slow for practical use. But if we have temp memory equal to min(A, B),
-it's easy.
+Merging adjacent runs of lengths A and B in-place, and in linear time, is
+difficult. Theoretical constructions are known that can do it, but they're
+too difficult and slow for practical use. But if we have temp memory equal
+to min(A, B), it's easy.
If A is smaller (function merge_lo), copy A to a temp array, leave B alone,
and then we can do the obvious merge algorithm left to right, from the temp
@@ -456,10 +459,10 @@ finding the right spot early in B (more on that later).
After finding such a k, the region of uncertainty is reduced to 2**(k-1) - 1
consecutive elements, and a straight binary search requires exactly k-1
-additional comparisons to nail it. Then we copy all the B's up to that
-point in one chunk, and then copy A[0]. Note that no matter where A[0]
-belongs in B, the combination of galloping + binary search finds it in no
-more than about 2*lg(B) comparisons.
+additional comparisons to nail it (see note REGION OF UNCERTAINTY). Then we
+copy all the B's up to that point in one chunk, and then copy A[0]. Note
+that no matter where A[0] belongs in B, the combination of galloping + binary
+search finds it in no more than about 2*lg(B) comparisons.
If we did a straight binary search, we could find it in no more than
ceiling(lg(B+1)) comparisons -- but straight binary search takes that many
@@ -572,11 +575,11 @@ Galloping Complication
The description above was for merge_lo. merge_hi has to merge "from the
other end", and really needs to gallop starting at the last element in a run
instead of the first. Galloping from the first still works, but does more
-comparisons than it should (this is significant -- I timed it both ways).
-For this reason, the gallop_left() and gallop_right() functions have a
-"hint" argument, which is the index at which galloping should begin. So
-galloping can actually start at any index, and proceed at offsets of 1, 3,
-7, 15, ... or -1, -3, -7, -15, ... from the starting index.
+comparisons than it should (this is significant -- I timed it both ways). For
+this reason, the gallop_left() and gallop_right() (see note LEFT OR RIGHT)
+functions have a "hint" argument, which is the index at which galloping
+should begin. So galloping can actually start at any index, and proceed at
+offsets of 1, 3, 7, 15, ... or -1, -3, -7, -15, ... from the starting index.
In the code as I type it's always called with either 0 or n-1 (where n is
the # of elements in a run). It's tempting to try to do something fancier,
@@ -675,3 +678,78 @@ immediately. The consequence is that it ends up using two compares to sort
[2, 1]. Gratifyingly, timsort doesn't do any special-casing, so had to be
taught how to deal with mixtures of ascending and descending runs
efficiently in all cases.
+
+
+NOTES
+-----
+
+BINSORT
+A "binary insertion sort" is just like a textbook insertion sort, but instead
+of locating the correct position of the next item via linear (one at a time)
+search, an equivalent to Python's bisect.bisect_right is used to find the
+correct position in logarithmic time. Most texts don't mention this
+variation, and those that do usually say it's not worth the bother: insertion
+sort remains quadratic (expected and worst cases) either way. Speeding the
+search doesn't reduce the quadratic data movement costs.
+
+But in CPython's case, comparisons are extraordinarily expensive compared to
+moving data, and the details matter. Moving objects is just copying
+pointers. Comparisons can be arbitrarily expensive (can invoke arbitrary
+user-supplied Python code), but even in simple cases (like 3 < 4) _all_
+decisions are made at runtime: what's the type of the left comparand? the
+type of the right? do they need to be coerced to a common type? where's the
+code to compare these types? And so on. Even the simplest Python comparison
+triggers a large pile of C-level pointer dereferences, conditionals, and
+function calls.
+
+So cutting the number of compares is almost always measurably helpful in
+CPython, and the savings swamp the quadratic-time data movement costs for
+reasonable minrun values.
+
+
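
For reference, a minimal C version of the binary insertion sort described above: a bisect_right-style search for the insertion point, then one memmove. Illustrative only, not the CPython implementation in Objects/listobject.c, which works on arrays of PyObject pointers:

    #include <stdio.h>
    #include <string.h>

    /* Binary insertion sort over an int array: the position of a[i] among the
     * already-sorted prefix a[0:i] is found by binary search, then the tail
     * is shifted with one memmove.  Comparisons drop to O(n log n) even
     * though data movement stays quadratic. */
    static void binary_insertion_sort(int *a, size_t n)
    {
        for (size_t i = 1; i < n; i++) {
            int pivot = a[i];
            size_t lo = 0, hi = i;
            while (lo < hi) {
                size_t mid = lo + (hi - lo) / 2;
                if (pivot < a[mid])
                    hi = mid;
                else
                    lo = mid + 1;   /* insert after equal elements: stable */
            }
            memmove(a + lo + 1, a + lo, (i - lo) * sizeof *a);
            a[lo] = pivot;
        }
    }

    int main(void)
    {
        int a[] = {5, 2, 9, 2, 7, 1};
        binary_insertion_sort(a, sizeof a / sizeof a[0]);
        for (size_t i = 0; i < sizeof a / sizeof a[0]; i++)
            printf("%d ", a[i]);
        putchar('\n');
        return 0;
    }
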
+LEFT OR RIGHT
+gallop_left() and gallop_right() are akin to the Python bisect module's
+bisect_left() and bisect_right(): they're the same unless the slice they're
+searching contains a (at least one) value equal to the value being searched
+for. In that case, gallop_left() returns the position immediately before the
+leftmost equal value, and gallop_right() the position immediately after the
+rightmost equal value. The distinction is needed to preserve stability. In
+general, when merging adjacent runs A and B, gallop_left is used to search
+thru B for where an element from A belongs, and gallop_right to search thru A
+for where an element from B belongs.
+
+
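
A sketch of the galloping idea behind gallop_left(), simplified to an int array and a fixed starting point of 0: probe at offsets 1, 3, 7, 15, ... until the key is bracketed, then finish with an ordinary binary search. The function name and the missing "hint" argument are simplifications, not the real interface:

    #include <stdio.h>

    /* Leftmost insertion point of key in the sorted array a of length n,
     * found by galloping from the left end.  Returns k such that
     * a[0..k-1] < key <= a[k..n-1] (bisect_left semantics). */
    static size_t gallop_left_from_start(int key, const int *a, size_t n)
    {
        size_t lastofs = 0, ofs = 1;
        if (n == 0 || key <= a[0])
            return 0;
        /* Invariant: a[lastofs] < key.  Grow ofs until a[ofs] >= key or the
         * end of the array is passed. */
        while (ofs < n && a[ofs] < key) {
            lastofs = ofs;
            ofs = ofs * 2 + 1;                 /* 1, 3, 7, 15, ... */
        }
        if (ofs > n)
            ofs = n;
        /* Now a[lastofs] < key <= a[ofs] (a[n] read as +infinity); binary
         * search over the open-closed range (lastofs, ofs]. */
        lastofs++;
        while (lastofs < ofs) {
            size_t mid = lastofs + (ofs - lastofs) / 2;
            if (a[mid] < key)
                lastofs = mid + 1;
            else
                ofs = mid;
        }
        return ofs;
    }

    int main(void)
    {
        int b[] = {1, 2, 2, 4, 7, 7, 7, 9, 12, 15};
        size_t n = sizeof b / sizeof b[0];
        printf("%zu %zu %zu\n",
               gallop_left_from_start(7, b, n),    /* 4: before the leftmost 7 */
               gallop_left_from_start(3, b, n),    /* 3                        */
               gallop_left_from_start(20, b, n));  /* 10: past the end         */
        return 0;
    }
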
+REGION OF UNCERTAINTY
+Two kinds of confusion seem to be common about the claim that after finding
+a k such that
+
+ B[2**(k-1) - 1] < A[0] <= B[2**k - 1]
+
+then a binary search requires exactly k-1 tries to find A[0]'s proper
+location. For concreteness, say k=3, so B[3] < A[0] <= B[7].
+
+The first confusion takes the form "OK, then the region of uncertainty is at
+indices 3, 4, 5, 6 and 7: that's 5 elements, not the claimed 2**(k-1) - 1 =
+3"; or the region is viewed as a Python slice and the objection is "but that's
+the slice B[3:7], so has 7-3 = 4 elements". Resolution: we've already
+compared A[0] against B[3] and against B[7], so A[0]'s correct location is
+already known wrt _both_ endpoints. What remains is to find A[0]'s correct
+location wrt B[4], B[5] and B[6], which spans 3 elements. Or in general, the
+slice (leaving off both endpoints) (2**(k-1)-1)+1 through (2**k-1)-1
+inclusive = 2**(k-1) through (2**k-1)-1 inclusive, which has
+ (2**k-1)-1 - 2**(k-1) + 1 =
+ 2**k-1 - 2**(k-1) =
+    2*2**(k-1)-1 - 2**(k-1) =
+ (2-1)*2**(k-1) - 1 =
+ 2**(k-1) - 1
+elements.
+
+The second confusion: "k-1 = 2 binary searches can find the correct location
+among 2**(k-1) = 4 elements, but you're only applying it to 3 elements: we
+could make this more efficient by arranging for the region of uncertainty to
+span 2**(k-1) elements." Resolution: that confuses "elements" with
+"locations". In a slice with N elements, there are N+1 _locations_. In the
+example, with the region of uncertainty B[4], B[5], B[6], there are 4
+locations: before B[4], between B[4] and B[5], between B[5] and B[6], and
+after B[6]. In general, across 2**(k-1)-1 elements, there are 2**(k-1)
+locations. That's why k-1 binary searches are necessary and sufficient.
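
The counting argument above is easy to machine-check: for each k, the open gap between the two already-compared probes holds 2**(k-1) - 1 elements, hence 2**(k-1) candidate locations, and a binary search over that many locations takes exactly k-1 comparisons. A small brute-force verification, written as a standalone C program:

    #include <stdio.h>

    /* After B[2**(k-1) - 1] < A[0] <= B[2**k - 1], count the elements and
     * locations strictly between the two probes and the comparisons a binary
     * search over those locations needs (ceil(log2)). */
    int main(void)
    {
        for (int k = 1; k <= 10; k++) {
            long lo = (1L << (k - 1)) - 1;     /* index already compared (below) */
            long hi = (1L << k) - 1;           /* index already compared (above) */
            long elements = hi - lo - 1;       /* strictly between the probes    */
            long locations = elements + 1;
            int steps = 0;                     /* comparisons binary search needs */
            for (long span = locations; span > 1; span = (span + 1) / 2)
                steps++;
            printf("k=%2d  elements=%4ld  locations=%4ld  binary steps=%2d\n",
                   k, elements, locations, steps);
        }
        return 0;
    }
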
diff --git a/Objects/longobject.c b/Objects/longobject.c
index cd86a1f..405be2e 100644
--- a/Objects/longobject.c
+++ b/Objects/longobject.c
@@ -339,6 +339,24 @@ PyLong_AsLong(PyObject *obj)
return result;
}
+/* Get a C int from a long int object or any object that has an __int__
+ method. Return -1 and set an error if overflow occurs. */
+
+int
+_PyLong_AsInt(PyObject *obj)
+{
+ int overflow;
+ long result = PyLong_AsLongAndOverflow(obj, &overflow);
+ if (overflow || result > INT_MAX || result < INT_MIN) {
+ /* XXX: could be cute and give a different
+ message for overflow == -1 */
+ PyErr_SetString(PyExc_OverflowError,
+ "Python int too large to convert to C int");
+ return -1;
+ }
+ return (int)result;
+}
+
/* Get a Py_ssize_t from a long int object.
Returns -1 and sets an error condition if overflow occurs. */
@@ -3456,10 +3474,16 @@ long_pow(PyObject *v, PyObject *w, PyObject *x)
goto Done;
}
- /* if base < 0:
- base = base % modulus
- Having the base positive just makes things easier. */
- if (Py_SIZE(a) < 0) {
+ /* Reduce base by modulus in some cases:
+ 1. If base < 0. Forcing the base non-negative makes things easier.
+ 2. If base is obviously larger than the modulus. The "small
+ exponent" case later can multiply directly by base repeatedly,
+ while the "large exponent" case multiplies directly by base 31
+ times. It can be unboundedly faster to multiply by
+ base % modulus instead.
+ We could _always_ do this reduction, but l_divmod() isn't cheap,
+ so we only do it when it buys something. */
+ if (Py_SIZE(a) < 0 || Py_SIZE(a) > Py_SIZE(c)) {
if (l_divmod(a, c, NULL, &temp) < 0)
goto Error;
Py_DECREF(a);
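
The effect of reducing an oversized base before exponentiating is easiest to see in a fixed-width toy: once base is replaced by base % modulus, every later product stays below modulus squared. A square-and-multiply sketch on 64-bit integers (illustrative only; long_pow() itself works on arbitrary-precision longs, where the comment above notes the saving can be unbounded):

    #include <stdio.h>

    /* Square-and-multiply modular exponentiation.  The "base %= mod" up front
     * is the reduction the patch adds for oversized bases; here it also keeps
     * every product within 64 bits, assuming mod is small enough that
     * (mod - 1) * (mod - 1) does not overflow. */
    static unsigned long long modpow(unsigned long long base,
                                     unsigned long long exp,
                                     unsigned long long mod)
    {
        unsigned long long result = 1 % mod;
        base %= mod;
        while (exp) {
            if (exp & 1)
                result = result * base % mod;
            base = base * base % mod;
            exp >>= 1;
        }
        return result;
    }

    int main(void)
    {
        /* Same value as pow(123456789, 20, 1000) in Python: 201. */
        printf("%llu\n", modpow(123456789ULL, 20ULL, 1000ULL));
        return 0;
    }
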
@@ -3987,8 +4011,14 @@ long_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
if (!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi:long", kwlist,
&x, &base))
return NULL;
- if (x == NULL)
+ if (x == NULL) {
+ if (base != -909) {
+ PyErr_SetString(PyExc_TypeError,
+ "long() missing string argument");
+ return NULL;
+ }
return PyLong_FromLong(0L);
+ }
if (base == -909)
return PyNumber_Long(x);
else if (PyString_Check(x)) {
@@ -4221,13 +4251,19 @@ static PyGetSetDef long_getset[] = {
};
PyDoc_STRVAR(long_doc,
-"long(x[, base]) -> integer\n\
+"long(x=0) -> long\n\
+long(x, base=10) -> long\n\
+\n\
+Convert a number or string to a long integer, or return 0L if no arguments\n\
+are given. If x is floating point, the conversion truncates towards zero.\n\
\n\
-Convert a string or number to a long integer, if possible. A floating\n\
-point argument will be truncated towards zero (this does not include a\n\
-string representation of a floating point number!) When converting a\n\
-string, use the optional base. It is an error to supply a base when\n\
-converting a non-string.");
+If x is not a number or if base is given, then x must be a string or\n\
+Unicode object representing an integer literal in the given base. The\n\
+literal can be preceded by '+' or '-' and be surrounded by whitespace.\n\
+The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to\n\
+interpret the base from the string as an integer literal.\n\
+>>> int('0b100', base=0)\n\
+4L");
static PyNumberMethods long_as_number = {
(binaryfunc)long_add, /*nb_add*/
diff --git a/Objects/moduleobject.c b/Objects/moduleobject.c
index 08e9740..f2fed30 100644
--- a/Objects/moduleobject.c
+++ b/Objects/moduleobject.c
@@ -122,7 +122,8 @@ _PyModule_Clear(PyObject *m)
if (s[0] == '_' && s[1] != '_') {
if (Py_VerboseFlag > 1)
PySys_WriteStderr("# clear[1] %s\n", s);
- PyDict_SetItem(d, key, Py_None);
+ if (PyDict_SetItem(d, key, Py_None) != 0)
+ PyErr_Clear();
}
}
}
@@ -135,7 +136,8 @@ _PyModule_Clear(PyObject *m)
if (s[0] != '_' || strcmp(s, "__builtins__") != 0) {
if (Py_VerboseFlag > 1)
PySys_WriteStderr("# clear[2] %s\n", s);
- PyDict_SetItem(d, key, Py_None);
+ if (PyDict_SetItem(d, key, Py_None) != 0)
+ PyErr_Clear();
}
}
}
diff --git a/Objects/object.c b/Objects/object.c
index 9303086..14f4e9f 100644
--- a/Objects/object.c
+++ b/Objects/object.c
@@ -474,7 +474,7 @@ PyObject_Unicode(PyObject *v)
PyObject *func;
PyObject *str;
int unicode_method_found = 0;
- static PyObject *unicodestr;
+ static PyObject *unicodestr = NULL;
if (v == NULL) {
res = PyString_FromString("<NULL>");
@@ -491,6 +491,11 @@ PyObject_Unicode(PyObject *v)
if (PyInstance_Check(v)) {
/* We're an instance of a classic class */
/* Try __unicode__ from the instance -- alas we have no type */
+ if (!unicodestr) {
+ unicodestr = PyString_InternFromString("__unicode__");
+ if (!unicodestr)
+ return NULL;
+ }
func = PyObject_GetAttr(v, unicodestr);
if (func != NULL) {
unicode_method_found = 1;
@@ -2111,8 +2116,10 @@ _Py_ReadyTypes(void)
if (PyType_Ready(&PySet_Type) < 0)
Py_FatalError("Can't initialize set type");
+#ifdef Py_USING_UNICODE
if (PyType_Ready(&PyUnicode_Type) < 0)
Py_FatalError("Can't initialize unicode type");
+#endif
if (PyType_Ready(&PySlice_Type) < 0)
Py_FatalError("Can't initialize slice type");
@@ -2196,6 +2203,18 @@ _Py_ReadyTypes(void)
if (PyType_Ready(&PyFile_Type) < 0)
Py_FatalError("Can't initialize file type");
+
+ if (PyType_Ready(&PyCapsule_Type) < 0)
+ Py_FatalError("Can't initialize capsule type");
+
+ if (PyType_Ready(&PyCell_Type) < 0)
+ Py_FatalError("Can't initialize cell type");
+
+ if (PyType_Ready(&PyCallIter_Type) < 0)
+ Py_FatalError("Can't initialize call iter type");
+
+ if (PyType_Ready(&PySeqIter_Type) < 0)
+ Py_FatalError("Can't initialize sequence iterator type");
}
@@ -2426,6 +2445,18 @@ _PyTrash_deposit_object(PyObject *op)
_PyTrash_delete_later = op;
}
+/* The equivalent API, using per-thread state recursion info */
+void
+_PyTrash_thread_deposit_object(PyObject *op)
+{
+ PyThreadState *tstate = PyThreadState_GET();
+ assert(PyObject_IS_GC(op));
+ assert(_Py_AS_GC(op)->gc.gc_refs == _PyGC_REFS_UNTRACKED);
+ assert(op->ob_refcnt == 0);
+ _Py_AS_GC(op)->gc.gc_prev = (PyGC_Head *) tstate->trash_delete_later;
+ tstate->trash_delete_later = op;
+}
+
 /* Deallocate all the objects in the _PyTrash_delete_later list. Called when
* the call-stack unwinds again.
*/
@@ -2452,6 +2483,31 @@ _PyTrash_destroy_chain(void)
}
}
+/* The equivalent API, using per-thread state recursion info */
+void
+_PyTrash_thread_destroy_chain(void)
+{
+ PyThreadState *tstate = PyThreadState_GET();
+ while (tstate->trash_delete_later) {
+ PyObject *op = tstate->trash_delete_later;
+ destructor dealloc = Py_TYPE(op)->tp_dealloc;
+
+ tstate->trash_delete_later =
+ (PyObject*) _Py_AS_GC(op)->gc.gc_prev;
+
+ /* Call the deallocator directly. This used to try to
+ * fool Py_DECREF into calling it indirectly, but
+ * Py_DECREF was already called on this object, and in
+ * assorted non-release builds calling Py_DECREF again ends
+ * up distorting allocation statistics.
+ */
+ assert(op->ob_refcnt == 0);
+ ++tstate->trash_delete_nesting;
+ (*dealloc)(op);
+ --tstate->trash_delete_nesting;
+ }
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c
index 483847c..1434206 100644
--- a/Objects/obmalloc.c
+++ b/Objects/obmalloc.c
@@ -2,6 +2,13 @@
#ifdef WITH_PYMALLOC
+#ifdef HAVE_MMAP
+ #include <sys/mman.h>
+ #ifdef MAP_ANONYMOUS
+ #define ARENAS_USE_MMAP
+ #endif
+#endif
+
#ifdef WITH_VALGRIND
#include <valgrind/valgrind.h>
@@ -75,7 +82,8 @@ static int running_on_valgrind = -1;
* Allocation strategy abstract:
*
* For small requests, the allocator sub-allocates <Big> blocks of memory.
- * Requests greater than 256 bytes are routed to the system's allocator.
+ * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
+ * system's allocator.
*
* Small requests are grouped in size classes spaced 8 bytes apart, due
* to the required valid alignment of the returned address. Requests of
@@ -107,10 +115,11 @@ static int running_on_valgrind = -1;
* 57-64 64 7
* 65-72 72 8
* ... ... ...
- * 241-248 248 30
- * 249-256 256 31
+ * 497-504 504 62
+ * 505-512 512 63
*
- * 0, 257 and up: routed to the underlying allocator.
+ * 0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying
+ * allocator.
*/
/*==========================================================================*/
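
The size-class table above is nothing more than rounding a request up to the next multiple of ALIGNMENT (8 bytes) and numbering the classes from 0; the next hunk raises the cut-off from 256 to 512 bytes. A standalone illustration of the mapping (not the macros pymalloc actually uses):

    #include <stdio.h>

    #define ALIGNMENT                8      /* spacing of the size classes   */
    #define SMALL_REQUEST_THRESHOLD  512    /* raised from 256 by this patch */

    /* Map a request size to its size class index and rounded block size;
     * 0 and anything above the threshold go to the system allocator. */
    int main(void)
    {
        size_t sizes[] = {1, 8, 9, 64, 65, 505, 512, 513};
        for (size_t i = 0; i < sizeof sizes / sizeof sizes[0]; i++) {
            size_t n = sizes[i];
            if (n == 0 || n > SMALL_REQUEST_THRESHOLD) {
                printf("%4zu bytes -> system allocator\n", n);
                continue;
            }
            size_t idx = (n - 1) / ALIGNMENT;        /* size class index     */
            size_t block = (idx + 1) * ALIGNMENT;    /* rounded block size   */
            printf("%4zu bytes -> class %2zu (%3zu-byte blocks)\n", n, idx, block);
        }
        return 0;
    }
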
@@ -143,10 +152,13 @@ static int running_on_valgrind = -1;
* 1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256
* 2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
*
+ * Note: a size threshold of 512 guarantees that newly created dictionaries
+ * will be allocated from preallocated memory pools on 64-bit.
+ *
* Although not required, for better performance and space efficiency,
* it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
*/
-#define SMALL_REQUEST_THRESHOLD 256
+#define SMALL_REQUEST_THRESHOLD 512
#define NB_SMALL_SIZE_CLASSES (SMALL_REQUEST_THRESHOLD / ALIGNMENT)
/*
@@ -174,15 +186,15 @@ static int running_on_valgrind = -1;
/*
* The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
* on a page boundary. This is a reserved virtual address space for the
- * current process (obtained through a malloc call). In no way this means
- * that the memory arenas will be used entirely. A malloc(<Big>) is usually
- * an address range reservation for <Big> bytes, unless all pages within this
- * space are referenced subsequently. So malloc'ing big blocks and not using
- * them does not mean "wasting memory". It's an addressable range wastage...
+ * current process (obtained through a malloc()/mmap() call). This in no
+ * way means that the memory arenas will be used entirely. A malloc(<Big>) is
+ * usually an address range reservation for <Big> bytes, unless all pages within
+ * this space are referenced subsequently. So malloc'ing big blocks and not
+ * using them does not mean "wasting memory". It's an addressable range
+ * wastage...
*
- * Therefore, allocating arenas with malloc is not optimal, because there is
- * some address space wastage, but this is the most portable way to request
- * memory from the system across various platforms.
+ * Arenas are allocated with mmap() on systems supporting anonymous memory
+ * mappings to reduce heap fragmentation.
*/
#define ARENA_SIZE (256 << 10) /* 256KB */
@@ -440,6 +452,9 @@ static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
, PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
, PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
+#if NB_SMALL_SIZE_CLASSES > 64
+#error "NB_SMALL_SIZE_CLASSES should be less than 64"
+#endif /* NB_SMALL_SIZE_CLASSES > 64 */
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
@@ -525,6 +540,8 @@ new_arena(void)
{
struct arena_object* arenaobj;
uint excess; /* number of bytes above pool alignment */
+ void *address;
+ int err;
#ifdef PYMALLOC_DEBUG
if (Py_GETENV("PYTHONMALLOCSTATS"))
@@ -577,8 +594,15 @@ new_arena(void)
arenaobj = unused_arena_objects;
unused_arena_objects = arenaobj->nextarena;
assert(arenaobj->address == 0);
- arenaobj->address = (uptr)malloc(ARENA_SIZE);
- if (arenaobj->address == 0) {
+#ifdef ARENAS_USE_MMAP
+ address = mmap(NULL, ARENA_SIZE, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ err = (address == MAP_FAILED);
+#else
+ address = malloc(ARENA_SIZE);
+ err = (address == 0);
+#endif
+ if (err) {
/* The allocation failed: return NULL after putting the
* arenaobj back.
*/
@@ -586,6 +610,7 @@ new_arena(void)
unused_arena_objects = arenaobj;
return NULL;
}
+ arenaobj->address = (uptr)address;
++narenas_currently_allocated;
#ifdef PYMALLOC_DEBUG
@@ -1054,7 +1079,11 @@ PyObject_Free(void *p)
unused_arena_objects = ao;
/* Free the entire arena. */
+#ifdef ARENAS_USE_MMAP
+ munmap((void *)ao->address, ARENA_SIZE);
+#else
free((void *)ao->address);
+#endif
ao->address = 0; /* mark unassociated */
--narenas_currently_allocated;
@@ -1713,7 +1742,7 @@ printone(const char* msg, size_t value)
k = 3;
do {
size_t nextvalue = value / 10;
- uint digit = (uint)(value - nextvalue * 10);
+ unsigned int digit = (unsigned int)(value - nextvalue * 10);
value = nextvalue;
buf[i--] = (char)(digit + '0');
--k;
diff --git a/Objects/rangeobject.c b/Objects/rangeobject.c
index 4e64dba..5203f40 100644
--- a/Objects/rangeobject.c
+++ b/Objects/rangeobject.c
@@ -37,6 +37,30 @@ get_len_of_range(long lo, long hi, long step)
return 0UL;
}
+/* Return a stop value suitable for reconstructing the xrange from
+ * a (start, stop, step) triple. Used in range_repr and range_reduce.
+ * Computes start + len * step, clipped to the range [LONG_MIN, LONG_MAX].
+ */
+static long
+get_stop_for_range(rangeobject *r)
+{
+ long last;
+
+ if (r->len == 0)
+ return r->start;
+
+ /* The tricky bit is avoiding overflow. We first compute the last entry in
+ the xrange, start + (len - 1) * step, which is guaranteed to lie within
+ the range of a long, and then add step to it. See the range_reverse
+ comments for an explanation of the casts below.
+ */
+ last = (long)(r->start + (unsigned long)(r->len - 1) * r->step);
+ if (r->step > 0)
+ return last > LONG_MAX - r->step ? LONG_MAX : last + r->step;
+ else
+ return last < LONG_MIN - r->step ? LONG_MIN : last + r->step;
+}
+
static PyObject *
range_new(PyTypeObject *type, PyObject *args, PyObject *kw)
{
@@ -80,7 +104,8 @@ range_new(PyTypeObject *type, PyObject *args, PyObject *kw)
}
PyDoc_STRVAR(range_doc,
-"xrange([start,] stop[, step]) -> xrange object\n\
+"xrange(stop) -> xrange object\n\
+xrange(start, stop[, step]) -> xrange object\n\
\n\
Like range(), but instead of returning a list, returns an object that\n\
generates the numbers in the range on demand. For looping, this is \n\
@@ -112,17 +137,17 @@ range_repr(rangeobject *r)
if (r->start == 0 && r->step == 1)
rtn = PyString_FromFormat("xrange(%ld)",
- r->start + r->len * r->step);
+ get_stop_for_range(r));
else if (r->step == 1)
rtn = PyString_FromFormat("xrange(%ld, %ld)",
r->start,
- r->start + r->len * r->step);
+ get_stop_for_range(r));
else
rtn = PyString_FromFormat("xrange(%ld, %ld, %ld)",
r->start,
- r->start + r->len * r->step,
+ get_stop_for_range(r),
r->step);
return rtn;
}
@@ -131,9 +156,9 @@ range_repr(rangeobject *r)
static PyObject *
range_reduce(rangeobject *r, PyObject *args)
{
- return Py_BuildValue("(O(iii))", Py_TYPE(r),
+ return Py_BuildValue("(O(lll))", Py_TYPE(r),
r->start,
- r->start + r->len * r->step,
+ get_stop_for_range(r),
r->step);
}
diff --git a/Objects/setobject.c b/Objects/setobject.c
index af1ce16..b4b1178 100644
--- a/Objects/setobject.c
+++ b/Objects/setobject.c
@@ -212,7 +212,6 @@ static int
set_insert_key(register PySetObject *so, PyObject *key, long hash)
{
register setentry *entry;
- typedef setentry *(*lookupfunc)(PySetObject *, PyObject *, long);
assert(so->lookup != NULL);
entry = so->lookup(so, key, hash);
@@ -1797,12 +1796,8 @@ set_richcompare(PySetObject *v, PyObject *w, int op)
PyObject *r1, *r2;
if(!PyAnySet_Check(w)) {
- if (op == Py_EQ)
- Py_RETURN_FALSE;
- if (op == Py_NE)
- Py_RETURN_TRUE;
- PyErr_SetString(PyExc_TypeError, "can only compare to a set");
- return NULL;
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
}
switch (op) {
case Py_EQ:
diff --git a/Objects/sliceobject.c b/Objects/sliceobject.c
index d1fe052..767a50a 100644
--- a/Objects/sliceobject.c
+++ b/Objects/sliceobject.c
@@ -211,7 +211,8 @@ slice_new(PyTypeObject *type, PyObject *args, PyObject *kw)
}
PyDoc_STRVAR(slice_doc,
-"slice([start,] stop[, step])\n\
+"slice(stop)\n\
+slice(start, stop[, step])\n\
\n\
Create a slice object. This is used for extended slicing (e.g. a[0:10:2]).");
diff --git a/Objects/stringlib/formatter.h b/Objects/stringlib/formatter.h
index c49a104..122abe6 100644
--- a/Objects/stringlib/formatter.h
+++ b/Objects/stringlib/formatter.h
@@ -73,7 +73,7 @@ static int
get_integer(STRINGLIB_CHAR **ptr, STRINGLIB_CHAR *end,
Py_ssize_t *result)
{
- Py_ssize_t accumulator, digitval, oldaccumulator;
+ Py_ssize_t accumulator, digitval;
int numdigits;
accumulator = numdigits = 0;
for (;;(*ptr)++, numdigits++) {
@@ -83,19 +83,17 @@ get_integer(STRINGLIB_CHAR **ptr, STRINGLIB_CHAR *end,
if (digitval < 0)
break;
/*
- This trick was copied from old Unicode format code. It's cute,
- but would really suck on an old machine with a slow divide
- implementation. Fortunately, in the normal case we do not
- expect too many digits.
+ Detect possible overflow before it happens:
+
+ accumulator * 10 + digitval > PY_SSIZE_T_MAX if and only if
+ accumulator > (PY_SSIZE_T_MAX - digitval) / 10.
*/
- oldaccumulator = accumulator;
- accumulator *= 10;
- if ((accumulator+10)/10 != oldaccumulator+1) {
+ if (accumulator > (PY_SSIZE_T_MAX - digitval) / 10) {
PyErr_Format(PyExc_ValueError,
"Too many decimal digits in format string");
return -1;
}
- accumulator += digitval;
+ accumulator = accumulator * 10 + digitval;
}
*result = accumulator;
return numdigits;
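
The rewritten check can be read in isolation: for non-negative acc and a digit in 0..9, acc * 10 + digit exceeds the maximum exactly when acc > (MAX - digit) / 10, so the test never performs the overflowing multiply. A small sketch of the same accumulation, using long and LONG_MAX in place of Py_ssize_t:

    #include <limits.h>
    #include <stdio.h>

    /* Accumulate decimal digits with the same pre-check as above:
     * acc * 10 + digit > LONG_MAX  <=>  acc > (LONG_MAX - digit) / 10. */
    static int append_digit(long *acc, int digit)
    {
        if (*acc > (LONG_MAX - digit) / 10)
            return -1;                       /* would overflow */
        *acc = *acc * 10 + digit;
        return 0;
    }

    int main(void)
    {
        long acc = 0;
        const char *s = "9223372036854775808";   /* LONG_MAX + 1 on 64-bit */
        for (const char *p = s; *p; p++) {
            if (append_digit(&acc, *p - '0') < 0) {
                printf("overflow detected at digit '%c'\n", *p);
                return 0;
            }
        }
        printf("%ld\n", acc);
        return 0;
    }
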
@@ -182,8 +180,9 @@ parse_internal_render_format_spec(STRINGLIB_CHAR *format_spec,
Py_ssize_t consumed;
int align_specified = 0;
+ int fill_char_specified = 0;
- format->fill_char = '\0';
+ format->fill_char = ' ';
format->align = default_align;
format->alternate = 0;
format->sign = '\0';
@@ -197,6 +196,7 @@ parse_internal_render_format_spec(STRINGLIB_CHAR *format_spec,
if (end-ptr >= 2 && is_alignment_token(ptr[1])) {
format->align = ptr[1];
format->fill_char = ptr[0];
+ fill_char_specified = 1;
align_specified = 1;
ptr += 2;
}
@@ -220,7 +220,7 @@ parse_internal_render_format_spec(STRINGLIB_CHAR *format_spec,
}
/* The special case for 0-padding (backwards compat) */
- if (format->fill_char == '\0' && end-ptr >= 1 && ptr[0] == '0') {
+ if (!fill_char_specified && end-ptr >= 1 && ptr[0] == '0') {
format->fill_char = '0';
if (!align_specified) {
format->align = '=';
@@ -717,8 +717,7 @@ format_string_internal(PyObject *value, const InternalFormatSpec *format)
/* Write into that space. First the padding. */
p = fill_padding(STRINGLIB_STR(result), len,
- format->fill_char=='\0'?' ':format->fill_char,
- lpad, rpad);
+ format->fill_char, lpad, rpad);
/* Then the source string. */
memcpy(p, STRINGLIB_STR(value), len * sizeof(STRINGLIB_CHAR));
@@ -895,8 +894,7 @@ format_int_or_long_internal(PyObject *value, const InternalFormatSpec *format,
/* Populate the memory. */
fill_number(STRINGLIB_STR(result), &spec, pnumeric_chars, n_digits,
- prefix, format->fill_char == '\0' ? ' ' : format->fill_char,
- &locale, format->type == 'X');
+ prefix, format->fill_char, &locale, format->type == 'X');
done:
Py_XDECREF(tmp);
@@ -930,7 +928,7 @@ format_float_internal(PyObject *value,
Py_ssize_t n_total;
int has_decimal;
double val;
- Py_ssize_t precision = format->precision;
+ Py_ssize_t precision;
Py_ssize_t default_precision = 6;
STRINGLIB_CHAR type = format->type;
int add_pct = 0;
@@ -949,6 +947,12 @@ format_float_internal(PyObject *value,
       from a hard-coded pseudo-locale */
LocaleInfo locale;
+ if (format->precision > INT_MAX) {
+ PyErr_SetString(PyExc_ValueError, "precision too big");
+ goto done;
+ }
+ precision = (int)format->precision;
+
/* Alternate is not allowed on floats. */
if (format->alternate) {
PyErr_SetString(PyExc_ValueError,
@@ -1044,8 +1048,7 @@ format_float_internal(PyObject *value,
/* Populate the memory. */
fill_number(STRINGLIB_STR(result), &spec, p, n_digits, NULL,
- format->fill_char == '\0' ? ' ' : format->fill_char, &locale,
- 0);
+ format->fill_char, &locale, 0);
done:
PyMem_Free(buf);
@@ -1080,7 +1083,7 @@ format_complex_internal(PyObject *value,
Py_ssize_t n_im_total;
int re_has_decimal;
int im_has_decimal;
- Py_ssize_t precision = format->precision;
+ Py_ssize_t precision;
Py_ssize_t default_precision = 6;
STRINGLIB_CHAR type = format->type;
STRINGLIB_CHAR *p_re;
@@ -1109,6 +1112,12 @@ format_complex_internal(PyObject *value,
from a hard-code pseudo-locale */
LocaleInfo locale;
+ if (format->precision > INT_MAX) {
+ PyErr_SetString(PyExc_ValueError, "precision too big");
+ goto done;
+ }
+ precision = (int)format->precision;
+
/* Alternate is not allowed on complex. */
if (format->alternate) {
PyErr_SetString(PyExc_ValueError,
@@ -1255,8 +1264,7 @@ format_complex_internal(PyObject *value,
/* Populate the memory. First, the padding. */
p = fill_padding(STRINGLIB_STR(result),
n_re_total + n_im_total + 1 + add_parens * 2,
- format->fill_char=='\0' ? ' ' : format->fill_char,
- lpad, rpad);
+ format->fill_char, lpad, rpad);
if (add_parens)
*p++ = '(';
diff --git a/Objects/stringlib/string_format.h b/Objects/stringlib/string_format.h
index 075fa1d..965e1ad 100644
--- a/Objects/stringlib/string_format.h
+++ b/Objects/stringlib/string_format.h
@@ -197,7 +197,6 @@ get_integer(const SubString *str)
{
Py_ssize_t accumulator = 0;
Py_ssize_t digitval;
- Py_ssize_t oldaccumulator;
STRINGLIB_CHAR *p;
/* empty string is an error */
@@ -209,19 +208,17 @@ get_integer(const SubString *str)
if (digitval < 0)
return -1;
/*
- This trick was copied from old Unicode format code. It's cute,
- but would really suck on an old machine with a slow divide
- implementation. Fortunately, in the normal case we do not
- expect too many digits.
+ Detect possible overflow before it happens:
+
+ accumulator * 10 + digitval > PY_SSIZE_T_MAX if and only if
+ accumulator > (PY_SSIZE_T_MAX - digitval) / 10.
*/
- oldaccumulator = accumulator;
- accumulator *= 10;
- if ((accumulator+10)/10 != oldaccumulator+1) {
+ if (accumulator > (PY_SSIZE_T_MAX - digitval) / 10) {
PyErr_Format(PyExc_ValueError,
"Too many decimal digits in format string");
return -1;
}
- accumulator += digitval;
+ accumulator = accumulator * 10 + digitval;
}
return accumulator;
}
diff --git a/Objects/stringlib/transmogrify.h b/Objects/stringlib/transmogrify.h
index 1e132e5..be595a6 100644
--- a/Objects/stringlib/transmogrify.h
+++ b/Objects/stringlib/transmogrify.h
@@ -15,7 +15,7 @@ stringlib_expandtabs(PyObject *self, PyObject *args)
{
const char *e, *p;
char *q;
- size_t i, j;
+ Py_ssize_t i, j;
PyObject *u;
int tabsize = 8;
@@ -25,35 +25,31 @@ stringlib_expandtabs(PyObject *self, PyObject *args)
/* First pass: determine size of output string */
i = j = 0;
e = STRINGLIB_STR(self) + STRINGLIB_LEN(self);
- for (p = STRINGLIB_STR(self); p < e; p++)
+ for (p = STRINGLIB_STR(self); p < e; p++) {
if (*p == '\t') {
if (tabsize > 0) {
- j += tabsize - (j % tabsize);
- if (j > PY_SSIZE_T_MAX) {
- PyErr_SetString(PyExc_OverflowError,
- "result is too long");
- return NULL;
- }
+ Py_ssize_t incr = tabsize - (j % tabsize);
+ if (j > PY_SSIZE_T_MAX - incr)
+ goto overflow;
+ j += incr;
}
}
else {
+ if (j > PY_SSIZE_T_MAX - 1)
+ goto overflow;
j++;
if (*p == '\n' || *p == '\r') {
+ if (i > PY_SSIZE_T_MAX - j)
+ goto overflow;
i += j;
j = 0;
- if (i > PY_SSIZE_T_MAX) {
- PyErr_SetString(PyExc_OverflowError,
- "result is too long");
- return NULL;
- }
}
}
-
- if ((i + j) > PY_SSIZE_T_MAX) {
- PyErr_SetString(PyExc_OverflowError, "result is too long");
- return NULL;
}
+ if (i > PY_SSIZE_T_MAX - j)
+ goto overflow;
+
/* Second pass: create output string and fill it */
u = STRINGLIB_NEW(NULL, i + j);
if (!u)
@@ -62,7 +58,7 @@ stringlib_expandtabs(PyObject *self, PyObject *args)
j = 0;
q = STRINGLIB_STR(u);
- for (p = STRINGLIB_STR(self); p < e; p++)
+ for (p = STRINGLIB_STR(self); p < e; p++) {
if (*p == '\t') {
if (tabsize > 0) {
i = tabsize - (j % tabsize);
@@ -77,8 +73,12 @@ stringlib_expandtabs(PyObject *self, PyObject *args)
if (*p == '\n' || *p == '\r')
j = 0;
}
-
+ }
+
return u;
+ overflow:
+ PyErr_SetString(PyExc_OverflowError, "result too long");
+ return NULL;
}
Py_LOCAL_INLINE(PyObject *)
diff --git a/Objects/stringobject.c b/Objects/stringobject.c
index ab377dd..0b6d36c 100644
--- a/Objects/stringobject.c
+++ b/Objects/stringobject.c
@@ -726,6 +726,10 @@ PyObject *PyString_DecodeEscape(const char *s,
errors);
goto failed;
}
+ /* skip \x */
+ if (s < end && isxdigit(Py_CHARMASK(s[0])))
+ s++; /* and a hexdigit */
+ break;
#ifndef Py_USING_UNICODE
case 'u':
case 'U':
@@ -744,8 +748,8 @@ PyObject *PyString_DecodeEscape(const char *s,
UTF-8 bytes may follow. */
}
}
- if (p-buf < newlen && _PyString_Resize(&v, p - buf))
- goto failed;
+ if (p-buf < newlen)
+ _PyString_Resize(&v, p - buf); /* v is cleared on error */
return v;
failed:
Py_DECREF(v);
@@ -878,9 +882,9 @@ string_print(PyStringObject *op, FILE *fp, int flags)
size -= chunk_size;
}
#ifdef __VMS
- if (size) fwrite(data, (int)size, 1, fp);
+ if (size) fwrite(data, (size_t)size, 1, fp);
#else
- fwrite(data, 1, (int)size, fp);
+ fwrite(data, 1, (size_t)size, fp);
#endif
Py_END_ALLOW_THREADS
return 0;
@@ -1251,7 +1255,6 @@ _PyString_Eq(PyObject *o1, PyObject *o2)
PyStringObject *a = (PyStringObject*) o1;
PyStringObject *b = (PyStringObject*) o2;
return Py_SIZE(a) == Py_SIZE(b)
- && *a->ob_sval == *b->ob_sval
&& memcmp(a->ob_sval, b->ob_sval, Py_SIZE(a)) == 0;
}
@@ -2328,7 +2331,7 @@ return_self(PyStringObject *self)
}
Py_LOCAL_INLINE(Py_ssize_t)
-countchar(const char *target, int target_len, char c, Py_ssize_t maxcount)
+countchar(const char *target, Py_ssize_t target_len, char c, Py_ssize_t maxcount)
{
Py_ssize_t count=0;
const char *start=target;
@@ -3088,24 +3091,25 @@ string_expandtabs(PyStringObject *self, PyObject *args)
i = 0; /* chars up to and including most recent \n or \r */
j = 0; /* chars since most recent \n or \r (use in tab calculations) */
e = PyString_AS_STRING(self) + PyString_GET_SIZE(self); /* end of input */
- for (p = PyString_AS_STRING(self); p < e; p++)
- if (*p == '\t') {
- if (tabsize > 0) {
- incr = tabsize - (j % tabsize);
- if (j > PY_SSIZE_T_MAX - incr)
- goto overflow1;
- j += incr;
+ for (p = PyString_AS_STRING(self); p < e; p++) {
+ if (*p == '\t') {
+ if (tabsize > 0) {
+ incr = tabsize - (j % tabsize);
+ if (j > PY_SSIZE_T_MAX - incr)
+ goto overflow1;
+ j += incr;
+ }
}
- }
- else {
- if (j > PY_SSIZE_T_MAX - 1)
- goto overflow1;
- j++;
- if (*p == '\n' || *p == '\r') {
- if (i > PY_SSIZE_T_MAX - j)
+ else {
+ if (j > PY_SSIZE_T_MAX - 1)
goto overflow1;
- i += j;
- j = 0;
+ j++;
+ if (*p == '\n' || *p == '\r') {
+ if (i > PY_SSIZE_T_MAX - j)
+ goto overflow1;
+ i += j;
+ j = 0;
+ }
}
}
@@ -3121,25 +3125,26 @@ string_expandtabs(PyStringObject *self, PyObject *args)
q = PyString_AS_STRING(u); /* next output char */
qe = PyString_AS_STRING(u) + PyString_GET_SIZE(u); /* end of output */
- for (p = PyString_AS_STRING(self); p < e; p++)
- if (*p == '\t') {
- if (tabsize > 0) {
- i = tabsize - (j % tabsize);
- j += i;
- while (i--) {
- if (q >= qe)
- goto overflow2;
- *q++ = ' ';
+ for (p = PyString_AS_STRING(self); p < e; p++) {
+ if (*p == '\t') {
+ if (tabsize > 0) {
+ i = tabsize - (j % tabsize);
+ j += i;
+ while (i--) {
+ if (q >= qe)
+ goto overflow2;
+ *q++ = ' ';
+ }
}
}
- }
- else {
- if (q >= qe)
- goto overflow2;
- *q++ = *p;
- j++;
- if (*p == '\n' || *p == '\r')
- j = 0;
+ else {
+ if (q >= qe)
+ goto overflow2;
+ *q++ = *p;
+ j++;
+ if (*p == '\n' || *p == '\r')
+ j = 0;
+ }
}
return u;
@@ -3545,7 +3550,7 @@ string_istitle(PyStringObject *self, PyObject *uncased)
PyDoc_STRVAR(splitlines__doc__,
-"S.splitlines([keepends]) -> list of strings\n\
+"S.splitlines(keepends=False) -> list of strings\n\
\n\
Return a list of the lines in S, breaking at line boundaries.\n\
Line breaks are not included in the resulting list unless keepends\n\
@@ -3799,7 +3804,7 @@ PyTypeObject PyBaseString_Type = {
};
PyDoc_STRVAR(string_doc,
-"str(object) -> string\n\
+"str(object='') -> string\n\
\n\
Return a nice string representation of the object.\n\
If the argument is a string, the return value is the same object.");
@@ -3855,8 +3860,7 @@ PyString_Concat(register PyObject **pv, register PyObject *w)
if (*pv == NULL)
return;
if (w == NULL || !PyString_Check(*pv)) {
- Py_DECREF(*pv);
- *pv = NULL;
+ Py_CLEAR(*pv);
return;
}
v = string_concat((PyStringObject *) *pv, w);
@@ -4254,8 +4258,8 @@ PyString_Format(PyObject *format, PyObject *args)
arglen = -1;
argidx = -2;
}
- if (Py_TYPE(args)->tp_as_mapping && !PyTuple_Check(args) &&
- !PyObject_TypeCheck(args, &PyBaseString_Type))
+ if (Py_TYPE(args)->tp_as_mapping && Py_TYPE(args)->tp_as_mapping->mp_subscript &&
+ !PyTuple_Check(args) && !PyObject_TypeCheck(args, &PyBaseString_Type))
dict = args;
while (--fmtcnt >= 0) {
if (*fmt != '%') {
@@ -4355,7 +4359,9 @@ PyString_Format(PyObject *format, PyObject *args)
"* wants int");
goto error;
}
- width = PyInt_AsLong(v);
+ width = PyInt_AsSsize_t(v);
+ if (width == -1 && PyErr_Occurred())
+ goto error;
if (width < 0) {
flags |= F_LJUST;
width = -width;
@@ -4369,7 +4375,7 @@ PyString_Format(PyObject *format, PyObject *args)
c = Py_CHARMASK(*fmt++);
if (!isdigit(c))
break;
- if ((width*10) / 10 != width) {
+ if (width > (PY_SSIZE_T_MAX - ((int)c - '0')) / 10) {
PyErr_SetString(
PyExc_ValueError,
"width too big");
@@ -4392,7 +4398,9 @@ PyString_Format(PyObject *format, PyObject *args)
"* wants int");
goto error;
}
- prec = PyInt_AsLong(v);
+ prec = _PyInt_AsInt(v);
+ if (prec == -1 && PyErr_Occurred())
+ goto error;
if (prec < 0)
prec = 0;
if (--fmtcnt >= 0)
@@ -4404,7 +4412,7 @@ PyString_Format(PyObject *format, PyObject *args)
c = Py_CHARMASK(*fmt++);
if (!isdigit(c))
break;
- if ((prec*10) / 10 != prec) {
+ if (prec > (INT_MAX - ((int)c - '0')) / 10) {
PyErr_SetString(
PyExc_ValueError,
"prec too big");
@@ -4489,7 +4497,10 @@ PyString_Format(PyObject *format, PyObject *args)
}
else {
iobj = PyNumber_Int(v);
- if (iobj==NULL) iobj = PyNumber_Long(v);
+ if (iobj==NULL) {
+ PyErr_Clear();
+ iobj = PyNumber_Long(v);
+ }
}
if (iobj!=NULL) {
if (PyInt_Check(iobj)) {
@@ -4779,12 +4790,9 @@ void
PyString_Fini(void)
{
int i;
- for (i = 0; i < UCHAR_MAX + 1; i++) {
- Py_XDECREF(characters[i]);
- characters[i] = NULL;
- }
- Py_XDECREF(nullstring);
- nullstring = NULL;
+ for (i = 0; i < UCHAR_MAX + 1; i++)
+ Py_CLEAR(characters[i]);
+ Py_CLEAR(nullstring);
}
void _Py_ReleaseInternedStrings(void)
@@ -4834,6 +4842,5 @@ void _Py_ReleaseInternedStrings(void)
"mortal/immortal\n", mortal_size, immortal_size);
Py_DECREF(keys);
PyDict_Clear(interned);
- Py_DECREF(interned);
- interned = NULL;
+ Py_CLEAR(interned);
}
diff --git a/Objects/tupleobject.c b/Objects/tupleobject.c
index 3249ccc..a3c185e 100644
--- a/Objects/tupleobject.c
+++ b/Objects/tupleobject.c
@@ -192,8 +192,10 @@ PyTuple_Pack(Py_ssize_t n, ...)
va_start(vargs, n);
result = PyTuple_New(n);
- if (result == NULL)
+ if (result == NULL) {
+ va_end(vargs);
return NULL;
+ }
items = ((PyTupleObject *)result)->ob_item;
for (i = 0; i < n; i++) {
o = va_arg(vargs, PyObject *);
@@ -879,8 +881,7 @@ _PyTuple_Resize(PyObject **pv, Py_ssize_t newsize)
_Py_ForgetReference((PyObject *) v);
/* DECREF items deleted by shrinkage */
for (i = newsize; i < oldsize; i++) {
- Py_XDECREF(v->ob_item[i]);
- v->ob_item[i] = NULL;
+ Py_CLEAR(v->ob_item[i]);
}
sv = PyObject_GC_Resize(PyTupleObject, v, newsize);
if (sv == NULL) {
@@ -926,8 +927,7 @@ PyTuple_Fini(void)
#if PyTuple_MAXSAVESIZE > 0
/* empty tuples are used all over the place and applications may
* rely on the fact that an empty tuple is a singleton. */
- Py_XDECREF(free_list[0]);
- free_list[0] = NULL;
+ Py_CLEAR(free_list[0]);
(void)PyTuple_ClearFreeList();
#endif
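The PyTuple_Pack hunk above adds the missing va_end() on the early-return path. A small self-contained sketch of the general rule, with hypothetical names: every va_start() must be matched by va_end() on every exit from the function.

    #include <stdarg.h>
    #include <stddef.h>

    /* Hypothetical variadic helper: sums n ints, rejecting negatives. */
    static int
    sum_ints(size_t n, ...)
    {
        va_list args;
        int total = 0;
        size_t i;

        va_start(args, n);
        for (i = 0; i < n; i++) {
            int v = va_arg(args, int);
            if (v < 0) {
                va_end(args);   /* error path still releases the va_list */
                return -1;
            }
            total += v;
        }
        va_end(args);           /* normal path */
        return total;
    }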
diff --git a/Objects/typeobject.c b/Objects/typeobject.c
index 3db02ed..be04c9e 100644
--- a/Objects/typeobject.c
+++ b/Objects/typeobject.c
@@ -225,6 +225,7 @@ static int
type_set_name(PyTypeObject *type, PyObject *value, void *context)
{
PyHeapTypeObject* et;
+ PyObject *tmp;
if (!(type->tp_flags & Py_TPFLAGS_HEAPTYPE)) {
PyErr_Format(PyExc_TypeError,
@@ -253,10 +254,13 @@ type_set_name(PyTypeObject *type, PyObject *value, void *context)
Py_INCREF(value);
- Py_DECREF(et->ht_name);
+ /* Wait until et is in a sane state before Py_DECREF'ing the old et->ht_name
+ value. (Bug #16447.) */
+ tmp = et->ht_name;
et->ht_name = value;
type->tp_name = PyString_AS_STRING(value);
+ Py_DECREF(tmp);
return 0;
}
@@ -327,11 +331,15 @@ type_set_abstractmethods(PyTypeObject *type, PyObject *value, void *context)
abc.ABCMeta.__new__, so this function doesn't do anything
special to update subclasses.
*/
- int res;
+ int abstract, res;
if (value != NULL) {
+ abstract = PyObject_IsTrue(value);
+ if (abstract < 0)
+ return -1;
res = PyDict_SetItemString(type->tp_dict, "__abstractmethods__", value);
}
else {
+ abstract = 0;
res = PyDict_DelItemString(type->tp_dict, "__abstractmethods__");
if (res && PyErr_ExceptionMatches(PyExc_KeyError)) {
PyErr_SetString(PyExc_AttributeError, "__abstractmethods__");
@@ -340,12 +348,10 @@ type_set_abstractmethods(PyTypeObject *type, PyObject *value, void *context)
}
if (res == 0) {
PyType_Modified(type);
- if (value && PyObject_IsTrue(value)) {
+ if (abstract)
type->tp_flags |= Py_TPFLAGS_IS_ABSTRACT;
- }
- else {
+ else
type->tp_flags &= ~Py_TPFLAGS_IS_ABSTRACT;
- }
}
return res;
}
@@ -684,8 +690,10 @@ type_repr(PyTypeObject *type)
mod = NULL;
}
name = type_name(type, NULL);
- if (name == NULL)
+ if (name == NULL) {
+ Py_XDECREF(mod);
return NULL;
+ }
if (type->tp_flags & Py_TPFLAGS_HEAPTYPE)
kind = "class";
@@ -876,8 +884,13 @@ subtype_clear(PyObject *self)
assert(base);
}
- /* There's no need to clear the instance dict (if any);
- the collector will call its tp_clear handler. */
+ /* Clear the instance dict (if any), to break cycles involving only
+ __dict__ slots (as in the case 'self.__dict__ is self'). */
+ if (type->tp_dictoffset != base->tp_dictoffset) {
+ PyObject **dictptr = _PyObject_GetDictPtr(self);
+ if (dictptr && *dictptr)
+ Py_CLEAR(*dictptr);
+ }
if (baseclear)
return baseclear(self);
@@ -889,6 +902,7 @@ subtype_dealloc(PyObject *self)
{
PyTypeObject *type, *base;
destructor basedealloc;
+ PyThreadState *tstate = PyThreadState_GET();
/* Extract the type; we expect it to be a heap type */
type = Py_TYPE(self);
@@ -938,8 +952,10 @@ subtype_dealloc(PyObject *self)
/* See explanation at end of function for full disclosure */
PyObject_GC_UnTrack(self);
++_PyTrash_delete_nesting;
+ ++ tstate->trash_delete_nesting;
Py_TRASHCAN_SAFE_BEGIN(self);
--_PyTrash_delete_nesting;
+ -- tstate->trash_delete_nesting;
/* DO NOT restore GC tracking at this point. weakref callbacks
* (if any, and whether directly here or indirectly in something we
* call) may trigger GC, and if self is tracked at that point, it
@@ -1018,8 +1034,10 @@ subtype_dealloc(PyObject *self)
endlabel:
++_PyTrash_delete_nesting;
+ ++ tstate->trash_delete_nesting;
Py_TRASHCAN_SAFE_END(self);
--_PyTrash_delete_nesting;
+ -- tstate->trash_delete_nesting;
/* Explanation of the weirdness around the trashcan macros:
@@ -2525,6 +2543,13 @@ type_getattro(PyTypeObject *type, PyObject *name)
PyObject *meta_attribute, *attribute;
descrgetfunc meta_get;
+ if (!PyString_Check(name)) {
+ PyErr_Format(PyExc_TypeError,
+ "attribute name must be string, not '%.200s'",
+ name->ob_type->tp_name);
+ return NULL;
+ }
+
/* Initialize this type (we'll assume the metatype is initialized) */
if (type->tp_dict == NULL) {
if (PyType_Ready(type) < 0)
@@ -2876,14 +2901,14 @@ object_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
type->tp_init != object_init)
{
err = PyErr_WarnEx(PyExc_DeprecationWarning,
- "object.__new__() takes no parameters",
+ "object() takes no parameters",
1);
}
else if (type->tp_new != object_new ||
type->tp_init == object_init)
{
PyErr_SetString(PyExc_TypeError,
- "object.__new__() takes no parameters");
+ "object() takes no parameters");
err = -1;
}
}
@@ -2963,8 +2988,10 @@ object_repr(PyObject *self)
mod = NULL;
}
name = type_name(type, NULL);
- if (name == NULL)
+ if (name == NULL) {
+ Py_XDECREF(mod);
return NULL;
+ }
if (mod != NULL && strcmp(PyString_AS_STRING(mod), "__builtin__"))
rtn = PyString_FromFormat("<%s.%s object at %p>",
PyString_AS_STRING(mod),
@@ -2984,7 +3011,7 @@ object_str(PyObject *self)
unaryfunc f;
f = Py_TYPE(self)->tp_repr;
- if (f == NULL || f == object_str)
+ if (f == NULL)
f = object_repr;
return f(self);
}
@@ -3553,6 +3580,7 @@ add_methods(PyTypeObject *type, PyMethodDef *meth)
for (; meth->ml_name != NULL; meth++) {
PyObject *descr;
+ int err;
if (PyDict_GetItemString(dict, meth->ml_name) &&
!(meth->ml_flags & METH_COEXIST))
continue;
@@ -3576,9 +3604,10 @@ add_methods(PyTypeObject *type, PyMethodDef *meth)
}
if (descr == NULL)
return -1;
- if (PyDict_SetItemString(dict, meth->ml_name, descr) < 0)
- return -1;
+ err = PyDict_SetItemString(dict, meth->ml_name, descr);
Py_DECREF(descr);
+ if (err < 0)
+ return -1;
}
return 0;
}
@@ -5770,15 +5799,16 @@ slot_tp_del(PyObject *self)
}
-/* Table mapping __foo__ names to tp_foo offsets and slot_tp_foo wrapper
- functions. The offsets here are relative to the 'PyHeapTypeObject'
- structure, which incorporates the additional structures used for numbers,
- sequences and mappings.
- Note that multiple names may map to the same slot (e.g. __eq__,
- __ne__ etc. all map to tp_richcompare) and one name may map to multiple
- slots (e.g. __str__ affects tp_str as well as tp_repr). The table is
- terminated with an all-zero entry. (This table is further initialized and
- sorted in init_slotdefs() below.) */
+/*
+Table mapping __foo__ names to tp_foo offsets and slot_tp_foo wrapper functions.
+
+The table is ordered by offsets relative to the 'PyHeapTypeObject' structure,
+which incorporates the additional structures used for numbers, sequences and
+mappings. Note that multiple names may map to the same slot (e.g. __eq__,
+__ne__ etc. all map to tp_richcompare) and one name may map to multiple slots
+(e.g. __str__ affects tp_str as well as tp_repr). The table is terminated with
+an all-zero entry. (This table is further initialized in init_slotdefs().)
+*/
typedef struct wrapperbase slotdef;
@@ -5828,57 +5858,57 @@ typedef struct wrapperbase slotdef;
"x." NAME "(y) <==> " DOC)
static slotdef slotdefs[] = {
- SQSLOT("__len__", sq_length, slot_sq_length, wrap_lenfunc,
- "x.__len__() <==> len(x)"),
- /* Heap types defining __add__/__mul__ have sq_concat/sq_repeat == NULL.
- The logic in abstract.c always falls back to nb_add/nb_multiply in
- this case. Defining both the nb_* and the sq_* slots to call the
- user-defined methods has unexpected side-effects, as shown by
- test_descr.notimplemented() */
- SQSLOT("__add__", sq_concat, NULL, wrap_binaryfunc,
- "x.__add__(y) <==> x+y"),
- SQSLOT("__mul__", sq_repeat, NULL, wrap_indexargfunc,
- "x.__mul__(n) <==> x*n"),
- SQSLOT("__rmul__", sq_repeat, NULL, wrap_indexargfunc,
- "x.__rmul__(n) <==> n*x"),
- SQSLOT("__getitem__", sq_item, slot_sq_item, wrap_sq_item,
- "x.__getitem__(y) <==> x[y]"),
- SQSLOT("__getslice__", sq_slice, slot_sq_slice, wrap_ssizessizeargfunc,
- "x.__getslice__(i, j) <==> x[i:j]\n\
- \n\
- Use of negative indices is not supported."),
- SQSLOT("__setitem__", sq_ass_item, slot_sq_ass_item, wrap_sq_setitem,
- "x.__setitem__(i, y) <==> x[i]=y"),
- SQSLOT("__delitem__", sq_ass_item, slot_sq_ass_item, wrap_sq_delitem,
- "x.__delitem__(y) <==> del x[y]"),
- SQSLOT("__setslice__", sq_ass_slice, slot_sq_ass_slice,
- wrap_ssizessizeobjargproc,
- "x.__setslice__(i, j, y) <==> x[i:j]=y\n\
- \n\
- Use of negative indices is not supported."),
- SQSLOT("__delslice__", sq_ass_slice, slot_sq_ass_slice, wrap_delslice,
- "x.__delslice__(i, j) <==> del x[i:j]\n\
- \n\
- Use of negative indices is not supported."),
- SQSLOT("__contains__", sq_contains, slot_sq_contains, wrap_objobjproc,
- "x.__contains__(y) <==> y in x"),
- SQSLOT("__iadd__", sq_inplace_concat, NULL,
- wrap_binaryfunc, "x.__iadd__(y) <==> x+=y"),
- SQSLOT("__imul__", sq_inplace_repeat, NULL,
- wrap_indexargfunc, "x.__imul__(y) <==> x*=y"),
-
- MPSLOT("__len__", mp_length, slot_mp_length, wrap_lenfunc,
- "x.__len__() <==> len(x)"),
- MPSLOT("__getitem__", mp_subscript, slot_mp_subscript,
- wrap_binaryfunc,
- "x.__getitem__(y) <==> x[y]"),
- MPSLOT("__setitem__", mp_ass_subscript, slot_mp_ass_subscript,
- wrap_objobjargproc,
- "x.__setitem__(i, y) <==> x[i]=y"),
- MPSLOT("__delitem__", mp_ass_subscript, slot_mp_ass_subscript,
- wrap_delitem,
- "x.__delitem__(y) <==> del x[y]"),
-
+ TPSLOT("__str__", tp_print, NULL, NULL, ""),
+ TPSLOT("__repr__", tp_print, NULL, NULL, ""),
+ TPSLOT("__getattribute__", tp_getattr, NULL, NULL, ""),
+ TPSLOT("__getattr__", tp_getattr, NULL, NULL, ""),
+ TPSLOT("__setattr__", tp_setattr, NULL, NULL, ""),
+ TPSLOT("__delattr__", tp_setattr, NULL, NULL, ""),
+ TPSLOT("__cmp__", tp_compare, _PyObject_SlotCompare, wrap_cmpfunc,
+ "x.__cmp__(y) <==> cmp(x,y)"),
+ TPSLOT("__repr__", tp_repr, slot_tp_repr, wrap_unaryfunc,
+ "x.__repr__() <==> repr(x)"),
+ TPSLOT("__hash__", tp_hash, slot_tp_hash, wrap_hashfunc,
+ "x.__hash__() <==> hash(x)"),
+ FLSLOT("__call__", tp_call, slot_tp_call, (wrapperfunc)wrap_call,
+ "x.__call__(...) <==> x(...)", PyWrapperFlag_KEYWORDS),
+ TPSLOT("__str__", tp_str, slot_tp_str, wrap_unaryfunc,
+ "x.__str__() <==> str(x)"),
+ TPSLOT("__getattribute__", tp_getattro, slot_tp_getattr_hook,
+ wrap_binaryfunc, "x.__getattribute__('name') <==> x.name"),
+ TPSLOT("__getattr__", tp_getattro, slot_tp_getattr_hook, NULL, ""),
+ TPSLOT("__setattr__", tp_setattro, slot_tp_setattro, wrap_setattr,
+ "x.__setattr__('name', value) <==> x.name = value"),
+ TPSLOT("__delattr__", tp_setattro, slot_tp_setattro, wrap_delattr,
+ "x.__delattr__('name') <==> del x.name"),
+ TPSLOT("__lt__", tp_richcompare, slot_tp_richcompare, richcmp_lt,
+ "x.__lt__(y) <==> x<y"),
+ TPSLOT("__le__", tp_richcompare, slot_tp_richcompare, richcmp_le,
+ "x.__le__(y) <==> x<=y"),
+ TPSLOT("__eq__", tp_richcompare, slot_tp_richcompare, richcmp_eq,
+ "x.__eq__(y) <==> x==y"),
+ TPSLOT("__ne__", tp_richcompare, slot_tp_richcompare, richcmp_ne,
+ "x.__ne__(y) <==> x!=y"),
+ TPSLOT("__gt__", tp_richcompare, slot_tp_richcompare, richcmp_gt,
+ "x.__gt__(y) <==> x>y"),
+ TPSLOT("__ge__", tp_richcompare, slot_tp_richcompare, richcmp_ge,
+ "x.__ge__(y) <==> x>=y"),
+ TPSLOT("__iter__", tp_iter, slot_tp_iter, wrap_unaryfunc,
+ "x.__iter__() <==> iter(x)"),
+ TPSLOT("next", tp_iternext, slot_tp_iternext, wrap_next,
+ "x.next() -> the next value, or raise StopIteration"),
+ TPSLOT("__get__", tp_descr_get, slot_tp_descr_get, wrap_descr_get,
+ "descr.__get__(obj[, type]) -> value"),
+ TPSLOT("__set__", tp_descr_set, slot_tp_descr_set, wrap_descr_set,
+ "descr.__set__(obj, value)"),
+ TPSLOT("__delete__", tp_descr_set, slot_tp_descr_set,
+ wrap_descr_delete, "descr.__delete__(obj)"),
+ FLSLOT("__init__", tp_init, slot_tp_init, (wrapperfunc)wrap_init,
+ "x.__init__(...) initializes x; "
+ "see help(type(x)) for signature",
+ PyWrapperFlag_KEYWORDS),
+ TPSLOT("__new__", tp_new, slot_tp_new, NULL, ""),
+ TPSLOT("__del__", tp_del, slot_tp_del, NULL, ""),
BINSLOT("__add__", nb_add, slot_nb_add,
"+"),
RBINSLOT("__radd__", nb_add, slot_nb_add,
@@ -5936,8 +5966,6 @@ static slotdef slotdefs[] = {
"oct(x)"),
UNSLOT("__hex__", nb_hex, slot_nb_hex, wrap_unaryfunc,
"hex(x)"),
- NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc,
- "x[y:z] <==> x[y.__index__():z.__index__()]"),
IBSLOT("__iadd__", nb_inplace_add, slot_nb_inplace_add,
wrap_binaryfunc, "+="),
IBSLOT("__isub__", nb_inplace_subtract, slot_nb_inplace_subtract,
@@ -5968,58 +5996,57 @@ static slotdef slotdefs[] = {
slot_nb_inplace_floor_divide, wrap_binaryfunc, "//"),
IBSLOT("__itruediv__", nb_inplace_true_divide,
slot_nb_inplace_true_divide, wrap_binaryfunc, "/"),
-
- TPSLOT("__str__", tp_str, slot_tp_str, wrap_unaryfunc,
- "x.__str__() <==> str(x)"),
- TPSLOT("__str__", tp_print, NULL, NULL, ""),
- TPSLOT("__repr__", tp_repr, slot_tp_repr, wrap_unaryfunc,
- "x.__repr__() <==> repr(x)"),
- TPSLOT("__repr__", tp_print, NULL, NULL, ""),
- TPSLOT("__cmp__", tp_compare, _PyObject_SlotCompare, wrap_cmpfunc,
- "x.__cmp__(y) <==> cmp(x,y)"),
- TPSLOT("__hash__", tp_hash, slot_tp_hash, wrap_hashfunc,
- "x.__hash__() <==> hash(x)"),
- FLSLOT("__call__", tp_call, slot_tp_call, (wrapperfunc)wrap_call,
- "x.__call__(...) <==> x(...)", PyWrapperFlag_KEYWORDS),
- TPSLOT("__getattribute__", tp_getattro, slot_tp_getattr_hook,
- wrap_binaryfunc, "x.__getattribute__('name') <==> x.name"),
- TPSLOT("__getattribute__", tp_getattr, NULL, NULL, ""),
- TPSLOT("__getattr__", tp_getattro, slot_tp_getattr_hook, NULL, ""),
- TPSLOT("__getattr__", tp_getattr, NULL, NULL, ""),
- TPSLOT("__setattr__", tp_setattro, slot_tp_setattro, wrap_setattr,
- "x.__setattr__('name', value) <==> x.name = value"),
- TPSLOT("__setattr__", tp_setattr, NULL, NULL, ""),
- TPSLOT("__delattr__", tp_setattro, slot_tp_setattro, wrap_delattr,
- "x.__delattr__('name') <==> del x.name"),
- TPSLOT("__delattr__", tp_setattr, NULL, NULL, ""),
- TPSLOT("__lt__", tp_richcompare, slot_tp_richcompare, richcmp_lt,
- "x.__lt__(y) <==> x<y"),
- TPSLOT("__le__", tp_richcompare, slot_tp_richcompare, richcmp_le,
- "x.__le__(y) <==> x<=y"),
- TPSLOT("__eq__", tp_richcompare, slot_tp_richcompare, richcmp_eq,
- "x.__eq__(y) <==> x==y"),
- TPSLOT("__ne__", tp_richcompare, slot_tp_richcompare, richcmp_ne,
- "x.__ne__(y) <==> x!=y"),
- TPSLOT("__gt__", tp_richcompare, slot_tp_richcompare, richcmp_gt,
- "x.__gt__(y) <==> x>y"),
- TPSLOT("__ge__", tp_richcompare, slot_tp_richcompare, richcmp_ge,
- "x.__ge__(y) <==> x>=y"),
- TPSLOT("__iter__", tp_iter, slot_tp_iter, wrap_unaryfunc,
- "x.__iter__() <==> iter(x)"),
- TPSLOT("next", tp_iternext, slot_tp_iternext, wrap_next,
- "x.next() -> the next value, or raise StopIteration"),
- TPSLOT("__get__", tp_descr_get, slot_tp_descr_get, wrap_descr_get,
- "descr.__get__(obj[, type]) -> value"),
- TPSLOT("__set__", tp_descr_set, slot_tp_descr_set, wrap_descr_set,
- "descr.__set__(obj, value)"),
- TPSLOT("__delete__", tp_descr_set, slot_tp_descr_set,
- wrap_descr_delete, "descr.__delete__(obj)"),
- FLSLOT("__init__", tp_init, slot_tp_init, (wrapperfunc)wrap_init,
- "x.__init__(...) initializes x; "
- "see help(type(x)) for signature",
- PyWrapperFlag_KEYWORDS),
- TPSLOT("__new__", tp_new, slot_tp_new, NULL, ""),
- TPSLOT("__del__", tp_del, slot_tp_del, NULL, ""),
+ NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc,
+ "x[y:z] <==> x[y.__index__():z.__index__()]"),
+ MPSLOT("__len__", mp_length, slot_mp_length, wrap_lenfunc,
+ "x.__len__() <==> len(x)"),
+ MPSLOT("__getitem__", mp_subscript, slot_mp_subscript,
+ wrap_binaryfunc,
+ "x.__getitem__(y) <==> x[y]"),
+ MPSLOT("__setitem__", mp_ass_subscript, slot_mp_ass_subscript,
+ wrap_objobjargproc,
+ "x.__setitem__(i, y) <==> x[i]=y"),
+ MPSLOT("__delitem__", mp_ass_subscript, slot_mp_ass_subscript,
+ wrap_delitem,
+ "x.__delitem__(y) <==> del x[y]"),
+ SQSLOT("__len__", sq_length, slot_sq_length, wrap_lenfunc,
+ "x.__len__() <==> len(x)"),
+ /* Heap types defining __add__/__mul__ have sq_concat/sq_repeat == NULL.
+ The logic in abstract.c always falls back to nb_add/nb_multiply in
+ this case. Defining both the nb_* and the sq_* slots to call the
+ user-defined methods has unexpected side-effects, as shown by
+ test_descr.notimplemented() */
+ SQSLOT("__add__", sq_concat, NULL, wrap_binaryfunc,
+ "x.__add__(y) <==> x+y"),
+ SQSLOT("__mul__", sq_repeat, NULL, wrap_indexargfunc,
+ "x.__mul__(n) <==> x*n"),
+ SQSLOT("__rmul__", sq_repeat, NULL, wrap_indexargfunc,
+ "x.__rmul__(n) <==> n*x"),
+ SQSLOT("__getitem__", sq_item, slot_sq_item, wrap_sq_item,
+ "x.__getitem__(y) <==> x[y]"),
+ SQSLOT("__getslice__", sq_slice, slot_sq_slice, wrap_ssizessizeargfunc,
+ "x.__getslice__(i, j) <==> x[i:j]\n\
+ \n\
+ Use of negative indices is not supported."),
+ SQSLOT("__setitem__", sq_ass_item, slot_sq_ass_item, wrap_sq_setitem,
+ "x.__setitem__(i, y) <==> x[i]=y"),
+ SQSLOT("__delitem__", sq_ass_item, slot_sq_ass_item, wrap_sq_delitem,
+ "x.__delitem__(y) <==> del x[y]"),
+ SQSLOT("__setslice__", sq_ass_slice, slot_sq_ass_slice,
+ wrap_ssizessizeobjargproc,
+ "x.__setslice__(i, j, y) <==> x[i:j]=y\n\
+ \n\
+ Use of negative indices is not supported."),
+ SQSLOT("__delslice__", sq_ass_slice, slot_sq_ass_slice, wrap_delslice,
+ "x.__delslice__(i, j) <==> del x[i:j]\n\
+ \n\
+ Use of negative indices is not supported."),
+ SQSLOT("__contains__", sq_contains, slot_sq_contains, wrap_objobjproc,
+ "x.__contains__(y) <==> y in x"),
+ SQSLOT("__iadd__", sq_inplace_concat, NULL,
+ wrap_binaryfunc, "x.__iadd__(y) <==> x+=y"),
+ SQSLOT("__imul__", sq_inplace_repeat, NULL,
+ wrap_indexargfunc, "x.__imul__(y) <==> x*=y"),
{NULL}
};
@@ -6131,7 +6158,8 @@ update_one_slot(PyTypeObject *type, slotdef *p)
}
continue;
}
- if (Py_TYPE(descr) == &PyWrapperDescr_Type) {
+ if (Py_TYPE(descr) == &PyWrapperDescr_Type &&
+ ((PyWrapperDescrObject *)descr)->d_base->name_strobj == p->name_strobj) {
void **tptr = resolve_slotdups(type, p->name_strobj);
if (tptr == NULL || tptr == ptr)
generic = p->function;
@@ -6199,21 +6227,6 @@ update_slots_callback(PyTypeObject *type, void *data)
return 0;
}
-/* Comparison function for qsort() to compare slotdefs by their offset, and
- for equal offset by their address (to force a stable sort). */
-static int
-slotdef_cmp(const void *aa, const void *bb)
-{
- const slotdef *a = (const slotdef *)aa, *b = (const slotdef *)bb;
- int c = a->offset - b->offset;
- if (c != 0)
- return c;
- else
- /* Cannot use a-b, as this gives off_t,
- which may lose precision when converted to int. */
- return (a > b) ? 1 : (a < b) ? -1 : 0;
-}
-
/* Initialize the slotdefs table by adding interned string objects for the
names and sorting the entries. */
static void
@@ -6225,12 +6238,12 @@ init_slotdefs(void)
if (initialized)
return;
for (p = slotdefs; p->name; p++) {
+ /* Slots must be ordered by their offset in the PyHeapTypeObject. */
+ assert(!p[1].name || p->offset <= p[1].offset);
p->name_strobj = PyString_InternFromString(p->name);
if (!p->name_strobj)
Py_FatalError("Out of memory interning slotdef names");
}
- qsort((void *)slotdefs, (size_t)(p-slotdefs), sizeof(slotdef),
- slotdef_cmp);
initialized = 1;
}
@@ -6653,8 +6666,8 @@ super_init(PyObject *self, PyObject *args, PyObject *kwds)
}
PyDoc_STRVAR(super_doc,
-"super(type) -> unbound super object\n"
"super(type, obj) -> bound super object; requires isinstance(obj, type)\n"
+"super(type) -> unbound super object\n"
"super(type, type2) -> bound super object; requires issubclass(type2, type)\n"
"Typical use to call a cooperative superclass method:\n"
"class C(B):\n"
diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c
index 710bcf3..29f9bce 100644
--- a/Objects/unicodeobject.c
+++ b/Objects/unicodeobject.c
@@ -82,8 +82,9 @@ OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
/* --- Globals ------------------------------------------------------------
- The globals are initialized by the _PyUnicode_Init() API and should
- not be used before calling that API.
+NOTE: In the interpreter's initialization phase, some globals are currently
+ initialized dynamically as needed. In the process Unicode objects may
+ be created before the Unicode type is ready.
*/
@@ -93,15 +94,27 @@ extern "C" {
#endif
/* Free list for Unicode objects */
-static PyUnicodeObject *free_list;
-static int numfree;
+static PyUnicodeObject *free_list = NULL;
+static int numfree = 0;
/* The empty Unicode object is shared to improve performance. */
-static PyUnicodeObject *unicode_empty;
+static PyUnicodeObject *unicode_empty = NULL;
+
+#define _Py_RETURN_UNICODE_EMPTY() \
+ do { \
+ if (unicode_empty != NULL) \
+ Py_INCREF(unicode_empty); \
+ else { \
+ unicode_empty = _PyUnicode_New(0); \
+ if (unicode_empty != NULL) \
+ Py_INCREF(unicode_empty); \
+ } \
+ return (PyObject *)unicode_empty; \
+ } while (0)
/* Single character Unicode strings in the Latin-1 range are being
shared as well. */
-static PyUnicodeObject *unicode_latin1[256];
+static PyUnicodeObject *unicode_latin1[256] = {NULL};
/* Default encoding to use and assume when NULL is passed as encoding
parameter; it is initialized by _PyUnicode_Init().
@@ -110,7 +123,7 @@ static PyUnicodeObject *unicode_latin1[256];
PyUnicode_GetDefaultEncoding() APIs to access this global.
*/
-static char unicode_default_encoding[100];
+static char unicode_default_encoding[100 + 1] = "ascii";
/* Fast detection of the most frequent whitespace characters */
const unsigned char _Py_ascii_whitespace[] = {
@@ -204,7 +217,7 @@ PyUnicode_GetMax(void)
#define BLOOM_MASK unsigned long
-static BLOOM_MASK bloom_linebreak;
+static BLOOM_MASK bloom_linebreak = ~(BLOOM_MASK)0;
#define BLOOM_ADD(mask, ch) ((mask |= (1UL << ((ch) & (BLOOM_WIDTH - 1)))))
#define BLOOM(mask, ch) ((mask & (1UL << ((ch) & (BLOOM_WIDTH - 1)))))
@@ -448,10 +461,8 @@ PyObject *PyUnicode_FromUnicode(const Py_UNICODE *u,
if (u != NULL) {
/* Optimization for empty strings */
- if (size == 0 && unicode_empty != NULL) {
- Py_INCREF(unicode_empty);
- return (PyObject *)unicode_empty;
- }
+ if (size == 0)
+ _Py_RETURN_UNICODE_EMPTY();
/* Single character Unicode objects in the Latin-1 range are
shared when using this constructor */
@@ -497,10 +508,8 @@ PyObject *PyUnicode_FromStringAndSize(const char *u, Py_ssize_t size)
if (u != NULL) {
/* Optimization for empty strings */
- if (size == 0 && unicode_empty != NULL) {
- Py_INCREF(unicode_empty);
- return (PyObject *)unicode_empty;
- }
+ if (size == 0)
+ _Py_RETURN_UNICODE_EMPTY();
/* Single characters are shared when using this constructor.
Restrict to ASCII, since the input must be UTF-8. */
@@ -538,6 +547,37 @@ PyObject *PyUnicode_FromString(const char *u)
return PyUnicode_FromStringAndSize(u, size);
}
+/* _Py_UNICODE_NEXT is a private macro used to retrieve the character pointed
+ * by 'ptr', possibly combining surrogate pairs on narrow builds.
+ * 'ptr' and 'end' must be Py_UNICODE*, with 'ptr' pointing at the character
+ * that should be returned and 'end' pointing to the end of the buffer.
+ * ('end' is used on narrow builds to detect a lone surrogate at the
+ * end of the buffer that should be returned unchanged.)
+ * The ptr and end arguments should be side-effect free and ptr must be an lvalue.
+ * The type of the returned char is always Py_UCS4.
+ *
+ * Note: the macro advances ptr to the next char, so it might have side-effects
+ * (especially if used with other macros).
+ */
+
+/* helper macros used by _Py_UNICODE_NEXT */
+#define _Py_UNICODE_IS_HIGH_SURROGATE(ch) (0xD800 <= ch && ch <= 0xDBFF)
+#define _Py_UNICODE_IS_LOW_SURROGATE(ch) (0xDC00 <= ch && ch <= 0xDFFF)
+/* Join two surrogate characters and return a single Py_UCS4 value. */
+#define _Py_UNICODE_JOIN_SURROGATES(high, low) \
+ (((((Py_UCS4)(high) & 0x03FF) << 10) | \
+ ((Py_UCS4)(low) & 0x03FF)) + 0x10000)
+
+#ifdef Py_UNICODE_WIDE
+#define _Py_UNICODE_NEXT(ptr, end) *(ptr)++
+#else
+#define _Py_UNICODE_NEXT(ptr, end) \
+ (((_Py_UNICODE_IS_HIGH_SURROGATE(*(ptr)) && (ptr) < (end)) && \
+ _Py_UNICODE_IS_LOW_SURROGATE((ptr)[1])) ? \
+ ((ptr) += 2,_Py_UNICODE_JOIN_SURROGATES((ptr)[-2], (ptr)[-1])) : \
+ (Py_UCS4)*(ptr)++)
+#endif
+
#ifdef HAVE_WCHAR_H
#if (Py_UNICODE_SIZE == 2) && defined(SIZEOF_WCHAR_T) && (SIZEOF_WCHAR_T == 4)
@@ -731,8 +771,25 @@ PyUnicode_FromFormatV(const char *format, va_list vargs)
switch (*f) {
case 'c':
- (void)va_arg(count, int);
+ {
+ int ordinal = va_arg(count, int);
+#ifdef Py_UNICODE_WIDE
+ if (ordinal < 0 || ordinal > 0x10ffff) {
+ PyErr_SetString(PyExc_OverflowError,
+ "%c arg not in range(0x110000) "
+ "(wide Python build)");
+ goto fail;
+ }
+#else
+ if (ordinal < 0 || ordinal > 0xffff) {
+ PyErr_SetString(PyExc_OverflowError,
+ "%c arg not in range(0x10000) "
+ "(narrow Python build)");
+ goto fail;
+ }
+#endif
/* fall through... */
+ }
case '%':
n++;
break;
@@ -1162,13 +1219,10 @@ PyObject *PyUnicode_FromEncodedObject(register PyObject *obj,
}
/* Convert to Unicode */
- if (len == 0) {
- Py_INCREF(unicode_empty);
- v = (PyObject *)unicode_empty;
- }
- else
- v = PyUnicode_Decode(s, len, encoding, errors);
+ if (len == 0)
+ _Py_RETURN_UNICODE_EMPTY();
+ v = PyUnicode_Decode(s, len, encoding, errors);
return v;
onError:
@@ -1381,7 +1435,7 @@ int PyUnicode_SetDefaultEncoding(const char *encoding)
Py_DECREF(v);
strncpy(unicode_default_encoding,
encoding,
- sizeof(unicode_default_encoding));
+ sizeof(unicode_default_encoding) - 1);
return 0;
onError:
@@ -1617,6 +1671,7 @@ PyObject *PyUnicode_DecodeUTF7Stateful(const char *s,
(base64buffer >> (base64bits-16));
base64bits -= 16;
base64buffer &= (1 << base64bits) - 1; /* clear high bits */
+ assert(outCh <= 0xffff);
if (surrogate) {
/* expecting a second surrogate */
if (outCh >= 0xDC00 && outCh <= 0xDFFF) {
@@ -1683,6 +1738,7 @@ PyObject *PyUnicode_DecodeUTF7Stateful(const char *s,
inShift = 1;
shiftOutStart = p;
base64bits = 0;
+ base64buffer = 0;
}
}
else if (DECODE_DIRECT(ch)) { /* character decodes as itself */
@@ -1861,7 +1917,7 @@ char utf8_code_length[256] = {
illegal prefix. See RFC 3629 for details */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 00-0F */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -2217,7 +2273,7 @@ PyUnicode_DecodeUTF32Stateful(const char *s,
#endif
PyObject *errorHandler = NULL;
PyObject *exc = NULL;
-
+
q = (unsigned char *)s;
e = q + size;
@@ -2272,7 +2328,7 @@ PyUnicode_DecodeUTF32Stateful(const char *s,
/* On narrow builds we split characters outside the BMP into two
codepoints => count how much extra space we need. */
#ifndef Py_UNICODE_WIDE
- for (qq = q; qq < e; qq += 4)
+ for (qq = q; e - qq >= 4; qq += 4)
if (qq[iorder[2]] != 0 || qq[iorder[3]] != 0)
pairs++;
#endif
@@ -2564,9 +2620,12 @@ PyUnicode_DecodeUTF16Stateful(const char *s,
}
/* UTF-16 code pair: */
- if (q >= e) {
+ if (e - q < 2) {
+ q -= 2;
+ if (consumed)
+ break;
errmsg = "unexpected end of data";
- startinpos = (((const char *)q)-2)-starts;
+ startinpos = ((const char *)q)-starts;
endinpos = ((const char *)e)-starts;
goto utf16Error;
}
@@ -2729,7 +2788,6 @@ PyObject *PyUnicode_DecodeUnicodeEscape(const char *s,
Py_ssize_t startinpos;
Py_ssize_t endinpos;
Py_ssize_t outpos;
- int i;
PyUnicodeObject *v;
Py_UNICODE *p;
const char *end;
@@ -2815,29 +2873,19 @@ PyObject *PyUnicode_DecodeUnicodeEscape(const char *s,
message = "truncated \\UXXXXXXXX escape";
hexescape:
chr = 0;
- outpos = p-PyUnicode_AS_UNICODE(v);
- if (s+digits>end) {
- endinpos = size;
- if (unicode_decode_call_errorhandler(
- errors, &errorHandler,
- "unicodeescape", "end of string in escape sequence",
- starts, size, &startinpos, &endinpos, &exc, &s,
- &v, &outpos, &p))
- goto onError;
- goto nextByte;
- }
- for (i = 0; i < digits; ++i) {
- c = (unsigned char) s[i];
- if (!isxdigit(c)) {
- endinpos = (s+i+1)-starts;
- if (unicode_decode_call_errorhandler(
- errors, &errorHandler,
- "unicodeescape", message,
- starts, size, &startinpos, &endinpos, &exc, &s,
- &v, &outpos, &p))
- goto onError;
- goto nextByte;
+ if (end - s < digits) {
+ /* count only hex digits */
+ for (; s < end; ++s) {
+ c = (unsigned char)*s;
+ if (!Py_ISXDIGIT(c))
+ goto error;
}
+ goto error;
+ }
+ for (; digits--; ++s) {
+ c = (unsigned char)*s;
+ if (!Py_ISXDIGIT(c))
+ goto error;
chr = (chr<<4) & ~0xF;
if (c >= '0' && c <= '9')
chr += c - '0';
@@ -2846,7 +2894,6 @@ PyObject *PyUnicode_DecodeUnicodeEscape(const char *s,
else
chr += 10 + c - 'A';
}
- s += i;
if (chr == 0xffffffff && PyErr_Occurred())
/* _decoding_error will have already written into the
target buffer. */
@@ -2867,14 +2914,8 @@ PyObject *PyUnicode_DecodeUnicodeEscape(const char *s,
*p++ = 0xDC00 + (Py_UNICODE) (chr & 0x03FF);
#endif
} else {
- endinpos = s-starts;
- outpos = p-PyUnicode_AS_UNICODE(v);
- if (unicode_decode_call_errorhandler(
- errors, &errorHandler,
- "unicodeescape", "illegal Unicode character",
- starts, size, &startinpos, &endinpos, &exc, &s,
- &v, &outpos, &p))
- goto onError;
+ message = "illegal Unicode character";
+ goto error;
}
break;
@@ -2896,32 +2937,18 @@ PyObject *PyUnicode_DecodeUnicodeEscape(const char *s,
/* found a name. look it up in the unicode database */
message = "unknown Unicode character name";
s++;
- if (ucnhash_CAPI->getcode(NULL, start, (int)(s-start-1), &chr))
+ if (s - start - 1 <= INT_MAX &&
+ ucnhash_CAPI->getcode(NULL, start, (int)(s-start-1), &chr))
goto store;
}
}
- endinpos = s-starts;
- outpos = p-PyUnicode_AS_UNICODE(v);
- if (unicode_decode_call_errorhandler(
- errors, &errorHandler,
- "unicodeescape", message,
- starts, size, &startinpos, &endinpos, &exc, &s,
- &v, &outpos, &p))
- goto onError;
- break;
+ goto error;
default:
if (s > end) {
message = "\\ at end of string";
s--;
- endinpos = s-starts;
- outpos = p-PyUnicode_AS_UNICODE(v);
- if (unicode_decode_call_errorhandler(
- errors, &errorHandler,
- "unicodeescape", message,
- starts, size, &startinpos, &endinpos, &exc, &s,
- &v, &outpos, &p))
- goto onError;
+ goto error;
}
else {
*p++ = '\\';
@@ -2929,8 +2956,18 @@ PyObject *PyUnicode_DecodeUnicodeEscape(const char *s,
}
break;
}
- nextByte:
- ;
+ continue;
+
+ error:
+ endinpos = s-starts;
+ outpos = p-PyUnicode_AS_UNICODE(v);
+ if (unicode_decode_call_errorhandler(
+ errors, &errorHandler,
+ "unicodeescape", message,
+ starts, size, &startinpos, &endinpos, &exc, &s,
+ &v, &outpos, &p))
+ goto onError;
+ continue;
}
if (_PyUnicode_Resize(&v, p - PyUnicode_AS_UNICODE(v)) < 0)
goto onError;
@@ -3389,37 +3426,34 @@ PyObject *_PyUnicode_DecodeUnicodeInternal(const char *s,
end = s + size;
while (s < end) {
+ if (end-s < Py_UNICODE_SIZE) {
+ endinpos = end-starts;
+ reason = "truncated input";
+ goto error;
+ }
memcpy(p, s, sizeof(Py_UNICODE));
+#ifdef Py_UNICODE_WIDE
/* We have to sanity check the raw data, otherwise doom looms for
some malformed UCS-4 data. */
- if (
-#ifdef Py_UNICODE_WIDE
- *p > unimax || *p < 0 ||
-#endif
- end-s < Py_UNICODE_SIZE
- )
- {
- startinpos = s - starts;
- if (end-s < Py_UNICODE_SIZE) {
- endinpos = end-starts;
- reason = "truncated input";
- }
- else {
- endinpos = s - starts + Py_UNICODE_SIZE;
- reason = "illegal code point (> 0x10FFFF)";
- }
- outpos = p - PyUnicode_AS_UNICODE(v);
- if (unicode_decode_call_errorhandler(
- errors, &errorHandler,
- "unicode_internal", reason,
- starts, size, &startinpos, &endinpos, &exc, &s,
- &v, &outpos, &p)) {
- goto onError;
- }
+ if (*p > unimax || *p < 0) {
+ endinpos = s - starts + Py_UNICODE_SIZE;
+ reason = "illegal code point (> 0x10FFFF)";
+ goto error;
}
- else {
- p++;
- s += Py_UNICODE_SIZE;
+#endif
+ p++;
+ s += Py_UNICODE_SIZE;
+ continue;
+
+ error:
+ startinpos = s - starts;
+ outpos = p - PyUnicode_AS_UNICODE(v);
+ if (unicode_decode_call_errorhandler(
+ errors, &errorHandler,
+ "unicode_internal", reason,
+ starts, size, &startinpos, &endinpos, &exc, &s,
+ &v, &outpos, &p)) {
+ goto onError;
}
}
@@ -3486,8 +3520,7 @@ static void make_encode_exception(PyObject **exceptionObject,
goto onError;
return;
onError:
- Py_DECREF(*exceptionObject);
- *exceptionObject = NULL;
+ Py_CLEAR(*exceptionObject);
}
}
@@ -3641,26 +3674,22 @@ static PyObject *unicode_encode_ucs1(const Py_UNICODE *p,
case 4: /* xmlcharrefreplace */
respos = str-PyString_AS_STRING(res);
/* determine replacement size (temporarily (mis)uses p) */
- for (p = collstart, repsize = 0; p < collend; ++p) {
- if (*p<10)
+ for (p = collstart, repsize = 0; p < collend;) {
+ Py_UCS4 ch = _Py_UNICODE_NEXT(p, collend);
+ if (ch < 10)
repsize += 2+1+1;
- else if (*p<100)
+ else if (ch < 100)
repsize += 2+2+1;
- else if (*p<1000)
+ else if (ch < 1000)
repsize += 2+3+1;
- else if (*p<10000)
+ else if (ch < 10000)
repsize += 2+4+1;
-#ifndef Py_UNICODE_WIDE
- else
- repsize += 2+5+1;
-#else
- else if (*p<100000)
+ else if (ch < 100000)
repsize += 2+5+1;
- else if (*p<1000000)
+ else if (ch < 1000000)
repsize += 2+6+1;
else
repsize += 2+7+1;
-#endif
}
requiredsize = respos+repsize+(endp-collend);
if (requiredsize > ressize) {
@@ -3672,8 +3701,9 @@ static PyObject *unicode_encode_ucs1(const Py_UNICODE *p,
ressize = requiredsize;
}
/* generate replacement (temporarily (mis)uses p) */
- for (p = collstart; p < collend; ++p) {
- str += sprintf(str, "&#%d;", (int)*p);
+ for (p = collstart; p < collend;) {
+ Py_UCS4 ch = _Py_UNICODE_NEXT(p, collend);
+ str += sprintf(str, "&#%d;", (int)ch);
}
p = collend;
break;
@@ -4118,46 +4148,60 @@ PyObject *PyUnicode_DecodeCharmap(const char *s,
if (PyErr_ExceptionMatches(PyExc_LookupError)) {
/* No mapping found means: mapping is undefined. */
PyErr_Clear();
- x = Py_None;
- Py_INCREF(x);
+ goto Undefined;
} else
goto onError;
}
/* Apply mapping */
+ if (x == Py_None)
+ goto Undefined;
if (PyInt_Check(x)) {
long value = PyInt_AS_LONG(x);
- if (value < 0 || value > 65535) {
+ if (value == 0xFFFE)
+ goto Undefined;
+ if (value < 0 || value > 0x10FFFF) {
PyErr_SetString(PyExc_TypeError,
- "character mapping must be in range(65536)");
+ "character mapping must be in range(0x110000)");
Py_DECREF(x);
goto onError;
}
- *p++ = (Py_UNICODE)value;
- }
- else if (x == Py_None) {
- /* undefined mapping */
- outpos = p-PyUnicode_AS_UNICODE(v);
- startinpos = s-starts;
- endinpos = startinpos+1;
- if (unicode_decode_call_errorhandler(
- errors, &errorHandler,
- "charmap", "character maps to <undefined>",
- starts, size, &startinpos, &endinpos, &exc, &s,
- &v, &outpos, &p)) {
- Py_DECREF(x);
- goto onError;
+
+#ifndef Py_UNICODE_WIDE
+ if (value > 0xFFFF) {
+ /* see the code for 1-n mapping below */
+ if (extrachars < 2) {
+ /* resize first */
+ Py_ssize_t oldpos = p - PyUnicode_AS_UNICODE(v);
+ Py_ssize_t needed = 10 - extrachars;
+ extrachars += needed;
+ /* XXX overflow detection missing */
+ if (_PyUnicode_Resize(&v,
+ PyUnicode_GET_SIZE(v) + needed) < 0) {
+ Py_DECREF(x);
+ goto onError;
+ }
+ p = PyUnicode_AS_UNICODE(v) + oldpos;
+ }
+ value -= 0x10000;
+ *p++ = 0xD800 | (value >> 10);
+ *p++ = 0xDC00 | (value & 0x3FF);
+ extrachars -= 2;
}
- Py_DECREF(x);
- continue;
+ else
+#endif
+ *p++ = (Py_UNICODE)value;
}
else if (PyUnicode_Check(x)) {
Py_ssize_t targetsize = PyUnicode_GET_SIZE(x);
- if (targetsize == 1)
+ if (targetsize == 1) {
/* 1-1 mapping */
- *p++ = *PyUnicode_AS_UNICODE(x);
-
+ Py_UNICODE value = *PyUnicode_AS_UNICODE(x);
+ if (value == 0xFFFE)
+ goto Undefined;
+ *p++ = value;
+ }
else if (targetsize > 1) {
/* 1-n mapping */
if (targetsize > extrachars) {
@@ -4191,6 +4235,20 @@ PyObject *PyUnicode_DecodeCharmap(const char *s,
}
Py_DECREF(x);
++s;
+ continue;
+Undefined:
+ /* undefined mapping */
+ Py_XDECREF(x);
+ outpos = p-PyUnicode_AS_UNICODE(v);
+ startinpos = s-starts;
+ endinpos = startinpos+1;
+ if (unicode_decode_call_errorhandler(
+ errors, &errorHandler,
+ "charmap", "character maps to <undefined>",
+ starts, size, &startinpos, &endinpos, &exc, &s,
+ &v, &outpos, &p)) {
+ goto onError;
+ }
}
}
if (p - PyUnicode_AS_UNICODE(v) < PyUnicode_GET_SIZE(v))
@@ -4620,11 +4678,20 @@ int charmap_encoding_error(
*inpos = collendpos;
break;
case 4: /* xmlcharrefreplace */
- /* generate replacement (temporarily (mis)uses p) */
- for (collpos = collstartpos; collpos < collendpos; ++collpos) {
+ /* generate replacement */
+ for (collpos = collstartpos; collpos < collendpos;) {
char buffer[2+29+1+1];
char *cp;
- sprintf(buffer, "&#%d;", (int)p[collpos]);
+ Py_UCS4 ch = p[collpos++];
+#ifndef Py_UNICODE_WIDE
+ if ((0xD800 <= ch && ch <= 0xDBFF) &&
+ (collpos < collendpos) &&
+ (0xDC00 <= p[collpos] && p[collpos] <= 0xDFFF)) {
+ ch = ((((ch & 0x03FF) << 10) |
+ ((Py_UCS4)p[collpos++] & 0x03FF)) + 0x10000);
+ }
+#endif
+ sprintf(buffer, "&#%d;", (int)ch);
for (cp = buffer; *cp; ++cp) {
x = charmapencode_output(*cp, mapping, res, respos);
if (x==enc_EXCEPTION)
@@ -4758,8 +4825,7 @@ static void make_translate_exception(PyObject **exceptionObject,
goto onError;
return;
onError:
- Py_DECREF(*exceptionObject);
- *exceptionObject = NULL;
+ Py_CLEAR(*exceptionObject);
}
}
@@ -5039,10 +5105,11 @@ PyObject *PyUnicode_TranslateCharmap(const Py_UNICODE *p,
break;
case 4: /* xmlcharrefreplace */
/* generate replacement (temporarily (mis)uses p) */
- for (p = collstart; p < collend; ++p) {
+ for (p = collstart; p < collend;) {
char buffer[2+29+1+1];
char *cp;
- sprintf(buffer, "&#%d;", (int)*p);
+ Py_UCS4 ch = _Py_UNICODE_NEXT(p, collend);
+ sprintf(buffer, "&#%d;", (int)ch);
if (charmaptranslate_makespace(&res, &str,
(str-PyUnicode_AS_UNICODE(res))+strlen(buffer)+(endp-collend)))
goto onError;
@@ -5193,8 +5260,10 @@ int PyUnicode_EncodeDecimal(Py_UNICODE *s,
break;
case 4: /* xmlcharrefreplace */
/* generate replacement (temporarily (mis)uses p) */
- for (p = collstart; p < collend; ++p)
- output += sprintf(output, "&#%d;", (int)*p);
+ for (p = collstart; p < collend;) {
+ Py_UCS4 ch = _Py_UNICODE_NEXT(p, collend);
+ output += sprintf(output, "&#%d;", ch);
+ }
p = collend;
break;
default:
@@ -5549,7 +5618,7 @@ PyUnicode_Join(PyObject *separator, PyObject *seq)
PyObject *item;
Py_ssize_t i;
- fseq = PySequence_Fast(seq, "");
+ fseq = PySequence_Fast(seq, "can only join an iterable");
if (fseq == NULL) {
return NULL;
}
@@ -7521,7 +7590,7 @@ unicode_rsplit(PyUnicodeObject *self, PyObject *args)
}
PyDoc_STRVAR(splitlines__doc__,
- "S.splitlines([keepends]) -> list of strings\n\
+ "S.splitlines(keepends=False) -> list of strings\n\
\n\
Return a list of the lines in S, breaking at line boundaries.\n\
Line breaks are not included in the resulting list unless keepends\n\
@@ -7797,10 +7866,6 @@ unicode_getnewargs(PyUnicodeObject *v)
static PyMethodDef unicode_methods[] = {
-
- /* Order is according to common usage: often used methods should
- appear first, since lookup is done sequentially. */
-
{"encode", (PyCFunction) unicode_encode, METH_VARARGS | METH_KEYWORDS, encode__doc__},
{"replace", (PyCFunction) unicode_replace, METH_VARARGS, replace__doc__},
{"split", (PyCFunction) unicode_split, METH_VARARGS, split__doc__},
@@ -8279,8 +8344,8 @@ PyObject *PyUnicode_Format(PyObject *format,
arglen = -1;
argidx = -2;
}
- if (Py_TYPE(args)->tp_as_mapping && !PyTuple_Check(args) &&
- !PyObject_TypeCheck(args, &PyBaseString_Type))
+ if (Py_TYPE(args)->tp_as_mapping && Py_TYPE(args)->tp_as_mapping->mp_subscript &&
+ !PyTuple_Check(args) && !PyObject_TypeCheck(args, &PyBaseString_Type))
dict = args;
while (--fmtcnt >= 0) {
@@ -8384,7 +8449,9 @@ PyObject *PyUnicode_Format(PyObject *format,
"* wants int");
goto onError;
}
- width = PyInt_AsLong(v);
+ width = PyInt_AsSsize_t(v);
+ if (width == -1 && PyErr_Occurred())
+ goto onError;
if (width < 0) {
flags |= F_LJUST;
width = -width;
@@ -8398,7 +8465,7 @@ PyObject *PyUnicode_Format(PyObject *format,
c = *fmt++;
if (c < '0' || c > '9')
break;
- if ((width*10) / 10 != width) {
+ if (width > (PY_SSIZE_T_MAX - ((int)c - '0')) / 10) {
PyErr_SetString(PyExc_ValueError,
"width too big");
goto onError;
@@ -8419,7 +8486,9 @@ PyObject *PyUnicode_Format(PyObject *format,
"* wants int");
goto onError;
}
- prec = PyInt_AsLong(v);
+ prec = _PyInt_AsInt(v);
+ if (prec == -1 && PyErr_Occurred())
+ goto onError;
if (prec < 0)
prec = 0;
if (--fmtcnt >= 0)
@@ -8431,7 +8500,7 @@ PyObject *PyUnicode_Format(PyObject *format,
c = *fmt++;
if (c < '0' || c > '9')
break;
- if ((prec*10) / 10 != prec) {
+ if (prec > (INT_MAX - ((int)c - '0')) / 10) {
PyErr_SetString(PyExc_ValueError,
"prec too big");
goto onError;
@@ -8763,7 +8832,8 @@ unicode_subtype_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
}
PyDoc_STRVAR(unicode_doc,
- "unicode(string [, encoding[, errors]]) -> object\n\
+ "unicode(object='') -> unicode object\n\
+unicode(string[, encoding[, errors]]) -> unicode object\n\
\n\
Create a new Unicode object from the given encoded string.\n\
encoding defaults to the current default string encoding.\n\
@@ -8817,8 +8887,6 @@ PyTypeObject PyUnicode_Type = {
void _PyUnicode_Init(void)
{
- int i;
-
/* XXX - move this array to unicodectype.c ? */
Py_UNICODE linebreak[] = {
0x000A, /* LINE FEED */
@@ -8832,15 +8900,12 @@ void _PyUnicode_Init(void)
};
/* Init the implementation */
- free_list = NULL;
- numfree = 0;
- unicode_empty = _PyUnicode_New(0);
- if (!unicode_empty)
- return;
+ if (!unicode_empty) {
+ unicode_empty = _PyUnicode_New(0);
+ if (!unicode_empty)
+ return;
+ }
- strcpy(unicode_default_encoding, "ascii");
- for (i = 0; i < 256; i++)
- unicode_latin1[i] = NULL;
if (PyType_Ready(&PyUnicode_Type) < 0)
Py_FatalError("Can't initialize 'unicode'");
@@ -8850,6 +8915,12 @@ void _PyUnicode_Init(void)
);
PyType_Ready(&EncodingMapType);
+
+ if (PyType_Ready(&PyFieldNameIter_Type) < 0)
+ Py_FatalError("Can't initialize field name iterator type");
+
+ if (PyType_Ready(&PyFormatterIter_Type) < 0)
+ Py_FatalError("Can't initialize formatter iter type");
}
/* Finalize the Unicode implementation */
@@ -8879,15 +8950,11 @@ _PyUnicode_Fini(void)
{
int i;
- Py_XDECREF(unicode_empty);
- unicode_empty = NULL;
+ Py_CLEAR(unicode_empty);
+
+ for (i = 0; i < 256; i++)
+ Py_CLEAR(unicode_latin1[i]);
- for (i = 0; i < 256; i++) {
- if (unicode_latin1[i]) {
- Py_DECREF(unicode_latin1[i]);
- unicode_latin1[i] = NULL;
- }
- }
(void)PyUnicode_ClearFreeList();
}
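The _Py_UNICODE_NEXT helpers added to unicodeobject.c combine UTF-16 surrogate pairs on narrow builds. A standalone sketch of the underlying arithmetic (illustrative, not taken from the diff): a pair (high, low) encodes the code point ((high & 0x3FF) << 10 | (low & 0x3FF)) + 0x10000.

    #include <assert.h>

    static unsigned long
    join_surrogates(unsigned int high, unsigned int low)
    {
        return ((((unsigned long)high & 0x03FF) << 10) |
                ((unsigned long)low & 0x03FF)) + 0x10000UL;
    }

    int
    main(void)
    {
        /* U+1F600 is encoded in UTF-16 as the pair D83D DE00. */
        assert(join_surrogates(0xD83D, 0xDE00) == 0x1F600);
        return 0;
    }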
diff --git a/Objects/weakrefobject.c b/Objects/weakrefobject.c
index 0e46d92..871c248 100644
--- a/Objects/weakrefobject.c
+++ b/Objects/weakrefobject.c
@@ -52,9 +52,8 @@ clear_weakref(PyWeakReference *self)
{
PyObject *callback = self->wr_callback;
- if (PyWeakref_GET_OBJECT(self) != Py_None) {
- PyWeakReference **list = GET_WEAKREFS_LISTPTR(
- PyWeakref_GET_OBJECT(self));
+ if (self->wr_object != Py_None) {
+ PyWeakReference **list = GET_WEAKREFS_LISTPTR(self->wr_object);
if (*list == self)
/* If 'self' is the end of the list (and thus self->wr_next == NULL)
@@ -168,13 +167,21 @@ weakref_repr(PyWeakReference *self)
PyErr_Clear();
else if (PyString_Check(nameobj))
name = PyString_AS_STRING(nameobj);
- PyOS_snprintf(buffer, sizeof(buffer),
- name ? "<weakref at %p; to '%.50s' at %p (%s)>"
- : "<weakref at %p; to '%.50s' at %p>",
- self,
- Py_TYPE(PyWeakref_GET_OBJECT(self))->tp_name,
- PyWeakref_GET_OBJECT(self),
- name);
+ if (name != NULL) {
+ PyOS_snprintf(buffer, sizeof(buffer),
+ "<weakref at %p; to '%.50s' at %p (%s)>",
+ self,
+ Py_TYPE(PyWeakref_GET_OBJECT(self))->tp_name,
+ PyWeakref_GET_OBJECT(self),
+ name);
+ }
+ else {
+ PyOS_snprintf(buffer, sizeof(buffer),
+ "<weakref at %p; to '%.50s' at %p>",
+ self,
+ Py_TYPE(PyWeakref_GET_OBJECT(self))->tp_name,
+ PyWeakref_GET_OBJECT(self));
+ }
Py_XDECREF(nameobj);
}
return PyString_FromString(buffer);
@@ -187,15 +194,19 @@ weakref_repr(PyWeakReference *self)
static PyObject *
weakref_richcompare(PyWeakReference* self, PyWeakReference* other, int op)
{
- if (op != Py_EQ || self->ob_type != other->ob_type) {
+ if ((op != Py_EQ && op != Py_NE) || self->ob_type != other->ob_type) {
Py_INCREF(Py_NotImplemented);
return Py_NotImplemented;
}
if (PyWeakref_GET_OBJECT(self) == Py_None
|| PyWeakref_GET_OBJECT(other) == Py_None) {
- PyObject *res = self==other ? Py_True : Py_False;
- Py_INCREF(res);
- return res;
+ int res = (self == other);
+ if (op == Py_NE)
+ res = !res;
+ if (res)
+ Py_RETURN_TRUE;
+ else
+ Py_RETURN_FALSE;
}
return PyObject_RichCompare(PyWeakref_GET_OBJECT(self),
PyWeakref_GET_OBJECT(other), op);
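The weakref_richcompare hunk above extends the comparison to answer Py_NE as well as Py_EQ. A sketch of the shape of such a tp_richcompare slot for a hypothetical type (identity stands in for real equality here; not code from the diff):

    #include <Python.h>

    static PyObject *
    demo_richcompare(PyObject *self, PyObject *other, int op)
    {
        int equal;

        if ((op != Py_EQ && op != Py_NE) || Py_TYPE(self) != Py_TYPE(other)) {
            Py_INCREF(Py_NotImplemented);
            return Py_NotImplemented;
        }
        equal = (self == other);
        if (op == Py_NE)
            equal = !equal;
        if (equal)
            Py_RETURN_TRUE;
        Py_RETURN_FALSE;
    }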
diff --git a/PC/VC6/bz2.dsp b/PC/VC6/bz2.dsp
index 3c164fa..352cbad 100644
--- a/PC/VC6/bz2.dsp
+++ b/PC/VC6/bz2.dsp
@@ -44,7 +44,7 @@ RSC=rc.exe
# PROP Target_Dir ""
F90=df.exe
# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "Py_BUILD_CORE_MODULE" /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c
-# ADD CPP /nologo /MD /W3 /GX /Zi /O2 /I "..\..\Include" /I ".." /I "..\..\..\bzip2-1.0.5" /D "Py_BUILD_CORE_MODULE" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MD /W3 /GX /Zi /O2 /I "..\..\Include" /I ".." /I "..\..\..\bzip2-1.0.6" /D "Py_BUILD_CORE_MODULE" /D "NDEBUG" /D "WIN32" /D "_WINDOWS" /YX /FD /c
# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /o "NUL" /win32
# ADD BASE RSC /l 0x409 /d "NDEBUG"
@@ -54,7 +54,7 @@ BSC32=bscmake.exe
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
-# ADD LINK32 ..\..\..\bzip2-1.0.5\libbz2.lib /nologo /base:"0x1D170000" /subsystem:windows /dll /debug /machine:I386 /nodefaultlib:"libc" /out:"./bz2.pyd"
+# ADD LINK32 ..\..\..\bzip2-1.0.6\libbz2.lib /nologo /base:"0x1D170000" /subsystem:windows /dll /debug /machine:I386 /nodefaultlib:"libc" /out:"./bz2.pyd"
# SUBTRACT LINK32 /pdb:none /nodefaultlib
!ELSEIF "$(CFG)" == "bz2 - Win32 Debug"
@@ -72,7 +72,7 @@ LINK32=link.exe
# PROP Target_Dir ""
F90=df.exe
# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "Py_BUILD_CORE_MODULE" /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c
-# ADD CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /I "..\..\Include" /I ".." /I "..\..\..\bzip2-1.0.5" /D "Py_BUILD_CORE_MODULE" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /YX /FD /c
+# ADD CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /I "..\..\Include" /I ".." /I "..\..\..\bzip2-1.0.6" /D "Py_BUILD_CORE_MODULE" /D "_DEBUG" /D "WIN32" /D "_WINDOWS" /YX /FD /c
# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /o "NUL" /win32
# ADD BASE RSC /l 0x409 /d "_DEBUG"
@@ -82,7 +82,7 @@ BSC32=bscmake.exe
# ADD BSC32 /nologo
LINK32=link.exe
# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /pdbtype:sept
-# ADD LINK32 ..\..\..\bzip2-1.0.5\libbz2.lib /nologo /base:"0x1D170000" /subsystem:windows /dll /debug /machine:I386 /nodefaultlib:"msvcrt" /nodefaultlib:"libc" /out:"./bz2_d.pyd" /pdbtype:sept
+# ADD LINK32 ..\..\..\bzip2-1.0.6\libbz2.lib /nologo /base:"0x1D170000" /subsystem:windows /dll /debug /machine:I386 /nodefaultlib:"msvcrt" /nodefaultlib:"libc" /out:"./bz2_d.pyd" /pdbtype:sept
# SUBTRACT LINK32 /pdb:none
!ENDIF
diff --git a/PC/VC6/pythoncore.dsp b/PC/VC6/pythoncore.dsp
index 4fe426a..4c3cd98 100644
--- a/PC/VC6/pythoncore.dsp
+++ b/PC/VC6/pythoncore.dsp
@@ -663,6 +663,10 @@ SOURCE=..\..\Python\pythonrun.c
# End Source File
# Begin Source File
+SOURCE=..\..\Python\random.c
+# End Source File
+# Begin Source File
+
SOURCE=..\..\Objects\rangeobject.c
# End Source File
# Begin Source File
diff --git a/PC/VC6/readme.txt b/PC/VC6/readme.txt
index 03d2318..d48517d 100644
--- a/PC/VC6/readme.txt
+++ b/PC/VC6/readme.txt
@@ -120,14 +120,14 @@ bz2
Download the source from the python.org copy into the dist
directory:
- svn export http://svn.python.org/projects/external/bzip2-1.0.5
+ svn export http://svn.python.org/projects/external/bzip2-1.0.6
And requires building bz2 first.
- cd dist\bzip2-1.0.5
+ cd dist\bzip2-1.0.6
nmake -f makefile.msc
- All of this managed to build bzip2-1.0.5\libbz2.lib, which the Python
+ All of this managed to build bzip2-1.0.6\libbz2.lib, which the Python
project links in.
diff --git a/PC/VS7.1/pythoncore.vcproj b/PC/VS7.1/pythoncore.vcproj
index ea6ce78..230d2ef 100644
--- a/PC/VS7.1/pythoncore.vcproj
+++ b/PC/VS7.1/pythoncore.vcproj
@@ -767,6 +767,9 @@
RelativePath="..\..\Python\pythonrun.c">
</File>
<File
+ RelativePath="..\..\Python\random.c">
+ </File>
+ <File
RelativePath="..\..\Objects\rangeobject.c">
</File>
<File
diff --git a/PC/VS8.0/bz2.vcproj b/PC/VS8.0/bz2.vcproj
index e6ffdf2..f84a645 100644
--- a/PC/VS8.0/bz2.vcproj
+++ b/PC/VS8.0/bz2.vcproj
@@ -532,7 +532,7 @@
</File>
</Filter>
<Filter
- Name="bzip2 1.0.5 Header Files"
+ Name="bzip2 1.0.6 Header Files"
>
<File
RelativePath="$(bz2Dir)\bzlib.h"
@@ -544,7 +544,7 @@
</File>
</Filter>
<Filter
- Name="bzip2 1.0.5 Source Files"
+ Name="bzip2 1.0.6 Source Files"
>
<File
RelativePath="$(bz2Dir)\blocksort.c"
diff --git a/PC/VS8.0/pyproject.vsprops b/PC/VS8.0/pyproject.vsprops
index 2692b89..5a5fff6 100644
--- a/PC/VS8.0/pyproject.vsprops
+++ b/PC/VS8.0/pyproject.vsprops
@@ -78,11 +78,11 @@
/>
<UserMacro
Name="bz2Dir"
- Value="$(externalsDir)\bzip2-1.0.5"
+ Value="$(externalsDir)\bzip2-1.0.6"
/>
<UserMacro
Name="opensslDir"
- Value="$(externalsDir)\openssl-0.9.8l"
+ Value="$(externalsDir)\openssl-0.9.8y"
/>
<UserMacro
Name="tcltkDir"
diff --git a/PC/VS8.0/pythoncore.vcproj b/PC/VS8.0/pythoncore.vcproj
index f591967..c823226 100644
--- a/PC/VS8.0/pythoncore.vcproj
+++ b/PC/VS8.0/pythoncore.vcproj
@@ -1835,6 +1835,10 @@
>
</File>
<File
+ RelativePath="..\..\Python\random.c"
+ >
+ </File>
+ <File
RelativePath="..\..\Python\structmember.c"
>
</File>
diff --git a/PC/_subprocess.c b/PC/_subprocess.c
index 6780382..ffe8f41 100644
--- a/PC/_subprocess.c
+++ b/PC/_subprocess.c
@@ -331,7 +331,7 @@ getenvironment(PyObject* environment)
PyObject* values;
char* p;
- /* convert environment dictionary to windows enviroment string */
+ /* convert environment dictionary to windows environment string */
if (! PyMapping_Check(environment)) {
PyErr_SetString(
PyExc_TypeError, "environment must be dictionary or None");
@@ -367,7 +367,8 @@ getenvironment(PyObject* environment)
vsize + 1 + 1;
if (totalsize > PyString_GET_SIZE(out)) {
int offset = p - PyString_AS_STRING(out);
- _PyString_Resize(&out, totalsize + 1024);
+ if (_PyString_Resize(&out, totalsize + 1024))
+ goto exit;
p = PyString_AS_STRING(out) + offset;
}
memcpy(p, PyString_AS_STRING(key), ksize);
@@ -383,7 +384,7 @@ getenvironment(PyObject* environment)
_PyString_Resize(&out, p - PyString_AS_STRING(out));
/* PyObject_Print(out, stdout, 0); */
-
+exit:
Py_XDECREF(keys);
Py_XDECREF(values);
@@ -670,4 +671,5 @@ init_subprocess()
defint(d, "WAIT_OBJECT_0", WAIT_OBJECT_0);
defint(d, "CREATE_NEW_CONSOLE", CREATE_NEW_CONSOLE);
defint(d, "CREATE_NEW_PROCESS_GROUP", CREATE_NEW_PROCESS_GROUP);
+ defint(d, "STILL_ACTIVE", STILL_ACTIVE);
}
diff --git a/PC/_winreg.c b/PC/_winreg.c
index 445c3ed..f90a282 100644
--- a/PC/_winreg.c
+++ b/PC/_winreg.c
@@ -753,7 +753,8 @@ Py2Reg(PyObject *value, DWORD typ, BYTE **retDataBuf, DWORD *retDataSize)
Py_ssize_t i,j;
switch (typ) {
case REG_DWORD:
- if (value != Py_None && !PyInt_Check(value))
+ if (value != Py_None &&
+ !(PyInt_Check(value) || PyLong_Check(value)))
return FALSE;
*retDataBuf = (BYTE *)PyMem_NEW(DWORD, 1);
if (*retDataBuf==NULL){
@@ -765,10 +766,10 @@ Py2Reg(PyObject *value, DWORD typ, BYTE **retDataBuf, DWORD *retDataSize)
DWORD zero = 0;
memcpy(*retDataBuf, &zero, sizeof(DWORD));
}
- else
- memcpy(*retDataBuf,
- &PyInt_AS_LONG((PyIntObject *)value),
- sizeof(DWORD));
+ else {
+ DWORD d = PyLong_AsUnsignedLong(value);
+ memcpy(*retDataBuf, &d, sizeof(DWORD));
+ }
break;
case REG_SZ:
case REG_EXPAND_SZ:
@@ -887,7 +888,7 @@ Py2Reg(PyObject *value, DWORD typ, BYTE **retDataBuf, DWORD *retDataSize)
else {
void *src_buf;
PyBufferProcs *pb = value->ob_type->tp_as_buffer;
- if (pb==NULL) {
+ if (pb == NULL || pb->bf_getreadbuffer == NULL) {
PyErr_Format(PyExc_TypeError,
"Objects of type '%s' can not "
"be used as binary registry values",
@@ -895,9 +896,11 @@ Py2Reg(PyObject *value, DWORD typ, BYTE **retDataBuf, DWORD *retDataSize)
return FALSE;
}
*retDataSize = (*pb->bf_getreadbuffer)(value, 0, &src_buf);
- *retDataBuf = (BYTE *)PyMem_NEW(char,
- *retDataSize);
- if (*retDataBuf==NULL){
+ if (*retDataSize < 0) {
+ return FALSE;
+ }
+ *retDataBuf = (BYTE *)PyMem_NEW(char, *retDataSize);
+ if (*retDataBuf == NULL){
PyErr_NoMemory();
return FALSE;
}
@@ -917,9 +920,9 @@ Reg2Py(char *retDataBuf, DWORD retDataSize, DWORD typ)
switch (typ) {
case REG_DWORD:
if (retDataSize == 0)
- obData = Py_BuildValue("i", 0);
+ obData = Py_BuildValue("k", 0);
else
- obData = Py_BuildValue("i",
+ obData = Py_BuildValue("k",
*(int *)retDataBuf);
break;
case REG_SZ:
@@ -947,8 +950,10 @@ Reg2Py(char *retDataBuf, DWORD retDataSize, DWORD typ)
fixupMultiSZ(str, retDataBuf, retDataSize);
obData = PyList_New(s);
- if (obData == NULL)
+ if (obData == NULL) {
+ free(str);
return NULL;
+ }
for (index = 0; index < s; index++)
{
size_t len = _mbstrlen(str[index]);
@@ -956,6 +961,7 @@ Reg2Py(char *retDataBuf, DWORD retDataSize, DWORD typ)
PyErr_SetString(PyExc_OverflowError,
"registry string is too long for a Python string");
Py_DECREF(obData);
+ free(str);
return NULL;
}
PyList_SetItem(obData,
diff --git a/PC/msvcrtmodule.c b/PC/msvcrtmodule.c
index 057900d..057900d 100755..100644
--- a/PC/msvcrtmodule.c
+++ b/PC/msvcrtmodule.c
diff --git a/PC/pyconfig.h b/PC/pyconfig.h
index 1cfc59b..2b16665 100644
--- a/PC/pyconfig.h
+++ b/PC/pyconfig.h
@@ -342,7 +342,7 @@ Py_NO_ENABLE_SHARED to find out. Also support MS_NO_COREDLL for b/w compat */
# define SIZEOF_FPOS_T 8
# define SIZEOF_HKEY 8
# define SIZEOF_SIZE_T 8
-/* configure.in defines HAVE_LARGEFILE_SUPPORT iff HAVE_LONG_LONG,
+/* configure.ac defines HAVE_LARGEFILE_SUPPORT iff HAVE_LONG_LONG,
sizeof(off_t) > sizeof(long), and sizeof(PY_LONG_LONG) >= sizeof(off_t).
On Win64 the second condition is not true, but if fpos_t replaces off_t
then this is true. The uses of HAVE_LARGEFILE_SUPPORT imply that Win64
diff --git a/PC/python_nt.rc b/PC/python_nt.rc
index e132837..3f55723 100644
--- a/PC/python_nt.rc
+++ b/PC/python_nt.rc
@@ -61,7 +61,7 @@ BEGIN
VALUE "FileDescription", "Python Core\0"
VALUE "FileVersion", PYTHON_VERSION
VALUE "InternalName", "Python DLL\0"
- VALUE "LegalCopyright", "Copyright © 2001-2008 Python Software Foundation. Copyright © 2000 BeOpen.com. Copyright © 1995-2001 CNRI. Copyright © 1991-1995 SMC.\0"
+ VALUE "LegalCopyright", "Copyright © 2001-2014 Python Software Foundation. Copyright © 2000 BeOpen.com. Copyright © 1995-2001 CNRI. Copyright © 1991-1995 SMC.\0"
VALUE "OriginalFilename", PYTHON_DLL_NAME "\0"
VALUE "ProductName", "Python\0"
VALUE "ProductVersion", PYTHON_VERSION
diff --git a/PCbuild/build_ssl.py b/PCbuild/build_ssl.py
index f81e0bc..151aa54 100644
--- a/PCbuild/build_ssl.py
+++ b/PCbuild/build_ssl.py
@@ -64,37 +64,13 @@ def find_working_perl(perls):
print(" Please install ActivePerl and ensure it appears on your path")
return None
-# Locate the best SSL directory given a few roots to look into.
-def find_best_ssl_dir(sources):
- candidates = []
- for s in sources:
- try:
- # note: do not abspath s; the build will fail if any
- # higher up directory name has spaces in it.
- fnames = os.listdir(s)
- except os.error:
- fnames = []
- for fname in fnames:
- fqn = os.path.join(s, fname)
- if os.path.isdir(fqn) and fname.startswith("openssl-"):
- candidates.append(fqn)
- # Now we have all the candidates, locate the best.
- best_parts = []
- best_name = None
- for c in candidates:
- parts = re.split("[.-]", os.path.basename(c))[1:]
- # eg - openssl-0.9.7-beta1 - ignore all "beta" or any other qualifiers
- if len(parts) >= 4:
- continue
- if parts > best_parts:
- best_parts = parts
- best_name = c
- if best_name is not None:
- print("Found an SSL directory at '%s'" % (best_name,))
- else:
- print("Could not find an SSL directory in '%s'" % (sources,))
- sys.stdout.flush()
- return best_name
+# Fetch SSL directory from VC properties
+def get_ssl_dir():
+ propfile = (os.path.join(os.path.dirname(__file__), 'pyproject.vsprops'))
+ with open(propfile) as f:
+ m = re.search('openssl-([^"]+)"', f.read())
+ return "..\..\openssl-"+m.group(1)
+
def create_makefile64(makefile, m32):
"""Create and fix makefile for 64bit
@@ -190,7 +166,7 @@ def main():
print("No Perl installation was found. Existing Makefiles are used.")
sys.stdout.flush()
# Look for SSL 2 levels up from pcbuild - ie, same place zlib etc all live.
- ssl_dir = find_best_ssl_dir(("..\\..",))
+ ssl_dir = get_ssl_dir()
if ssl_dir is None:
sys.exit(1)
@@ -231,9 +207,9 @@ def main():
# Now run make.
if arch == "amd64":
- rc = os.system(r"ml64 -c -Foms\uptable.obj ms\uptable.asm")
+ rc = os.system("nasm -f win64 -DNEAR -Ox -g ms\\uptable.asm")
if rc:
- print("ml64 assembler has failed.")
+ print("nasm assembler has failed.")
sys.exit(rc)
shutil.copy(r"crypto\buildinf_%s.h" % arch, r"crypto\buildinf.h")
diff --git a/PCbuild/bz2.vcproj b/PCbuild/bz2.vcproj
index 04ad7d3..088fb46 100644
--- a/PCbuild/bz2.vcproj
+++ b/PCbuild/bz2.vcproj
@@ -532,7 +532,7 @@
</File>
</Filter>
<Filter
- Name="bzip2 1.0.5 Header Files"
+ Name="bzip2 1.0.6 Header Files"
>
<File
RelativePath="$(bz2Dir)\bzlib.h"
@@ -544,7 +544,7 @@
</File>
</Filter>
<Filter
- Name="bzip2 1.0.5 Source Files"
+ Name="bzip2 1.0.6 Source Files"
>
<File
RelativePath="$(bz2Dir)\blocksort.c"
diff --git a/PCbuild/pginstrument.vsprops b/PCbuild/pginstrument.vsprops
index 38c5f18..99c117b 100644
--- a/PCbuild/pginstrument.vsprops
+++ b/PCbuild/pginstrument.vsprops
@@ -22,7 +22,7 @@
<Tool
Name="VCLinkerTool"
OptimizeReferences="2"
- EnableCOMDATFolding="2"
+ EnableCOMDATFolding="1"
LinkTimeCodeGeneration="2"
ProfileGuidedDatabase="$(SolutionDir)$(PlatformName)-pgi\$(TargetName).pgd"
ImportLibrary="$(OutDirPGI)\$(TargetName).lib"
diff --git a/PCbuild/pyproject.vsprops b/PCbuild/pyproject.vsprops
index ae5570b..0dd61e5 100644
--- a/PCbuild/pyproject.vsprops
+++ b/PCbuild/pyproject.vsprops
@@ -78,11 +78,11 @@
/>
<UserMacro
Name="bz2Dir"
- Value="$(externalsDir)\bzip2-1.0.5"
+ Value="$(externalsDir)\bzip2-1.0.6"
/>
<UserMacro
Name="opensslDir"
- Value="$(externalsDir)\openssl-0.9.8l"
+ Value="$(externalsDir)\openssl-1.0.1h"
/>
<UserMacro
Name="tcltkDir"
diff --git a/PCbuild/readme.txt b/PCbuild/readme.txt
index 366fc07..9c017df 100644
--- a/PCbuild/readme.txt
+++ b/PCbuild/readme.txt
@@ -1,7 +1,7 @@
Building Python using VC++ 9.0
------------------------------
-This directory is used to build Python for Win32 and x64 platforms, e.g.
+This directory is used to build Python for Win32 and x64 platforms, e.g.
Windows 2000, XP, Vista and Windows Server 2008. In order to build 32-bit
debug and release executables, Microsoft Visual C++ 2008 Express Edition is
required at the very least. In order to build 64-bit debug and release
@@ -27,7 +27,7 @@ won't stop you from building Python.
The solution is configured to build the projects in the correct order. "Build
Solution" or F7 takes care of dependencies except for x64 builds. To make
-cross compiling x64 builds on a 32bit OS possible the x64 builds require a
+cross compiling x64 builds on a 32bit OS possible the x64 builds require a
32bit version of Python.
NOTE:
@@ -37,7 +37,7 @@ NOTE:
running a Python core buildbot test slave; see SUBPROJECTS below)
When using the Debug setting, the output files have a _d added to
-their name: python30_d.dll, python_d.exe, parser_d.pyd, and so on. Both
+their name: python27_d.dll, python_d.exe, parser_d.pyd, and so on. Both
the build and rt batch files accept a -d option for debug builds.
The 32bit builds end up in the solution folder PCbuild while the x64 builds
@@ -47,7 +47,7 @@ optimization end up in their own folders, too.
Legacy support
--------------
-You can find build directories for older versions of Visual Studio and
+You can find build directories for older versions of Visual Studio and
Visual C++ in the PC directory. The legacy build directories are no longer
actively maintained and may not work out of the box.
@@ -64,7 +64,7 @@ C RUNTIME
Visual Studio 2008 uses version 9 of the C runtime (MSVCRT9). The executables
are linked to a CRT "side by side" assembly which must be present on the target
-machine. This is avalible under the VC/Redist folder of your visual studio
+machine. This is available under the VC/Redist folder of your visual studio
distribution. On XP and later operating systems that support
side-by-side assemblies it is not enough to have the msvcrt90.dll present,
it has to be there as a whole assembly, that is, a folder with the .dll
@@ -105,44 +105,34 @@ winsound
Python-controlled subprojects that wrap external projects:
_bsddb
Wraps Berkeley DB 4.7.25, which is currently built by _bsddb.vcproj.
- project (see below).
+ project.
_sqlite3
- Wraps SQLite 3.6.21, which is currently built by sqlite3.vcproj (see below).
+ Wraps SQLite 3.6.21, which is currently built by sqlite3.vcproj.
_tkinter
Wraps the Tk windowing system. Unlike _bsddb and _sqlite3, there's no
corresponding tcltk.vcproj-type project that builds Tcl/Tk from vcproj's
within our pcbuild.sln, which means this module expects to find a
pre-built Tcl/Tk in either ..\..\tcltk for 32-bit or ..\..\tcltk64 for
64-bit (relative to this directory). See below for instructions to build
- Tcl/Tk.
+ Tcl/Tk.
bz2
Python wrapper for the libbz2 compression library. Homepage
http://sources.redhat.com/bzip2/
Download the source from the python.org copy into the dist
directory:
- svn export http://svn.python.org/projects/external/bzip2-1.0.5
+ svn export http://svn.python.org/projects/external/bzip2-1.0.6
** NOTE: if you use the Tools\buildbot\external(-amd64).bat approach for
obtaining external sources then you don't need to manually get the source
above via subversion. **
- A custom pre-link step in the bz2 project settings should manage to
- build bzip2-1.0.5\libbz2.lib by magic before bz2.pyd (or bz2_d.pyd) is
- linked in PCbuild\.
- However, the bz2 project is not smart enough to remove anything under
- bzip2-1.0.5\ when you do a clean, so if you want to rebuild bzip2.lib
- you need to clean up bzip2-1.0.5\ by hand.
-
- All of this managed to build libbz2.lib in
- bzip2-1.0.5\$platform-$configuration\, which the Python project links in.
-
_ssl
Python wrapper for the secure sockets library.
Get the source code through
- svn export http://svn.python.org/projects/external/openssl-0.9.8l
+ svn export http://svn.python.org/projects/external/openssl-1.0.1h
** NOTE: if you use the Tools\buildbot\external(-amd64).bat approach for
obtaining external sources then you don't need to manually get the source
@@ -154,18 +144,16 @@ _ssl
You must install the NASM assembler from
http://nasm.sf.net
- for x86 builds. Put nasmw.exe anywhere in your PATH.
- Note: recent releases of nasm only have nasm.exe. Just rename it to
- nasmw.exe.
+ for x86 builds. Put nasm.exe anywhere in your PATH.
You can also install ActivePerl from
http://www.activestate.com/activeperl/
- if you like to use the official sources instead of the files from
+ if you like to use the official sources instead of the files from
python's subversion repository. The svn version contains pre-build
makefiles and assembly files.
The build process makes sure that no patented algorithms are included.
- For now RC5, MDC2 and IDEA are excluded from the build. You may have
+ For now RC5, MDC2 and IDEA are excluded from the build. You may have
to manually remove $(OBJ_D)\i_*.obj from ms\nt.mak if the build process
complains about missing files or forbidden IDEA. Again the files provided
in the subversion repository are already fixed.
@@ -186,16 +174,16 @@ _ssl
this by hand.
The subprojects above wrap external projects Python doesn't control, and as
-such, a little more work is required in order to download the relevant source
+such, a little more work is required in order to download the relevant source
files for each project before they can be built. The buildbots do this each
-time they're built, so the easiest approach is to run either external.bat or
+time they're built, so the easiest approach is to run either external.bat or
external-amd64.bat in the ..\Tools\buildbot directory from ..\, i.e.:
C:\..\svn.python.org\projects\python\trunk\PCbuild>cd ..
C:\..\svn.python.org\projects\python\trunk>Tools\buildbot\external.bat
This extracts all the external subprojects from http://svn.python.org/external
-via Subversion (so you'll need an svn.exe on your PATH) and places them in
+via Subversion (so you'll need an svn.exe on your PATH) and places them in
..\.. (relative to this directory). The external(-amd64).bat scripts will
also build a debug build of Tcl/Tk; there aren't any equivalent batch files
for building release versions of Tcl/Tk lying around in the Tools\buildbot
@@ -238,7 +226,7 @@ XXX trent.nelson 02-Apr-08:
junction as follows (using the directory structure above as an example):
C:\..\python\trunk\external <- already exists and has built versions
- of the external subprojects
+ of the external subprojects
C:\..\python\branches\py3k>linkd.exe external ..\..\trunk\external
Link created at: external
@@ -251,19 +239,9 @@ XXX trent.nelson 02-Apr-08:
Building for Itanium
--------------------
-NOTE:
Official support for Itanium builds have been dropped from the build. Please
contact us and provide patches if you are interested in Itanium builds.
-The project files support a ReleaseItanium configuration which creates
-Win64/Itanium binaries. For this to work, you need to install the Platform
-SDK, in particular the 64-bit support. This includes an Itanium compiler
-(future releases of the SDK likely include an AMD64 compiler as well).
-In addition, you need the Visual Studio plugin for external C compilers,
-from http://sf.net/projects/vsextcomp. The plugin will wrap cl.exe, to
-locate the proper target compiler, and convert compiler options
-accordingly. The project files require atleast version 0.9.
-
Building for AMD64
------------------
@@ -283,7 +261,7 @@ Profile Guided Optimization
The solution has two configurations for PGO. The PGInstrument
configuration must be build first. The PGInstrument binaries are
-lniked against a profiling library and contain extra debug
+linked against a profiling library and contain extra debug
information. The PGUpdate configuration takes the profiling data and
generates optimized binaries.
@@ -291,22 +269,22 @@ The build_pgo.bat script automates the creation of optimized binaries. It
creates the PGI files, runs the unit test suite or PyBench with the PGI
python and finally creates the optimized files.
-http://msdn2.microsoft.com/en-us/library/e7k32f4k(VS.90).aspx
+http://msdn.microsoft.com/en-us/library/e7k32f4k(VS.90).aspx
Static library
--------------
The solution has no configuration for static libraries. However it is easy
-it build a static library instead of a DLL. You simply have to set the
+it build a static library instead of a DLL. You simply have to set the
"Configuration Type" to "Static Library (.lib)" and alter the preprocessor
macro "Py_ENABLE_SHARED" to "Py_NO_ENABLE_SHARED". You may also have to
-change the "Runtime Library" from "Multi-threaded DLL (/MD)" to
+change the "Runtime Library" from "Multi-threaded DLL (/MD)" to
"Multi-threaded (/MT)".
Visual Studio properties
------------------------
-The PCbuild solution makes heavy use of Visual Studio property files
+The PCbuild solution makes heavy use of Visual Studio property files
(*.vsprops). The properties can be viewed and altered in the Property
Manager (View -> Other Windows -> Property Manager).
diff --git a/PCbuild/rt.bat b/PCbuild/rt.bat
index ee1661a..9da1ed1 100644
--- a/PCbuild/rt.bat
+++ b/PCbuild/rt.bat
@@ -30,7 +30,7 @@ set prefix=.\
set suffix=
set qmode=
set dashO=
-set tcltk=
+set tcltk=tcltk
:CheckOpts
if "%1"=="-O" (set dashO=-O) & shift & goto CheckOpts
@@ -38,7 +38,7 @@ if "%1"=="-q" (set qmode=yes) & shift & goto CheckOpts
if "%1"=="-d" (set suffix=_d) & shift & goto CheckOpts
if "%1"=="-x64" (set prefix=amd64) & (set tcltk=tcltk64) & shift & goto CheckOpts
-PATH %PATH%;..\..\%tcltk%\bin
+PATH %PATH%;%~dp0..\..\%tcltk%\bin
set exe=%prefix%\python%suffix%
set cmd=%exe% %dashO% -Wd -3 -E -tt ../lib/test/regrtest.py %1 %2 %3 %4 %5 %6 %7 %8 %9
if defined qmode goto Qmode
diff --git a/Parser/asdl_c.py b/Parser/asdl_c.py
index 6a46549..7ebc236 100755
--- a/Parser/asdl_c.py
+++ b/Parser/asdl_c.py
@@ -977,7 +977,7 @@ def has_sequence(types, doing_specialization):
class StaticVisitor(PickleVisitor):
- CODE = '''Very simple, always emit this static code. Overide CODE'''
+ CODE = '''Very simple, always emit this static code. Override CODE'''
def visit(self, object):
self.emit(self.CODE, 0, reflow=False)
@@ -1033,7 +1033,7 @@ class ObjVisitor(PickleVisitor):
self.emit("case %s:" % t.name, 2)
self.emit("Py_INCREF(%s_singleton);" % t.name, 3)
self.emit("return %s_singleton;" % t.name, 3)
- self.emit("default:" % name, 2)
+ self.emit("default:", 2)
self.emit('/* should never happen, but just in case ... */', 3)
code = "PyErr_Format(PyExc_SystemError, \"unknown %s found\");" % name
self.emit(code, 3, reflow=False)
@@ -1117,10 +1117,18 @@ PyObject* PyAST_mod2obj(mod_ty t)
mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode)
{
mod_ty res;
- PyObject *req_type[] = {(PyObject*)Module_type, (PyObject*)Expression_type,
- (PyObject*)Interactive_type};
- char *req_name[] = {"Module", "Expression", "Interactive"};
+ PyObject *req_type[3];
+ char *req_name[3];
int isinstance;
+
+ req_type[0] = (PyObject*)Module_type;
+ req_type[1] = (PyObject*)Expression_type;
+ req_type[2] = (PyObject*)Interactive_type;
+
+ req_name[0] = "Module";
+ req_name[1] = "Expression";
+ req_name[2] = "Interactive";
+
assert(0 <= mode && mode <= 2);
init_types();
diff --git a/Parser/myreadline.c b/Parser/myreadline.c
index 07c1d44..59db41a 100644
--- a/Parser/myreadline.c
+++ b/Parser/myreadline.c
@@ -40,6 +40,10 @@ static int
my_fgets(char *buf, int len, FILE *fp)
{
char *p;
+#ifdef MS_WINDOWS
+ int i;
+#endif
+
while (1) {
if (PyOS_InputHook != NULL)
(void)(PyOS_InputHook)();
@@ -49,32 +53,24 @@ my_fgets(char *buf, int len, FILE *fp)
if (p != NULL)
return 0; /* No error */
#ifdef MS_WINDOWS
- /* In the case of a Ctrl+C or some other external event
- interrupting the operation:
- Win2k/NT: ERROR_OPERATION_ABORTED is the most recent Win32
- error code (and feof() returns TRUE).
- Win9x: Ctrl+C seems to have no effect on fgets() returning
- early - the signal handler is called, but the fgets()
- only returns "normally" (ie, when Enter hit or feof())
+ /* Ctrl-C anywhere on the line or Ctrl-Z if the only character
+ on a line will set ERROR_OPERATION_ABORTED. Under normal
+ circumstances Ctrl-C will also have caused the SIGINT handler
+ to fire. This signal fires in another thread and is not
+ guaranteed to have occurred before this point in the code.
+
+ Therefore: check in a small loop to see if the trigger has
+ fired, in which case assume this is a Ctrl-C event. If it
+ hasn't fired within 10ms assume that this is a Ctrl-Z on its
+ own or that the signal isn't going to fire for some other
+ reason and drop through to check for EOF.
*/
if (GetLastError()==ERROR_OPERATION_ABORTED) {
- /* Signals come asynchronously, so we sleep a brief
- moment before checking if the handler has been
- triggered (we cant just return 1 before the
- signal handler has been called, as the later
- signal may be treated as a separate interrupt).
- */
- Sleep(1);
- if (PyOS_InterruptOccurred()) {
- return 1; /* Interrupt */
+ for (i = 0; i < 10; i++) {
+ if (PyOS_InterruptOccurred())
+ return 1;
+ Sleep(1);
}
- /* Either the sleep wasn't long enough (need a
- short loop retrying?) or not interrupted at all
- (in which case we should revisit the whole thing!)
- Logging some warning would be nice. assert is not
- viable as under the debugger, the various dialogs
- mean the condition is not true.
- */
}
#endif /* MS_WINDOWS */
if (feof(fp)) {
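
The reworked my_fgets() above polls for roughly 10ms before deciding an aborted console read was not a Ctrl-C. A minimal Python sketch of that shape; interrupt_fired is a stand-in for PyOS_InterruptOccurred() and always returns False here:

    import time

    def interrupt_fired():
        # Stand-in for PyOS_InterruptOccurred(); the C code asks whether the
        # SIGINT handler has already run in another thread.
        return False

    def aborted_read_was_ctrl_c():
        # ERROR_OPERATION_ABORTED alone is ambiguous (Ctrl-C or a lone Ctrl-Z),
        # so give the asynchronous SIGINT handler up to ~10ms to show itself.
        for _ in range(10):
            if interrupt_fired():
                return True
            time.sleep(0.001)
        return False

    print(aborted_read_was_ctrl_c())   # False here, since nothing ever fires
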
diff --git a/Parser/node.c b/Parser/node.c
index 9eba76b..0dea30f 100644
--- a/Parser/node.c
+++ b/Parser/node.c
@@ -114,6 +114,7 @@ PyNode_AddChild(register node *n1, int type, char *str, int lineno, int col_offs
/* Forward */
static void freechildren(node *);
+static Py_ssize_t sizeofchildren(node *n);
void
@@ -125,6 +126,16 @@ PyNode_Free(node *n)
}
}
+Py_ssize_t
+_PyNode_SizeOf(node *n)
+{
+ Py_ssize_t res = 0;
+
+ if (n != NULL)
+ res = sizeof(node) + sizeofchildren(n);
+ return res;
+}
+
static void
freechildren(node *n)
{
@@ -136,3 +147,18 @@ freechildren(node *n)
if (STR(n) != NULL)
PyObject_FREE(STR(n));
}
+
+static Py_ssize_t
+sizeofchildren(node *n)
+{
+ Py_ssize_t res = 0;
+ int i;
+ for (i = NCH(n); --i >= 0; )
+ res += sizeofchildren(CHILD(n, i));
+ if (n->n_child != NULL)
+ /* allocated size of n->n_child array */
+ res += XXXROUNDUP(NCH(n)) * sizeof(node);
+ if (STR(n) != NULL)
+ res += strlen(STR(n)) + 1;
+ return res;
+}
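
A rough Python analogue of the new _PyNode_SizeOf()/sizeofchildren() pair, ignoring the XXXROUNDUP over-allocation and using an assumed per-node struct size:

    NODE_STRUCT = 32   # assumed sizeof(node), for illustration only

    def children_size(n):
        # Space taken by the n_child array plus everything hanging off it.
        total = 0
        for child in n.get("children", ()):
            total += children_size(child)
        total += len(n.get("children", ())) * NODE_STRUCT
        if n.get("str") is not None:
            total += len(n["str"]) + 1     # STR(n) plus its NUL terminator
        return total

    def node_size(n):
        # Mirrors _PyNode_SizeOf(): the root struct itself plus its children.
        return NODE_STRUCT + children_size(n)

    tree = {"str": "pass", "children": [{"str": None, "children": []}]}
    print(node_size(tree))   # 32 + (32 + 5) = 69
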
diff --git a/Parser/parsetok.c b/Parser/parsetok.c
index e8d396a..069cc6b 100644
--- a/Parser/parsetok.c
+++ b/Parser/parsetok.c
@@ -131,7 +131,7 @@ parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
{
parser_state *ps;
node *n;
- int started = 0, handling_import = 0, handling_with = 0;
+ int started = 0;
if ((ps = PyParser_New(g, start)) == NULL) {
fprintf(stderr, "no mem for new parser\n");
@@ -163,7 +163,6 @@ parsetok(struct tok_state *tok, grammar *g, int start, perrdetail *err_ret,
}
if (type == ENDMARKER && started) {
type = NEWLINE; /* Add an extra newline */
- handling_with = handling_import = 0;
started = 0;
/* Add the right number of dedent tokens,
except if a certain flag is given --
diff --git a/Parser/tokenizer.c b/Parser/tokenizer.c
index ee6313b..3e4af53 100644
--- a/Parser/tokenizer.c
+++ b/Parser/tokenizer.c
@@ -277,8 +277,11 @@ check_coding_spec(const char* line, Py_ssize_t size, struct tok_state *tok,
tok->encoding = cs;
tok->decoding_state = -1;
}
- else
+ else {
+ PyErr_Format(PyExc_SyntaxError,
+ "encoding problem: %s", cs);
PyMem_FREE(cs);
+ }
#else
/* Without Unicode support, we cannot
process the coding spec. Since there
@@ -289,15 +292,12 @@ check_coding_spec(const char* line, Py_ssize_t size, struct tok_state *tok,
}
} else { /* then, compare cs with BOM */
r = (strcmp(tok->encoding, cs) == 0);
+ if (!r)
+ PyErr_Format(PyExc_SyntaxError,
+ "encoding problem: %s with BOM", cs);
PyMem_FREE(cs);
}
}
- if (!r) {
- cs = tok->encoding;
- if (!cs)
- cs = "with BOM";
- PyErr_Format(PyExc_SyntaxError, "encoding problem: %s", cs);
- }
return r;
}
@@ -400,6 +400,12 @@ fp_readl(char *s, int size, struct tok_state *tok)
buf = PyObject_CallObject(tok->decoding_readline, NULL);
if (buf == NULL)
return error_ret(tok);
+ if (!PyUnicode_Check(buf)) {
+ Py_DECREF(buf);
+ PyErr_SetString(PyExc_SyntaxError,
+ "codec did not return a unicode object");
+ return error_ret(tok);
+ }
} else {
tok->decoding_buffer = NULL;
if (PyString_CheckExact(buf))
@@ -528,7 +534,7 @@ decoding_fgets(char *s, int size, struct tok_state *tok)
"Non-ASCII character '\\x%.2x' "
"in file %.200s on line %i, "
"but no encoding declared; "
- "see http://www.python.org/peps/pep-0263.html for details",
+ "see http://python.org/dev/peps/pep-0263/ for details",
badchar, tok->filename, tok->lineno + 1);
PyErr_SetString(PyExc_SyntaxError, buf);
return error_ret(tok);
@@ -1494,15 +1500,24 @@ tok_get(register struct tok_state *tok, char **p_start, char **p_end)
} while (isdigit(c));
}
if (c == 'e' || c == 'E') {
- exponent:
+ int e;
+ exponent:
+ e = c;
/* Exponent part */
c = tok_nextc(tok);
- if (c == '+' || c == '-')
+ if (c == '+' || c == '-') {
c = tok_nextc(tok);
- if (!isdigit(c)) {
- tok->done = E_TOKEN;
+ if (!isdigit(c)) {
+ tok->done = E_TOKEN;
+ tok_backup(tok, c);
+ return ERRORTOKEN;
+ }
+ } else if (!isdigit(c)) {
tok_backup(tok, c);
- return ERRORTOKEN;
+ tok_backup(tok, e);
+ *p_start = tok->start;
+ *p_end = tok->cur;
+ return NUMBER;
}
do {
c = tok_nextc(tok);
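
The exponent backtracking above lets a decimal literal run straight into a keyword that happens to start with "e". A tiny illustration, evaluated from a string so the snippet itself parses on interpreters without the fix:

    # On 2.7.8 the tokenizer backs up over the would-be exponent and reads
    # NUMBER "1" followed by NAME "else"; earlier 2.7.x releases reject this
    # spelling with a SyntaxError.
    print(eval("True if 1else False"))   # True
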
diff --git a/Python/Python-ast.c b/Python/Python-ast.c
index dcfde3c..6cf99ec 100644
--- a/Python/Python-ast.c
+++ b/Python/Python-ast.c
@@ -6749,10 +6749,18 @@ PyObject* PyAST_mod2obj(mod_ty t)
mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode)
{
mod_ty res;
- PyObject *req_type[] = {(PyObject*)Module_type, (PyObject*)Expression_type,
- (PyObject*)Interactive_type};
- char *req_name[] = {"Module", "Expression", "Interactive"};
+ PyObject *req_type[3];
+ char *req_name[3];
int isinstance;
+
+ req_type[0] = (PyObject*)Module_type;
+ req_type[1] = (PyObject*)Expression_type;
+ req_type[2] = (PyObject*)Interactive_type;
+
+ req_name[0] = "Module";
+ req_name[1] = "Expression";
+ req_name[2] = "Interactive";
+
assert(0 <= mode && mode <= 2);
init_types();
diff --git a/Python/ast.c b/Python/ast.c
index 88e4745..80e6354 100644
--- a/Python/ast.c
+++ b/Python/ast.c
@@ -37,7 +37,7 @@ static expr_ty ast_for_testlist_comp(struct compiling *, const node *);
static expr_ty ast_for_call(struct compiling *, const node *, expr_ty);
static PyObject *parsenumber(struct compiling *, const char *);
-static PyObject *parsestr(struct compiling *, const char *);
+static PyObject *parsestr(struct compiling *, const node *n, const char *);
static PyObject *parsestrplus(struct compiling *, const node *n);
#ifndef LINENO
@@ -930,7 +930,7 @@ ast_for_decorated(struct compiling *c, const node *n)
return NULL;
assert(TYPE(CHILD(n, 1)) == funcdef ||
- TYPE(CHILD(n, 1)) == classdef);
+ TYPE(CHILD(n, 1)) == classdef);
if (TYPE(CHILD(n, 1)) == funcdef) {
thing = ast_for_funcdef(c, CHILD(n, 1), decorator_seq);
@@ -1744,14 +1744,19 @@ ast_for_factor(struct compiling *c, const node *n)
NCH(ppower) == 1 &&
TYPE((patom = CHILD(ppower, 0))) == atom &&
TYPE((pnum = CHILD(patom, 0))) == NUMBER) {
+ PyObject *pynum;
char *s = PyObject_MALLOC(strlen(STR(pnum)) + 2);
if (s == NULL)
return NULL;
s[0] = '-';
strcpy(s + 1, STR(pnum));
- PyObject_FREE(STR(pnum));
- STR(pnum) = s;
- return ast_for_atom(c, patom);
+ pynum = parsenumber(c, s);
+ PyObject_FREE(s);
+ if (!pynum)
+ return NULL;
+
+ PyArena_AddPyObject(c->c_arena, pynum);
+ return Num(pynum, LINENO(n), n->n_col_offset, c->c_arena);
}
expression = ast_for_expr(c, CHILD(n, 1));
@@ -3292,8 +3297,8 @@ ast_for_stmt(struct compiling *c, const node *n)
return ast_for_funcdef(c, ch, NULL);
case classdef:
return ast_for_classdef(c, ch, NULL);
- case decorated:
- return ast_for_decorated(c, ch);
+ case decorated:
+ return ast_for_decorated(c, ch);
default:
PyErr_Format(PyExc_SystemError,
"unhandled small_stmt: TYPE=%d NCH=%d\n",
@@ -3382,8 +3387,8 @@ decode_unicode(struct compiling *c, const char *s, size_t len, int rawmode, cons
/* check for integer overflow */
if (len > PY_SIZE_MAX / 6)
return NULL;
- /* "<C3><A4>" (2 bytes) may become "\U000000E4" (10 bytes), or 1:5
- "\ä" (3 bytes) may become "\u005c\U000000E4" (16 bytes), or ~1:6 */
+ /* "<C3><A4>" (2 bytes) may become "\U000000E4" (10 bytes), or 1:5
+ "\ä" (3 bytes) may become "\u005c\U000000E4" (16 bytes), or ~1:6 */
u = PyString_FromStringAndSize((char *)NULL, len * 6);
if (u == NULL)
return NULL;
@@ -3413,8 +3418,8 @@ decode_unicode(struct compiling *c, const char *s, size_t len, int rawmode, cons
sprintf(p, "\\U%02x%02x%02x%02x",
r[i + 0] & 0xFF,
r[i + 1] & 0xFF,
- r[i + 2] & 0xFF,
- r[i + 3] & 0xFF);
+ r[i + 2] & 0xFF,
+ r[i + 3] & 0xFF);
p += 10;
}
Py_DECREF(w);
@@ -3439,13 +3444,14 @@ decode_unicode(struct compiling *c, const char *s, size_t len, int rawmode, cons
* parsestr parses it, and returns the decoded Python string object.
*/
static PyObject *
-parsestr(struct compiling *c, const char *s)
+parsestr(struct compiling *c, const node *n, const char *s)
{
- size_t len;
+ size_t len, i;
int quote = Py_CHARMASK(*s);
int rawmode = 0;
int need_encoding;
int unicode = c->c_future_unicode;
+ int bytes = 0;
if (isalpha(quote) || quote == '_') {
if (quote == 'u' || quote == 'U') {
@@ -3455,6 +3461,7 @@ parsestr(struct compiling *c, const char *s)
if (quote == 'b' || quote == 'B') {
quote = *++s;
unicode = 0;
+ bytes = 1;
}
if (quote == 'r' || quote == 'R') {
quote = *++s;
@@ -3484,6 +3491,16 @@ parsestr(struct compiling *c, const char *s)
return NULL;
}
}
+ if (Py_Py3kWarningFlag && bytes) {
+ for (i = 0; i < len; i++) {
+ if ((unsigned char)s[i] > 127) {
+ if (!ast_warn(c, n,
+ "non-ascii bytes literals not supported in 3.x"))
+ return NULL;
+ break;
+ }
+ }
+ }
#ifdef Py_USING_UNICODE
if (unicode || Py_UnicodeFlag) {
return decode_unicode(c, s, len, rawmode, c->c_encoding);
@@ -3526,11 +3543,11 @@ parsestrplus(struct compiling *c, const node *n)
PyObject *v;
int i;
REQ(CHILD(n, 0), STRING);
- if ((v = parsestr(c, STR(CHILD(n, 0)))) != NULL) {
+ if ((v = parsestr(c, n, STR(CHILD(n, 0)))) != NULL) {
/* String literal concatenation */
for (i = 1; i < NCH(n); i++) {
PyObject *s;
- s = parsestr(c, STR(CHILD(n, i)));
+ s = parsestr(c, n, STR(CHILD(n, i)));
if (s == NULL)
goto onError;
if (PyString_Check(v) && PyString_Check(s)) {
diff --git a/Python/bltinmodule.c b/Python/bltinmodule.c
index c25588a..f052574 100644
--- a/Python/bltinmodule.c
+++ b/Python/bltinmodule.c
@@ -53,8 +53,12 @@ builtin___import__(PyObject *self, PyObject *args, PyObject *kwds)
PyDoc_STRVAR(import_doc,
"__import__(name, globals={}, locals={}, fromlist=[], level=-1) -> module\n\
\n\
-Import a module. The globals are only used to determine the context;\n\
-they are not modified. The locals are currently unused. The fromlist\n\
+Import a module. Because this function is meant for use by the Python\n\
+interpreter and not for general use it is better to use\n\
+importlib.import_module() to programmatically import a module.\n\
+\n\
+The globals argument is only used to determine the context;\n\
+they are not modified. The locals argument is unused. The fromlist\n\
should be a list of names to emulate ``from name import ...'', or an\n\
empty list to emulate ``import name''.\n\
When importing a module from a package, note that __import__('A.B', ...)\n\
@@ -116,7 +120,8 @@ builtin_all(PyObject *self, PyObject *v)
PyDoc_STRVAR(all_doc,
"all(iterable) -> bool\n\
\n\
-Return True if bool(x) is True for all values x in the iterable.");
+Return True if bool(x) is True for all values x in the iterable.\n\
+If the iterable is empty, return True.");
static PyObject *
builtin_any(PyObject *self, PyObject *v)
@@ -158,7 +163,8 @@ builtin_any(PyObject *self, PyObject *v)
PyDoc_STRVAR(any_doc,
"any(iterable) -> bool\n\
\n\
-Return True if bool(x) is True for any x in the iterable.");
+Return True if bool(x) is True for any x in the iterable.\n\
+If the iterable is empty, return False.");
static PyObject *
builtin_apply(PyObject *self, PyObject *args)
@@ -309,7 +315,7 @@ builtin_filter(PyObject *self, PyObject *args)
ok = PyObject_IsTrue(good);
Py_DECREF(good);
}
- if (ok) {
+ if (ok > 0) {
if (j < len)
PyList_SET_ITEM(result, j, item);
else {
@@ -320,8 +326,11 @@ builtin_filter(PyObject *self, PyObject *args)
}
++j;
}
- else
+ else {
Py_DECREF(item);
+ if (ok < 0)
+ goto Fail_result_it;
+ }
}
@@ -516,6 +525,8 @@ builtin_compile(PyObject *self, PyObject *args, PyObject *kwds)
mod_ty mod;
arena = PyArena_New();
+ if (arena == NULL)
+ return NULL;
mod = PyAST_obj2mod(cmd, arena, mode);
if (mod == NULL) {
PyArena_Free(arena);
@@ -1312,7 +1323,7 @@ builtin_len(PyObject *self, PyObject *v)
PyDoc_STRVAR(len_doc,
"len(object) -> integer\n\
\n\
-Return the number of items of a sequence or mapping.");
+Return the number of items of a sequence or collection.");
static PyObject *
@@ -1578,6 +1589,7 @@ builtin_print(PyObject *self, PyObject *args, PyObject *kwds)
Py_CLEAR(str_newline);
return NULL;
}
+#ifdef Py_USING_UNICODE
unicode_newline = PyUnicode_FromString("\n");
if (unicode_newline == NULL) {
Py_CLEAR(str_newline);
@@ -1591,6 +1603,7 @@ builtin_print(PyObject *self, PyObject *args, PyObject *kwds)
Py_CLEAR(unicode_space);
return NULL;
}
+#endif
}
if (!PyArg_ParseTupleAndKeywords(dummy_args, kwds, "|OOO:print",
kwlist, &sep, &end, &file))
@@ -1993,7 +2006,8 @@ builtin_range(PyObject *self, PyObject *args)
}
PyDoc_STRVAR(range_doc,
-"range([start,] stop[, step]) -> list of integers\n\
+"range(stop) -> list of integers\n\
+range(start, stop[, step]) -> list of integers\n\
\n\
Return a list containing an arithmetic progression of integers.\n\
range(i, j) returns [i, i+1, i+2, ..., j-1]; start (!) defaults to 0.\n\
@@ -2420,9 +2434,9 @@ builtin_sum(PyObject *self, PyObject *args)
PyDoc_STRVAR(sum_doc,
"sum(sequence[, start]) -> value\n\
\n\
-Returns the sum of a sequence of numbers (NOT strings) plus the value\n\
+Return the sum of a sequence of numbers (NOT strings) plus the value\n\
of parameter 'start' (which defaults to 0). When the sequence is\n\
-empty, returns start.");
+empty, return start.");
static PyObject *
@@ -2778,12 +2792,15 @@ filtertuple(PyObject *func, PyObject *tuple)
}
ok = PyObject_IsTrue(good);
Py_DECREF(good);
- if (ok) {
+ if (ok > 0) {
if (PyTuple_SetItem(result, j++, item) < 0)
goto Fail_1;
}
- else
+ else {
Py_DECREF(item);
+ if (ok < 0)
+ goto Fail_1;
+ }
}
if (_PyTuple_Resize(&result, j) < 0)
@@ -2845,7 +2862,7 @@ filterstring(PyObject *func, PyObject *strobj)
ok = PyObject_IsTrue(good);
Py_DECREF(good);
}
- if (ok) {
+ if (ok > 0) {
Py_ssize_t reslen;
if (!PyString_Check(item)) {
PyErr_SetString(PyExc_TypeError, "can't filter str to str:"
@@ -2911,6 +2928,8 @@ filterstring(PyObject *func, PyObject *strobj)
}
}
Py_DECREF(item);
+ if (ok < 0)
+ goto Fail_1;
}
if (j < outlen)
@@ -2971,7 +2990,7 @@ filterunicode(PyObject *func, PyObject *strobj)
ok = PyObject_IsTrue(good);
Py_DECREF(good);
}
- if (ok) {
+ if (ok > 0) {
Py_ssize_t reslen;
if (!PyUnicode_Check(item)) {
PyErr_SetString(PyExc_TypeError,
@@ -3026,6 +3045,8 @@ filterunicode(PyObject *func, PyObject *strobj)
}
}
Py_DECREF(item);
+ if (ok < 0)
+ goto Fail_1;
}
if (j < outlen)
diff --git a/Python/ceval.c b/Python/ceval.c
index 06ada97..e008608 100644
--- a/Python/ceval.c
+++ b/Python/ceval.c
@@ -355,6 +355,12 @@ PyEval_RestoreThread(PyThreadState *tstate)
if (interpreter_lock) {
int err = errno;
PyThread_acquire_lock(interpreter_lock, 1);
+ /* _Py_Finalizing is protected by the GIL */
+ if (_Py_Finalizing && tstate != _Py_Finalizing) {
+ PyThread_release_lock(interpreter_lock);
+ PyThread_exit_thread();
+ assert(0); /* unreachable */
+ }
errno = err;
}
#endif
@@ -1018,6 +1024,13 @@ PyEval_EvalFrameEx(PyFrameObject *f, int throwflag)
/* Other threads may run now */
PyThread_acquire_lock(interpreter_lock, 1);
+
+ /* Check if we should make a quick exit. */
+ if (_Py_Finalizing && _Py_Finalizing != tstate) {
+ PyThread_release_lock(interpreter_lock);
+ PyThread_exit_thread();
+ }
+
if (PyThreadState_Swap(tstate) != NULL)
Py_FatalError("ceval: orphan tstate");
@@ -3240,8 +3253,7 @@ PyEval_EvalCodeEx(PyCodeObject *co, PyObject *globals, PyObject *locals,
if (co->co_flags & CO_GENERATOR) {
/* Don't need to keep the reference to f_back, it will be set
* when the generator is resumed. */
- Py_XDECREF(f->f_back);
- f->f_back = NULL;
+ Py_CLEAR(f->f_back);
PCALL(PCALL_GENERATOR);
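
The _Py_Finalizing checks above make a thread that wakes up while the interpreter is shutting down exit immediately instead of running against a half-torn-down runtime. The scenario they target, sketched in Python; there is nothing to assert here, the point is that this pattern no longer risks crashing at exit:

    import threading, time

    def worker():
        # A daemon thread like this can be blocked on the GIL when the main
        # thread starts Py_Finalize(); with the check it simply exits there.
        while True:
            time.sleep(0.01)

    t = threading.Thread(target=worker)
    t.daemon = True
    t.start()
    # Main thread falls off the end; finalization begins with the daemon alive.
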
diff --git a/Python/codecs.c b/Python/codecs.c
index 7334eb3..69498c4 100644
--- a/Python/codecs.c
+++ b/Python/codecs.c
@@ -521,7 +521,7 @@ PyObject *PyCodec_ReplaceErrors(PyObject *exc)
Py_UNICODE res = Py_UNICODE_REPLACEMENT_CHARACTER;
if (PyUnicodeDecodeError_GetEnd(exc, &end))
return NULL;
- return Py_BuildValue("(u#n)", &res, 1, end);
+ return Py_BuildValue("(u#n)", &res, (Py_ssize_t)1, end);
}
else if (PyObject_IsInstance(exc, PyExc_UnicodeTranslateError)) {
PyObject *res;
@@ -556,6 +556,7 @@ PyObject *PyCodec_XMLCharRefReplaceErrors(PyObject *exc)
PyObject *res;
Py_UNICODE *p;
Py_UNICODE *startp;
+ Py_UNICODE *e;
Py_UNICODE *outp;
int ressize;
if (PyUnicodeEncodeError_GetStart(exc, &start))
@@ -565,26 +566,31 @@ PyObject *PyCodec_XMLCharRefReplaceErrors(PyObject *exc)
if (!(object = PyUnicodeEncodeError_GetObject(exc)))
return NULL;
startp = PyUnicode_AS_UNICODE(object);
- for (p = startp+start, ressize = 0; p < startp+end; ++p) {
- if (*p<10)
+ e = startp + end;
+ for (p = startp+start, ressize = 0; p < e;) {
+ Py_UCS4 ch = *p++;
+#ifndef Py_UNICODE_WIDE
+ if ((0xD800 <= ch && ch <= 0xDBFF) &&
+ (p < e) &&
+ (0xDC00 <= *p && *p <= 0xDFFF)) {
+ ch = ((((ch & 0x03FF) << 10) |
+ ((Py_UCS4)*p++ & 0x03FF)) + 0x10000);
+ }
+#endif
+ if (ch < 10)
ressize += 2+1+1;
- else if (*p<100)
+ else if (ch < 100)
ressize += 2+2+1;
- else if (*p<1000)
+ else if (ch < 1000)
ressize += 2+3+1;
- else if (*p<10000)
+ else if (ch < 10000)
ressize += 2+4+1;
-#ifndef Py_UNICODE_WIDE
- else
- ressize += 2+5+1;
-#else
- else if (*p<100000)
+ else if (ch < 100000)
ressize += 2+5+1;
- else if (*p<1000000)
+ else if (ch < 1000000)
ressize += 2+6+1;
else
ressize += 2+7+1;
-#endif
}
/* allocate replacement */
res = PyUnicode_FromUnicode(NULL, ressize);
@@ -593,40 +599,41 @@ PyObject *PyCodec_XMLCharRefReplaceErrors(PyObject *exc)
return NULL;
}
/* generate replacement */
- for (p = startp+start, outp = PyUnicode_AS_UNICODE(res);
- p < startp+end; ++p) {
- Py_UNICODE c = *p;
+ for (p = startp+start, outp = PyUnicode_AS_UNICODE(res); p < e;) {
int digits;
int base;
+ Py_UCS4 ch = *p++;
+#ifndef Py_UNICODE_WIDE
+ if ((0xD800 <= ch && ch <= 0xDBFF) &&
+ (p < startp+end) &&
+ (0xDC00 <= *p && *p <= 0xDFFF)) {
+ ch = ((((ch & 0x03FF) << 10) |
+ ((Py_UCS4)*p++ & 0x03FF)) + 0x10000);
+ }
+#endif
*outp++ = '&';
*outp++ = '#';
- if (*p<10) {
+ if (ch < 10) {
digits = 1;
base = 1;
}
- else if (*p<100) {
+ else if (ch < 100) {
digits = 2;
base = 10;
}
- else if (*p<1000) {
+ else if (ch < 1000) {
digits = 3;
base = 100;
}
- else if (*p<10000) {
+ else if (ch < 10000) {
digits = 4;
base = 1000;
}
-#ifndef Py_UNICODE_WIDE
- else {
- digits = 5;
- base = 10000;
- }
-#else
- else if (*p<100000) {
+ else if (ch < 100000) {
digits = 5;
base = 10000;
}
- else if (*p<1000000) {
+ else if (ch < 1000000) {
digits = 6;
base = 100000;
}
@@ -634,10 +641,9 @@ PyObject *PyCodec_XMLCharRefReplaceErrors(PyObject *exc)
digits = 7;
base = 1000000;
}
-#endif
while (digits-->0) {
- *outp++ = '0' + c/base;
- c %= base;
+ *outp++ = '0' + ch/base;
+ ch %= base;
base /= 10;
}
*outp++ = ';';
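
The surrogate-pair handling above changes what the xmlcharrefreplace error handler emits for astral characters on narrow (UCS-2) builds: one reference for the real code point instead of two references for the surrogate halves. Roughly:

    emoji = u"\U0001F600"          # U+1F600, outside the BMP (128512 decimal)
    print(emoji.encode("ascii", "xmlcharrefreplace"))
    # expected on a fixed narrow build: &#128512;
    # before the fix, the two UTF-16 surrogates leaked out: &#55357;&#56832;
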
diff --git a/Python/compile.c b/Python/compile.c
index 119c60f..1cf53f9 100644
--- a/Python/compile.c
+++ b/Python/compile.c
@@ -221,8 +221,11 @@ _Py_Mangle(PyObject *privateobj, PyObject *ident)
}
plen = strlen(p);
- assert(1 <= PY_SSIZE_T_MAX - nlen);
- assert(1 + nlen <= PY_SSIZE_T_MAX - plen);
+ if (plen + nlen >= PY_SSIZE_T_MAX - 1) {
+ PyErr_SetString(PyExc_OverflowError,
+ "private identifier too large to be mangled");
+ return NULL;
+ }
ident = PyString_FromStringAndSize(NULL, 1 + nlen + plen);
if (!ident)
@@ -359,14 +362,31 @@ each key.
static PyObject *
dictbytype(PyObject *src, int scope_type, int flag, int offset)
{
- Py_ssize_t pos = 0, i = offset, scope;
+ Py_ssize_t i = offset, scope, num_keys, key_i;
PyObject *k, *v, *dest = PyDict_New();
+ PyObject *sorted_keys;
assert(offset >= 0);
if (dest == NULL)
return NULL;
- while (PyDict_Next(src, &pos, &k, &v)) {
+ /* Sort the keys so that we have a deterministic order on the indexes
+ saved in the returned dictionary. These indexes are used as indexes
+ into the free and cell var storage. Therefore if they aren't
+ deterministic, then the generated bytecode is not deterministic.
+ */
+ sorted_keys = PyDict_Keys(src);
+ if (sorted_keys == NULL)
+ return NULL;
+ if (PyList_Sort(sorted_keys) != 0) {
+ Py_DECREF(sorted_keys);
+ return NULL;
+ }
+ num_keys = PyList_GET_SIZE(sorted_keys);
+
+ for (key_i = 0; key_i < num_keys; key_i++) {
+ k = PyList_GET_ITEM(sorted_keys, key_i);
+ v = PyDict_GetItem(src, k);
/* XXX this should probably be a macro in symtable.h */
assert(PyInt_Check(v));
scope = (PyInt_AS_LONG(v) >> SCOPE_OFF) & SCOPE_MASK;
@@ -374,12 +394,14 @@ dictbytype(PyObject *src, int scope_type, int flag, int offset)
if (scope == scope_type || PyInt_AS_LONG(v) & flag) {
PyObject *tuple, *item = PyInt_FromLong(i);
if (item == NULL) {
+ Py_DECREF(sorted_keys);
Py_DECREF(dest);
return NULL;
}
i++;
tuple = PyTuple_Pack(2, k, k->ob_type);
if (!tuple || PyDict_SetItem(dest, tuple, item) < 0) {
+ Py_DECREF(sorted_keys);
Py_DECREF(item);
Py_DECREF(dest);
Py_XDECREF(tuple);
@@ -389,6 +411,7 @@ dictbytype(PyObject *src, int scope_type, int flag, int offset)
Py_DECREF(tuple);
}
}
+ Py_DECREF(sorted_keys);
return dest;
}
@@ -3460,12 +3483,16 @@ stackdepth_walk(struct compiler *c, basicblock *b, int depth, int maxdepth)
target_depth = depth;
if (instr->i_opcode == FOR_ITER) {
target_depth = depth-2;
- } else if (instr->i_opcode == SETUP_FINALLY ||
- instr->i_opcode == SETUP_EXCEPT) {
+ }
+ else if (instr->i_opcode == SETUP_FINALLY ||
+ instr->i_opcode == SETUP_EXCEPT) {
target_depth = depth+3;
if (target_depth > maxdepth)
maxdepth = target_depth;
}
+ else if (instr->i_opcode == JUMP_IF_TRUE_OR_POP ||
+ instr->i_opcode == JUMP_IF_FALSE_OR_POP)
+ depth = depth - 1;
maxdepth = stackdepth_walk(c, instr->i_target,
target_depth, maxdepth);
if (instr->i_opcode == JUMP_ABSOLUTE ||
diff --git a/Python/dtoa.c b/Python/dtoa.c
index 44dc01f..73e23af 100644
--- a/Python/dtoa.c
+++ b/Python/dtoa.c
@@ -204,7 +204,24 @@ typedef union { double d; ULong L[2]; } U;
MAX_ABS_EXP in absolute value get truncated to +-MAX_ABS_EXP. MAX_ABS_EXP
should fit into an int. */
#ifndef MAX_ABS_EXP
-#define MAX_ABS_EXP 19999U
+#define MAX_ABS_EXP 1100000000U
+#endif
+/* Bound on length of pieces of input strings in _Py_dg_strtod; specifically,
+ this is used to bound the total number of digits ignoring leading zeros and
+ the number of digits that follow the decimal point. Ideally, MAX_DIGITS
+ should satisfy MAX_DIGITS + 400 < MAX_ABS_EXP; that ensures that the
+ exponent clipping in _Py_dg_strtod can't affect the value of the output. */
+#ifndef MAX_DIGITS
+#define MAX_DIGITS 1000000000U
+#endif
+
+/* Guard against trying to use the above values on unusual platforms with ints
+ * of width less than 32 bits. */
+#if MAX_ABS_EXP > INT_MAX
+#error "MAX_ABS_EXP should fit in an int"
+#endif
+#if MAX_DIGITS > INT_MAX
+#error "MAX_DIGITS should fit in an int"
#endif
/* The following definition of Storeinc is appropriate for MIPS processors.
@@ -1498,6 +1515,7 @@ _Py_dg_strtod(const char *s00, char **se)
Long L;
BCinfo bc;
Bigint *bb, *bb1, *bd, *bd0, *bs, *delta;
+ size_t ndigits, fraclen;
dval(&rv) = 0.;
@@ -1520,39 +1538,52 @@ _Py_dg_strtod(const char *s00, char **se)
c = *++s;
lz = s != s1;
- /* Point s0 at the first nonzero digit (if any). nd0 will be the position
- of the point relative to s0. nd will be the total number of digits
- ignoring leading zeros. */
+ /* Point s0 at the first nonzero digit (if any). fraclen will be the
+ number of digits between the decimal point and the end of the
+ digit string. ndigits will be the total number of digits ignoring
+ leading zeros. */
s0 = s1 = s;
while ('0' <= c && c <= '9')
c = *++s;
- nd0 = nd = s - s1;
+ ndigits = s - s1;
+ fraclen = 0;
/* Parse decimal point and following digits. */
if (c == '.') {
c = *++s;
- if (!nd) {
+ if (!ndigits) {
s1 = s;
while (c == '0')
c = *++s;
lz = lz || s != s1;
- nd0 -= s - s1;
+ fraclen += (s - s1);
s0 = s;
}
s1 = s;
while ('0' <= c && c <= '9')
c = *++s;
- nd += s - s1;
+ ndigits += s - s1;
+ fraclen += s - s1;
+ }
+
+ /* Now lz is true if and only if there were leading zero digits, and
+ ndigits gives the total number of digits ignoring leading zeros. A
+ valid input must have at least one digit. */
+ if (!ndigits && !lz) {
+ if (se)
+ *se = (char *)s00;
+ goto parse_error;
}
- /* Now lz is true if and only if there were leading zero digits, and nd
- gives the total number of digits ignoring leading zeros. A valid input
- must have at least one digit. */
- if (!nd && !lz) {
+ /* Range check ndigits and fraclen to make sure that they, and values
+ computed with them, can safely fit in an int. */
+ if (ndigits > MAX_DIGITS || fraclen > MAX_DIGITS) {
if (se)
*se = (char *)s00;
goto parse_error;
}
+ nd = (int)ndigits;
+ nd0 = (int)ndigits - (int)fraclen;
/* Parse exponent. */
e = 0;
@@ -1886,20 +1917,20 @@ _Py_dg_strtod(const char *s00, char **se)
bd2++;
/* At this stage bd5 - bb5 == e == bd2 - bb2 + bbe, bb2 - bs2 == 1,
- and bs == 1, so:
+ and bs == 1, so:
tdv == bd * 10**e = bd * 2**(bbe - bb2 + bd2) * 5**(bd5 - bb5)
srv == bb * 2**bbe = bb * 2**(bbe - bb2 + bb2)
- 0.5 ulp(srv) == 2**(bbe-1) = bs * 2**(bbe - bb2 + bs2)
+ 0.5 ulp(srv) == 2**(bbe-1) = bs * 2**(bbe - bb2 + bs2)
- It follows that:
+ It follows that:
M * tdv = bd * 2**bd2 * 5**bd5
M * srv = bb * 2**bb2 * 5**bb5
M * 0.5 ulp(srv) = bs * 2**bs2 * 5**bb5
- for some constant M. (Actually, M == 2**(bb2 - bbe) * 5**bb5, but
- this fact is not needed below.)
+ for some constant M. (Actually, M == 2**(bb2 - bbe) * 5**bb5, but
+ this fact is not needed below.)
*/
/* Remove factor of 2**i, where i = min(bb2, bd2, bs2). */
diff --git a/Python/future.c b/Python/future.c
index 96be757..0e68845 100644
--- a/Python/future.c
+++ b/Python/future.c
@@ -59,13 +59,6 @@ future_parse(PyFutureFeatures *ff, mod_ty mod, const char *filename)
{
int i, found_docstring = 0, done = 0, prev_line = 0;
- static PyObject *future;
- if (!future) {
- future = PyString_InternFromString("__future__");
- if (!future)
- return 0;
- }
-
if (!(mod->kind == Module_kind || mod->kind == Interactive_kind))
return 1;
@@ -92,7 +85,9 @@ future_parse(PyFutureFeatures *ff, mod_ty mod, const char *filename)
*/
if (s->kind == ImportFrom_kind) {
- if (s->v.ImportFrom.module == future) {
+ identifier modname = s->v.ImportFrom.module;
+ if (modname && PyString_GET_SIZE(modname) == 10 &&
+ !strcmp(PyString_AS_STRING(modname), "__future__")) {
if (done) {
PyErr_SetString(PyExc_SyntaxError,
ERR_LATE_FUTURE);
diff --git a/Python/getargs.c b/Python/getargs.c
index eccdc9b..81a2721 100644
--- a/Python/getargs.c
+++ b/Python/getargs.c
@@ -1410,7 +1410,7 @@ getbuffer(PyObject *arg, Py_buffer *view, char **errmsg)
*errmsg = "convertible to a buffer";
return count;
}
- PyBuffer_FillInfo(view, NULL, buf, count, 1, 0);
+ PyBuffer_FillInfo(view, arg, buf, count, 1, 0);
return 0;
}
@@ -1837,6 +1837,7 @@ PyArg_UnpackTuple(PyObject *args, const char *name, Py_ssize_t min, Py_ssize_t m
assert(min >= 0);
assert(min <= max);
if (!PyTuple_Check(args)) {
+ va_end(vargs);
PyErr_SetString(PyExc_SystemError,
"PyArg_UnpackTuple() argument list is not a tuple");
return 0;
diff --git a/Python/getcopyright.c b/Python/getcopyright.c
index d91a879..629a240 100644
--- a/Python/getcopyright.c
+++ b/Python/getcopyright.c
@@ -4,7 +4,7 @@
static char cprt[] =
"\
-Copyright (c) 2001-2012 Python Software Foundation.\n\
+Copyright (c) 2001-2014 Python Software Foundation.\n\
All Rights Reserved.\n\
\n\
Copyright (c) 2000 BeOpen.com.\n\
diff --git a/Python/getopt.c b/Python/getopt.c
index e96eb6c..af5b03c 100644
--- a/Python/getopt.c
+++ b/Python/getopt.c
@@ -86,17 +86,19 @@ int _PyOS_GetOpt(int argc, char **argv, char *optstring)
opt_ptr = &argv[_PyOS_optind++][1];
}
- if ( (option = *opt_ptr++) == '\0')
+ if ((option = *opt_ptr++) == '\0')
return -1;
if (option == 'J') {
- fprintf(stderr, "-J is reserved for Jython\n");
+ if (_PyOS_opterr)
+ fprintf(stderr, "-J is reserved for Jython\n");
return '_';
}
if (option == 'X') {
- fprintf(stderr,
- "-X is reserved for implementation-specific arguments\n");
+ if (_PyOS_opterr)
+ fprintf(stderr,
+ "-X is reserved for implementation-specific arguments\n");
return '_';
}
@@ -117,7 +119,7 @@ int _PyOS_GetOpt(int argc, char **argv, char *optstring)
if (_PyOS_optind >= argc) {
if (_PyOS_opterr)
fprintf(stderr,
- "Argument expected for the -%c option\n", option);
+ "Argument expected for the -%c option\n", option);
return '_';
}
diff --git a/Python/import.c b/Python/import.c
index 2cac9b5..92363b3 100644
--- a/Python/import.c
+++ b/Python/import.c
@@ -114,6 +114,34 @@ static const struct filedescr _PyImport_StandardFiletab[] = {
};
#endif
+#ifdef MS_WINDOWS
+static int isdir(char *path) {
+ DWORD rv;
+ /* see issue1293 and issue3677:
+ * stat() on Windows doesn't recognise paths like
+ * "e:\\shared\\" and "\\\\whiterab-c2znlh\\shared" as dirs.
+ * Also reference issue6727:
+ * stat() on Windows is broken and doesn't resolve symlinks properly.
+ */
+ rv = GetFileAttributesA(path);
+ return rv != INVALID_FILE_ATTRIBUTES && rv & FILE_ATTRIBUTE_DIRECTORY;
+}
+#else
+#ifdef HAVE_STAT
+static int isdir(char *path) {
+ struct stat statbuf;
+ return stat(path, &statbuf) == 0 && S_ISDIR(statbuf.st_mode);
+}
+#else
+#ifdef RISCOS
+/* with RISCOS, isdir is in unixstuff */
+#else
+int isdir(char *path) {
+ return 0;
+}
+#endif /* RISCOS */
+#endif /* HAVE_STAT */
+#endif /* MS_WINDOWS */
/* Initialize things */
@@ -876,12 +904,17 @@ open_exclusive(char *filename, mode_t mode)
remove the file. */
static void
-write_compiled_module(PyCodeObject *co, char *cpathname, struct stat *srcstat)
+write_compiled_module(PyCodeObject *co, char *cpathname, struct stat *srcstat, time_t mtime)
{
FILE *fp;
- time_t mtime = srcstat->st_mtime;
#ifdef MS_WINDOWS /* since Windows uses different permissions */
mode_t mode = srcstat->st_mode & ~S_IEXEC;
+ /* Issue #6074: We ensure user write access, so we can delete it later
+ * when the source file changes. (On POSIX, this only requires write
+ * access to the directory, on Windows, we need write access to the file
+ * as well)
+ */
+ mode |= _S_IWRITE;
#else
mode_t mode = srcstat->st_mode & ~S_IXUSR & ~S_IXGRP & ~S_IXOTH;
#endif
@@ -959,6 +992,38 @@ update_compiled_module(PyCodeObject *co, char *pathname)
return 1;
}
+#ifdef MS_WINDOWS
+
+/* Seconds between 1.1.1601 and 1.1.1970 */
+static __int64 secs_between_epochs = 11644473600;
+
+/* Get mtime from file pointer. */
+
+static time_t
+win32_mtime(FILE *fp, char *pathname)
+{
+ __int64 filetime;
+ HANDLE fh;
+ BY_HANDLE_FILE_INFORMATION file_information;
+
+ fh = (HANDLE)_get_osfhandle(fileno(fp));
+ if (fh == INVALID_HANDLE_VALUE ||
+ !GetFileInformationByHandle(fh, &file_information)) {
+ PyErr_Format(PyExc_RuntimeError,
+ "unable to get file status from '%s'",
+ pathname);
+ return -1;
+ }
+ /* filetime represents the number of 100ns intervals since
+ 1.1.1601 (UTC). Convert to seconds since 1.1.1970 (UTC). */
+ filetime = (__int64)file_information.ftLastWriteTime.dwHighDateTime << 32 |
+ file_information.ftLastWriteTime.dwLowDateTime;
+ return filetime / 10000000 - secs_between_epochs;
+}
+
+#endif /* #ifdef MS_WINDOWS */
+
+
/* Load a source module from a given file and return its module
object WITH INCREMENTED REFERENCE COUNT. If there's a matching
byte-compiled file, use that instead. */
@@ -968,10 +1033,11 @@ load_source_module(char *name, char *pathname, FILE *fp)
{
struct stat st;
FILE *fpc;
- char buf[MAXPATHLEN+1];
+ char *buf;
char *cpathname;
- PyCodeObject *co;
+ PyCodeObject *co = NULL;
PyObject *m;
+ time_t mtime;
if (fstat(fileno(fp), &st) != 0) {
PyErr_Format(PyExc_RuntimeError,
@@ -979,24 +1045,36 @@ load_source_module(char *name, char *pathname, FILE *fp)
pathname);
return NULL;
}
- if (sizeof st.st_mtime > 4) {
+
+#ifdef MS_WINDOWS
+ mtime = win32_mtime(fp, pathname);
+ if (mtime == (time_t)-1 && PyErr_Occurred())
+ return NULL;
+#else
+ mtime = st.st_mtime;
+#endif
+ if (sizeof mtime > 4) {
/* Python's .pyc timestamp handling presumes that the timestamp fits
in 4 bytes. Since the code only does an equality comparison,
ordering is not important and we can safely ignore the higher bits
(collisions are extremely unlikely).
*/
- st.st_mtime &= 0xFFFFFFFF;
+ mtime &= 0xFFFFFFFF;
+ }
+ buf = PyMem_MALLOC(MAXPATHLEN+1);
+ if (buf == NULL) {
+ return PyErr_NoMemory();
}
cpathname = make_compiled_pathname(pathname, buf,
(size_t)MAXPATHLEN + 1);
if (cpathname != NULL &&
- (fpc = check_compiled_module(pathname, st.st_mtime, cpathname))) {
+ (fpc = check_compiled_module(pathname, mtime, cpathname))) {
co = read_compiled_module(cpathname, fpc);
fclose(fpc);
if (co == NULL)
- return NULL;
+ goto error_exit;
if (update_compiled_module(co, pathname) < 0)
- return NULL;
+ goto error_exit;
if (Py_VerboseFlag)
PySys_WriteStderr("import %s # precompiled from %s\n",
name, cpathname);
@@ -1005,20 +1083,29 @@ load_source_module(char *name, char *pathname, FILE *fp)
else {
co = parse_source_module(pathname, fp);
if (co == NULL)
- return NULL;
+ goto error_exit;
if (Py_VerboseFlag)
PySys_WriteStderr("import %s # from %s\n",
name, pathname);
if (cpathname) {
PyObject *ro = PySys_GetObject("dont_write_bytecode");
- if (ro == NULL || !PyObject_IsTrue(ro))
- write_compiled_module(co, cpathname, &st);
+ int b = (ro == NULL) ? 0 : PyObject_IsTrue(ro);
+ if (b < 0)
+ goto error_exit;
+ if (!b)
+ write_compiled_module(co, cpathname, &st, mtime);
}
}
m = PyImport_ExecCodeModuleEx(name, (PyObject *)co, pathname);
Py_DECREF(co);
+ PyMem_FREE(buf);
return m;
+
+error_exit:
+ Py_XDECREF(co);
+ PyMem_FREE(buf);
+ return NULL;
}
@@ -1038,7 +1125,7 @@ load_package(char *name, char *pathname)
PyObject *file = NULL;
PyObject *path = NULL;
int err;
- char buf[MAXPATHLEN+1];
+ char *buf = NULL;
FILE *fp = NULL;
struct filedescr *fdp;
@@ -1060,8 +1147,13 @@ load_package(char *name, char *pathname)
err = PyDict_SetItemString(d, "__path__", path);
if (err != 0)
goto error;
+ buf = PyMem_MALLOC(MAXPATHLEN+1);
+ if (buf == NULL) {
+ PyErr_NoMemory();
+ goto error;
+ }
buf[0] = '\0';
- fdp = find_module(name, "__init__", path, buf, sizeof(buf), &fp, NULL);
+ fdp = find_module(name, "__init__", path, buf, MAXPATHLEN+1, &fp, NULL);
if (fdp == NULL) {
if (PyErr_ExceptionMatches(PyExc_ImportError)) {
PyErr_Clear();
@@ -1079,6 +1171,8 @@ load_package(char *name, char *pathname)
error:
m = NULL;
cleanup:
+ if (buf)
+ PyMem_FREE(buf);
Py_XDECREF(path);
Py_XDECREF(file);
return m;
@@ -1204,13 +1298,10 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
char *filemode;
FILE *fp = NULL;
PyObject *path_hooks, *path_importer_cache;
-#ifndef RISCOS
- struct stat statbuf;
-#endif
static struct filedescr fd_frozen = {"", "", PY_FROZEN};
static struct filedescr fd_builtin = {"", "", C_BUILTIN};
static struct filedescr fd_package = {"", "", PKG_DIRECTORY};
- char name[MAXPATHLEN+1];
+ char *name;
#if defined(PYOS_OS2)
size_t saved_len;
size_t saved_namelen;
@@ -1224,6 +1315,11 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
"module name is too long");
return NULL;
}
+ name = PyMem_MALLOC(MAXPATHLEN+1);
+ if (name == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
strcpy(name, subname);
/* sys.meta_path import hook */
@@ -1235,7 +1331,7 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
PyErr_SetString(PyExc_RuntimeError,
"sys.meta_path must be a list of "
"import hooks");
- return NULL;
+ goto error_exit;
}
Py_INCREF(meta_path); /* zap guard */
npath = PyList_Size(meta_path);
@@ -1248,12 +1344,13 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
path : Py_None);
if (loader == NULL) {
Py_DECREF(meta_path);
- return NULL; /* true error */
+ goto error_exit; /* true error */
}
if (loader != Py_None) {
/* a loader was found */
*p_loader = loader;
Py_DECREF(meta_path);
+ PyMem_FREE(name);
return &importhookdescr;
}
Py_DECREF(loader);
@@ -1267,7 +1364,7 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
if (PyString_Size(path) + 1 + strlen(name) >= (size_t)buflen) {
PyErr_SetString(PyExc_ImportError,
"full frozen module name too long");
- return NULL;
+ goto error_exit;
}
strcpy(buf, PyString_AsString(path));
strcat(buf, ".");
@@ -1275,19 +1372,22 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
strcpy(name, buf);
if (find_frozen(name) != NULL) {
strcpy(buf, name);
+ PyMem_FREE(name);
return &fd_frozen;
}
PyErr_Format(PyExc_ImportError,
"No frozen submodule named %.200s", name);
- return NULL;
+ goto error_exit;
}
if (path == NULL) {
if (is_builtin(name)) {
strcpy(buf, name);
+ PyMem_FREE(name);
return &fd_builtin;
}
if ((find_frozen(name)) != NULL) {
strcpy(buf, name);
+ PyMem_FREE(name);
return &fd_frozen;
}
@@ -1295,6 +1395,7 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
fp = PyWin_FindRegisteredModule(name, &fdp, buf, buflen);
if (fp != NULL) {
*p_fp = fp;
+ PyMem_FREE(name);
return fdp;
}
#endif
@@ -1303,7 +1404,7 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
if (path == NULL || !PyList_Check(path)) {
PyErr_SetString(PyExc_RuntimeError,
"sys.path must be a list of directory names");
- return NULL;
+ goto error_exit;
}
path_hooks = PySys_GetObject("path_hooks");
@@ -1311,14 +1412,14 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
PyErr_SetString(PyExc_RuntimeError,
"sys.path_hooks must be a list of "
"import hooks");
- return NULL;
+ goto error_exit;
}
path_importer_cache = PySys_GetObject("path_importer_cache");
if (path_importer_cache == NULL ||
!PyDict_Check(path_importer_cache)) {
PyErr_SetString(PyExc_RuntimeError,
"sys.path_importer_cache must be a dict");
- return NULL;
+ goto error_exit;
}
npath = PyList_Size(path);
@@ -1327,13 +1428,13 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
PyObject *copy = NULL;
PyObject *v = PyList_GetItem(path, i);
if (!v)
- return NULL;
+ goto error_exit;
#ifdef Py_USING_UNICODE
if (PyUnicode_Check(v)) {
copy = PyUnicode_Encode(PyUnicode_AS_UNICODE(v),
PyUnicode_GET_SIZE(v), Py_FileSystemDefaultEncoding, NULL);
if (copy == NULL)
- return NULL;
+ goto error_exit;
v = copy;
}
else
@@ -1359,7 +1460,7 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
path_hooks, v);
if (importer == NULL) {
Py_XDECREF(copy);
- return NULL;
+ goto error_exit;
}
/* Note: importer is a borrowed reference */
if (importer != Py_None) {
@@ -1369,10 +1470,11 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
"s", fullname);
Py_XDECREF(copy);
if (loader == NULL)
- return NULL; /* error */
+ goto error_exit; /* error */
if (loader != Py_None) {
/* a loader was found */
*p_loader = loader;
+ PyMem_FREE(name);
return &importhookdescr;
}
Py_DECREF(loader);
@@ -1392,12 +1494,11 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
/* Check for package import (buf holds a directory name,
and there's an __init__ module in that directory */
-#ifdef HAVE_STAT
- if (stat(buf, &statbuf) == 0 && /* it exists */
- S_ISDIR(statbuf.st_mode) && /* it's a directory */
+ if (isdir(buf) && /* it's an existing directory */
case_ok(buf, len, namelen, name)) { /* case matches */
if (find_init_module(buf)) { /* and has __init__.py */
Py_XDECREF(copy);
+ PyMem_FREE(name);
return &fd_package;
}
else {
@@ -1408,32 +1509,10 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
if (PyErr_Warn(PyExc_ImportWarning,
warnstr)) {
Py_XDECREF(copy);
- return NULL;
+ goto error_exit;
}
}
}
-#else
- /* XXX How are you going to test for directories? */
-#ifdef RISCOS
- if (isdir(buf) &&
- case_ok(buf, len, namelen, name)) {
- if (find_init_module(buf)) {
- Py_XDECREF(copy);
- return &fd_package;
- }
- else {
- char warnstr[MAXPATHLEN+80];
- sprintf(warnstr, "Not importing directory "
- "'%.*s': missing __init__.py",
- MAXPATHLEN, buf);
- if (PyErr_Warn(PyExc_ImportWarning,
- warnstr)) {
- Py_XDECREF(copy);
- return NULL;
- }
- }
-#endif
-#endif
#if defined(PYOS_OS2)
/* take a snapshot of the module spec for restoration
* after the 8 character DLL hackery
@@ -1505,10 +1584,15 @@ find_module(char *fullname, char *subname, PyObject *path, char *buf,
if (fp == NULL) {
PyErr_Format(PyExc_ImportError,
"No module named %.200s", name);
- return NULL;
+ goto error_exit;
}
*p_fp = fp;
+ PyMem_FREE(name);
return fdp;
+
+error_exit:
+ PyMem_FREE(name);
+ return NULL;
}
/* Helpers for main.c
@@ -2115,7 +2199,7 @@ static PyObject *
import_module_level(char *name, PyObject *globals, PyObject *locals,
PyObject *fromlist, int level)
{
- char buf[MAXPATHLEN+1];
+ char *buf;
Py_ssize_t buflen = 0;
PyObject *parent, *head, *next, *tail;
@@ -2129,14 +2213,18 @@ import_module_level(char *name, PyObject *globals, PyObject *locals,
return NULL;
}
+ buf = PyMem_MALLOC(MAXPATHLEN+1);
+ if (buf == NULL) {
+ return PyErr_NoMemory();
+ }
parent = get_parent(globals, buf, &buflen, level);
if (parent == NULL)
- return NULL;
+ goto error_exit;
head = load_next(parent, level < 0 ? Py_None : parent, &name, buf,
&buflen);
if (head == NULL)
- return NULL;
+ goto error_exit;
tail = head;
Py_INCREF(tail);
@@ -2145,7 +2233,7 @@ import_module_level(char *name, PyObject *globals, PyObject *locals,
Py_DECREF(tail);
if (next == NULL) {
Py_DECREF(head);
- return NULL;
+ goto error_exit;
}
tail = next;
}
@@ -2157,26 +2245,38 @@ import_module_level(char *name, PyObject *globals, PyObject *locals,
Py_DECREF(head);
PyErr_SetString(PyExc_ValueError,
"Empty module name");
- return NULL;
+ goto error_exit;
}
if (fromlist != NULL) {
- if (fromlist == Py_None || !PyObject_IsTrue(fromlist))
+ int b = (fromlist == Py_None) ? 0 : PyObject_IsTrue(fromlist);
+ if (b < 0) {
+ Py_DECREF(tail);
+ Py_DECREF(head);
+ goto error_exit;
+ }
+ if (!b)
fromlist = NULL;
}
if (fromlist == NULL) {
Py_DECREF(tail);
+ PyMem_FREE(buf);
return head;
}
Py_DECREF(head);
if (!ensure_fromlist(tail, fromlist, buf, buflen, 0)) {
Py_DECREF(tail);
- return NULL;
+ goto error_exit;
}
+ PyMem_FREE(buf);
return tail;
+
+error_exit:
+ PyMem_FREE(buf);
+ return NULL;
}
PyObject *
@@ -2566,7 +2666,7 @@ import_submodule(PyObject *mod, char *subname, char *fullname)
}
else {
PyObject *path, *loader = NULL;
- char buf[MAXPATHLEN+1];
+ char *buf;
struct filedescr *fdp;
FILE *fp = NULL;
@@ -2581,11 +2681,16 @@ import_submodule(PyObject *mod, char *subname, char *fullname)
}
}
+ buf = PyMem_MALLOC(MAXPATHLEN+1);
+ if (buf == NULL) {
+ return PyErr_NoMemory();
+ }
buf[0] = '\0';
fdp = find_module(fullname, subname, path, buf, MAXPATHLEN+1,
&fp, &loader);
Py_XDECREF(path);
if (fdp == NULL) {
+ PyMem_FREE(buf);
if (!PyErr_ExceptionMatches(PyExc_ImportError))
return NULL;
PyErr_Clear();
@@ -2600,6 +2705,7 @@ import_submodule(PyObject *mod, char *subname, char *fullname)
Py_XDECREF(m);
m = NULL;
}
+ PyMem_FREE(buf);
}
return m;
@@ -2617,7 +2723,7 @@ PyImport_ReloadModule(PyObject *m)
PyObject *modules = PyImport_GetModuleDict();
PyObject *path = NULL, *loader = NULL, *existing_m = NULL;
char *name, *subname;
- char buf[MAXPATHLEN+1];
+ char *buf;
struct filedescr *fdp;
FILE *fp = NULL;
PyObject *newm;
@@ -2677,6 +2783,11 @@ PyImport_ReloadModule(PyObject *m)
if (path == NULL)
PyErr_Clear();
}
+ buf = PyMem_MALLOC(MAXPATHLEN+1);
+ if (buf == NULL) {
+ Py_XDECREF(path);
+ return PyErr_NoMemory();
+ }
buf[0] = '\0';
fdp = find_module(name, subname, path, buf, MAXPATHLEN+1, &fp, &loader);
Py_XDECREF(path);
@@ -2684,6 +2795,7 @@ PyImport_ReloadModule(PyObject *m)
if (fdp == NULL) {
Py_XDECREF(loader);
imp_modules_reloading_clear();
+ PyMem_FREE(buf);
return NULL;
}
@@ -2701,6 +2813,7 @@ PyImport_ReloadModule(PyObject *m)
PyDict_SetItemString(modules, name, m);
}
imp_modules_reloading_clear();
+ PyMem_FREE(buf);
return newm;
}
@@ -2831,19 +2944,27 @@ call_find_module(char *name, PyObject *path)
extern int fclose(FILE *);
PyObject *fob, *ret;
struct filedescr *fdp;
- char pathname[MAXPATHLEN+1];
+ char *pathname;
FILE *fp = NULL;
+ pathname = PyMem_MALLOC(MAXPATHLEN+1);
+ if (pathname == NULL) {
+ return PyErr_NoMemory();
+ }
pathname[0] = '\0';
if (path == Py_None)
path = NULL;
fdp = find_module(NULL, name, path, pathname, MAXPATHLEN+1, &fp, NULL);
- if (fdp == NULL)
+ if (fdp == NULL) {
+ PyMem_FREE(pathname);
return NULL;
+ }
if (fp != NULL) {
fob = PyFile_FromFile(fp, pathname, fdp->mode, fclose);
- if (fob == NULL)
+ if (fob == NULL) {
+ PyMem_FREE(pathname);
return NULL;
+ }
}
else {
fob = Py_None;
@@ -2852,6 +2973,7 @@ call_find_module(char *name, PyObject *path)
ret = Py_BuildValue("Os(ssi)",
fob, pathname, fdp->suffix, fdp->mode, fdp->type);
Py_DECREF(fob);
+ PyMem_FREE(pathname);
return ret;
}
@@ -3199,49 +3321,11 @@ NullImporter_init(NullImporter *self, PyObject *args, PyObject *kwds)
PyErr_SetString(PyExc_ImportError, "empty pathname");
return -1;
} else {
-#ifndef RISCOS
-#ifndef MS_WINDOWS
- struct stat statbuf;
- int rv;
-
- rv = stat(path, &statbuf);
- if (rv == 0) {
- /* it exists */
- if (S_ISDIR(statbuf.st_mode)) {
- /* it's a directory */
- PyErr_SetString(PyExc_ImportError,
- "existing directory");
- return -1;
- }
- }
-#else /* MS_WINDOWS */
- DWORD rv;
- /* see issue1293 and issue3677:
- * stat() on Windows doesn't recognise paths like
- * "e:\\shared\\" and "\\\\whiterab-c2znlh\\shared" as dirs.
- */
- rv = GetFileAttributesA(path);
- if (rv != INVALID_FILE_ATTRIBUTES) {
- /* it exists */
- if (rv & FILE_ATTRIBUTE_DIRECTORY) {
- /* it's a directory */
- PyErr_SetString(PyExc_ImportError,
- "existing directory");
- return -1;
- }
- }
-#endif
-#else /* RISCOS */
- if (object_exists(path)) {
- /* it exists */
- if (isdir(path)) {
- /* it's a directory */
- PyErr_SetString(PyExc_ImportError,
- "existing directory");
- return -1;
- }
+ if(isdir(path)) {
+ PyErr_SetString(PyExc_ImportError,
+ "existing directory");
+ return -1;
}
-#endif
}
return 0;
}
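
The import.c hunks above all apply one pattern: the on-stack "char buf[MAXPATHLEN+1]" scratch buffers become heap allocations, and every failure path is routed through a single error_exit label so the buffer is always released. A minimal sketch of that shape (not the CPython code itself; load_with_path_buffer is a hypothetical helper, and MAXPATHLEN is assumed to come from osdefs.h as it does in import.c):

#include <Python.h>
#include <osdefs.h>                         /* MAXPATHLEN (assumption) */

static PyObject *
load_with_path_buffer(const char *modname)
{
    char *buf;
    PyObject *result;

    buf = PyMem_MALLOC(MAXPATHLEN + 1);     /* was: char buf[MAXPATHLEN+1]; */
    if (buf == NULL)
        return PyErr_NoMemory();
    buf[0] = '\0';

    /* ... the real work fills buf; every failure path jumps below ... */
    result = PyString_FromString(modname);  /* placeholder for the real work */
    if (result == NULL)
        goto error_exit;

    PyMem_FREE(buf);                        /* success path frees the buffer too */
    return result;

error_exit:
    PyMem_FREE(buf);                        /* single cleanup point for all errors */
    return NULL;
}
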
diff --git a/Python/marshal.c b/Python/marshal.c
index faca027..6b285aa 100644
--- a/Python/marshal.c
+++ b/Python/marshal.c
@@ -88,7 +88,7 @@ w_more(int c, WFILE *p)
}
static void
-w_string(char *s, int n, WFILE *p)
+w_string(const char *s, Py_ssize_t n, WFILE *p)
{
if (p->fp != NULL) {
fwrite(s, 1, n, p->fp);
@@ -126,6 +126,28 @@ w_long64(long x, WFILE *p)
}
#endif
+#define SIZE32_MAX 0x7FFFFFFF
+
+#if SIZEOF_SIZE_T > 4
+# define W_SIZE(n, p) do { \
+ if ((n) > SIZE32_MAX) { \
+ (p)->depth--; \
+ (p)->error = WFERR_UNMARSHALLABLE; \
+ return; \
+ } \
+ w_long((long)(n), p); \
+ } while(0)
+#else
+# define W_SIZE w_long
+#endif
+
+static void
+w_pstring(const char *s, Py_ssize_t n, WFILE *p)
+{
+ W_SIZE(n, p);
+ w_string(s, n, p);
+}
+
/* We assume that Python longs are stored internally in base some power of
2**15; for the sake of portability we'll always read and write them in base
exactly 2**15. */
@@ -159,6 +181,11 @@ w_PyLong(const PyLongObject *ob, WFILE *p)
d >>= PyLong_MARSHAL_SHIFT;
l++;
} while (d != 0);
+ if (l > SIZE32_MAX) {
+ p->depth--;
+ p->error = WFERR_UNMARSHALLABLE;
+ return;
+ }
w_long((long)(Py_SIZE(ob) > 0 ? l : -l), p);
for (i=0; i < n-1; i++) {
@@ -244,7 +271,7 @@ w_object(PyObject *v, WFILE *p)
n = strlen(buf);
w_byte(TYPE_FLOAT, p);
w_byte((int)n, p);
- w_string(buf, (int)n, p);
+ w_string(buf, n, p);
PyMem_Free(buf);
}
}
@@ -277,7 +304,7 @@ w_object(PyObject *v, WFILE *p)
}
n = strlen(buf);
w_byte((int)n, p);
- w_string(buf, (int)n, p);
+ w_string(buf, n, p);
PyMem_Free(buf);
buf = PyOS_double_to_string(PyComplex_ImagAsDouble(v),
'g', 17, 0, NULL);
@@ -287,7 +314,7 @@ w_object(PyObject *v, WFILE *p)
}
n = strlen(buf);
w_byte((int)n, p);
- w_string(buf, (int)n, p);
+ w_string(buf, n, p);
PyMem_Free(buf);
}
}
@@ -318,15 +345,7 @@ w_object(PyObject *v, WFILE *p)
else {
w_byte(TYPE_STRING, p);
}
- n = PyString_GET_SIZE(v);
- if (n > INT_MAX) {
- /* huge strings are not supported */
- p->depth--;
- p->error = WFERR_UNMARSHALLABLE;
- return;
- }
- w_long((long)n, p);
- w_string(PyString_AS_STRING(v), (int)n, p);
+ w_pstring(PyBytes_AS_STRING(v), PyString_GET_SIZE(v), p);
}
#ifdef Py_USING_UNICODE
else if (PyUnicode_CheckExact(v)) {
@@ -338,21 +357,14 @@ w_object(PyObject *v, WFILE *p)
return;
}
w_byte(TYPE_UNICODE, p);
- n = PyString_GET_SIZE(utf8);
- if (n > INT_MAX) {
- p->depth--;
- p->error = WFERR_UNMARSHALLABLE;
- return;
- }
- w_long((long)n, p);
- w_string(PyString_AS_STRING(utf8), (int)n, p);
+ w_pstring(PyString_AS_STRING(utf8), PyString_GET_SIZE(utf8), p);
Py_DECREF(utf8);
}
#endif
else if (PyTuple_CheckExact(v)) {
w_byte(TYPE_TUPLE, p);
n = PyTuple_Size(v);
- w_long((long)n, p);
+ W_SIZE(n, p);
for (i = 0; i < n; i++) {
w_object(PyTuple_GET_ITEM(v, i), p);
}
@@ -360,7 +372,7 @@ w_object(PyObject *v, WFILE *p)
else if (PyList_CheckExact(v)) {
w_byte(TYPE_LIST, p);
n = PyList_GET_SIZE(v);
- w_long((long)n, p);
+ W_SIZE(n, p);
for (i = 0; i < n; i++) {
w_object(PyList_GET_ITEM(v, i), p);
}
@@ -390,7 +402,7 @@ w_object(PyObject *v, WFILE *p)
p->error = WFERR_UNMARSHALLABLE;
return;
}
- w_long((long)n, p);
+ W_SIZE(n, p);
it = PyObject_GetIter(v);
if (it == NULL) {
p->depth--;
@@ -432,13 +444,7 @@ w_object(PyObject *v, WFILE *p)
PyBufferProcs *pb = v->ob_type->tp_as_buffer;
w_byte(TYPE_STRING, p);
n = (*pb->bf_getreadbuffer)(v, 0, (void **)&s);
- if (n > INT_MAX) {
- p->depth--;
- p->error = WFERR_UNMARSHALLABLE;
- return;
- }
- w_long((long)n, p);
- w_string(s, (int)n, p);
+ w_pstring(s, n, p);
}
else {
w_byte(TYPE_UNKNOWN, p);
@@ -480,14 +486,14 @@ typedef WFILE RFILE; /* Same struct with different invariants */
#define r_byte(p) ((p)->fp ? getc((p)->fp) : rs_byte(p))
-static int
-r_string(char *s, int n, RFILE *p)
+static Py_ssize_t
+r_string(char *s, Py_ssize_t n, RFILE *p)
{
if (p->fp != NULL)
/* The result fits into int because it must be <=n. */
- return (int)fread(s, 1, n, p->fp);
+ return fread(s, 1, n, p->fp);
if (p->end - p->ptr < n)
- n = (int)(p->end - p->ptr);
+ n = p->end - p->ptr;
memcpy(s, p->ptr, n);
p->ptr += n;
return n;
@@ -563,14 +569,14 @@ static PyObject *
r_PyLong(RFILE *p)
{
PyLongObject *ob;
- int size, i, j, md, shorts_in_top_digit;
- long n;
+ long n, size, i;
+ int j, md, shorts_in_top_digit;
digit d;
n = r_long(p);
if (n == 0)
return (PyObject *)_PyLong_New(0);
- if (n < -INT_MAX || n > INT_MAX) {
+ if (n < -SIZE32_MAX || n > SIZE32_MAX) {
PyErr_SetString(PyExc_ValueError,
"bad marshal data (long size out of range)");
return NULL;
@@ -691,7 +697,7 @@ r_object(RFILE *p)
char buf[256];
double dx;
n = r_byte(p);
- if (n == EOF || r_string(buf, (int)n, p) != n) {
+ if (n == EOF || r_string(buf, n, p) != n) {
PyErr_SetString(PyExc_EOFError,
"EOF read where object expected");
retval = NULL;
@@ -732,7 +738,7 @@ r_object(RFILE *p)
char buf[256];
Py_complex c;
n = r_byte(p);
- if (n == EOF || r_string(buf, (int)n, p) != n) {
+ if (n == EOF || r_string(buf, n, p) != n) {
PyErr_SetString(PyExc_EOFError,
"EOF read where object expected");
retval = NULL;
@@ -745,7 +751,7 @@ r_object(RFILE *p)
break;
}
n = r_byte(p);
- if (n == EOF || r_string(buf, (int)n, p) != n) {
+ if (n == EOF || r_string(buf, n, p) != n) {
PyErr_SetString(PyExc_EOFError,
"EOF read where object expected");
retval = NULL;
@@ -795,7 +801,7 @@ r_object(RFILE *p)
case TYPE_INTERNED:
case TYPE_STRING:
n = r_long(p);
- if (n < 0 || n > INT_MAX) {
+ if (n < 0 || n > SIZE32_MAX) {
PyErr_SetString(PyExc_ValueError, "bad marshal data (string size out of range)");
retval = NULL;
break;
@@ -805,7 +811,7 @@ r_object(RFILE *p)
retval = NULL;
break;
}
- if (r_string(PyString_AS_STRING(v), (int)n, p) != n) {
+ if (r_string(PyString_AS_STRING(v), n, p) != n) {
Py_DECREF(v);
PyErr_SetString(PyExc_EOFError,
"EOF read where object expected");
@@ -840,7 +846,7 @@ r_object(RFILE *p)
char *buffer;
n = r_long(p);
- if (n < 0 || n > INT_MAX) {
+ if (n < 0 || n > SIZE32_MAX) {
PyErr_SetString(PyExc_ValueError, "bad marshal data (unicode size out of range)");
retval = NULL;
break;
@@ -850,7 +856,7 @@ r_object(RFILE *p)
retval = PyErr_NoMemory();
break;
}
- if (r_string(buffer, (int)n, p) != n) {
+ if (r_string(buffer, n, p) != n) {
PyMem_DEL(buffer);
PyErr_SetString(PyExc_EOFError,
"EOF read where object expected");
@@ -866,12 +872,12 @@ r_object(RFILE *p)
case TYPE_TUPLE:
n = r_long(p);
- if (n < 0 || n > INT_MAX) {
+ if (n < 0 || n > SIZE32_MAX) {
PyErr_SetString(PyExc_ValueError, "bad marshal data (tuple size out of range)");
retval = NULL;
break;
}
- v = PyTuple_New((int)n);
+ v = PyTuple_New(n);
if (v == NULL) {
retval = NULL;
break;
@@ -886,19 +892,19 @@ r_object(RFILE *p)
v = NULL;
break;
}
- PyTuple_SET_ITEM(v, (int)i, v2);
+ PyTuple_SET_ITEM(v, i, v2);
}
retval = v;
break;
case TYPE_LIST:
n = r_long(p);
- if (n < 0 || n > INT_MAX) {
+ if (n < 0 || n > SIZE32_MAX) {
PyErr_SetString(PyExc_ValueError, "bad marshal data (list size out of range)");
retval = NULL;
break;
}
- v = PyList_New((int)n);
+ v = PyList_New(n);
if (v == NULL) {
retval = NULL;
break;
@@ -913,7 +919,7 @@ r_object(RFILE *p)
v = NULL;
break;
}
- PyList_SET_ITEM(v, (int)i, v2);
+ PyList_SET_ITEM(v, i, v2);
}
retval = v;
break;
@@ -945,7 +951,7 @@ r_object(RFILE *p)
case TYPE_SET:
case TYPE_FROZENSET:
n = r_long(p);
- if (n < 0 || n > INT_MAX) {
+ if (n < 0 || n > SIZE32_MAX) {
PyErr_SetString(PyExc_ValueError, "bad marshal data (set size out of range)");
retval = NULL;
break;
@@ -1134,12 +1140,8 @@ PyMarshal_ReadLastObjectFromFile(FILE *fp)
if (filesize > 0 && filesize <= REASONABLE_FILE_LIMIT) {
char* pBuf = (char *)PyMem_MALLOC(filesize);
if (pBuf != NULL) {
- PyObject* v;
- size_t n;
- /* filesize must fit into an int, because it
- is smaller than REASONABLE_FILE_LIMIT */
- n = fread(pBuf, 1, (int)filesize, fp);
- v = PyMarshal_ReadObjectFromString(pBuf, n);
+ size_t n = fread(pBuf, 1, (size_t)filesize, fp);
+ PyObject* v = PyMarshal_ReadObjectFromString(pBuf, n);
PyMem_FREE(pBuf);
return v;
}
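
The marshal.c changes funnel every length write through w_pstring/W_SIZE so that, on builds where Py_ssize_t is wider than 32 bits, an over-long object is refused rather than having its size silently truncated by w_long. A rough standalone sketch of that clamping idea (hypothetical helper; the real code writes the length through w_long in marshal's own byte order and flags WFERR_UNMARSHALLABLE instead of returning an error code):

#include <stdio.h>
#include <stdint.h>

#define SIZE32_MAX 0x7FFFFFFF

/* Write a 32-bit length followed by the payload; refuse lengths that do
   not fit, instead of silently truncating them on 64-bit builds. */
static int
write_sized_block(FILE *fp, const char *s, int64_t n)
{
    int32_t len;

    if (n < 0 || n > SIZE32_MAX)
        return -1;                          /* marshal would set WFERR_UNMARSHALLABLE */
    len = (int32_t)n;
    if (fwrite(&len, sizeof(len), 1, fp) != 1)
        return -1;
    if (n > 0 && fwrite(s, 1, (size_t)n, fp) != (size_t)n)
        return -1;
    return 0;
}
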
diff --git a/Python/peephole.c b/Python/peephole.c
index 433fe27..fb6cd03 100644
--- a/Python/peephole.c
+++ b/Python/peephole.c
@@ -128,25 +128,14 @@ fold_binops_on_constants(unsigned char *codestr, PyObject *consts)
newconst = PyNumber_Subtract(v, w);
break;
case BINARY_SUBSCR:
- newconst = PyObject_GetItem(v, w);
/* #5057: if v is unicode, there might be differences between
- wide and narrow builds in cases like u'\U00012345'[0].
- Wide builds will return a non-BMP char, whereas narrow builds
- will return a surrogate. In both the cases skip the
- optimization in order to produce compatible pycs.
- */
- if (newconst != NULL &&
- PyUnicode_Check(v) && PyUnicode_Check(newconst)) {
- Py_UNICODE ch = PyUnicode_AS_UNICODE(newconst)[0];
-#ifdef Py_UNICODE_WIDE
- if (ch > 0xFFFF) {
-#else
- if (ch >= 0xD800 && ch <= 0xDFFF) {
-#endif
- Py_DECREF(newconst);
- return 0;
- }
- }
+ wide and narrow builds in cases like '\U00012345'[0] or
+ '\U00012345abcdef'[3], so it's better to skip the optimization
+ in order to produce compatible pycs.
+ */
+ if (PyUnicode_Check(v))
+ return 0;
+ newconst = PyObject_GetItem(v, w);
break;
case BINARY_LSHIFT:
newconst = PyNumber_Lshift(v, w);
@@ -345,7 +334,7 @@ PyCode_Optimize(PyObject *code, PyObject* consts, PyObject *names,
codestr = (unsigned char *)memcpy(codestr,
PyString_AS_STRING(code), codelen);
- /* Verify that RETURN_VALUE terminates the codestring. This allows
+ /* Verify that RETURN_VALUE terminates the codestring. This allows
the various transformation patterns to look ahead several
instructions without additional checks to make sure they are not
looking beyond the end of the code string.
@@ -443,8 +432,8 @@ PyCode_Optimize(PyObject *code, PyObject* consts, PyObject *names,
case BUILD_LIST:
j = GETARG(codestr, i);
h = i - 3 * j;
- if (h >= 0 &&
- j <= lastlc &&
+ if (h >= 0 &&
+ j <= lastlc &&
((opcode == BUILD_TUPLE &&
ISBASICBLOCK(blocks, h, 3*(j+1))) ||
(opcode == BUILD_LIST &&
@@ -488,8 +477,8 @@ PyCode_Optimize(PyObject *code, PyObject* consts, PyObject *names,
case BINARY_AND:
case BINARY_XOR:
case BINARY_OR:
- if (lastlc >= 2 &&
- ISBASICBLOCK(blocks, i-6, 7) &&
+ if (lastlc >= 2 &&
+ ISBASICBLOCK(blocks, i-6, 7) &&
fold_binops_on_constants(&codestr[i-6], consts)) {
i -= 2;
assert(codestr[i] == LOAD_CONST);
@@ -498,13 +487,13 @@ PyCode_Optimize(PyObject *code, PyObject* consts, PyObject *names,
break;
/* Fold unary ops on constants.
- LOAD_CONST c1 UNARY_OP --> LOAD_CONST unary_op(c) */
+ LOAD_CONST c1 UNARY_OP --> LOAD_CONST unary_op(c) */
case UNARY_NEGATIVE:
case UNARY_CONVERT:
case UNARY_INVERT:
- if (lastlc >= 1 &&
- ISBASICBLOCK(blocks, i-3, 4) &&
- fold_unaryops_on_constants(&codestr[i-3], consts)) {
+ if (lastlc >= 1 &&
+ ISBASICBLOCK(blocks, i-3, 4) &&
+ fold_unaryops_on_constants(&codestr[i-3], consts)) {
i -= 2;
assert(codestr[i] == LOAD_CONST);
cumlc = 1;
@@ -530,8 +519,7 @@ PyCode_Optimize(PyObject *code, PyObject* consts, PyObject *names,
tgt = GETJUMPTGT(codestr, i);
j = codestr[tgt];
if (CONDITIONAL_JUMP(j)) {
- /* NOTE: all possible jumps here are
- absolute! */
+ /* NOTE: all possible jumps here are absolute! */
if (JUMPS_ON_TRUE(j) == JUMPS_ON_TRUE(opcode)) {
/* The second jump will be
taken iff the first is. */
@@ -542,13 +530,10 @@ PyCode_Optimize(PyObject *code, PyObject* consts, PyObject *names,
SETARG(codestr, i, tgttgt);
goto reoptimize_current;
} else {
- /* The second jump is not taken
- if the first is (so jump past
- it), and all conditional
- jumps pop their argument when
- they're not taken (so change
- the first jump to pop its
- argument when it's taken). */
+ /* The second jump is not taken if the first is (so
+ jump past it), and all conditional jumps pop their
+ argument when they're not taken (so change the
+ first jump to pop its argument when it's taken). */
if (JUMPS_ON_TRUE(opcode))
codestr[i] = POP_JUMP_IF_TRUE;
else
@@ -584,8 +569,8 @@ PyCode_Optimize(PyObject *code, PyObject* consts, PyObject *names,
if (opcode == JUMP_FORWARD) /* JMP_ABS can go backwards */
opcode = JUMP_ABSOLUTE;
if (!ABSOLUTE_JUMP(opcode))
- tgttgt -= i + 3; /* Calc relative jump addr */
- if (tgttgt < 0) /* No backward relative jumps */
+ tgttgt -= i + 3; /* Calc relative jump addr */
+ if (tgttgt < 0) /* No backward relative jumps */
continue;
codestr[i] = opcode;
SETARG(codestr, i, tgttgt);
diff --git a/Python/pyarena.c b/Python/pyarena.c
index 2d63638..513b379 100644
--- a/Python/pyarena.c
+++ b/Python/pyarena.c
@@ -159,7 +159,6 @@ PyArena_New()
void
PyArena_Free(PyArena *arena)
{
- int r;
assert(arena);
#if defined(Py_DEBUG)
/*
@@ -176,12 +175,6 @@ PyArena_Free(PyArena *arena)
assert(arena->a_objects->ob_refcnt == 1);
*/
- /* Clear all the elements from the list. This is necessary
- to guarantee that they will be DECREFed. */
- r = PyList_SetSlice(arena->a_objects,
- 0, PyList_GET_SIZE(arena->a_objects), NULL);
- assert(r == 0);
- assert(PyList_GET_SIZE(arena->a_objects) == 0);
Py_DECREF(arena->a_objects);
free(arena);
}
diff --git a/Python/pystate.c b/Python/pystate.c
index ddb7d42..eb992c1 100644
--- a/Python/pystate.c
+++ b/Python/pystate.c
@@ -22,6 +22,9 @@ the expense of doing their own locking).
#endif
#endif
+#ifdef __cplusplus
+extern "C" {
+#endif
#ifdef WITH_THREAD
#include "pythread.h"
@@ -30,10 +33,6 @@ static PyThread_type_lock head_mutex = NULL; /* Protects interp->tstate_head */
#define HEAD_LOCK() PyThread_acquire_lock(head_mutex, WAIT_LOCK)
#define HEAD_UNLOCK() PyThread_release_lock(head_mutex)
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/* The single PyInterpreterState used by this process'
GILState implementation
*/
@@ -193,6 +192,9 @@ new_threadstate(PyInterpreterState *interp, int init)
tstate->c_profileobj = NULL;
tstate->c_traceobj = NULL;
+ tstate->trash_delete_nesting = 0;
+ tstate->trash_delete_later = NULL;
+
if (init)
_PyThreadState_Init(tstate);
@@ -313,9 +315,9 @@ PyThreadState_DeleteCurrent()
Py_FatalError(
"PyThreadState_DeleteCurrent: no current tstate");
_PyThreadState_Current = NULL;
- tstate_delete_common(tstate);
if (autoInterpreterState && PyThread_get_key_value(autoTLSkey) == tstate)
PyThread_delete_key_value(autoTLSkey);
+ tstate_delete_common(tstate);
PyEval_ReleaseLock();
}
#endif /* WITH_THREAD */
@@ -654,10 +656,10 @@ PyGILState_Release(PyGILState_STATE oldstate)
PyEval_SaveThread();
}
+#endif /* WITH_THREAD */
+
#ifdef __cplusplus
}
#endif
-#endif /* WITH_THREAD */
-
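
The pystate.c hunks move the extern "C" markers so that the opening brace comes before the WITH_THREAD-only section and the closing brace after it; otherwise a C++ compiler would see the linkage block opened or closed inside a conditionally compiled region. The intended layering, sketched:

#ifdef __cplusplus
extern "C" {                    /* opened before any conditional section */
#endif

#ifdef WITH_THREAD
/* thread-only state management ... */
#endif /* WITH_THREAD */

/* definitions available with or without threads ... */

#ifdef __cplusplus
}                               /* closed after every conditional section */
#endif
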
diff --git a/Python/pythonrun.c b/Python/pythonrun.c
index dcb2c0e..748a63b 100644
--- a/Python/pythonrun.c
+++ b/Python/pythonrun.c
@@ -91,6 +91,13 @@ int _Py_QnewFlag = 0;
int Py_NoUserSiteDirectory = 0; /* for -s and site.py */
int Py_HashRandomizationFlag = 0; /* for -R and PYTHONHASHSEED */
+PyThreadState *_Py_Finalizing = NULL;
+
+
+/* Hack to force loading of object files */
+int (*_PyOS_mystrnicmp_hack)(const char *, const char *, Py_ssize_t) = \
+ PyOS_mystrnicmp; /* Python/pystrcmp.o */
+
/* PyModule_GetWarningsModule is no longer necessary as of 2.6
since _warnings is builtin. This API should not be used. */
PyObject *
@@ -158,6 +165,7 @@ Py_InitializeEx(int install_sigs)
if (initialized)
return;
initialized = 1;
+ _Py_Finalizing = NULL;
if ((p = Py_GETENV("PYTHONDEBUG")) && *p != '\0')
Py_DebugFlag = add_flag(Py_DebugFlag, p);
@@ -417,12 +425,16 @@ Py_Finalize(void)
* the threads created via Threading.
*/
call_sys_exitfunc();
- initialized = 0;
/* Get current thread state and interpreter pointer */
tstate = PyThreadState_GET();
interp = tstate->interp;
+ /* Remaining threads (e.g. daemon threads) will automatically exit
+ after taking the GIL (in PyEval_RestoreThread()). */
+ _Py_Finalizing = tstate;
+ initialized = 0;
+
/* Disable signal handling */
PyOS_FiniInterrupts();
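
The new _Py_Finalizing pointer is set before "initialized" is cleared so that remaining daemon threads notice shutdown the next time they take the GIL. A simplified illustration of how that flag is meant to be consumed on the thread side (the real check lives in PyEval_RestoreThread in ceval.c; this is not that code):

#include <Python.h>
#include <pythread.h>

void
restore_thread_sketch(PyThreadState *tstate)
{
    PyEval_AcquireLock();
    /* A daemon thread re-acquiring the GIL after Py_Finalize() has started
       must not run any more Python code: it simply exits instead. */
    if (_Py_Finalizing != NULL && tstate != _Py_Finalizing) {
        PyEval_ReleaseLock();
        PyThread_exit_thread();
    }
    PyThreadState_Swap(tstate);
}
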
@@ -907,19 +919,20 @@ PyRun_SimpleFileExFlags(FILE *fp, const char *filename, int closeit,
{
PyObject *m, *d, *v;
const char *ext;
- int set_file_name = 0, ret, len;
+ int set_file_name = 0, len, ret = -1;
m = PyImport_AddModule("__main__");
if (m == NULL)
return -1;
+ Py_INCREF(m);
d = PyModule_GetDict(m);
if (PyDict_GetItemString(d, "__file__") == NULL) {
PyObject *f = PyString_FromString(filename);
if (f == NULL)
- return -1;
+ goto done;
if (PyDict_SetItemString(d, "__file__", f) < 0) {
Py_DECREF(f);
- return -1;
+ goto done;
}
set_file_name = 1;
Py_DECREF(f);
@@ -932,7 +945,6 @@ PyRun_SimpleFileExFlags(FILE *fp, const char *filename, int closeit,
fclose(fp);
if ((fp = fopen(filename, "rb")) == NULL) {
fprintf(stderr, "python: Can't reopen .pyc file\n");
- ret = -1;
goto done;
}
/* Turn on optimization if a .pyo file is given */
@@ -945,7 +957,6 @@ PyRun_SimpleFileExFlags(FILE *fp, const char *filename, int closeit,
}
if (v == NULL) {
PyErr_Print();
- ret = -1;
goto done;
}
Py_DECREF(v);
@@ -955,6 +966,7 @@ PyRun_SimpleFileExFlags(FILE *fp, const char *filename, int closeit,
done:
if (set_file_name && PyDict_DelItemString(d, "__file__"))
PyErr_Clear();
+ Py_DECREF(m);
return ret;
}
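
The PyRun_SimpleFileExFlags hunk switches to ret = -1 as the default, takes its own reference to __main__, and lets every failure fall through to the done: label, so the module reference and the temporary __file__ entry are cleaned up on exactly one path. The shape of that refactor, sketched with a hypothetical run_body() standing in for the real parsing and execution calls:

#include <Python.h>

static int run_body(PyObject *d, const char *filename) { return 0; /* stub */ }

static int
run_in_main(const char *filename)
{
    PyObject *m, *d;
    int ret = -1;                       /* assume failure until the end */

    m = PyImport_AddModule("__main__"); /* borrowed reference */
    if (m == NULL)
        return -1;
    Py_INCREF(m);                       /* keep __main__ alive while it is used */
    d = PyModule_GetDict(m);

    if (run_body(d, filename) < 0)      /* any failure goes to the cleanup */
        goto done;
    ret = 0;
done:
    Py_DECREF(m);
    return ret;
}
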
@@ -989,55 +1001,67 @@ parse_syntax_error(PyObject *err, PyObject **message, const char **filename,
return PyArg_ParseTuple(err, "O(ziiz)", message, filename,
lineno, offset, text);
- /* new style errors. `err' is an instance */
+ *message = NULL;
- if (! (v = PyObject_GetAttrString(err, "msg")))
+ /* new style errors. `err' is an instance */
+ *message = PyObject_GetAttrString(err, "msg");
+ if (!*message)
goto finally;
- *message = v;
- if (!(v = PyObject_GetAttrString(err, "filename")))
+ v = PyObject_GetAttrString(err, "filename");
+ if (!v)
goto finally;
- if (v == Py_None)
+ if (v == Py_None) {
+ Py_DECREF(v);
*filename = NULL;
- else if (! (*filename = PyString_AsString(v)))
- goto finally;
+ }
+ else {
+ *filename = PyString_AsString(v);
+ Py_DECREF(v);
+ if (!*filename)
+ goto finally;
+ }
- Py_DECREF(v);
- if (!(v = PyObject_GetAttrString(err, "lineno")))
+ v = PyObject_GetAttrString(err, "lineno");
+ if (!v)
goto finally;
hold = PyInt_AsLong(v);
Py_DECREF(v);
- v = NULL;
if (hold < 0 && PyErr_Occurred())
goto finally;
*lineno = (int)hold;
- if (!(v = PyObject_GetAttrString(err, "offset")))
+ v = PyObject_GetAttrString(err, "offset");
+ if (!v)
goto finally;
if (v == Py_None) {
*offset = -1;
Py_DECREF(v);
- v = NULL;
} else {
hold = PyInt_AsLong(v);
Py_DECREF(v);
- v = NULL;
if (hold < 0 && PyErr_Occurred())
goto finally;
*offset = (int)hold;
}
- if (!(v = PyObject_GetAttrString(err, "text")))
+ v = PyObject_GetAttrString(err, "text");
+ if (!v)
goto finally;
- if (v == Py_None)
+ if (v == Py_None) {
+ Py_DECREF(v);
*text = NULL;
- else if (! (*text = PyString_AsString(v)))
- goto finally;
- Py_DECREF(v);
+ }
+ else {
+ *text = PyString_AsString(v);
+ Py_DECREF(v);
+ if (!*text)
+ goto finally;
+ }
return 1;
finally:
- Py_XDECREF(v);
+ Py_XDECREF(*message);
return 0;
}
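
The parse_syntax_error rewrite releases each attribute object as soon as its C value has been extracted, instead of juggling a shared temporary that had to be cleaned up on every exit. The per-attribute pattern it applies, sketched as a hypothetical helper:

#include <Python.h>

/* Fetch an integer attribute, dropping the temporary reference immediately;
   returns 1 on success, 0 with an exception set on failure. */
static int
get_int_attr(PyObject *obj, const char *attr, int *out)
{
    PyObject *v = PyObject_GetAttrString(obj, attr);
    long hold;

    if (v == NULL)
        return 0;
    hold = PyInt_AsLong(v);
    Py_DECREF(v);                       /* no dangling temporary to clean up later */
    if (hold == -1 && PyErr_Occurred())
        return 0;
    *out = (int)hold;
    return 1;
}
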
diff --git a/Python/random.c b/Python/random.c
index a2ae002..d615923 100644
--- a/Python/random.c
+++ b/Python/random.c
@@ -165,7 +165,12 @@ dev_urandom_python(char *buffer, Py_ssize_t size)
Py_END_ALLOW_THREADS
if (fd < 0)
{
- PyErr_SetFromErrnoWithFilename(PyExc_OSError, "/dev/urandom");
+ if (errno == ENOENT || errno == ENXIO ||
+ errno == ENODEV || errno == EACCES)
+ PyErr_SetString(PyExc_NotImplementedError,
+ "/dev/urandom (or equivalent) not found");
+ else
+ PyErr_SetFromErrno(PyExc_OSError);
return -1;
}
@@ -219,8 +224,9 @@ lcg_urandom(unsigned int x0, unsigned char *buffer, size_t size)
}
}
-/* Fill buffer with size pseudo-random bytes, not suitable for cryptographic
- use, from the operating random number generator (RNG).
+/* Fill buffer with size pseudo-random bytes from the operating system random
+ number generator (RNG). It is suitable for most cryptographic purposes
+ except long-lived private keys for asymmetric encryption.
Return 0 on success, raise an exception and return -1 on error. */
int
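
The dev_urandom change distinguishes "there is no usable /dev/urandom on this system" (ENOENT, ENXIO, ENODEV, EACCES, reported as NotImplementedError so the hash-randomization startup code can fall back) from genuine I/O failures, which keep raising OSError with errno. A condensed sketch of that triage (hypothetical helper name; the real code opens the device inside Py_BEGIN/END_ALLOW_THREADS):

#include <Python.h>
#include <errno.h>
#include <fcntl.h>

static int
open_urandom_or_raise(void)
{
    int fd = open("/dev/urandom", O_RDONLY);

    if (fd < 0) {
        if (errno == ENOENT || errno == ENXIO ||
            errno == ENODEV || errno == EACCES)
            /* Device simply not available: report "not implemented"... */
            PyErr_SetString(PyExc_NotImplementedError,
                            "/dev/urandom (or equivalent) not found");
        else
            /* ...anything else is a real OS error, keep errno. */
            PyErr_SetFromErrno(PyExc_OSError);
        return -1;
    }
    return fd;
}
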
diff --git a/Python/symtable.c b/Python/symtable.c
index 51b59f0..99f4191 100644
--- a/Python/symtable.c
+++ b/Python/symtable.c
@@ -22,16 +22,18 @@ ste_new(struct symtable *st, identifier name, _Py_block_ty block,
void *key, int lineno)
{
PySTEntryObject *ste = NULL;
- PyObject *k;
+ PyObject *k = NULL;
k = PyLong_FromVoidPtr(key);
if (k == NULL)
goto fail;
ste = PyObject_New(PySTEntryObject, &PySTEntry_Type);
- if (ste == NULL)
+ if (ste == NULL) {
+ Py_DECREF(k);
goto fail;
+ }
ste->ste_table = st;
- ste->ste_id = k;
+ ste->ste_id = k; /* ste owns reference to k */
ste->ste_name = name;
Py_INCREF(name);
@@ -466,7 +468,7 @@ analyze_cells(PyObject *scope, PyObject *free)
*/
if (PyDict_SetItem(scope, name, w) < 0)
goto error;
- if (!PyDict_DelItem(free, name) < 0)
+ if (PyDict_DelItem(free, name) < 0)
goto error;
}
success = 1;
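
The ste_new fix closes a leak: the freshly created key k was dropped when allocating the entry object failed, and only on success does the entry take ownership of that reference. The ownership-transfer shape, sketched with an illustrative Owner type (not the real PySTEntryObject):

#include <Python.h>

typedef struct { PyObject_HEAD PyObject *id; } Owner;
static PyTypeObject Owner_Type;         /* assumed to be initialized elsewhere */

static Owner *
make_owner(void *key)
{
    PyObject *k = PyLong_FromVoidPtr(key);
    Owner *o;

    if (k == NULL)
        return NULL;
    o = PyObject_New(Owner, &Owner_Type);
    if (o == NULL) {
        Py_DECREF(k);                   /* would otherwise leak on this path */
        return NULL;
    }
    o->id = k;                          /* o now owns the reference to k */
    return o;
}
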
diff --git a/Python/sysmodule.c b/Python/sysmodule.c
index 814eccb..560ea4e 100644
--- a/Python/sysmodule.c
+++ b/Python/sysmodule.c
@@ -219,7 +219,7 @@ PyDoc_STRVAR(exit_doc,
\n\
Exit the interpreter by raising SystemExit(status).\n\
If the status is omitted or None, it defaults to zero (i.e., success).\n\
-If the status is numeric, it will be used as the system exit status.\n\
+If the status is an integer, it will be used as the system exit status.\n\
If it is another kind of object, it will be printed and the system\n\
exit status will be one (i.e., failure)."
);
@@ -367,8 +367,7 @@ trace_trampoline(PyObject *self, PyFrameObject *frame,
result = call_trampoline(tstate, callback, frame, what, arg);
if (result == NULL) {
PyEval_SetTrace(NULL, NULL);
- Py_XDECREF(frame->f_trace);
- frame->f_trace = NULL;
+ Py_CLEAR(frame->f_trace);
return -1;
}
if (result != Py_None) {
@@ -616,6 +615,10 @@ sys_getwindowsversion(PyObject *self)
PyStructSequence_SET_ITEM(version, pos++, PyInt_FromLong(ver.wSuiteMask));
PyStructSequence_SET_ITEM(version, pos++, PyInt_FromLong(ver.wProductType));
+ if (PyErr_Occurred()) {
+ Py_DECREF(version);
+ return NULL;
+ }
return version;
}
@@ -1261,6 +1264,7 @@ make_flags(void)
#undef SetFlag
if (PyErr_Occurred()) {
+ Py_DECREF(seq);
return NULL;
}
return seq;
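
Both sysmodule.c fixes follow the same idea: the object is populated with *_SET_ITEM calls whose item constructors can fail and store NULL, so a single PyErr_Occurred() check afterwards decides whether to drop the partially built container. A tuple-based sketch of the pattern (illustrative helper, not the sys module code):

#include <Python.h>

static PyObject *
build_pair(long a, long b)
{
    PyObject *t = PyTuple_New(2);

    if (t == NULL)
        return NULL;
    PyTuple_SET_ITEM(t, 0, PyInt_FromLong(a));  /* may store NULL on failure */
    PyTuple_SET_ITEM(t, 1, PyInt_FromLong(b));
    if (PyErr_Occurred()) {                     /* one check catches any failed item */
        Py_DECREF(t);                           /* releases the items that did succeed */
        return NULL;
    }
    return t;
}
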
diff --git a/Python/thread.c b/Python/thread.c
index dd359e9..dd333e8 100644
--- a/Python/thread.c
+++ b/Python/thread.c
@@ -24,7 +24,7 @@
#include <stdlib.h>
#ifdef __sgi
-#ifndef HAVE_PTHREAD_H /* XXX Need to check in configure.in */
+#ifndef HAVE_PTHREAD_H /* XXX Need to check in configure.ac */
#undef _POSIX_THREADS
#endif
#endif
diff --git a/Python/thread_pthread.h b/Python/thread_pthread.h
index 44e2552..0c1fdfe 100644
--- a/Python/thread_pthread.h
+++ b/Python/thread_pthread.h
@@ -145,6 +145,7 @@ static void
PyThread__init_thread(void)
{
#if defined(_AIX) && defined(__GNUC__)
+ extern void pthread_init(void);
pthread_init();
#endif
}
@@ -241,9 +242,9 @@ void
PyThread_exit_thread(void)
{
dprintf(("PyThread_exit_thread called\n"));
- if (!initialized) {
+ if (!initialized)
exit(0);
- }
+ pthread_exit(0);
}
#ifdef USE_SEMAPHORES
@@ -284,6 +285,7 @@ PyThread_free_lock(PyThread_type_lock lock)
sem_t *thelock = (sem_t *)lock;
int status, error = 0;
+ (void) error; /* silence unused-but-set-variable warning */
dprintf(("PyThread_free_lock(%p) called\n", lock));
if (!thelock)
@@ -314,6 +316,7 @@ PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
sem_t *thelock = (sem_t *)lock;
int status, error = 0;
+ (void) error; /* silence unused-but-set-variable warning */
dprintf(("PyThread_acquire_lock(%p, %d) called\n", lock, waitflag));
do {
@@ -341,6 +344,7 @@ PyThread_release_lock(PyThread_type_lock lock)
sem_t *thelock = (sem_t *)lock;
int status, error = 0;
+ (void) error; /* silence unused-but-set-variable warning */
dprintf(("PyThread_release_lock(%p) called\n", lock));
status = sem_post(thelock);
@@ -391,6 +395,7 @@ PyThread_free_lock(PyThread_type_lock lock)
pthread_lock *thelock = (pthread_lock *)lock;
int status, error = 0;
+ (void) error; /* silence unused-but-set-variable warning */
dprintf(("PyThread_free_lock(%p) called\n", lock));
status = pthread_mutex_destroy( &thelock->mut );
@@ -442,6 +447,7 @@ PyThread_release_lock(PyThread_type_lock lock)
pthread_lock *thelock = (pthread_lock *)lock;
int status, error = 0;
+ (void) error; /* silence unused-but-set-variable warning */
dprintf(("PyThread_release_lock(%p) called\n", lock));
status = pthread_mutex_lock( &thelock->mut );
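
The repeated "(void) error;" additions exist only to keep newer GCCs quiet: error is assigned by the status-checking macro but is only read inside dprintf(), which compiles away in non-debug builds, so -Wunused-but-set-variable fires. A self-contained illustration (the macro definitions here are simplified stand-ins for the ones in thread_pthread.h):

#include <pthread.h>
#include <stdio.h>

#ifdef THREAD_DEBUG
#define dprintf(args)       printf args
#else
#define dprintf(args)       /* compiled out: `error` is never read */
#endif
#define CHECK_STATUS(name)  if (status != 0) { perror(name); error = 1; }

static void
release(pthread_mutex_t *mut)
{
    int status, error = 0;
    (void) error;           /* silence unused-but-set-variable warning */
    status = pthread_mutex_unlock(mut);
    CHECK_STATUS("pthread_mutex_unlock");
    dprintf(("released, error=%d\n", error));
}
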
diff --git a/README b/README
index 0ae8f10..d11ff3a 100644
--- a/README
+++ b/README
@@ -1,8 +1,8 @@
-This is Python version 2.7.3
+This is Python version 2.7.8
============================
Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
-2012 Python Software Foundation. All rights reserved.
+2012, 2013, 2014 Python Software Foundation. All rights reserved.
Copyright (c) 2000 BeOpen.com.
All rights reserved.
@@ -89,6 +89,13 @@ reStructuredText (2.6+) formats; the LaTeX and reStructuredText versions are
primarily for documentation authors, translators, and people with special
formatting requirements.
+If you would like to contribute to the development of Python, relevant
+documentation is available at:
+
+ http://docs.python.org/devguide/
+
+For information about building Python's documentation, refer to Doc/README.txt.
+
Web sites
---------
@@ -241,7 +248,7 @@ longer:
- NeXT
- Irix 4 and --with-sgi-dl
- Linux 1
-- Systems defining __d6_pthread_create (configure.in)
+- Systems defining __d6_pthread_create (configure.ac)
- Systems defining PY_PTHREAD_D4, PY_PTHREAD_D6,
or PY_PTHREAD_D7 in thread_pthread.h
- Systems using --with-dl-dld
@@ -680,10 +687,10 @@ platforms, additional compiler and/or linker options are required for
threads to work properly. Below is a table of those options,
collected by Bill Janssen. We would love to automate this process
more, but the information below is not enough to write a patch for the
-configure.in file, so manual intervention is required. If you patch
-the configure.in file and are confident that the patch works, please
+configure.ac file, so manual intervention is required. If you patch
+the configure.ac file and are confident that the patch works, please
send in the patch. (Don't bother patching the configure script itself
--- it is regenerated each time the configure.in file changes.)
+-- it is regenerated each time the configure.ac file changes.)
Compiler switches for threads
.............................
@@ -1201,7 +1208,7 @@ RISCOS/ Files specific to RISC OS port
Tools/ Some useful programs written in Python
pyconfig.h.in Source from which pyconfig.h is created (GNU autoheader output)
configure Configuration shell script (GNU autoconf output)
-configure.in Configuration specification (input for GNU autoconf)
+configure.ac Configuration specification (input for GNU autoconf)
install-sh Shell script used to install files
setup.py Python script used to build extension modules
diff --git a/Tools/buildbot/build-amd64.bat b/Tools/buildbot/build-amd64.bat
index 5175c62..8713b38 100644
--- a/Tools/buildbot/build-amd64.bat
+++ b/Tools/buildbot/build-amd64.bat
@@ -1,4 +1,5 @@
@rem Used by the buildbot "compile" step.
+set HOST_PYTHON="%CD%\PCbuild\amd64\python_d.exe"
cmd /c Tools\buildbot\external-amd64.bat
call "%VS90COMNTOOLS%\..\..\VC\vcvarsall.bat" x86_amd64
cmd /c Tools\buildbot\clean-amd64.bat
diff --git a/Tools/buildbot/external-amd64.bat b/Tools/buildbot/external-amd64.bat
index e9c7011..9905989 100644
--- a/Tools/buildbot/external-amd64.bat
+++ b/Tools/buildbot/external-amd64.bat
@@ -5,16 +5,24 @@ call "Tools\buildbot\external-common.bat"
call "%VS90COMNTOOLS%\..\..\VC\vcvarsall.bat" x86_amd64
if not exist tcltk64\bin\tcl85g.dll (
- cd tcl-8.5.2.1\win
+ cd tcl-8.5.15.0\win
nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 DEBUG=1 MACHINE=AMD64 INSTALLDIR=..\..\tcltk64 clean all
nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 DEBUG=1 MACHINE=AMD64 INSTALLDIR=..\..\tcltk64 install
cd ..\..
)
if not exist tcltk64\bin\tk85g.dll (
- cd tk-8.5.2.0\win
- nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 MACHINE=AMD64 INSTALLDIR=..\..\tcltk64 TCLDIR=..\..\tcl-8.5.2.1 clean
- nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 MACHINE=AMD64 INSTALLDIR=..\..\tcltk64 TCLDIR=..\..\tcl-8.5.2.1 all
- nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 MACHINE=AMD64 INSTALLDIR=..\..\tcltk64 TCLDIR=..\..\tcl-8.5.2.1 install
+ cd tk-8.5.15.0\win
+ nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 MACHINE=AMD64 INSTALLDIR=..\..\tcltk64 TCLDIR=..\..\tcl-8.5.15.0 clean
+ nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 MACHINE=AMD64 INSTALLDIR=..\..\tcltk64 TCLDIR=..\..\tcl-8.5.15.0 all
+ nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 MACHINE=AMD64 INSTALLDIR=..\..\tcltk64 TCLDIR=..\..\tcl-8.5.15.0 install
cd ..\..
)
+
+if not exist tcltk64\lib\tix8.4.3\tix84g.dll (
+ cd tix-8.4.3.5\win
+ nmake -f python.mak DEBUG=1 MACHINE=AMD64 COMPILERFLAGS=-DWINVER=0x0500 TCL_DIR=..\..\tcl-8.5.15.0 TK_DIR=..\..\tk-8.5.15.0 INSTALL_DIR=..\..\tcltk64 clean
+ nmake -f python.mak DEBUG=1 MACHINE=AMD64 COMPILERFLAGS=-DWINVER=0x0500 TCL_DIR=..\..\tcl-8.5.15.0 TK_DIR=..\..\tk-8.5.15.0 INSTALL_DIR=..\..\tcltk64 all
+ nmake -f python.mak DEBUG=1 MACHINE=AMD64 COMPILERFLAGS=-DWINVER=0x0500 TCL_DIR=..\..\tcl-8.5.15.0 TK_DIR=..\..\tk-8.5.15.0 INSTALL_DIR=..\..\tcltk64 install
+ cd ..\..
+) \ No newline at end of file
diff --git a/Tools/buildbot/external-common.bat b/Tools/buildbot/external-common.bat
index 834920e..319ef15 100644
--- a/Tools/buildbot/external-common.bat
+++ b/Tools/buildbot/external-common.bat
@@ -4,24 +4,31 @@
cd ..
@rem XXX: If you need to force the buildbots to start from a fresh environment, uncomment
@rem the following, check it in, then check it out, comment it out, then check it back in.
-@rem if exist bzip2-1.0.5 rd /s/q bzip2-1.0.5
+@rem if exist bzip2-1.0.6 rd /s/q bzip2-1.0.6
@rem if exist tcltk rd /s/q tcltk
@rem if exist tcltk64 rd /s/q tcltk64
@rem if exist tcl8.4.12 rd /s/q tcl8.4.12
@rem if exist tcl8.4.16 rd /s/q tcl8.4.16
@rem if exist tcl-8.4.18.1 rd /s/q tcl-8.4.18.1
+@rem if exist tcl-8.5.2.1 rd /s/q tcl-8.5.2.1
+@rem if exist tcl-8.5.15.0 rd /s/q tcl-8.5.15.0
@rem if exist tk8.4.12 rd /s/q tk8.4.12
@rem if exist tk8.4.16 rd /s/q tk8.4.16
@rem if exist tk-8.4.18.1 rd /s/q tk-8.4.18.1
+@rem if exist tk-8.5.2.0 rd /s/q tk-8.5.2.0
+@rem if exist tk-8.5.15.0 rd /s/q tk-8.5.15.0
+@rem if exist tix-8.4.3.5 rd /s/q tix-8.4.3.5
@rem if exist db-4.4.20 rd /s/q db-4.4.20
@rem if exist db-4.7.25.0 rd /s/q db-4.7.25.0
-@rem if exist openssl-0.9.8l rd /s/q openssl-0.9.8l
-@rem if exist sqlite-3.6.21 rd /s/q sqlite-3.6.21
+@rem if exist openssl-0.9.8y rd /s/q openssl-0.9.8y
+@rem if exist openssl-1.0.1g rd /s/q openssl-1.0.1g
+@rem if exist openssl-1.0.1h rd /s/q openssl-1.0.1h
+@rem if exist sqlite-3.6.21 rd /s/q sqlite-3.6.21
@rem bzip
-if not exist bzip2-1.0.5 (
- rd /s/q bzip2-1.0.3
- svn export http://svn.python.org/projects/external/bzip2-1.0.5
+if not exist bzip2-1.0.6 (
+ rd /s/q bzip2-1.0.5
+ svn export http://svn.python.org/projects/external/bzip2-1.0.6
)
@rem Berkeley DB
@@ -29,14 +36,16 @@ if exist db-4.4.20 rd /s/q db-4.4.20
if not exist db-4.7.25.0 svn export http://svn.python.org/projects/external/db-4.7.25.0
@rem OpenSSL
-if not exist openssl-0.9.8l svn export http://svn.python.org/projects/external/openssl-0.9.8l
+if exist openssl-1.0.1g rd /s/q openssl-1.0.1g
+if not exist openssl-1.0.1h svn export http://svn.python.org/projects/external/openssl-1.0.1h
@rem tcl/tk
-if not exist tcl-8.5.2.1 (
- rd /s/q tcltk tcltk64
- svn export http://svn.python.org/projects/external/tcl-8.5.2.1
+if not exist tcl-8.5.15.0 (
+ rd /s/q tcltk tcltk64 tcl-8.5.2.1 tk-8.5.2.0
+ svn export http://svn.python.org/projects/external/tcl-8.5.15.0
)
-if not exist tk-8.5.2.0 svn export http://svn.python.org/projects/external/tk-8.5.2.0
+if not exist tk-8.5.15.0 svn export http://svn.python.org/projects/external/tk-8.5.15.0
+if not exist tix-8.4.3.5 svn export http://svn.python.org/projects/external/tix-8.4.3.5
@rem sqlite3
if not exist sqlite-3.6.21 (
diff --git a/Tools/buildbot/external.bat b/Tools/buildbot/external.bat
index d90e8ce..136a396 100644
--- a/Tools/buildbot/external.bat
+++ b/Tools/buildbot/external.bat
@@ -6,16 +6,24 @@ call "%VS90COMNTOOLS%\vsvars32.bat"
if not exist tcltk\bin\tcl85g.dll (
@rem all and install need to be separate invocations, otherwise nmakehlp is not found on install
- cd tcl-8.5.2.1\win
- nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 DEBUG=1 INSTALLDIR=..\..\tcltk clean all
+ cd tcl-8.5.15.0\win
+ nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 DEBUG=1 INSTALLDIR=..\..\tcltk clean all
nmake -f makefile.vc DEBUG=1 INSTALLDIR=..\..\tcltk install
cd ..\..
)
if not exist tcltk\bin\tk85g.dll (
- cd tk-8.5.2.0\win
- nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl-8.5.2.1 clean
- nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl-8.5.2.1 all
- nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl-8.5.2.1 install
+ cd tk-8.5.15.0\win
+ nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl-8.5.15.0 clean
+ nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl-8.5.15.0 all
+ nmake -f makefile.vc COMPILERFLAGS=-DWINVER=0x0500 OPTS=noxp DEBUG=1 INSTALLDIR=..\..\tcltk TCLDIR=..\..\tcl-8.5.15.0 install
+ cd ..\..
+)
+
+if not exist tcltk\lib\tix8.4.3\tix84g.dll (
+ cd tix-8.4.3.5\win
+ nmake -f python.mak DEBUG=1 MACHINE=IX86 COMPILERFLAGS=-DWINVER=0x0500 TCL_DIR=..\..\tcl-8.5.15.0 TK_DIR=..\..\tk-8.5.15.0 INSTALL_DIR=..\..\tcltk clean
+ nmake -f python.mak DEBUG=1 MACHINE=IX86 COMPILERFLAGS=-DWINVER=0x0500 TCL_DIR=..\..\tcl-8.5.15.0 TK_DIR=..\..\tk-8.5.15.0 INSTALL_DIR=..\..\tcltk all
+ nmake -f python.mak DEBUG=1 MACHINE=IX86 COMPILERFLAGS=-DWINVER=0x0500 TCL_DIR=..\..\tcl-8.5.15.0 TK_DIR=..\..\tk-8.5.15.0 INSTALL_DIR=..\..\tcltk install
cd ..\..
)
diff --git a/Tools/buildbot/test-amd64.bat b/Tools/buildbot/test-amd64.bat
index d178c92..0003761 100644
--- a/Tools/buildbot/test-amd64.bat
+++ b/Tools/buildbot/test-amd64.bat
@@ -1,3 +1,3 @@
@rem Used by the buildbot "test" step.
cd PCbuild
-call rt.bat -q -d -x64 -uall -rw
+call rt.bat -d -q -x64 -uall -rwW %1 %2 %3 %4 %5 %6 %7 %8 %9
diff --git a/Tools/buildbot/test.bat b/Tools/buildbot/test.bat
index 3eccce8..a3a3579 100644
--- a/Tools/buildbot/test.bat
+++ b/Tools/buildbot/test.bat
@@ -1,3 +1,3 @@
@rem Used by the buildbot "test" step.
cd PCbuild
-call rt.bat -d -q -uall -rwW
+call rt.bat -d -q -uall -rwW %1 %2 %3 %4 %5 %6 %7 %8 %9
diff --git a/Tools/freeze/checkextensions_win32.py b/Tools/freeze/checkextensions_win32.py
index e5a8b29..8c6444c 100644
--- a/Tools/freeze/checkextensions_win32.py
+++ b/Tools/freeze/checkextensions_win32.py
@@ -3,7 +3,7 @@
Under Windows it is unlikely the .obj files are of use, as special compiler options
are needed (primarily to toggle the behavior of "public" symbols.
-I dont consider it worth parsing the MSVC makefiles for compiler options. Even if
+I don't consider it worth parsing the MSVC makefiles for compiler options. Even if
we get it just right, a specific freeze application may have specific compiler
options anyway (eg, to enable or disable specific functionality)
@@ -14,7 +14,7 @@ So my basic strategy is:
your own).
* This description can include:
- The MSVC .dsp file for the extension. The .c source file names
- are extraced from there.
+ are extracted from there.
- Specific compiler/linker options
- Flag to indicate if Unicode compilation is expected.
diff --git a/Tools/freeze/makefreeze.py b/Tools/freeze/makefreeze.py
index 1208b67..c0f5056 100644
--- a/Tools/freeze/makefreeze.py
+++ b/Tools/freeze/makefreeze.py
@@ -62,7 +62,7 @@ def makefreeze(base, dict, debug=0, entry_point=None, fail_import=()):
outfp.write('\t{"%s", M_%s, %d},\n' % (mod, mangled, size))
outfp.write('\n')
# The following modules have a NULL code pointer, indicating
- # that the prozen program should not search for them on the host
+ # that the frozen program should not search for them on the host
# system. Importing them will *always* raise an ImportError.
# The zero value size is never used.
for mod in fail_import:
diff --git a/Tools/gdb/libpython.py b/Tools/gdb/libpython.py
index e12769d..bbb22d0 100644..100755
--- a/Tools/gdb/libpython.py
+++ b/Tools/gdb/libpython.py
@@ -39,10 +39,20 @@ the type names are known to the debugger
The module also extends gdb with some python-specific commands.
'''
-from __future__ import with_statement
+
+# NOTE: some gdbs are linked with Python 3, so this file should be dual-syntax
+# compatible (2.6+ and 3.0+). See #19308.
+
+from __future__ import print_function, with_statement
import gdb
+import os
import sys
+if sys.version_info[0] >= 3:
+ unichr = chr
+ xrange = range
+ long = int
+
# Look up the gdb.Type for some standard types:
_type_char_ptr = gdb.lookup_type('char').pointer() # char*
_type_unsigned_char_ptr = gdb.lookup_type('unsigned char').pointer() # unsigned char*
@@ -51,17 +61,17 @@ _type_void_ptr = gdb.lookup_type('void').pointer() # void*
SIZEOF_VOID_P = _type_void_ptr.sizeof
-Py_TPFLAGS_HEAPTYPE = (1L << 9)
+Py_TPFLAGS_HEAPTYPE = (1 << 9)
-Py_TPFLAGS_INT_SUBCLASS = (1L << 23)
-Py_TPFLAGS_LONG_SUBCLASS = (1L << 24)
-Py_TPFLAGS_LIST_SUBCLASS = (1L << 25)
-Py_TPFLAGS_TUPLE_SUBCLASS = (1L << 26)
-Py_TPFLAGS_STRING_SUBCLASS = (1L << 27)
-Py_TPFLAGS_UNICODE_SUBCLASS = (1L << 28)
-Py_TPFLAGS_DICT_SUBCLASS = (1L << 29)
-Py_TPFLAGS_BASE_EXC_SUBCLASS = (1L << 30)
-Py_TPFLAGS_TYPE_SUBCLASS = (1L << 31)
+Py_TPFLAGS_INT_SUBCLASS = (1 << 23)
+Py_TPFLAGS_LONG_SUBCLASS = (1 << 24)
+Py_TPFLAGS_LIST_SUBCLASS = (1 << 25)
+Py_TPFLAGS_TUPLE_SUBCLASS = (1 << 26)
+Py_TPFLAGS_STRING_SUBCLASS = (1 << 27)
+Py_TPFLAGS_UNICODE_SUBCLASS = (1 << 28)
+Py_TPFLAGS_DICT_SUBCLASS = (1 << 29)
+Py_TPFLAGS_BASE_EXC_SUBCLASS = (1 << 30)
+Py_TPFLAGS_TYPE_SUBCLASS = (1 << 31)
MAX_OUTPUT_LEN=1024
@@ -80,7 +90,7 @@ def safety_limit(val):
def safe_range(val):
# As per range, but don't trust the value too much: cap it to a safety
# threshold in case the data was corrupted
- return xrange(safety_limit(val))
+ return xrange(safety_limit(int(val)))
class StringTruncated(RuntimeError):
@@ -292,8 +302,8 @@ class PyObjectPtr(object):
# class
return cls
- #print 'tp_flags = 0x%08x' % tp_flags
- #print 'tp_name = %r' % tp_name
+ #print('tp_flags = 0x%08x' % tp_flags)
+ #print('tp_name = %r' % tp_name)
name_map = {'bool': PyBoolObjectPtr,
'classobj': PyClassObjectPtr,
@@ -617,7 +627,7 @@ class PyDictObjectPtr(PyObjectPtr):
def iteritems(self):
'''
Yields a sequence of (PyObjectPtr key, PyObjectPtr value) pairs,
- analagous to dict.iteritems()
+ analogous to dict.iteritems()
'''
for i in safe_range(self.field('ma_mask') + 1):
ep = self.field('ma_table') + i
@@ -758,14 +768,14 @@ class PyLongObjectPtr(PyObjectPtr):
'''
ob_size = long(self.field('ob_size'))
if ob_size == 0:
- return 0L
+ return 0
ob_digit = self.field('ob_digit')
if gdb.lookup_type('digit').sizeof == 2:
- SHIFT = 15L
+ SHIFT = 15
else:
- SHIFT = 30L
+ SHIFT = 30
digits = [long(ob_digit[i]) * 2**(SHIFT*i)
for i in safe_range(abs(ob_size))]
@@ -774,6 +784,12 @@ class PyLongObjectPtr(PyObjectPtr):
result = -result
return result
+ def write_repr(self, out, visited):
+ # This ensures the trailing 'L' is printed when gdb is linked
+ # with a Python 3 interpreter.
+ out.write(repr(self.proxyval(visited)).rstrip('L'))
+ out.write('L')
+
class PyNoneStructPtr(PyObjectPtr):
"""
@@ -969,11 +985,19 @@ class PyStringObjectPtr(PyObjectPtr):
field_ob_size = self.field('ob_size')
field_ob_sval = self.field('ob_sval')
char_ptr = field_ob_sval.address.cast(_type_unsigned_char_ptr)
+ # When gdb is linked with a Python 3 interpreter, this is really
+ # a latin-1 mojibake decoding of the original string...
return ''.join([chr(char_ptr[i]) for i in safe_range(field_ob_size)])
def proxyval(self, visited):
return str(self)
+ def write_repr(self, out, visited):
+ val = repr(self.proxyval(visited))
+ if sys.version_info[0] >= 3:
+ val = val.encode('ascii', 'backslashreplace').decode('ascii')
+ out.write(val)
+
class PyTupleObjectPtr(PyObjectPtr):
_typename = 'PyTupleObject'
@@ -1072,6 +1096,15 @@ class PyUnicodeObjectPtr(PyObjectPtr):
result = u''.join([_unichr(ucs) for ucs in Py_UNICODEs])
return result
+ def write_repr(self, out, visited):
+ val = repr(self.proxyval(visited))
+ if sys.version_info[0] >= 3:
+ val = val.encode('ascii', 'backslashreplace').decode('ascii')
+ # This ensures the 'u' prefix is printed when gdb is linked
+ # with a Python 3 interpreter.
+ out.write('u')
+ out.write(val.lstrip('u'))
+
def int_from_int(gdbval):
return int(str(gdbval))
@@ -1295,12 +1328,12 @@ class PyList(gdb.Command):
frame = Frame.get_selected_python_frame()
if not frame:
- print 'Unable to locate python frame'
+ print('Unable to locate python frame')
return
pyop = frame.get_pyop()
if not pyop or pyop.is_optimized_out():
- print 'Unable to read information on python frame'
+ print('Unable to read information on python frame')
return
filename = pyop.filename()
@@ -1350,9 +1383,9 @@ def move_in_stack(move_up):
frame = iter_frame
if move_up:
- print 'Unable to find an older python frame'
+ print('Unable to find an older python frame')
else:
- print 'Unable to find a newer python frame'
+ print('Unable to find a newer python frame')
class PyUp(gdb.Command):
'Select and print the python stack frame that called this one (if any)'
@@ -1415,23 +1448,23 @@ class PyPrint(gdb.Command):
frame = Frame.get_selected_python_frame()
if not frame:
- print 'Unable to locate python frame'
+ print('Unable to locate python frame')
return
pyop_frame = frame.get_pyop()
if not pyop_frame:
- print 'Unable to read information on python frame'
+ print('Unable to read information on python frame')
return
pyop_var, scope = pyop_frame.get_var_by_name(name)
if pyop_var:
- print ('%s %r = %s'
+ print('%s %r = %s'
% (scope,
name,
pyop_var.get_truncated_repr(MAX_OUTPUT_LEN)))
else:
- print '%r not found' % name
+ print('%r not found' % name)
PyPrint()
@@ -1449,16 +1482,16 @@ class PyLocals(gdb.Command):
frame = Frame.get_selected_python_frame()
if not frame:
- print 'Unable to locate python frame'
+ print('Unable to locate python frame')
return
pyop_frame = frame.get_pyop()
if not pyop_frame:
- print 'Unable to read information on python frame'
+ print('Unable to read information on python frame')
return
for pyop_name, pyop_value in pyop_frame.iter_locals():
- print ('%s = %s'
+ print('%s = %s'
% (pyop_name.proxyval(set()),
pyop_value.get_truncated_repr(MAX_OUTPUT_LEN)))
diff --git a/Tools/i18n/makelocalealias.py b/Tools/i18n/makelocalealias.py
index 5b3631e..d75892b 100644..100755
--- a/Tools/i18n/makelocalealias.py
+++ b/Tools/i18n/makelocalealias.py
@@ -23,6 +23,12 @@ def parse(filename):
if line[:1] == '#':
continue
locale, alias = line.split()
+ # Fix non-standard locale names, e.g. ks_IN@devanagari.UTF-8
+ if '@' in alias:
+ alias_lang, _, alias_mod = alias.partition('@')
+ if '.' in alias_mod:
+ alias_mod, _, alias_enc = alias_mod.partition('.')
+ alias = alias_lang + '.' + alias_enc + '@' + alias_mod
# Strip ':'
if locale[-1] == ':':
locale = locale[:-1]
diff --git a/Tools/i18n/msgfmt.py b/Tools/i18n/msgfmt.py
index 5dd5430..2502a10 100755
--- a/Tools/i18n/msgfmt.py
+++ b/Tools/i18n/msgfmt.py
@@ -25,8 +25,9 @@ Options:
Display version information and exit.
"""
-import sys
import os
+import sys
+import ast
import getopt
import struct
import array
@@ -143,7 +144,7 @@ def make(filename, outfile):
# This is a message with plural forms
elif l.startswith('msgid_plural'):
if section != ID:
- print >> sys.stderr, 'msgid_plural not preceeded by msgid on %s:%d' %\
+ print >> sys.stderr, 'msgid_plural not preceded by msgid on %s:%d' %\
(infile, lno)
sys.exit(1)
l = l[12:]
@@ -170,8 +171,7 @@ def make(filename, outfile):
l = l.strip()
if not l:
continue
- # XXX: Does this always follow Python escape semantics?
- l = eval(l)
+ l = ast.literal_eval(l)
if section == ID:
msgid += l
elif section == STR:
diff --git a/Tools/i18n/pygettext.py b/Tools/i18n/pygettext.py
index bb0dd35..af24b47 100755
--- a/Tools/i18n/pygettext.py
+++ b/Tools/i18n/pygettext.py
@@ -1,6 +1,6 @@
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
-# Originally written by Barry Warsaw <barry@zope.com>
+# Originally written by Barry Warsaw <barry@python.org>
#
# Minimally patched to make it even more xgettext compatible
# by Peter Funk <pf@artcom-gmbh.de>
@@ -208,6 +208,7 @@ escapes = []
def make_escapes(pass_iso8859):
global escapes
+ escapes = [chr(i) for i in range(256)]
if pass_iso8859:
# Allow iso-8859 characters to pass through so that e.g. 'msgid
# "Höhe"' would result not result in 'msgid "H\366he"'. Otherwise we
@@ -215,11 +216,9 @@ def make_escapes(pass_iso8859):
mod = 128
else:
mod = 256
- for i in range(256):
- if 32 <= (i % mod) <= 126:
- escapes.append(chr(i))
- else:
- escapes.append("\\%03o" % i)
+ for i in range(mod):
+ if not(32 <= i <= 126):
+ escapes[i] = "\\%03o" % i
escapes[ord('\\')] = '\\\\'
escapes[ord('\t')] = '\\t'
escapes[ord('\r')] = '\\r'
@@ -593,7 +592,7 @@ def main():
fp.close()
# calculate escapes
- make_escapes(options.escape)
+ make_escapes(not options.escape)
# calculate all keywords
options.keywords.extend(default_keywords)
diff --git a/Tools/msi/msi.py b/Tools/msi/msi.py
index d2caf34..fb57a43 100644
--- a/Tools/msi/msi.py
+++ b/Tools/msi/msi.py
@@ -445,6 +445,10 @@ def add_ui(db):
("SetDLLDirToTarget", 'DLLDIR=""', 751),
])
+ # Prepend TARGETDIR to the system path, and remove it on uninstall.
+ add_data(db, "Environment",
+ [("PathAddition", "=-*Path", "[TARGETDIR];[~]", "REGISTRY.path")])
+
# Execute Sequences
add_data(db, "InstallExecuteSequence",
[("InitialTargetDir", 'TARGETDIR=""', 750),
@@ -668,11 +672,11 @@ def add_ui(db):
c=features.xbutton("Advanced", "Advanced", None, 0.30)
c.event("SpawnDialog", "AdvancedDlg")
- c=features.text("ItemDescription", 140, 180, 210, 30, 3,
+ c=features.text("ItemDescription", 140, 180, 210, 40, 3,
"Multiline description of the currently selected item.")
c.mapping("SelectionDescription","Text")
- c=features.text("ItemSize", 140, 210, 210, 45, 3,
+ c=features.text("ItemSize", 140, 225, 210, 33, 3,
"The size of the currently selected item.")
c.mapping("SelectionSize", "Text")
@@ -826,7 +830,7 @@ def add_features(db):
# (i.e. additional Python libraries) need to follow the parent feature.
# Features that have no advertisement trigger (e.g. the test suite)
# must not support advertisement
- global default_feature, tcltk, htmlfiles, tools, testsuite, ext_feature, private_crt
+ global default_feature, tcltk, htmlfiles, tools, testsuite, ext_feature, private_crt, prepend_path
default_feature = Feature(db, "DefaultFeature", "Python",
"Python Interpreter and Libraries",
1, directory = "TARGETDIR")
@@ -851,6 +855,15 @@ def add_features(db):
testsuite = Feature(db, "Testsuite", "Test suite",
"Python test suite (Lib/test/)", 11,
parent = default_feature, attributes=2|8)
+ # prepend_path is an additional feature which is to be off by default.
+ # Since the default level for the above features is 1, this needs to be
+ # at least one level higher.
+ prepend_path = Feature(db, "PrependPath", "Add python.exe to Path",
+ "Prepend [TARGETDIR] to the system Path variable. "
+ "This allows you to type 'python' into a command "
+ "prompt without needing the full path.", 13,
+ parent = default_feature, attributes=2|8,
+ level=2)
def extract_msvcr90():
# Find the redistributable files
@@ -1022,8 +1035,12 @@ def add_files(db):
lib.add_file("zipdir.zip")
if dir=='tests' and parent.physical=='distutils':
lib.add_file("Setup.sample")
+ if dir=='audiodata':
+ lib.glob("*.*")
if dir=='decimaltestdata':
lib.glob("*.decTest")
+ if dir=='imghdrdata':
+ lib.glob("*.*")
if dir=='xmltestdata':
lib.glob("*.xml")
lib.add_file("test.xml.out")
@@ -1034,6 +1051,7 @@ def add_files(db):
lib.add_file("idle.bat")
if dir=="Icons":
lib.glob("*.gif")
+ lib.glob("*.ico")
lib.add_file("idle.icns")
if dir=="command" and parent.physical=="distutils":
lib.glob("wininst*.exe")
@@ -1168,6 +1186,8 @@ def add_registry(db):
"InstallPath"),
("REGISTRY.doc", msilib.gen_uuid(), "TARGETDIR", registry_component, None,
"Documentation"),
+ ("REGISTRY.path", msilib.gen_uuid(), "TARGETDIR", registry_component, None,
+ None),
("REGISTRY.def", msilib.gen_uuid(), "TARGETDIR", registry_component,
None, None)] + tcldata)
# See "FeatureComponents Table".
@@ -1184,6 +1204,7 @@ def add_registry(db):
add_data(db, "FeatureComponents",
[(default_feature.id, "REGISTRY"),
(htmlfiles.id, "REGISTRY.doc"),
+ (prepend_path.id, "REGISTRY.path"),
(ext_feature.id, "REGISTRY.def")] +
tcldata
)
@@ -1392,7 +1413,10 @@ merge(msiname, "SharedCRT", "TARGETDIR", modules)
# certname (from config.py) should be (a substring of)
# the certificate subject, e.g. "Python Software Foundation"
if certname:
- os.system('signtool sign /n "%s" /t http://timestamp.verisign.com/scripts/timestamp.dll %s' % (certname, msiname))
+ os.system('signtool sign /n "%s" '
+ '/t http://timestamp.verisign.com/scripts/timestamp.dll '
+ '/d "Python %s" '
+ '%s' % (certname, full_current_version, msiname))
if pdbzip:
build_pdbzip()
diff --git a/Tools/msi/msilib.py b/Tools/msi/msilib.py
index 6f49b4c..760471c 100644
--- a/Tools/msi/msilib.py
+++ b/Tools/msi/msilib.py
@@ -305,7 +305,7 @@ def init_database(name, schema,
t.create(db)
# Fill the validation table
add_data(db, "_Validation", schema._Validation_records)
- # Initialize the summary information, allowing atmost 20 properties
+ # Initialize the summary information, allowing at most 20 properties
si = db.GetSummaryInformation(20)
si.SetProperty(PID_TITLE, "Installation Database")
si.SetProperty(PID_SUBJECT, ProductName)
@@ -516,7 +516,7 @@ class Directory:
def add_file(self, file, src=None, version=None, language=None):
"""Add a file to the current component of the directory, starting a new one
- one if there is no current component. By default, the file name in the source
+ if there is no current component. By default, the file name in the source
and the file table will be identical. If the src file is specified, it is
interpreted relative to the current directory. Optionally, a version and a
language can be specified for the entry in the File table."""
diff --git a/Tools/msi/uuids.py b/Tools/msi/uuids.py
index cf5bfd2..88c2d9e 100644
--- a/Tools/msi/uuids.py
+++ b/Tools/msi/uuids.py
@@ -60,4 +60,9 @@ product_codes = {
'2.7.3150':'{C0C31BCC-56FB-42a7-8766-D29E1BD74C7C}', # 2.7.3
'2.7.4121':'{47F45F45-72D7-4e54-AF41-26767EDE95CF}', # 2.7.4rc1
'2.7.4150':'{84ADC96C-B7E0-4938-9D6E-2B640D5DA224}', # 2.7.4
+ '2.7.5150':'{DBDD570E-0952-475f-9453-AB88F3DD5659}', # 2.7.5
+ '2.7.6121':'{D1EBC07F-A7B1-4163-83DB-AE813CEF392F}', # 2.7.6rc1
+ '2.7.6150':'{C3CC4DF5-39A5-4027-B136-2B3E1F5AB6E2}', # 2.7.6
+ '2.7.7121':'{5E0D187D-238B-4e96-9C75-C4CF141F5385}', # 2.7.7rc1
+ '2.7.7150':'{049CA433-77A0-4e48-AC76-180A282C4E10}', # 2.7.7
}
diff --git a/Tools/pybench/CommandLine.py b/Tools/pybench/CommandLine.py
index 6601be5..fde3178 100644
--- a/Tools/pybench/CommandLine.py
+++ b/Tools/pybench/CommandLine.py
@@ -458,7 +458,7 @@ class Application:
handler = getattr(self, handlername)
except AttributeError:
if value == '':
- # count the number of occurances
+ # count the number of occurrences
if values.has_key(optionname):
values[optionname] = values[optionname] + 1
else:
diff --git a/Tools/pybench/README b/Tools/pybench/README
index 022c8de..2061cab 100644
--- a/Tools/pybench/README
+++ b/Tools/pybench/README
@@ -3,7 +3,7 @@ ________________________________________________________________________
PYBENCH - A Python Benchmark Suite
________________________________________________________________________
- Extendable suite of of low-level benchmarks for measuring
+ Extendable suite of low-level benchmarks for measuring
the performance of the Python implementation
(interpreter, compiler or VM).
diff --git a/Tools/pybench/Setup.py b/Tools/pybench/Setup.py
index 309f3db..309f3db 100644..100755
--- a/Tools/pybench/Setup.py
+++ b/Tools/pybench/Setup.py
diff --git a/Tools/pybench/clockres.py b/Tools/pybench/clockres.py
index 64095b3..64095b3 100644..100755
--- a/Tools/pybench/clockres.py
+++ b/Tools/pybench/clockres.py
diff --git a/Tools/pybench/systimes.py b/Tools/pybench/systimes.py
index 013add2..db1210d 100644..100755
--- a/Tools/pybench/systimes.py
+++ b/Tools/pybench/systimes.py
@@ -5,7 +5,7 @@
This module implements various different strategies for measuring
performance timings. It tries to choose the best available method
- based on the platforma and available tools.
+ based on the platform and available tools.
On Windows, it is recommended to have the Mark Hammond win32
package installed. Alternatively, the Thomas Heller ctypes
diff --git a/Tools/pynche/DetailsViewer.py b/Tools/pynche/DetailsViewer.py
index 11a99a6..fb597b5 100644
--- a/Tools/pynche/DetailsViewer.py
+++ b/Tools/pynche/DetailsViewer.py
@@ -26,7 +26,7 @@ option menu:
other side. Thus if red were at 238 and 25 were added to it, red
would have the value 7.
- Preseve Distance
+ Preserve Distance
When the increment or decrement would send any of the tied variations
out of bounds, all tied variations are wrapped as one, so as to
preserve the distance between them. Thus if green and blue were tied,
diff --git a/Tools/scripts/byext.py b/Tools/scripts/byext.py
index 138d8dd..ca18776 100755
--- a/Tools/scripts/byext.py
+++ b/Tools/scripts/byext.py
@@ -2,6 +2,8 @@
"""Show file statistics by extension."""
+from __future__ import print_function
+
import os
import sys
diff --git a/Tools/scripts/findnocoding.py b/Tools/scripts/findnocoding.py
index e49fc42..5d93290 100755
--- a/Tools/scripts/findnocoding.py
+++ b/Tools/scripts/findnocoding.py
@@ -32,13 +32,13 @@ except ImportError:
"no sophisticated Python source file search will be done.")
-decl_re = re.compile(r"coding[=:]\s*([-\w.]+)")
+decl_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')
def get_declaration(line):
- match = decl_re.search(line)
+ match = decl_re.match(line)
if match:
return match.group(1)
- return ''
+ return b''
def has_correct_encoding(text, codec):
try:
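The new pattern follows PEP 263: a coding declaration must appear in a comment at the start of a line, so the looser search() against the old regex is replaced by match() against the anchored one. A small sketch of the difference, using invented sample lines:

    import re

    old_re = re.compile(r"coding[=:]\s*([-\w.]+)")                 # previous pattern, used with search()
    new_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)')  # PEP 263 style pattern, used with match()

    line = 's = "coding: utf-8"  # ordinary string, not a declaration'
    print(old_re.search(line).group(1))   # 'utf-8' -- a false positive
    print(new_re.match(line))             # None   -- correctly rejected

    decl = '# -*- coding: latin-1 -*-'
    print(new_re.match(decl).group(1))    # 'latin-1'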
diff --git a/Tools/scripts/fixnotice.py b/Tools/scripts/fixnotice.py
index 0ae4872..e613b65 100755
--- a/Tools/scripts/fixnotice.py
+++ b/Tools/scripts/fixnotice.py
@@ -2,7 +2,7 @@
"""(Ostensibly) fix copyright notices in files.
-Actually, this sript will simply replace a block of text in a file from one
+Actually, this script will simply replace a block of text in a file from one
string to another. It will only do this once though, i.e. not globally
throughout the file. It writes a backup file and then does an os.rename()
dance for atomicity.
diff --git a/Tools/scripts/gprof2html.py b/Tools/scripts/gprof2html.py
index cb01c2c..76408ca 100755
--- a/Tools/scripts/gprof2html.py
+++ b/Tools/scripts/gprof2html.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python2.3
+#! /usr/bin/env python
"""Transform gprof(1) output into useful HTML."""
diff --git a/Tools/scripts/h2py.py b/Tools/scripts/h2py.py
index c681e23..c64501e 100755
--- a/Tools/scripts/h2py.py
+++ b/Tools/scripts/h2py.py
@@ -58,6 +58,12 @@ except KeyError:
raise KeyError
except KeyError:
searchdirs=['/usr/include']
+ try:
+ searchdirs.insert(0, os.path.join('/usr/include',
+ os.environ['MULTIARCH']))
+ except KeyError:
+ pass
+
def main():
global filedict
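The addition prepends the multiarch header directory, taken from the MULTIARCH environment variable, ahead of the plain /usr/include fallback, and silently skips it when the variable is unset. A runnable restatement of that logic; the triplet shown is only an example:

    import os

    searchdirs = ['/usr/include']
    try:
        # e.g. MULTIARCH=x86_64-linux-gnu on a Debian-style multiarch system
        searchdirs.insert(0, os.path.join('/usr/include', os.environ['MULTIARCH']))
    except KeyError:
        pass  # MULTIARCH unset: keep only the generic directory

    print(searchdirs)  # ['/usr/include/x86_64-linux-gnu', '/usr/include'] or ['/usr/include']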
diff --git a/Tools/scripts/ifdef.py b/Tools/scripts/ifdef.py
index 2ed7a66..5487f1a 100755
--- a/Tools/scripts/ifdef.py
+++ b/Tools/scripts/ifdef.py
@@ -9,11 +9,11 @@
# options. On standard output it writes a copy of the input file(s)
# minus those code sections that are suppressed by the selected
# combination of defined/undefined symbols. The #if(n)def/#else/#else
-# lines themselfs (if the #if(n)def tests for one of the mentioned
+# lines themselves (if the #if(n)def tests for one of the mentioned
# names) are removed as well.
# Features: Arbitrary nesting of recognized and unrecognized
-# preprocesor statements works correctly. Unrecognized #if* commands
+# preprocessor statements works correctly. Unrecognized #if* commands
# are left in place, so it will never remove too much, only too
# little. It does accept whitespace around the '#' character.
diff --git a/Tools/scripts/patchcheck.py b/Tools/scripts/patchcheck.py
index 438e44e..418dd26 100755
--- a/Tools/scripts/patchcheck.py
+++ b/Tools/scripts/patchcheck.py
@@ -144,13 +144,13 @@ def docs_modified(file_paths):
@status("Misc/ACKS updated", modal=True)
def credit_given(file_paths):
"""Check if Misc/ACKS has been changed."""
- return 'Misc/ACKS' in file_paths
+ return os.path.join('Misc', 'ACKS') in file_paths
@status("Misc/NEWS updated", modal=True)
def reported_news(file_paths):
"""Check if Misc/NEWS has been changed."""
- return 'Misc/NEWS' in file_paths
+ return os.path.join('Misc', 'NEWS') in file_paths
def main():
@@ -158,7 +158,8 @@ def main():
python_files = [fn for fn in file_paths if fn.endswith('.py')]
c_files = [fn for fn in file_paths if fn.endswith(('.c', '.h'))]
doc_files = [fn for fn in file_paths if fn.startswith('Doc')]
- special_files = {'Misc/ACKS', 'Misc/NEWS'} & set(file_paths)
+ misc_files = {os.path.join('Misc', 'ACKS'), os.path.join('Misc', 'NEWS')}\
+ & set(file_paths)
# PEP 8 whitespace rules enforcement.
normalize_whitespace(python_files)
# C rules enforcement.
@@ -168,14 +169,15 @@ def main():
# Docs updated.
docs_modified(doc_files)
# Misc/ACKS changed.
- credit_given(special_files)
+ credit_given(misc_files)
# Misc/NEWS changed.
- reported_news(special_files)
+ reported_news(misc_files)
# Test suite run and passed.
if python_files or c_files:
+ end = " and check for refleaks?" if c_files else "?"
print
- print "Did you run the test suite?"
+ print "Did you run the test suite" + end
if __name__ == '__main__':
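Building the lookup keys with os.path.join makes the membership tests portable: the literal 'Misc/ACKS' only equals the reported paths on platforms whose separator is '/', while the joined form uses the native separator everywhere. A small sketch with invented paths:

    import os

    file_paths = {os.path.join('Misc', 'ACKS'), os.path.join('Lib', 'os.py')}

    print(os.path.join('Misc', 'ACKS') in file_paths)  # True on every platform
    print('Misc/ACKS' in file_paths)                   # False on Windows, where the key is 'Misc\\ACKS'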
diff --git a/Tools/scripts/pathfix.py b/Tools/scripts/pathfix.py
index 7f6f191..850903a 100755
--- a/Tools/scripts/pathfix.py
+++ b/Tools/scripts/pathfix.py
@@ -135,7 +135,7 @@ def fix(filename):
except os.error, msg:
err('%s: rename failed (%r)\n' % (filename, msg))
return 1
- # Return succes
+ # Return success
return 0
def fixline(line):
diff --git a/Tools/scripts/pindent.py b/Tools/scripts/pindent.py
index 7bfc415..6e40d60 100755
--- a/Tools/scripts/pindent.py
+++ b/Tools/scripts/pindent.py
@@ -76,11 +76,14 @@
# - realign comments
# - optionally do much more thorough reformatting, a la C indent
+from __future__ import print_function
+
# Defaults
STEPSIZE = 8
TABSIZE = 8
-EXPANDTABS = 0
+EXPANDTABS = False
+import io
import re
import sys
@@ -89,7 +92,8 @@ next['if'] = next['elif'] = 'elif', 'else', 'end'
next['while'] = next['for'] = 'else', 'end'
next['try'] = 'except', 'finally'
next['except'] = 'except', 'else', 'finally', 'end'
-next['else'] = next['finally'] = next['def'] = next['class'] = 'end'
+next['else'] = next['finally'] = next['with'] = \
+ next['def'] = next['class'] = 'end'
next['end'] = ()
start = 'if', 'while', 'for', 'try', 'with', 'def', 'class'
@@ -105,11 +109,11 @@ class PythonIndenter:
self.expandtabs = expandtabs
self._write = fpo.write
self.kwprog = re.compile(
- r'^\s*(?P<kw>[a-z]+)'
- r'(\s+(?P<id>[a-zA-Z_]\w*))?'
+ r'^(?:\s|\\\n)*(?P<kw>[a-z]+)'
+ r'((?:\s|\\\n)+(?P<id>[a-zA-Z_]\w*))?'
r'[^\w]')
self.endprog = re.compile(
- r'^\s*#?\s*end\s+(?P<kw>[a-z]+)'
+ r'^(?:\s|\\\n)*#?\s*end\s+(?P<kw>[a-z]+)'
r'(\s+(?P<id>[a-zA-Z_]\w*))?'
r'[^\w]')
self.wsprog = re.compile(r'^[ \t]*')
@@ -125,7 +129,7 @@ class PythonIndenter:
def readline(self):
line = self.fpi.readline()
- if line: self.lineno = self.lineno + 1
+ if line: self.lineno += 1
# end if
return line
# end def readline
@@ -143,27 +147,24 @@ class PythonIndenter:
line2 = self.readline()
if not line2: break
# end if
- line = line + line2
+ line += line2
# end while
return line
# end def getline
- def putline(self, line, indent = None):
- if indent is None:
- self.write(line)
- return
- # end if
+ def putline(self, line, indent):
tabs, spaces = divmod(indent*self.indentsize, self.tabsize)
- i = 0
- m = self.wsprog.match(line)
- if m: i = m.end()
+ i = self.wsprog.match(line).end()
+ line = line[i:]
+ if line[:1] not in ('\n', '\r', ''):
+ line = '\t'*tabs + ' '*spaces + line
# end if
- self.write('\t'*tabs + ' '*spaces + line[i:])
+ self.write(line)
# end def putline
def reformat(self):
stack = []
- while 1:
+ while True:
line = self.getline()
if not line: break # EOF
# end if
@@ -173,10 +174,9 @@ class PythonIndenter:
kw2 = m.group('kw')
if not stack:
self.error('unexpected end')
- elif stack[-1][0] != kw2:
+ elif stack.pop()[0] != kw2:
self.error('unmatched end')
# end if
- del stack[-1:]
self.putline(line, len(stack))
continue
# end if
@@ -208,23 +208,23 @@ class PythonIndenter:
def delete(self):
begin_counter = 0
end_counter = 0
- while 1:
+ while True:
line = self.getline()
if not line: break # EOF
# end if
m = self.endprog.match(line)
if m:
- end_counter = end_counter + 1
+ end_counter += 1
continue
# end if
m = self.kwprog.match(line)
if m:
kw = m.group('kw')
if kw in start:
- begin_counter = begin_counter + 1
+ begin_counter += 1
# end if
# end if
- self.putline(line)
+ self.write(line)
# end while
if begin_counter - end_counter < 0:
sys.stderr.write('Warning: input contained more end tags than expected\n')
@@ -234,17 +234,12 @@ class PythonIndenter:
# end def delete
def complete(self):
- self.indentsize = 1
stack = []
todo = []
- thisid = ''
- current, firstkw, lastkw, topid = 0, '', '', ''
- while 1:
+ currentws = thisid = firstkw = lastkw = topid = ''
+ while True:
line = self.getline()
- i = 0
- m = self.wsprog.match(line)
- if m: i = m.end()
- # end if
+ i = self.wsprog.match(line).end()
m = self.endprog.match(line)
if m:
thiskw = 'end'
@@ -269,7 +264,9 @@ class PythonIndenter:
thiskw = ''
# end if
# end if
- indent = len(line[:i].expandtabs(self.tabsize))
+ indentws = line[:i]
+ indent = len(indentws.expandtabs(self.tabsize))
+ current = len(currentws.expandtabs(self.tabsize))
while indent < current:
if firstkw:
if topid:
@@ -278,11 +275,11 @@ class PythonIndenter:
else:
s = '# end %s\n' % firstkw
# end if
- self.putline(s, current)
+ self.write(currentws + s)
firstkw = lastkw = ''
# end if
- current, firstkw, lastkw, topid = stack[-1]
- del stack[-1]
+ currentws, firstkw, lastkw, topid = stack.pop()
+ current = len(currentws.expandtabs(self.tabsize))
# end while
if indent == current and firstkw:
if thiskw == 'end':
@@ -297,18 +294,18 @@ class PythonIndenter:
else:
s = '# end %s\n' % firstkw
# end if
- self.putline(s, current)
+ self.write(currentws + s)
firstkw = lastkw = topid = ''
# end if
# end if
if indent > current:
- stack.append((current, firstkw, lastkw, topid))
+ stack.append((currentws, firstkw, lastkw, topid))
if thiskw and thiskw not in start:
# error
thiskw = ''
# end if
- current, firstkw, lastkw, topid = \
- indent, thiskw, thiskw, thisid
+ currentws, firstkw, lastkw, topid = \
+ indentws, thiskw, thiskw, thisid
# end if
if thiskw:
if thiskw in start:
@@ -326,7 +323,6 @@ class PythonIndenter:
self.write(line)
# end while
# end def complete
-
# end class PythonIndenter
# Simplified user interface
@@ -352,116 +348,86 @@ def reformat_filter(input = sys.stdin, output = sys.stdout,
pi.reformat()
# end def reformat_filter
-class StringReader:
- def __init__(self, buf):
- self.buf = buf
- self.pos = 0
- self.len = len(self.buf)
- # end def __init__
- def read(self, n = 0):
- if n <= 0:
- n = self.len - self.pos
- else:
- n = min(n, self.len - self.pos)
- # end if
- r = self.buf[self.pos : self.pos + n]
- self.pos = self.pos + n
- return r
- # end def read
- def readline(self):
- i = self.buf.find('\n', self.pos)
- return self.read(i + 1 - self.pos)
- # end def readline
- def readlines(self):
- lines = []
- line = self.readline()
- while line:
- lines.append(line)
- line = self.readline()
- # end while
- return lines
- # end def readlines
- # seek/tell etc. are left as an exercise for the reader
-# end class StringReader
-
-class StringWriter:
- def __init__(self):
- self.buf = ''
- # end def __init__
- def write(self, s):
- self.buf = self.buf + s
- # end def write
- def getvalue(self):
- return self.buf
- # end def getvalue
-# end class StringWriter
-
def complete_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
- input = StringReader(source)
- output = StringWriter()
+ input = io.BytesIO(source)
+ output = io.BytesIO()
pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs)
pi.complete()
return output.getvalue()
# end def complete_string
def delete_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
- input = StringReader(source)
- output = StringWriter()
+ input = io.BytesIO(source)
+ output = io.BytesIO()
pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs)
pi.delete()
return output.getvalue()
# end def delete_string
def reformat_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
- input = StringReader(source)
- output = StringWriter()
+ input = io.BytesIO(source)
+ output = io.BytesIO()
pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs)
pi.reformat()
return output.getvalue()
# end def reformat_string
+def make_backup(filename):
+ import os, os.path
+ backup = filename + '~'
+ if os.path.lexists(backup):
+ try:
+ os.remove(backup)
+ except os.error:
+ print("Can't remove backup %r" % (backup,), file=sys.stderr)
+ # end try
+ # end if
+ try:
+ os.rename(filename, backup)
+ except os.error:
+ print("Can't rename %r to %r" % (filename, backup), file=sys.stderr)
+ # end try
+# end def make_backup
+
def complete_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
- source = open(filename, 'r').read()
+ with open(filename, 'r') as f:
+ source = f.read()
+ # end with
result = complete_string(source, stepsize, tabsize, expandtabs)
if source == result: return 0
# end if
- import os
- try: os.rename(filename, filename + '~')
- except os.error: pass
- # end try
- f = open(filename, 'w')
- f.write(result)
- f.close()
+ make_backup(filename)
+ with open(filename, 'w') as f:
+ f.write(result)
+ # end with
return 1
# end def complete_file
def delete_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
- source = open(filename, 'r').read()
+ with open(filename, 'r') as f:
+ source = f.read()
+ # end with
result = delete_string(source, stepsize, tabsize, expandtabs)
if source == result: return 0
# end if
- import os
- try: os.rename(filename, filename + '~')
- except os.error: pass
- # end try
- f = open(filename, 'w')
- f.write(result)
- f.close()
+ make_backup(filename)
+ with open(filename, 'w') as f:
+ f.write(result)
+ # end with
return 1
# end def delete_file
def reformat_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
- source = open(filename, 'r').read()
+ with open(filename, 'r') as f:
+ source = f.read()
+ # end with
result = reformat_string(source, stepsize, tabsize, expandtabs)
if source == result: return 0
# end if
- import os
- try: os.rename(filename, filename + '~')
- except os.error: pass
- # end try
- f = open(filename, 'w')
- f.write(result)
- f.close()
+ make_backup(filename)
+ with open(filename, 'w') as f:
+ f.write(result)
+ # end with
return 1
# end def reformat_file
@@ -474,7 +440,7 @@ usage: pindent (-c|-d|-r) [-s stepsize] [-t tabsize] [-e] [file] ...
-r : reformat a completed program (use #end directives)
-s stepsize: indentation step (default %(STEPSIZE)d)
-t tabsize : the worth in spaces of a tab (default %(TABSIZE)d)
--e : expand TABs into spaces (defailt OFF)
+-e : expand TABs into spaces (default OFF)
[file] ... : files are changed in place, with backups in file~
If no files are specified or a single - is given,
the program acts as a filter (reads stdin, writes stdout).
@@ -517,7 +483,7 @@ def test():
elif o == '-t':
tabsize = int(a)
elif o == '-e':
- expandtabs = 1
+ expandtabs = True
# end if
# end for
if not action:
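The hand-rolled StringReader and StringWriter helpers are dropped in favour of io.BytesIO, which already provides the read()/readline()/write()/getvalue() interface the indenter needs. A minimal sketch of the replacement, with an invented input buffer:

    import io

    src = io.BytesIO(b'if x:\n    pass\n')
    print(src.readline())   # b'if x:\n' -- same readline() behaviour as the old StringReader

    out = io.BytesIO()
    out.write(b'# end if\n')
    print(out.getvalue())   # b'# end if\n' -- same getvalue() as the old StringWriter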
diff --git a/Tools/scripts/serve.py b/Tools/scripts/serve.py
index 50061d5..399da1a 100755
--- a/Tools/scripts/serve.py
+++ b/Tools/scripts/serve.py
@@ -2,7 +2,7 @@
'''
Small wsgiref based web server. Takes a path to serve from and an
optional port number (defaults to 8000), then tries to serve files.
-Mime types are guessed from the file names, 404 errors are thrown
+Mime types are guessed from the file names, 404 errors are raised
if the file is not found. Used for the make serve target in Doc.
'''
import sys
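The docstring above describes a wsgiref-based file server with a default port of 8000. A generic sketch of the wsgiref pattern it builds on, not the script's own code, with a trivial application standing in for its file-serving logic:

    from wsgiref.simple_server import make_server

    def app(environ, start_response):
        # Trivial WSGI application; serve.py instead serves files from the
        # directory given on the command line.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello from wsgiref\n']

    httpd = make_server('', 8000, app)  # 8000 matches the script's default port
    httpd.handle_request()              # handle a single request, then return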
diff --git a/Tools/scripts/svneol.py b/Tools/scripts/svneol.py
index 9357c7e..9357c7e 100644..100755
--- a/Tools/scripts/svneol.py
+++ b/Tools/scripts/svneol.py
diff --git a/Tools/ssl/get-remote-certificate.py b/Tools/ssl/get-remote-certificate.py
index b63428a..b63428a 100644..100755
--- a/Tools/ssl/get-remote-certificate.py
+++ b/Tools/ssl/get-remote-certificate.py
diff --git a/Tools/unicode/comparecodecs.py b/Tools/unicode/comparecodecs.py
index cd417a4..cd417a4 100644..100755
--- a/Tools/unicode/comparecodecs.py
+++ b/Tools/unicode/comparecodecs.py
diff --git a/config.guess b/config.guess
new file mode 100755
index 0000000..b79252d
--- /dev/null
+++ b/config.guess
@@ -0,0 +1,1558 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+# Copyright 1992-2013 Free Software Foundation, Inc.
+
+timestamp='2013-06-10'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+#
+# Originally written by Per Bothner.
+#
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+#
+# Please send patches with a ChangeLog entry to config-patches@gnu.org.
+
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright 1992-2013 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help" >&2
+ exit 1 ;;
+ * )
+ break ;;
+ esac
+done
+
+if test $# != 0; then
+ echo "$me: too many arguments$help" >&2
+ exit 1
+fi
+
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# Portable tmp directory creation inspired by the Autoconf team.
+
+set_cc_for_build='
+trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
+trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
+: ${TMPDIR=/tmp} ;
+ { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+ { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
+ { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+ { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
+dummy=$tmp/dummy ;
+tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,) echo "int x;" > $dummy.c ;
+ for c in cc gcc c89 c99 ; do
+ if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
+ CC_FOR_BUILD="$c"; break ;
+ fi ;
+ done ;
+ if test x"$CC_FOR_BUILD" = x ; then
+ CC_FOR_BUILD=no_compiler_found ;
+ fi
+ ;;
+ ,,*) CC_FOR_BUILD=$CC ;;
+ ,*,*) CC_FOR_BUILD=$HOST_CC ;;
+esac ; set_cc_for_build= ;'
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+ PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+case "${UNAME_SYSTEM}" in
+Linux|GNU|GNU/*)
+ # If the system lacks a compiler, then just pick glibc.
+ # We could probably try harder.
+ LIBC=gnu
+
+ eval $set_cc_for_build
+ cat <<-EOF > $dummy.c
+ #include <features.h>
+ #if defined(__UCLIBC__)
+ LIBC=uclibc
+ #elif defined(__dietlibc__)
+ LIBC=dietlibc
+ #else
+ LIBC=gnu
+ #endif
+ EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'`
+ ;;
+esac
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+ *:NetBSD:*:*)
+ # NetBSD (nbsd) targets should (where applicable) match one or
+ # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
+ # switched to ELF, *-*-netbsd* would select the old
+ # object file format. This provides both forward
+ # compatibility and a consistent mechanism for selecting the
+ # object file format.
+ #
+ # Note: NetBSD doesn't particularly care about the vendor
+ # portion of the name. We always set it to "unknown".
+ sysctl="sysctl -n hw.machine_arch"
+ UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
+ /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+ case "${UNAME_MACHINE_ARCH}" in
+ armeb) machine=armeb-unknown ;;
+ arm*) machine=arm-unknown ;;
+ sh3el) machine=shl-unknown ;;
+ sh3eb) machine=sh-unknown ;;
+ sh5el) machine=sh5le-unknown ;;
+ *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+ esac
+ # The Operating System including object format, if it has switched
+ # to ELF recently, or will in the future.
+ case "${UNAME_MACHINE_ARCH}" in
+ arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+ eval $set_cc_for_build
+ if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ELF__
+ then
+ # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+ # Return netbsd for either. FIX?
+ os=netbsd
+ else
+ os=netbsdelf
+ fi
+ ;;
+ *)
+ os=netbsd
+ ;;
+ esac
+ # The OS release
+ # Debian GNU/NetBSD machines have a different userland, and
+ # thus, need a distinct triplet. However, they do not need
+ # kernel version information, so it can be replaced with a
+ # suitable tag, in the style of linux-gnu.
+ case "${UNAME_VERSION}" in
+ Debian*)
+ release='-gnu'
+ ;;
+ *)
+ release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+ ;;
+ esac
+ # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+ # contains redundant information, the shorter form:
+ # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+ echo "${machine}-${os}${release}"
+ exit ;;
+ *:Bitrig:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE}
+ exit ;;
+ *:OpenBSD:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+ exit ;;
+ *:ekkoBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
+ exit ;;
+ *:SolidBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
+ exit ;;
+ macppc:MirBSD:*:*)
+ echo powerpc-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ *:MirBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ alpha:OSF1:*:*)
+ case $UNAME_RELEASE in
+ *4.0)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+ ;;
+ *5.*)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+ ;;
+ esac
+ # According to Compaq, /usr/sbin/psrinfo has been available on
+ # OSF/1 and Tru64 systems produced since 1995. I hope that
+ # covers most systems running today. This code pipes the CPU
+ # types through head -n 1, so we only detect the type of CPU 0.
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
+ case "$ALPHA_CPU_TYPE" in
+ "EV4 (21064)")
+ UNAME_MACHINE="alpha" ;;
+ "EV4.5 (21064)")
+ UNAME_MACHINE="alpha" ;;
+ "LCA4 (21066/21068)")
+ UNAME_MACHINE="alpha" ;;
+ "EV5 (21164)")
+ UNAME_MACHINE="alphaev5" ;;
+ "EV5.6 (21164A)")
+ UNAME_MACHINE="alphaev56" ;;
+ "EV5.6 (21164PC)")
+ UNAME_MACHINE="alphapca56" ;;
+ "EV5.7 (21164PC)")
+ UNAME_MACHINE="alphapca57" ;;
+ "EV6 (21264)")
+ UNAME_MACHINE="alphaev6" ;;
+ "EV6.7 (21264A)")
+ UNAME_MACHINE="alphaev67" ;;
+ "EV6.8CB (21264C)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.8AL (21264B)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.8CX (21264D)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.9A (21264/EV69A)")
+ UNAME_MACHINE="alphaev69" ;;
+ "EV7 (21364)")
+ UNAME_MACHINE="alphaev7" ;;
+ "EV7.9 (21364A)")
+ UNAME_MACHINE="alphaev79" ;;
+ esac
+ # A Pn.n version is a patched version.
+ # A Vn.n version is a released version.
+ # A Tn.n version is a released field test version.
+ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ # Reset EXIT trap before exiting to avoid spurious non-zero exit code.
+ exitcode=$?
+ trap '' 0
+ exit $exitcode ;;
+ Alpha\ *:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # Should we change UNAME_MACHINE based on the output of uname instead
+ # of the specific Alpha model?
+ echo alpha-pc-interix
+ exit ;;
+ 21064:Windows_NT:50:3)
+ echo alpha-dec-winnt3.5
+ exit ;;
+ Amiga*:UNIX_System_V:4.0:*)
+ echo m68k-unknown-sysv4
+ exit ;;
+ *:[Aa]miga[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-amigaos
+ exit ;;
+ *:[Mm]orph[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-morphos
+ exit ;;
+ *:OS/390:*:*)
+ echo i370-ibm-openedition
+ exit ;;
+ *:z/VM:*:*)
+ echo s390-ibm-zvmoe
+ exit ;;
+ *:OS400:*:*)
+ echo powerpc-ibm-os400
+ exit ;;
+ arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+ echo arm-acorn-riscix${UNAME_RELEASE}
+ exit ;;
+ arm*:riscos:*:*|arm*:RISCOS:*:*)
+ echo arm-unknown-riscos
+ exit ;;
+ SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+ echo hppa1.1-hitachi-hiuxmpp
+ exit ;;
+ Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+ # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+ if test "`(/bin/universe) 2>/dev/null`" = att ; then
+ echo pyramid-pyramid-sysv3
+ else
+ echo pyramid-pyramid-bsd
+ fi
+ exit ;;
+ NILE*:*:*:dcosx)
+ echo pyramid-pyramid-svr4
+ exit ;;
+ DRS?6000:unix:4.0:6*)
+ echo sparc-icl-nx6
+ exit ;;
+ DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
+ case `/usr/bin/uname -p` in
+ sparc) echo sparc-icl-nx7; exit ;;
+ esac ;;
+ s390x:SunOS:*:*)
+ echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4H:SunOS:5.*:*)
+ echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+ echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
+ echo i386-pc-auroraux${UNAME_RELEASE}
+ exit ;;
+ i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
+ eval $set_cc_for_build
+ SUN_ARCH="i386"
+ # If there is a compiler, see if it is configured for 64-bit objects.
+ # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+ # This test works for both compilers.
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ SUN_ARCH="x86_64"
+ fi
+ fi
+ echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:6*:*)
+ # According to config.sub, this is the proper way to canonicalize
+ # SunOS6. Hard to guess exactly what SunOS6 will be like, but
+ # it's likely to be more like Solaris than SunOS4.
+ echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:*:*)
+ case "`/usr/bin/arch -k`" in
+ Series*|S4*)
+ UNAME_RELEASE=`uname -v`
+ ;;
+ esac
+ # Japanese Language versions have a version number like `4.1.3-JL'.
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+ exit ;;
+ sun3*:SunOS:*:*)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ exit ;;
+ sun*:*:4.2BSD:*)
+ UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+ test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+ case "`/bin/arch`" in
+ sun3)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ ;;
+ sun4)
+ echo sparc-sun-sunos${UNAME_RELEASE}
+ ;;
+ esac
+ exit ;;
+ aushp:SunOS:*:*)
+ echo sparc-auspex-sunos${UNAME_RELEASE}
+ exit ;;
+ # The situation for MiNT is a little confusing. The machine name
+ # can be virtually everything (everything which is not
+ # "atarist" or "atariste" at least should have a processor
+ # > m68000). The system name ranges from "MiNT" over "FreeMiNT"
+ # to the lowercase version "mint" (or "freemint"). Finally
+ # the system name "TOS" denotes a system which is actually not
+ # MiNT. But MiNT is downward compatible to TOS, so this should
+ # be no problem.
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+ echo m68k-milan-mint${UNAME_RELEASE}
+ exit ;;
+ hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+ echo m68k-hades-mint${UNAME_RELEASE}
+ exit ;;
+ *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+ echo m68k-unknown-mint${UNAME_RELEASE}
+ exit ;;
+ m68k:machten:*:*)
+ echo m68k-apple-machten${UNAME_RELEASE}
+ exit ;;
+ powerpc:machten:*:*)
+ echo powerpc-apple-machten${UNAME_RELEASE}
+ exit ;;
+ RISC*:Mach:*:*)
+ echo mips-dec-mach_bsd4.3
+ exit ;;
+ RISC*:ULTRIX:*:*)
+ echo mips-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ VAX*:ULTRIX*:*:*)
+ echo vax-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ 2020:CLIX:*:* | 2430:CLIX:*:*)
+ echo clipper-intergraph-clix${UNAME_RELEASE}
+ exit ;;
+ mips:*:*:UMIPS | mips:*:*:RISCos)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+#ifdef __cplusplus
+#include <stdio.h> /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+ #if defined (host_mips) && defined (MIPSEB)
+ #if defined (SYSTYPE_SYSV)
+ printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_SVR4)
+ printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+ printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+ #endif
+ #endif
+ exit (-1);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c &&
+ dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+ SYSTEM_NAME=`$dummy $dummyarg` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo mips-mips-riscos${UNAME_RELEASE}
+ exit ;;
+ Motorola:PowerMAX_OS:*:*)
+ echo powerpc-motorola-powermax
+ exit ;;
+ Motorola:*:4.3:PL8-*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:Power_UNIX:*:*)
+ echo powerpc-harris-powerunix
+ exit ;;
+ m88k:CX/UX:7*:*)
+ echo m88k-harris-cxux7
+ exit ;;
+ m88k:*:4*:R4*)
+ echo m88k-motorola-sysv4
+ exit ;;
+ m88k:*:3*:R3*)
+ echo m88k-motorola-sysv3
+ exit ;;
+ AViiON:dgux:*:*)
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+ then
+ if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+ [ ${TARGET_BINARY_INTERFACE}x = x ]
+ then
+ echo m88k-dg-dgux${UNAME_RELEASE}
+ else
+ echo m88k-dg-dguxbcs${UNAME_RELEASE}
+ fi
+ else
+ echo i586-dg-dgux${UNAME_RELEASE}
+ fi
+ exit ;;
+ M88*:DolphinOS:*:*) # DolphinOS (SVR3)
+ echo m88k-dolphin-sysv3
+ exit ;;
+ M88*:*:R3*:*)
+ # Delta 88k system running SVR3
+ echo m88k-motorola-sysv3
+ exit ;;
+ XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+ echo m88k-tektronix-sysv3
+ exit ;;
+ Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+ echo m68k-tektronix-bsd
+ exit ;;
+ *:IRIX*:*:*)
+ echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+ exit ;;
+ ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+ i*86:AIX:*:*)
+ echo i386-ibm-aix
+ exit ;;
+ ia64:AIX:*:*)
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <sys/systemcfg.h>
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+ then
+ echo "$SYSTEM_NAME"
+ else
+ echo rs6000-ibm-aix3.2.5
+ fi
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit ;;
+ *:AIX:*:[4567])
+ IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+ if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit ;;
+ ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+ echo romp-ibm-bsd4.4
+ exit ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
+ exit ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit ;;
+ 9000/[34678]??:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ case "${UNAME_MACHINE}" in
+ 9000/31? ) HP_ARCH=m68000 ;;
+ 9000/[34]?? ) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ if [ -x /usr/bin/getconf ]; then
+ sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+ sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+ case "${sc_cpu_version}" in
+ 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "${sc_kernel_bits}" in
+ 32) HP_ARCH="hppa2.0n" ;;
+ 64) HP_ARCH="hppa2.0w" ;;
+ '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
+ esac ;;
+ esac
+ fi
+ if [ "${HP_ARCH}" = "" ]; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+ test -z "$HP_ARCH" && HP_ARCH=hppa
+ fi ;;
+ esac
+ if [ ${HP_ARCH} = "hppa2.0w" ]
+ then
+ eval $set_cc_for_build
+
+ # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+ # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
+ # generating 64-bit code. GNU and HP use different nomenclature:
+ #
+ # $ CC_FOR_BUILD=cc ./config.guess
+ # => hppa2.0w-hp-hpux11.23
+ # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+ # => hppa64-hp-hpux11.23
+
+ if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+ grep -q __LP64__
+ then
+ HP_ARCH="hppa2.0w"
+ else
+ HP_ARCH="hppa64"
+ fi
+ fi
+ echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+ exit ;;
+ ia64:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ echo ia64-hp-hpux${HPUX_REV}
+ exit ;;
+ 3050*:HI-UX:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <unistd.h>
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however. */
+ if (CPU_IS_PA_RISC (cpu))
+ {
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+ default: puts ("hppa-hitachi-hiuxwe2"); break;
+ }
+ }
+ else if (CPU_IS_HP_MC68K (cpu))
+ puts ("m68k-hitachi-hiuxwe2");
+ else puts ("unknown-hitachi-hiuxwe2");
+ exit (0);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo unknown-hitachi-hiuxwe2
+ exit ;;
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+ echo hppa1.1-hp-bsd
+ exit ;;
+ 9000/8??:4.3bsd:*:*)
+ echo hppa1.0-hp-bsd
+ exit ;;
+ *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+ echo hppa1.0-hp-mpeix
+ exit ;;
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+ echo hppa1.1-hp-osf
+ exit ;;
+ hp8??:OSF1:*:*)
+ echo hppa1.0-hp-osf
+ exit ;;
+ i*86:OSF1:*:*)
+ if [ -x /usr/sbin/sysversion ] ; then
+ echo ${UNAME_MACHINE}-unknown-osf1mk
+ else
+ echo ${UNAME_MACHINE}-unknown-osf1
+ fi
+ exit ;;
+ parisc*:Lites*:*:*)
+ echo hppa1.1-hp-lites
+ exit ;;
+ C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+ echo c1-convex-bsd
+ exit ;;
+ C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+ echo c34-convex-bsd
+ exit ;;
+ C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+ echo c38-convex-bsd
+ exit ;;
+ C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+ echo c4-convex-bsd
+ exit ;;
+ CRAY*Y-MP:*:*:*)
+ echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*[A-Z]90:*:*:*)
+ echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+ -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*TS:*:*:*)
+ echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*T3E:*:*:*)
+ echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*SV1:*:*:*)
+ echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ *:UNICOS/mp:*:*)
+ echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+ FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ 5000:UNIX_System_V:4.*:*)
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+ echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+ echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+ exit ;;
+ sparc*:BSD/OS:*:*)
+ echo sparc-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:BSD/OS:*:*)
+ echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:FreeBSD:*:*)
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ case ${UNAME_PROCESSOR} in
+ amd64)
+ echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ *)
+ echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ esac
+ exit ;;
+ i*:CYGWIN*:*)
+ echo ${UNAME_MACHINE}-pc-cygwin
+ exit ;;
+ *:MINGW64*:*)
+ echo ${UNAME_MACHINE}-pc-mingw64
+ exit ;;
+ *:MINGW*:*)
+ echo ${UNAME_MACHINE}-pc-mingw32
+ exit ;;
+ i*:MSYS*:*)
+ echo ${UNAME_MACHINE}-pc-msys
+ exit ;;
+ i*:windows32*:*)
+ # uname -m includes "-pc" on this system.
+ echo ${UNAME_MACHINE}-mingw32
+ exit ;;
+ i*:PW*:*)
+ echo ${UNAME_MACHINE}-pc-pw32
+ exit ;;
+ *:Interix*:*)
+ case ${UNAME_MACHINE} in
+ x86)
+ echo i586-pc-interix${UNAME_RELEASE}
+ exit ;;
+ authenticamd | genuineintel | EM64T)
+ echo x86_64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ IA64)
+ echo ia64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ esac ;;
+ [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
+ echo i${UNAME_MACHINE}-pc-mks
+ exit ;;
+ 8664:Windows_NT:*)
+ echo x86_64-pc-mks
+ exit ;;
+ i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
+ # UNAME_MACHINE based on the output of uname instead of i386?
+ echo i586-pc-interix
+ exit ;;
+ i*:UWIN*:*)
+ echo ${UNAME_MACHINE}-pc-uwin
+ exit ;;
+ amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+ echo x86_64-unknown-cygwin
+ exit ;;
+ p*:CYGWIN*:*)
+ echo powerpcle-unknown-cygwin
+ exit ;;
+ prep*:SunOS:5.*:*)
+ echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ *:GNU:*:*)
+ # the GNU system
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit ;;
+ *:GNU/*:*:*)
+ # other systems with GNU libc and userland
+ echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC}
+ exit ;;
+ i*86:Minix:*:*)
+ echo ${UNAME_MACHINE}-pc-minix
+ exit ;;
+ aarch64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ aarch64_be:Linux:*:*)
+ UNAME_MACHINE=aarch64_be
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ alpha:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep -q ld.so.1
+ if test "$?" = 0 ; then LIBC="gnulibc1" ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ arc:Linux:*:* | arceb:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ arm*:Linux:*:*)
+ eval $set_cc_for_build
+ if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_EABI__
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ else
+ if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_PCS_VFP
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi
+ else
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf
+ fi
+ fi
+ exit ;;
+ avr32*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ cris:Linux:*:*)
+ echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+ exit ;;
+ crisv32:Linux:*:*)
+ echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+ exit ;;
+ frv:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ hexagon:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ i*86:Linux:*:*)
+ echo ${UNAME_MACHINE}-pc-linux-${LIBC}
+ exit ;;
+ ia64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ m32r*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ m68*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ mips:Linux:*:* | mips64:Linux:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #undef CPU
+ #undef ${UNAME_MACHINE}
+ #undef ${UNAME_MACHINE}el
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ CPU=${UNAME_MACHINE}el
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ CPU=${UNAME_MACHINE}
+ #else
+ CPU=
+ #endif
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
+ test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; }
+ ;;
+ or1k:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ or32:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ padre:Linux:*:*)
+ echo sparc-unknown-linux-${LIBC}
+ exit ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-${LIBC}
+ exit ;;
+ parisc:Linux:*:* | hppa:Linux:*:*)
+ # Look for CPU level
+ case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+ PA7*) echo hppa1.1-unknown-linux-${LIBC} ;;
+ PA8*) echo hppa2.0-unknown-linux-${LIBC} ;;
+ *) echo hppa-unknown-linux-${LIBC} ;;
+ esac
+ exit ;;
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-${LIBC}
+ exit ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-${LIBC}
+ exit ;;
+ ppc64le:Linux:*:*)
+ echo powerpc64le-unknown-linux-${LIBC}
+ exit ;;
+ ppcle:Linux:*:*)
+ echo powerpcle-unknown-linux-${LIBC}
+ exit ;;
+ s390:Linux:*:* | s390x:Linux:*:*)
+ echo ${UNAME_MACHINE}-ibm-linux-${LIBC}
+ exit ;;
+ sh64*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ sh*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ tile*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ vax:Linux:*:*)
+ echo ${UNAME_MACHINE}-dec-linux-${LIBC}
+ exit ;;
+ x86_64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ xtensa*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ i*86:DYNIX/ptx:4*:*)
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+ # earlier versions are messed up and put the nodename in both
+ # sysname and nodename.
+ echo i386-sequent-sysv4
+ exit ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+ exit ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo ${UNAME_MACHINE}-pc-os2-emx
+ exit ;;
+ i*86:XTS-300:*:STOP)
+ echo ${UNAME_MACHINE}-unknown-stop
+ exit ;;
+ i*86:atheos:*:*)
+ echo ${UNAME_MACHINE}-unknown-atheos
+ exit ;;
+ i*86:syllable:*:*)
+ echo ${UNAME_MACHINE}-pc-syllable
+ exit ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
+ echo i386-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ i*86:*DOS:*:*)
+ echo ${UNAME_MACHINE}-pc-msdosdjgpp
+ exit ;;
+ i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+ UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+ else
+ echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+ fi
+ exit ;;
+ i*86:*:5:[678]*)
+ # UnixWare 7.x, OpenUNIX and OpenServer 6.
+ case `/bin/uname -X | grep "^Machine"` in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+ exit ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+ echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+ else
+ echo ${UNAME_MACHINE}-pc-sysv32
+ fi
+ exit ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+ # uname -m prints for DJGPP always 'pc', but it prints nothing about
+ # the processor, so we play safe by assuming i586.
+ # Note: whatever this is, it MUST be the same as what config.sub
+ # prints for the "djgpp" host, or else GDB configury will decide that
+ # this is a cross-build.
+ echo i586-pc-msdosdjgpp
+ exit ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
+ fi
+ exit ;;
+ mini*:CTIX:SYS*5:*)
+ # "miniframe"
+ echo m68010-convergent-sysv
+ exit ;;
+ mc68k:UNIX:SYSTEM5:3.51m)
+ echo m68k-convergent-sysv
+ exit ;;
+ M680?0:D-NIX:5.3:*)
+ echo m68k-diab-dnix
+ exit ;;
+ M68*:*:R3V[5678]*:*)
+ test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+ 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
+ OS_REL=''
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4; exit; } ;;
+ NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+ OS_REL='.3'
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+ echo m68k-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ mc68030:UNIX_System_V:4.*:*)
+ echo m68k-atari-sysv4
+ exit ;;
+ TSUNAMI:LynxOS:2.*:*)
+ echo sparc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ rs6000:LynxOS:2.*:*)
+ echo rs6000-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
+ echo powerpc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ SM[BE]S:UNIX_SV:*:*)
+ echo mips-dde-sysv${UNAME_RELEASE}
+ exit ;;
+ RM*:ReliantUNIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ RM*:SINIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ *:SINIX-*:*:*)
+ if uname -p 2>/dev/null >/dev/null ; then
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ echo ${UNAME_MACHINE}-sni-sysv4
+ else
+ echo ns32k-sni-sysv
+ fi
+ exit ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit ;;
+ *:UNIX_System_V:4*:FTX*)
+ # From Gerald Hewes <hewes@openmarket.com>.
+ # How about differentiating between stratus architectures? -djm
+ echo hppa1.1-stratus-sysv4
+ exit ;;
+ *:*:*:FTX*)
+ # From seanf@swdc.stratus.com.
+ echo i860-stratus-sysv4
+ exit ;;
+ i*86:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo ${UNAME_MACHINE}-stratus-vos
+ exit ;;
+ *:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo hppa1.1-stratus-vos
+ exit ;;
+ mc68*:A/UX:*:*)
+ echo m68k-apple-aux${UNAME_RELEASE}
+ exit ;;
+ news*:NEWS-OS:6*:*)
+ echo mips-sony-newsos6
+ exit ;;
+ R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+ if [ -d /usr/nec ]; then
+ echo mips-nec-sysv${UNAME_RELEASE}
+ else
+ echo mips-unknown-sysv${UNAME_RELEASE}
+ fi
+ exit ;;
+ BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
+ echo powerpc-be-beos
+ exit ;;
+ BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
+ echo powerpc-apple-beos
+ exit ;;
+ BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
+ echo i586-pc-beos
+ exit ;;
+ BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
+ echo i586-pc-haiku
+ exit ;;
+ x86_64:Haiku:*:*)
+ echo x86_64-unknown-haiku
+ exit ;;
+ SX-4:SUPER-UX:*:*)
+ echo sx4-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-5:SUPER-UX:*:*)
+ echo sx5-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-6:SUPER-UX:*:*)
+ echo sx6-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-7:SUPER-UX:*:*)
+ echo sx7-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8:SUPER-UX:*:*)
+ echo sx8-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8R:SUPER-UX:*:*)
+ echo sx8r-nec-superux${UNAME_RELEASE}
+ exit ;;
+ Power*:Rhapsody:*:*)
+ echo powerpc-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Rhapsody:*:*)
+ echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Darwin:*:*)
+ UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+ eval $set_cc_for_build
+ if test "$UNAME_PROCESSOR" = unknown ; then
+ UNAME_PROCESSOR=powerpc
+ fi
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ case $UNAME_PROCESSOR in
+ i386) UNAME_PROCESSOR=x86_64 ;;
+ powerpc) UNAME_PROCESSOR=powerpc64 ;;
+ esac
+ fi
+ fi
+ echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+ exit ;;
+ *:procnto*:*:* | *:QNX:[0123456789]*:*)
+ UNAME_PROCESSOR=`uname -p`
+ if test "$UNAME_PROCESSOR" = "x86"; then
+ UNAME_PROCESSOR=i386
+ UNAME_MACHINE=pc
+ fi
+ echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+ exit ;;
+ *:QNX:*:4*)
+ echo i386-pc-qnx
+ exit ;;
+ NEO-?:NONSTOP_KERNEL:*:*)
+ echo neo-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSE-*:NONSTOP_KERNEL:*:*)
+ echo nse-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSR-?:NONSTOP_KERNEL:*:*)
+ echo nsr-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ *:NonStop-UX:*:*)
+ echo mips-compaq-nonstopux
+ exit ;;
+ BS2000:POSIX*:*:*)
+ echo bs2000-siemens-sysv
+ exit ;;
+ DS/*:UNIX_System_V:*:*)
+ echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+ exit ;;
+ *:Plan9:*:*)
+ # "uname -m" is not consistent, so use $cputype instead. 386
+ # is converted to i386 for consistency with other x86
+ # operating systems.
+ if test "$cputype" = "386"; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo ${UNAME_MACHINE}-unknown-plan9
+ exit ;;
+ *:TOPS-10:*:*)
+ echo pdp10-unknown-tops10
+ exit ;;
+ *:TENEX:*:*)
+ echo pdp10-unknown-tenex
+ exit ;;
+ KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+ echo pdp10-dec-tops20
+ exit ;;
+ XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+ echo pdp10-xkl-tops20
+ exit ;;
+ *:TOPS-20:*:*)
+ echo pdp10-unknown-tops20
+ exit ;;
+ *:ITS:*:*)
+ echo pdp10-unknown-its
+ exit ;;
+ SEI:*:*:SEIUX)
+ echo mips-sei-seiux${UNAME_RELEASE}
+ exit ;;
+ *:DragonFly:*:*)
+ echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+ exit ;;
+ *:*VMS:*:*)
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ case "${UNAME_MACHINE}" in
+ A*) echo alpha-dec-vms ; exit ;;
+ I*) echo ia64-dec-vms ; exit ;;
+ V*) echo vax-dec-vms ; exit ;;
+ esac ;;
+ *:XENIX:*:SysV)
+ echo i386-pc-xenix
+ exit ;;
+ i*86:skyos:*:*)
+ echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+ exit ;;
+ i*86:rdos:*:*)
+ echo ${UNAME_MACHINE}-pc-rdos
+ exit ;;
+ i*86:AROS:*:*)
+ echo ${UNAME_MACHINE}-pc-aros
+ exit ;;
+ x86_64:VMkernel:*:*)
+ echo ${UNAME_MACHINE}-unknown-esx
+ exit ;;
+esac
+
+eval $set_cc_for_build
+cat >$dummy.c <<EOF
+#ifdef _SEQUENT_
+# include <sys/types.h>
+# include <sys/utsname.h>
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+ /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
+ I don't know.... */
+ printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include <sys/param.h>
+ printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+ "4"
+#else
+ ""
+#endif
+ ); exit (0);
+#endif
+#endif
+
+#if defined (__arm) && defined (__acorn) && defined (__unix)
+ printf ("arm-acorn-riscix\n"); exit (0);
+#endif
+
+#if defined (hp300) && !defined (hpux)
+ printf ("m68k-hp-bsd\n"); exit (0);
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+ int version;
+ version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
+ if (version < 4)
+ printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+ else
+ printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+ exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+ printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+ printf ("ns32k-encore-mach\n"); exit (0);
+#else
+ printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+ printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+ printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+ printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+ struct utsname un;
+
+ uname(&un);
+
+ if (strncmp(un.version, "V2", 2) == 0) {
+ printf ("i386-sequent-ptx2\n"); exit (0);
+ }
+ if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
+ printf ("i386-sequent-ptx1\n"); exit (0);
+ }
+ printf ("i386-sequent-ptx\n"); exit (0);
+
+#endif
+
+#if defined (vax)
+# if !defined (ultrix)
+# include <sys/param.h>
+# if defined (BSD)
+# if BSD == 43
+ printf ("vax-dec-bsd4.3\n"); exit (0);
+# else
+# if BSD == 199006
+ printf ("vax-dec-bsd4.3reno\n"); exit (0);
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# endif
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# else
+ printf ("vax-dec-ultrix\n"); exit (0);
+# endif
+#endif
+
+#if defined (alliant) && defined (i860)
+ printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+ exit (1);
+}
+EOF
+
+$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+
+# Apollos put the system type in the environment.
+
+test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
+
+# Convex versions that predate uname can use getsysinfo(1)
+
+if [ -x /usr/convex/getsysinfo ]
+then
+ case `getsysinfo -f cpu_type` in
+ c1*)
+ echo c1-convex-bsd
+ exit ;;
+ c2*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ c34*)
+ echo c34-convex-bsd
+ exit ;;
+ c38*)
+ echo c38-convex-bsd
+ exit ;;
+ c4*)
+ echo c4-convex-bsd
+ exit ;;
+ esac
+fi
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+This script, last modified $timestamp, has failed to recognize
+the operating system you are using. It is advised that you
+download the most up to date version of the config scripts from
+
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+and
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+If the version you run ($0) is already up to date, please
+send the following data and any information you think might be
+pertinent to <config-patches@gnu.org> in order to provide the needed
+information to handle your system.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo = `(hostinfo) 2>/dev/null`
+/bin/universe = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
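For reference: config.guess, imported above, prints a canonical CPU-VENDOR-OS triplet for the machine it runs on, and the regenerated configure further below falls back to it when no --build alias is supplied. A minimal invocation sketch (the triplet shown is an illustrative value produced by the Darwin branch above, not guaranteed output on any particular host):

    sh ./config.guess
    # e.g. x86_64-apple-darwin13.0.0 -- the ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
    # form from the Darwin case; other systems are matched by their own branches,
    # and the compiled-probe fallback above handles machines uname cannot identify.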
diff --git a/config.sub b/config.sub
new file mode 100755
index 0000000..9633db7
--- /dev/null
+++ b/config.sub
@@ -0,0 +1,1791 @@
+#! /bin/sh
+# Configuration validation subroutine script.
+# Copyright 1992-2013 Free Software Foundation, Inc.
+
+timestamp='2013-08-10'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+
+
+# Please send patches with a ChangeLog entry to config-patches@gnu.org.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support. The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS
+ $0 [OPTION] ALIAS
+
+Canonicalize a configuration name.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright 1992-2013 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help"
+ exit 1 ;;
+
+ *local*)
+ # First pass through any local machine types.
+ echo $1
+ exit ;;
+
+ * )
+ break ;;
+ esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+ exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+ exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+ nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
+ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
+ knetbsd*-gnu* | netbsd*-gnu* | \
+ kopensolaris*-gnu* | \
+ storm-chaos* | os2-emx* | rtmk-nova*)
+ os=-$maybe_os
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+ ;;
+ android-linux)
+ os=-linux-android
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
+ ;;
+ *)
+ basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+ if [ $basic_machine != $1 ]
+ then os=`echo $1 | sed 's/.*-/-/'`
+ else os=; fi
+ ;;
+esac
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work. We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+ -sun*os*)
+ # Prevent following clause from handling this invalid input.
+ ;;
+ -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+ -apple | -axis | -knuth | -cray | -microblaze*)
+ os=
+ basic_machine=$1
+ ;;
+ -bluegene*)
+ os=-cnk
+ ;;
+ -sim | -cisco | -oki | -wec | -winbond)
+ os=
+ basic_machine=$1
+ ;;
+ -scout)
+ ;;
+ -wrs)
+ os=-vxworks
+ basic_machine=$1
+ ;;
+ -chorusos*)
+ os=-chorusos
+ basic_machine=$1
+ ;;
+ -chorusrdb)
+ os=-chorusrdb
+ basic_machine=$1
+ ;;
+ -hiux*)
+ os=-hiuxwe2
+ ;;
+ -sco6)
+ os=-sco5v6
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5)
+ os=-sco3.2v5
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco4)
+ os=-sco3.2v4
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2.[4-9]*)
+ os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2v[4-9]*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5v6*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco*)
+ os=-sco3.2v2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -udk*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -isc)
+ os=-isc2.2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -clix*)
+ basic_machine=clipper-intergraph
+ ;;
+ -isc*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -lynx*178)
+ os=-lynxos178
+ ;;
+ -lynx*5)
+ os=-lynxos5
+ ;;
+ -lynx*)
+ os=-lynxos
+ ;;
+ -ptx*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+ ;;
+ -windowsnt*)
+ os=`echo $os | sed -e 's/windowsnt/winnt/'`
+ ;;
+ -psos*)
+ os=-psos
+ ;;
+ -mint | -mint[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+ # Recognize the basic CPU types without company name.
+ # Some are omitted here because they have special meanings below.
+ 1750a | 580 \
+ | a29k \
+ | aarch64 | aarch64_be \
+ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
+ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
+ | am33_2.0 \
+ | arc | arceb \
+ | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \
+ | avr | avr32 \
+ | be32 | be64 \
+ | bfin \
+ | c4x | c8051 | clipper \
+ | d10v | d30v | dlx | dsp16xx \
+ | epiphany \
+ | fido | fr30 | frv \
+ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+ | hexagon \
+ | i370 | i860 | i960 | ia64 \
+ | ip2k | iq2000 \
+ | le32 | le64 \
+ | lm32 \
+ | m32c | m32r | m32rle | m68000 | m68k | m88k \
+ | maxq | mb | microblaze | microblazeel | mcore | mep | metag \
+ | mips | mipsbe | mipseb | mipsel | mipsle \
+ | mips16 \
+ | mips64 | mips64el \
+ | mips64octeon | mips64octeonel \
+ | mips64orion | mips64orionel \
+ | mips64r5900 | mips64r5900el \
+ | mips64vr | mips64vrel \
+ | mips64vr4100 | mips64vr4100el \
+ | mips64vr4300 | mips64vr4300el \
+ | mips64vr5000 | mips64vr5000el \
+ | mips64vr5900 | mips64vr5900el \
+ | mipsisa32 | mipsisa32el \
+ | mipsisa32r2 | mipsisa32r2el \
+ | mipsisa64 | mipsisa64el \
+ | mipsisa64r2 | mipsisa64r2el \
+ | mipsisa64sb1 | mipsisa64sb1el \
+ | mipsisa64sr71k | mipsisa64sr71kel \
+ | mipsr5900 | mipsr5900el \
+ | mipstx39 | mipstx39el \
+ | mn10200 | mn10300 \
+ | moxie \
+ | mt \
+ | msp430 \
+ | nds32 | nds32le | nds32be \
+ | nios | nios2 | nios2eb | nios2el \
+ | ns16k | ns32k \
+ | open8 \
+ | or1k | or32 \
+ | pdp10 | pdp11 | pj | pjl \
+ | powerpc | powerpc64 | powerpc64le | powerpcle \
+ | pyramid \
+ | rl78 | rx \
+ | score \
+ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+ | sh64 | sh64le \
+ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
+ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
+ | spu \
+ | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
+ | ubicom32 \
+ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
+ | we32k \
+ | x86 | xc16x | xstormy16 | xtensa \
+ | z8k | z80)
+ basic_machine=$basic_machine-unknown
+ ;;
+ c54x)
+ basic_machine=tic54x-unknown
+ ;;
+ c55x)
+ basic_machine=tic55x-unknown
+ ;;
+ c6x)
+ basic_machine=tic6x-unknown
+ ;;
+ m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip)
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+ ;;
+ ms1)
+ basic_machine=mt-unknown
+ ;;
+
+ strongarm | thumb | xscale)
+ basic_machine=arm-unknown
+ ;;
+ xgate)
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ xscaleeb)
+ basic_machine=armeb-unknown
+ ;;
+
+ xscaleel)
+ basic_machine=armel-unknown
+ ;;
+
+ # We use `pc' rather than `unknown'
+ # because (1) that's what they normally are, and
+ # (2) the word "unknown" tends to confuse beginning users.
+ i*86 | x86_64)
+ basic_machine=$basic_machine-pc
+ ;;
+ # Object if more than one company name word.
+ *-*-*)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+ # Recognize the basic CPU types with company name.
+ 580-* \
+ | a29k-* \
+ | aarch64-* | aarch64_be-* \
+ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
+ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
+ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \
+ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
+ | avr-* | avr32-* \
+ | be32-* | be64-* \
+ | bfin-* | bs2000-* \
+ | c[123]* | c30-* | [cjt]90-* | c4x-* \
+ | c8051-* | clipper-* | craynv-* | cydra-* \
+ | d10v-* | d30v-* | dlx-* \
+ | elxsi-* \
+ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
+ | h8300-* | h8500-* \
+ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+ | hexagon-* \
+ | i*86-* | i860-* | i960-* | ia64-* \
+ | ip2k-* | iq2000-* \
+ | le32-* | le64-* \
+ | lm32-* \
+ | m32c-* | m32r-* | m32rle-* \
+ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
+ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
+ | microblaze-* | microblazeel-* \
+ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
+ | mips16-* \
+ | mips64-* | mips64el-* \
+ | mips64octeon-* | mips64octeonel-* \
+ | mips64orion-* | mips64orionel-* \
+ | mips64r5900-* | mips64r5900el-* \
+ | mips64vr-* | mips64vrel-* \
+ | mips64vr4100-* | mips64vr4100el-* \
+ | mips64vr4300-* | mips64vr4300el-* \
+ | mips64vr5000-* | mips64vr5000el-* \
+ | mips64vr5900-* | mips64vr5900el-* \
+ | mipsisa32-* | mipsisa32el-* \
+ | mipsisa32r2-* | mipsisa32r2el-* \
+ | mipsisa64-* | mipsisa64el-* \
+ | mipsisa64r2-* | mipsisa64r2el-* \
+ | mipsisa64sb1-* | mipsisa64sb1el-* \
+ | mipsisa64sr71k-* | mipsisa64sr71kel-* \
+ | mipsr5900-* | mipsr5900el-* \
+ | mipstx39-* | mipstx39el-* \
+ | mmix-* \
+ | mt-* \
+ | msp430-* \
+ | nds32-* | nds32le-* | nds32be-* \
+ | nios-* | nios2-* | nios2eb-* | nios2el-* \
+ | none-* | np1-* | ns16k-* | ns32k-* \
+ | open8-* \
+ | orion-* \
+ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
+ | pyramid-* \
+ | rl78-* | romp-* | rs6000-* | rx-* \
+ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
+ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
+ | sparclite-* \
+ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \
+ | tahoe-* \
+ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+ | tile*-* \
+ | tron-* \
+ | ubicom32-* \
+ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
+ | vax-* \
+ | we32k-* \
+ | x86-* | x86_64-* | xc16x-* | xps100-* \
+ | xstormy16-* | xtensa*-* \
+ | ymp-* \
+ | z8k-* | z80-*)
+ ;;
+ # Recognize the basic CPU types without company name, with glob match.
+ xtensa*)
+ basic_machine=$basic_machine-unknown
+ ;;
+ # Recognize the various machine names and aliases which stand
+ # for a CPU type and a company and sometimes even an OS.
+ 386bsd)
+ basic_machine=i386-unknown
+ os=-bsd
+ ;;
+ 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+ basic_machine=m68000-att
+ ;;
+ 3b*)
+ basic_machine=we32k-att
+ ;;
+ a29khif)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ abacus)
+ basic_machine=abacus-unknown
+ ;;
+ adobe68k)
+ basic_machine=m68010-adobe
+ os=-scout
+ ;;
+ alliant | fx80)
+ basic_machine=fx80-alliant
+ ;;
+ altos | altos3068)
+ basic_machine=m68k-altos
+ ;;
+ am29k)
+ basic_machine=a29k-none
+ os=-bsd
+ ;;
+ amd64)
+ basic_machine=x86_64-pc
+ ;;
+ amd64-*)
+ basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ amdahl)
+ basic_machine=580-amdahl
+ os=-sysv
+ ;;
+ amiga | amiga-*)
+ basic_machine=m68k-unknown
+ ;;
+ amigaos | amigados)
+ basic_machine=m68k-unknown
+ os=-amigaos
+ ;;
+ amigaunix | amix)
+ basic_machine=m68k-unknown
+ os=-sysv4
+ ;;
+ apollo68)
+ basic_machine=m68k-apollo
+ os=-sysv
+ ;;
+ apollo68bsd)
+ basic_machine=m68k-apollo
+ os=-bsd
+ ;;
+ aros)
+ basic_machine=i386-pc
+ os=-aros
+ ;;
+ aux)
+ basic_machine=m68k-apple
+ os=-aux
+ ;;
+ balance)
+ basic_machine=ns32k-sequent
+ os=-dynix
+ ;;
+ blackfin)
+ basic_machine=bfin-unknown
+ os=-linux
+ ;;
+ blackfin-*)
+ basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ bluegene*)
+ basic_machine=powerpc-ibm
+ os=-cnk
+ ;;
+ c54x-*)
+ basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c55x-*)
+ basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c6x-*)
+ basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c90)
+ basic_machine=c90-cray
+ os=-unicos
+ ;;
+ cegcc)
+ basic_machine=arm-unknown
+ os=-cegcc
+ ;;
+ convex-c1)
+ basic_machine=c1-convex
+ os=-bsd
+ ;;
+ convex-c2)
+ basic_machine=c2-convex
+ os=-bsd
+ ;;
+ convex-c32)
+ basic_machine=c32-convex
+ os=-bsd
+ ;;
+ convex-c34)
+ basic_machine=c34-convex
+ os=-bsd
+ ;;
+ convex-c38)
+ basic_machine=c38-convex
+ os=-bsd
+ ;;
+ cray | j90)
+ basic_machine=j90-cray
+ os=-unicos
+ ;;
+ craynv)
+ basic_machine=craynv-cray
+ os=-unicosmp
+ ;;
+ cr16 | cr16-*)
+ basic_machine=cr16-unknown
+ os=-elf
+ ;;
+ crds | unos)
+ basic_machine=m68k-crds
+ ;;
+ crisv32 | crisv32-* | etraxfs*)
+ basic_machine=crisv32-axis
+ ;;
+ cris | cris-* | etrax*)
+ basic_machine=cris-axis
+ ;;
+ crx)
+ basic_machine=crx-unknown
+ os=-elf
+ ;;
+ da30 | da30-*)
+ basic_machine=m68k-da30
+ ;;
+ decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+ basic_machine=mips-dec
+ ;;
+ decsystem10* | dec10*)
+ basic_machine=pdp10-dec
+ os=-tops10
+ ;;
+ decsystem20* | dec20*)
+ basic_machine=pdp10-dec
+ os=-tops20
+ ;;
+ delta | 3300 | motorola-3300 | motorola-delta \
+ | 3300-motorola | delta-motorola)
+ basic_machine=m68k-motorola
+ ;;
+ delta88)
+ basic_machine=m88k-motorola
+ os=-sysv3
+ ;;
+ dicos)
+ basic_machine=i686-pc
+ os=-dicos
+ ;;
+ djgpp)
+ basic_machine=i586-pc
+ os=-msdosdjgpp
+ ;;
+ dpx20 | dpx20-*)
+ basic_machine=rs6000-bull
+ os=-bosx
+ ;;
+ dpx2* | dpx2*-bull)
+ basic_machine=m68k-bull
+ os=-sysv3
+ ;;
+ ebmon29k)
+ basic_machine=a29k-amd
+ os=-ebmon
+ ;;
+ elxsi)
+ basic_machine=elxsi-elxsi
+ os=-bsd
+ ;;
+ encore | umax | mmax)
+ basic_machine=ns32k-encore
+ ;;
+ es1800 | OSE68k | ose68k | ose | OSE)
+ basic_machine=m68k-ericsson
+ os=-ose
+ ;;
+ fx2800)
+ basic_machine=i860-alliant
+ ;;
+ genix)
+ basic_machine=ns32k-ns
+ ;;
+ gmicro)
+ basic_machine=tron-gmicro
+ os=-sysv
+ ;;
+ go32)
+ basic_machine=i386-pc
+ os=-go32
+ ;;
+ h3050r* | hiux*)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ h8300hms)
+ basic_machine=h8300-hitachi
+ os=-hms
+ ;;
+ h8300xray)
+ basic_machine=h8300-hitachi
+ os=-xray
+ ;;
+ h8500hms)
+ basic_machine=h8500-hitachi
+ os=-hms
+ ;;
+ harris)
+ basic_machine=m88k-harris
+ os=-sysv3
+ ;;
+ hp300-*)
+ basic_machine=m68k-hp
+ ;;
+ hp300bsd)
+ basic_machine=m68k-hp
+ os=-bsd
+ ;;
+ hp300hpux)
+ basic_machine=m68k-hp
+ os=-hpux
+ ;;
+ hp3k9[0-9][0-9] | hp9[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k2[0-9][0-9] | hp9k31[0-9])
+ basic_machine=m68000-hp
+ ;;
+ hp9k3[2-9][0-9])
+ basic_machine=m68k-hp
+ ;;
+ hp9k6[0-9][0-9] | hp6[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k7[0-79][0-9] | hp7[0-79][0-9])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k78[0-9] | hp78[0-9])
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][13679] | hp8[0-9][13679])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][0-9] | hp8[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hppa-next)
+ os=-nextstep3
+ ;;
+ hppaosf)
+ basic_machine=hppa1.1-hp
+ os=-osf
+ ;;
+ hppro)
+ basic_machine=hppa1.1-hp
+ os=-proelf
+ ;;
+ i370-ibm* | ibm*)
+ basic_machine=i370-ibm
+ ;;
+ i*86v32)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv32
+ ;;
+ i*86v4*)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv4
+ ;;
+ i*86v)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv
+ ;;
+ i*86sol2)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-solaris2
+ ;;
+ i386mach)
+ basic_machine=i386-mach
+ os=-mach
+ ;;
+ i386-vsta | vsta)
+ basic_machine=i386-unknown
+ os=-vsta
+ ;;
+ iris | iris4d)
+ basic_machine=mips-sgi
+ case $os in
+ -irix*)
+ ;;
+ *)
+ os=-irix4
+ ;;
+ esac
+ ;;
+ isi68 | isi)
+ basic_machine=m68k-isi
+ os=-sysv
+ ;;
+ m68knommu)
+ basic_machine=m68k-unknown
+ os=-linux
+ ;;
+ m68knommu-*)
+ basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ m88k-omron*)
+ basic_machine=m88k-omron
+ ;;
+ magnum | m3230)
+ basic_machine=mips-mips
+ os=-sysv
+ ;;
+ merlin)
+ basic_machine=ns32k-utek
+ os=-sysv
+ ;;
+ microblaze*)
+ basic_machine=microblaze-xilinx
+ ;;
+ mingw64)
+ basic_machine=x86_64-pc
+ os=-mingw64
+ ;;
+ mingw32)
+ basic_machine=i686-pc
+ os=-mingw32
+ ;;
+ mingw32ce)
+ basic_machine=arm-unknown
+ os=-mingw32ce
+ ;;
+ miniframe)
+ basic_machine=m68000-convergent
+ ;;
+ *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+ mips3*-*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+ ;;
+ mips3*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+ ;;
+ monitor)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ morphos)
+ basic_machine=powerpc-unknown
+ os=-morphos
+ ;;
+ msdos)
+ basic_machine=i386-pc
+ os=-msdos
+ ;;
+ ms1-*)
+ basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
+ ;;
+ msys)
+ basic_machine=i686-pc
+ os=-msys
+ ;;
+ mvs)
+ basic_machine=i370-ibm
+ os=-mvs
+ ;;
+ nacl)
+ basic_machine=le32-unknown
+ os=-nacl
+ ;;
+ ncr3000)
+ basic_machine=i486-ncr
+ os=-sysv4
+ ;;
+ netbsd386)
+ basic_machine=i386-unknown
+ os=-netbsd
+ ;;
+ netwinder)
+ basic_machine=armv4l-rebel
+ os=-linux
+ ;;
+ news | news700 | news800 | news900)
+ basic_machine=m68k-sony
+ os=-newsos
+ ;;
+ news1000)
+ basic_machine=m68030-sony
+ os=-newsos
+ ;;
+ news-3600 | risc-news)
+ basic_machine=mips-sony
+ os=-newsos
+ ;;
+ necv70)
+ basic_machine=v70-nec
+ os=-sysv
+ ;;
+ next | m*-next )
+ basic_machine=m68k-next
+ case $os in
+ -nextstep* )
+ ;;
+ -ns2*)
+ os=-nextstep2
+ ;;
+ *)
+ os=-nextstep3
+ ;;
+ esac
+ ;;
+ nh3000)
+ basic_machine=m68k-harris
+ os=-cxux
+ ;;
+ nh[45]000)
+ basic_machine=m88k-harris
+ os=-cxux
+ ;;
+ nindy960)
+ basic_machine=i960-intel
+ os=-nindy
+ ;;
+ mon960)
+ basic_machine=i960-intel
+ os=-mon960
+ ;;
+ nonstopux)
+ basic_machine=mips-compaq
+ os=-nonstopux
+ ;;
+ np1)
+ basic_machine=np1-gould
+ ;;
+ neo-tandem)
+ basic_machine=neo-tandem
+ ;;
+ nse-tandem)
+ basic_machine=nse-tandem
+ ;;
+ nsr-tandem)
+ basic_machine=nsr-tandem
+ ;;
+ op50n-* | op60c-*)
+ basic_machine=hppa1.1-oki
+ os=-proelf
+ ;;
+ openrisc | openrisc-*)
+ basic_machine=or32-unknown
+ ;;
+ os400)
+ basic_machine=powerpc-ibm
+ os=-os400
+ ;;
+ OSE68000 | ose68000)
+ basic_machine=m68000-ericsson
+ os=-ose
+ ;;
+ os68k)
+ basic_machine=m68k-none
+ os=-os68k
+ ;;
+ pa-hitachi)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ paragon)
+ basic_machine=i860-intel
+ os=-osf
+ ;;
+ parisc)
+ basic_machine=hppa-unknown
+ os=-linux
+ ;;
+ parisc-*)
+ basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ pbd)
+ basic_machine=sparc-tti
+ ;;
+ pbb)
+ basic_machine=m68k-tti
+ ;;
+ pc532 | pc532-*)
+ basic_machine=ns32k-pc532
+ ;;
+ pc98)
+ basic_machine=i386-pc
+ ;;
+ pc98-*)
+ basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentium | p5 | k5 | k6 | nexgen | viac3)
+ basic_machine=i586-pc
+ ;;
+ pentiumpro | p6 | 6x86 | athlon | athlon_*)
+ basic_machine=i686-pc
+ ;;
+ pentiumii | pentium2 | pentiumiii | pentium3)
+ basic_machine=i686-pc
+ ;;
+ pentium4)
+ basic_machine=i786-pc
+ ;;
+ pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+ basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumpro-* | p6-* | 6x86-* | athlon-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentium4-*)
+ basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pn)
+ basic_machine=pn-gould
+ ;;
+ power) basic_machine=power-ibm
+ ;;
+ ppc | ppcbe) basic_machine=powerpc-unknown
+ ;;
+ ppc-* | ppcbe-*)
+ basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppcle | powerpclittle | ppc-le | powerpc-little)
+ basic_machine=powerpcle-unknown
+ ;;
+ ppcle-* | powerpclittle-*)
+ basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64) basic_machine=powerpc64-unknown
+ ;;
+ ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64le | powerpc64little | ppc64-le | powerpc64-little)
+ basic_machine=powerpc64le-unknown
+ ;;
+ ppc64le-* | powerpc64little-*)
+ basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ps2)
+ basic_machine=i386-ibm
+ ;;
+ pw32)
+ basic_machine=i586-unknown
+ os=-pw32
+ ;;
+ rdos | rdos64)
+ basic_machine=x86_64-pc
+ os=-rdos
+ ;;
+ rdos32)
+ basic_machine=i386-pc
+ os=-rdos
+ ;;
+ rom68k)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ rm[46]00)
+ basic_machine=mips-siemens
+ ;;
+ rtpc | rtpc-*)
+ basic_machine=romp-ibm
+ ;;
+ s390 | s390-*)
+ basic_machine=s390-ibm
+ ;;
+ s390x | s390x-*)
+ basic_machine=s390x-ibm
+ ;;
+ sa29200)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ sb1)
+ basic_machine=mipsisa64sb1-unknown
+ ;;
+ sb1el)
+ basic_machine=mipsisa64sb1el-unknown
+ ;;
+ sde)
+ basic_machine=mipsisa32-sde
+ os=-elf
+ ;;
+ sei)
+ basic_machine=mips-sei
+ os=-seiux
+ ;;
+ sequent)
+ basic_machine=i386-sequent
+ ;;
+ sh)
+ basic_machine=sh-hitachi
+ os=-hms
+ ;;
+ sh5el)
+ basic_machine=sh5le-unknown
+ ;;
+ sh64)
+ basic_machine=sh64-unknown
+ ;;
+ sparclite-wrs | simso-wrs)
+ basic_machine=sparclite-wrs
+ os=-vxworks
+ ;;
+ sps7)
+ basic_machine=m68k-bull
+ os=-sysv2
+ ;;
+ spur)
+ basic_machine=spur-unknown
+ ;;
+ st2000)
+ basic_machine=m68k-tandem
+ ;;
+ stratus)
+ basic_machine=i860-stratus
+ os=-sysv4
+ ;;
+ strongarm-* | thumb-*)
+ basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ sun2)
+ basic_machine=m68000-sun
+ ;;
+ sun2os3)
+ basic_machine=m68000-sun
+ os=-sunos3
+ ;;
+ sun2os4)
+ basic_machine=m68000-sun
+ os=-sunos4
+ ;;
+ sun3os3)
+ basic_machine=m68k-sun
+ os=-sunos3
+ ;;
+ sun3os4)
+ basic_machine=m68k-sun
+ os=-sunos4
+ ;;
+ sun4os3)
+ basic_machine=sparc-sun
+ os=-sunos3
+ ;;
+ sun4os4)
+ basic_machine=sparc-sun
+ os=-sunos4
+ ;;
+ sun4sol2)
+ basic_machine=sparc-sun
+ os=-solaris2
+ ;;
+ sun3 | sun3-*)
+ basic_machine=m68k-sun
+ ;;
+ sun4)
+ basic_machine=sparc-sun
+ ;;
+ sun386 | sun386i | roadrunner)
+ basic_machine=i386-sun
+ ;;
+ sv1)
+ basic_machine=sv1-cray
+ os=-unicos
+ ;;
+ symmetry)
+ basic_machine=i386-sequent
+ os=-dynix
+ ;;
+ t3e)
+ basic_machine=alphaev5-cray
+ os=-unicos
+ ;;
+ t90)
+ basic_machine=t90-cray
+ os=-unicos
+ ;;
+ tile*)
+ basic_machine=$basic_machine-unknown
+ os=-linux-gnu
+ ;;
+ tx39)
+ basic_machine=mipstx39-unknown
+ ;;
+ tx39el)
+ basic_machine=mipstx39el-unknown
+ ;;
+ toad1)
+ basic_machine=pdp10-xkl
+ os=-tops20
+ ;;
+ tower | tower-32)
+ basic_machine=m68k-ncr
+ ;;
+ tpf)
+ basic_machine=s390x-ibm
+ os=-tpf
+ ;;
+ udi29k)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ ultra3)
+ basic_machine=a29k-nyu
+ os=-sym1
+ ;;
+ v810 | necv810)
+ basic_machine=v810-nec
+ os=-none
+ ;;
+ vaxv)
+ basic_machine=vax-dec
+ os=-sysv
+ ;;
+ vms)
+ basic_machine=vax-dec
+ os=-vms
+ ;;
+ vpp*|vx|vx-*)
+ basic_machine=f301-fujitsu
+ ;;
+ vxworks960)
+ basic_machine=i960-wrs
+ os=-vxworks
+ ;;
+ vxworks68)
+ basic_machine=m68k-wrs
+ os=-vxworks
+ ;;
+ vxworks29k)
+ basic_machine=a29k-wrs
+ os=-vxworks
+ ;;
+ w65*)
+ basic_machine=w65-wdc
+ os=-none
+ ;;
+ w89k-*)
+ basic_machine=hppa1.1-winbond
+ os=-proelf
+ ;;
+ xbox)
+ basic_machine=i686-pc
+ os=-mingw32
+ ;;
+ xps | xps100)
+ basic_machine=xps100-honeywell
+ ;;
+ xscale-* | xscalee[bl]-*)
+ basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
+ ;;
+ ymp)
+ basic_machine=ymp-cray
+ os=-unicos
+ ;;
+ z8k-*-coff)
+ basic_machine=z8k-unknown
+ os=-sim
+ ;;
+ z80-*-coff)
+ basic_machine=z80-unknown
+ os=-sim
+ ;;
+ none)
+ basic_machine=none-none
+ os=-none
+ ;;
+
+# Here we handle the default manufacturer of certain CPU types. It is in
+# some cases the only manufacturer, in others, it is the most popular.
+ w89k)
+ basic_machine=hppa1.1-winbond
+ ;;
+ op50n)
+ basic_machine=hppa1.1-oki
+ ;;
+ op60c)
+ basic_machine=hppa1.1-oki
+ ;;
+ romp)
+ basic_machine=romp-ibm
+ ;;
+ mmix)
+ basic_machine=mmix-knuth
+ ;;
+ rs6000)
+ basic_machine=rs6000-ibm
+ ;;
+ vax)
+ basic_machine=vax-dec
+ ;;
+ pdp10)
+ # there are many clones, so DEC is not a safe bet
+ basic_machine=pdp10-unknown
+ ;;
+ pdp11)
+ basic_machine=pdp11-dec
+ ;;
+ we32k)
+ basic_machine=we32k-att
+ ;;
+ sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
+ basic_machine=sh-unknown
+ ;;
+ sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
+ basic_machine=sparc-sun
+ ;;
+ cydra)
+ basic_machine=cydra-cydrome
+ ;;
+ orion)
+ basic_machine=orion-highlevel
+ ;;
+ orion105)
+ basic_machine=clipper-highlevel
+ ;;
+ mac | mpw | mac-mpw)
+ basic_machine=m68k-apple
+ ;;
+ pmac | pmac-mpw)
+ basic_machine=powerpc-apple
+ ;;
+ *-unknown)
+ # Make sure to match an already-canonicalized machine name.
+ ;;
+ *)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+ *-digital*)
+ basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+ ;;
+ *-commodore*)
+ basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+ ;;
+ *)
+ ;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+ # First match some system type aliases
+ # that might get confused with valid system types.
+ # -solaris* is a basic system type, with this one exception.
+ -auroraux)
+ os=-auroraux
+ ;;
+ -solaris1 | -solaris1.*)
+ os=`echo $os | sed -e 's|solaris1|sunos4|'`
+ ;;
+ -solaris)
+ os=-solaris2
+ ;;
+ -svr4*)
+ os=-sysv4
+ ;;
+ -unixware*)
+ os=-sysv4.2uw
+ ;;
+ -gnu/linux*)
+ os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+ ;;
+ # First accept the basic system types.
+# The portable systems come first.
+ # Each alternative MUST END IN A *, to match a version number.
+ # -sysv* is not here because it comes later, after sysvr4.
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
+ | -sym* | -kopensolaris* | -plan9* \
+ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+ | -aos* | -aros* \
+ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
+ | -bitrig* | -openbsd* | -solidbsd* \
+ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
+ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+ | -chorusos* | -chorusrdb* | -cegcc* \
+ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
+ | -linux-newlib* | -linux-musl* | -linux-uclibc* \
+ | -uxpv* | -beos* | -mpeix* | -udk* \
+ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
+ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
+ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
+ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
+ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
+ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
+ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*)
+ # Remember, each alternative MUST END IN *, to match a version number.
+ ;;
+ -qnx*)
+ case $basic_machine in
+ x86-* | i*86-*)
+ ;;
+ *)
+ os=-nto$os
+ ;;
+ esac
+ ;;
+ -nto-qnx*)
+ ;;
+ -nto*)
+ os=`echo $os | sed -e 's|nto|nto-qnx|'`
+ ;;
+ -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+ | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
+ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+ ;;
+ -mac*)
+ os=`echo $os | sed -e 's|mac|macos|'`
+ ;;
+ -linux-dietlibc)
+ os=-linux-dietlibc
+ ;;
+ -linux*)
+ os=`echo $os | sed -e 's|linux|linux-gnu|'`
+ ;;
+ -sunos5*)
+ os=`echo $os | sed -e 's|sunos5|solaris2|'`
+ ;;
+ -sunos6*)
+ os=`echo $os | sed -e 's|sunos6|solaris3|'`
+ ;;
+ -opened*)
+ os=-openedition
+ ;;
+ -os400*)
+ os=-os400
+ ;;
+ -wince*)
+ os=-wince
+ ;;
+ -osfrose*)
+ os=-osfrose
+ ;;
+ -osf*)
+ os=-osf
+ ;;
+ -utek*)
+ os=-bsd
+ ;;
+ -dynix*)
+ os=-bsd
+ ;;
+ -acis*)
+ os=-aos
+ ;;
+ -atheos*)
+ os=-atheos
+ ;;
+ -syllable*)
+ os=-syllable
+ ;;
+ -386bsd)
+ os=-bsd
+ ;;
+ -ctix* | -uts*)
+ os=-sysv
+ ;;
+ -nova*)
+ os=-rtmk-nova
+ ;;
+ -ns2 )
+ os=-nextstep2
+ ;;
+ -nsk*)
+ os=-nsk
+ ;;
+ # Preserve the version number of sinix5.
+ -sinix5.*)
+ os=`echo $os | sed -e 's|sinix|sysv|'`
+ ;;
+ -sinix*)
+ os=-sysv4
+ ;;
+ -tpf*)
+ os=-tpf
+ ;;
+ -triton*)
+ os=-sysv3
+ ;;
+ -oss*)
+ os=-sysv3
+ ;;
+ -svr4)
+ os=-sysv4
+ ;;
+ -svr3)
+ os=-sysv3
+ ;;
+ -sysvr4)
+ os=-sysv4
+ ;;
+ # This must come after -sysvr4.
+ -sysv*)
+ ;;
+ -ose*)
+ os=-ose
+ ;;
+ -es1800*)
+ os=-ose
+ ;;
+ -xenix)
+ os=-xenix
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ os=-mint
+ ;;
+ -aros*)
+ os=-aros
+ ;;
+ -zvmoe)
+ os=-zvmoe
+ ;;
+ -dicos*)
+ os=-dicos
+ ;;
+ -nacl*)
+ ;;
+ -none)
+ ;;
+ *)
+ # Get rid of the `-' at the beginning of $os.
+ os=`echo $os | sed 's/[^-]*-//'`
+ echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine, or, put another way, the most popular os provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system. Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+ score-*)
+ os=-elf
+ ;;
+ spu-*)
+ os=-elf
+ ;;
+ *-acorn)
+ os=-riscix1.2
+ ;;
+ arm*-rebel)
+ os=-linux
+ ;;
+ arm*-semi)
+ os=-aout
+ ;;
+ c4x-* | tic4x-*)
+ os=-coff
+ ;;
+ c8051-*)
+ os=-elf
+ ;;
+ hexagon-*)
+ os=-elf
+ ;;
+ tic54x-*)
+ os=-coff
+ ;;
+ tic55x-*)
+ os=-coff
+ ;;
+ tic6x-*)
+ os=-coff
+ ;;
+ # This must come before the *-dec entry.
+ pdp10-*)
+ os=-tops20
+ ;;
+ pdp11-*)
+ os=-none
+ ;;
+ *-dec | vax-*)
+ os=-ultrix4.2
+ ;;
+ m68*-apollo)
+ os=-domain
+ ;;
+ i386-sun)
+ os=-sunos4.0.2
+ ;;
+ m68000-sun)
+ os=-sunos3
+ ;;
+ m68*-cisco)
+ os=-aout
+ ;;
+ mep-*)
+ os=-elf
+ ;;
+ mips*-cisco)
+ os=-elf
+ ;;
+ mips*-*)
+ os=-elf
+ ;;
+ or1k-*)
+ os=-elf
+ ;;
+ or32-*)
+ os=-coff
+ ;;
+ *-tti) # must be before sparc entry or we get the wrong os.
+ os=-sysv3
+ ;;
+ sparc-* | *-sun)
+ os=-sunos4.1.1
+ ;;
+ *-be)
+ os=-beos
+ ;;
+ *-haiku)
+ os=-haiku
+ ;;
+ *-ibm)
+ os=-aix
+ ;;
+ *-knuth)
+ os=-mmixware
+ ;;
+ *-wec)
+ os=-proelf
+ ;;
+ *-winbond)
+ os=-proelf
+ ;;
+ *-oki)
+ os=-proelf
+ ;;
+ *-hp)
+ os=-hpux
+ ;;
+ *-hitachi)
+ os=-hiux
+ ;;
+ i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+ os=-sysv
+ ;;
+ *-cbm)
+ os=-amigaos
+ ;;
+ *-dg)
+ os=-dgux
+ ;;
+ *-dolphin)
+ os=-sysv3
+ ;;
+ m68k-ccur)
+ os=-rtu
+ ;;
+ m88k-omron*)
+ os=-luna
+ ;;
+ *-next )
+ os=-nextstep
+ ;;
+ *-sequent)
+ os=-ptx
+ ;;
+ *-crds)
+ os=-unos
+ ;;
+ *-ns)
+ os=-genix
+ ;;
+ i370-*)
+ os=-mvs
+ ;;
+ *-next)
+ os=-nextstep3
+ ;;
+ *-gould)
+ os=-sysv
+ ;;
+ *-highlevel)
+ os=-bsd
+ ;;
+ *-encore)
+ os=-bsd
+ ;;
+ *-sgi)
+ os=-irix
+ ;;
+ *-siemens)
+ os=-sysv4
+ ;;
+ *-masscomp)
+ os=-rtu
+ ;;
+ f30[01]-fujitsu | f700-fujitsu)
+ os=-uxpv
+ ;;
+ *-rom68k)
+ os=-coff
+ ;;
+ *-*bug)
+ os=-coff
+ ;;
+ *-apple)
+ os=-macos
+ ;;
+ *-atari*)
+ os=-mint
+ ;;
+ *)
+ os=-none
+ ;;
+esac
+fi
+
+# Here we handle the case where we know the os, and the CPU type, but not the
+# manufacturer. We pick the logical manufacturer.
+vendor=unknown
+case $basic_machine in
+ *-unknown)
+ case $os in
+ -riscix*)
+ vendor=acorn
+ ;;
+ -sunos*)
+ vendor=sun
+ ;;
+ -cnk*|-aix*)
+ vendor=ibm
+ ;;
+ -beos*)
+ vendor=be
+ ;;
+ -hpux*)
+ vendor=hp
+ ;;
+ -mpeix*)
+ vendor=hp
+ ;;
+ -hiux*)
+ vendor=hitachi
+ ;;
+ -unos*)
+ vendor=crds
+ ;;
+ -dgux*)
+ vendor=dg
+ ;;
+ -luna*)
+ vendor=omron
+ ;;
+ -genix*)
+ vendor=ns
+ ;;
+ -mvs* | -opened*)
+ vendor=ibm
+ ;;
+ -os400*)
+ vendor=ibm
+ ;;
+ -ptx*)
+ vendor=sequent
+ ;;
+ -tpf*)
+ vendor=ibm
+ ;;
+ -vxsim* | -vxworks* | -windiss*)
+ vendor=wrs
+ ;;
+ -aux*)
+ vendor=apple
+ ;;
+ -hms*)
+ vendor=hitachi
+ ;;
+ -mpw* | -macos*)
+ vendor=apple
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ vendor=atari
+ ;;
+ -vos*)
+ vendor=stratus
+ ;;
+ esac
+ basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+ ;;
+esac
+
+echo $basic_machine$os
+exit
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
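For reference: config.sub, imported above, canonicalizes a user-supplied machine alias into the CPU-MANUFACTURER-OPERATING_SYSTEM form described in its header comment; the regenerated configure below runs it on the --build/--host values and also invokes it with the literal argument sun4 as a sanity check. A minimal usage sketch, with outputs traced through the case tables above (illustrative, not exhaustive):

    sh ./config.sub sun4
    # -> sparc-sun-sunos4.1.1   (sun4 maps to sparc-sun; *-sun then defaults to -sunos4.1.1)
    sh ./config.sub amd64-linux
    # -> x86_64-pc-linux-gnu    (amd64 maps to x86_64-pc; -linux is rewritten to -linux-gnu)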
diff --git a/configure b/configure
index 3948080..2c1c82d 100755
--- a/configure
+++ b/configure
@@ -1,14 +1,12 @@
#! /bin/sh
-# From configure.in Revision.
+# From configure.ac Revision.
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.67 for python 2.7.
+# Generated by GNU Autoconf 2.69 for python 2.7.
#
# Report bugs to <http://bugs.python.org/>.
#
#
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
-# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software
-# Foundation, Inc.
+# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
#
#
# This configure script is free software; the Free Software Foundation
@@ -92,6 +90,7 @@ fi
IFS=" "" $as_nl"
# Find who we are. Look in the path if we contain no directory separator.
+as_myself=
case $0 in #((
*[\\/]* ) as_myself=$0 ;;
*) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
@@ -136,6 +135,31 @@ export LANGUAGE
# CDPATH.
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+# Use a proper internal environment variable to ensure we don't fall
+ # into an infinite loop, continuously re-executing ourselves.
+ if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then
+ _as_can_reexec=no; export _as_can_reexec;
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+as_fn_exit 255
+ fi
+ # We don't want this to propagate to other subprocesses.
+ { _as_can_reexec=; unset _as_can_reexec;}
if test "x$CONFIG_SHELL" = x; then
as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
emulate sh
@@ -169,7 +193,8 @@ if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
else
exitcode=1; echo positional parameters were not saved.
fi
-test x\$exitcode = x0 || exit 1"
+test x\$exitcode = x0 || exit 1
+test -x / || exit 1"
as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
@@ -214,14 +239,25 @@ IFS=$as_save_IFS
if test "x$CONFIG_SHELL" != x; then :
- # We cannot yet assume a decent shell, so we have to provide a
- # neutralization value for shells without unset; and this also
- # works around shells that cannot unset nonexistent variables.
- BASH_ENV=/dev/null
- ENV=/dev/null
- (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
- export CONFIG_SHELL
- exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"}
+ export CONFIG_SHELL
+ # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+ *v*x* | *x*v* ) as_opts=-vx ;;
+ *v* ) as_opts=-v ;;
+ *x* ) as_opts=-x ;;
+ * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+exit 255
fi
if test x$as_have_required = xno; then :
@@ -324,6 +360,14 @@ $as_echo X"$as_dir" |
} # as_fn_mkdir_p
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
# as_fn_append VAR VALUE
# ----------------------
# Append the text in VALUE to the end of the definition contained in VAR. Take
@@ -445,6 +489,10 @@ as_cr_alnum=$as_cr_Letters$as_cr_digits
chmod +x "$as_me.lineno" ||
{ $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+ # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
+ # already done that, so ensure we don't try to do so again and fall
+ # in an infinite loop. This has already happened in practice.
+ _as_can_reexec=no; export _as_can_reexec
# Don't try to exec as it changes $[0], causing all sort of problems
# (the dirname of $[0] is not the place where we might find the
# original and so on. Autoconf is especially sensitive to this).
@@ -479,16 +527,16 @@ if (echo >conf$$.file) 2>/dev/null; then
# ... but there are two gotchas:
# 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
# 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
- # In both cases, we have to default to `cp -p'.
+ # In both cases, we have to default to `cp -pR'.
ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
elif ln conf$$.file conf$$ 2>/dev/null; then
as_ln_s=ln
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
rmdir conf$$.dir 2>/dev/null
@@ -500,28 +548,8 @@ else
as_mkdir_p=false
fi
-if test -x / >/dev/null 2>&1; then
- as_test_x='test -x'
-else
- if ls -dL / >/dev/null 2>&1; then
- as_ls_L_option=L
- else
- as_ls_L_option=
- fi
- as_test_x='
- eval sh -c '\''
- if test -d "$1"; then
- test -d "$1/.";
- else
- case $1 in #(
- -*)set "./$1";;
- esac;
- case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
- ???[sx]*):;;*)false;;esac;fi
- '\'' sh
- '
-fi
-as_executable_p=$as_test_x
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
# Sed expression to map a string onto a valid CPP name.
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
@@ -616,6 +644,8 @@ LDLAST
USE_THREAD_MODULE
SIGNAL_OBJS
USE_SIGNAL_MODULE
+TCLTK_LIBS
+TCLTK_INCLUDES
LIBFFI_INCLUDEDIR
PKG_CONFIG
SHLIBS
@@ -632,6 +662,7 @@ UNIVERSAL_ARCH_FLAGS
BASECFLAGS
OPT
LN
+MKDIR_P
INSTALL_DATA
INSTALL_SCRIPT
INSTALL_PROGRAM
@@ -639,8 +670,10 @@ HAS_HG
HGBRANCH
HGTAG
HGVERSION
+BASECPPFLAGS
SVNVERSION
ARFLAGS
+ac_ct_AR
AR
RANLIB
GNULD
@@ -656,6 +689,8 @@ BUILDEXEEXT
EGREP
GREP
CPP
+MULTIARCH
+ac_ct_CXX
MAINCC
CXX
OBJEXT
@@ -670,6 +705,7 @@ CONFIGURE_MACOSX_DEPLOYMENT_TARGET
EXTRAMACHDEPPATH
EXTRAPLATDIR
SGI_ABI
+_PYTHON_HOST_PLATFORM
MACHDEP
FRAMEWORKINSTALLAPPSPREFIX
FRAMEWORKUNIXTOOLSPREFIX
@@ -688,6 +724,15 @@ UNIVERSALSDK
CONFIG_ARGS
SOVERSION
VERSION
+PYTHON_FOR_BUILD
+host_os
+host_vendor
+host_cpu
+host
+build_os
+build_vendor
+build_cpu
+build
target_alias
host_alias
build_alias
@@ -743,6 +788,8 @@ enable_toolbox_glue
with_libs
with_system_expat
with_system_ffi
+with_tcltk_includes
+with_tcltk_libs
with_dbmliborder
with_signal_module
with_dec_threads
@@ -1174,7 +1221,7 @@ Try \`$0 --help' for more information"
$as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
$as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
- : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
+ : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}"
;;
esac
@@ -1225,8 +1272,6 @@ target=$target_alias
if test "x$host_alias" != x; then
if test "x$build_alias" = x; then
cross_compiling=maybe
- $as_echo "$as_me: WARNING: if you wanted to set the --build type, don't use --host.
- If a cross compiler is detected then cross compile mode will be used" >&2
elif test "x$build_alias" != "x$host_alias"; then
cross_compiling=yes
fi
@@ -1368,6 +1413,10 @@ Fine tuning of the installation directories:
_ACEOF
cat <<\_ACEOF
+
+System types:
+ --build=BUILD configure for building on BUILD [guessed]
+ --host=HOST cross-compile to build programs to run on HOST [BUILD]
_ACEOF
fi
@@ -1414,6 +1463,10 @@ Optional Packages:
--with-system-expat build pyexpat module using an installed expat
library
--with-system-ffi build _ctypes module using an installed ffi library
+ --with-tcltk-includes='-I...'
+ override search for Tcl and Tk include files
+ --with-tcltk-libs='-L...'
+ override search for Tcl and Tk libs
--with-dbmliborder=db1:db2:...
order to check db backends for dbm. Valid value is a
colon separated string with the backend names
@@ -1511,9 +1564,9 @@ test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
python configure 2.7
-generated by GNU Autoconf 2.67
+generated by GNU Autoconf 2.69
-Copyright (C) 2010 Free Software Foundation, Inc.
+Copyright (C) 2012 Free Software Foundation, Inc.
This configure script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it.
_ACEOF
@@ -1557,7 +1610,7 @@ sed 's/^/| /' conftest.$ac_ext >&5
ac_retval=1
fi
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
as_fn_set_status $ac_retval
} # ac_fn_c_try_compile
@@ -1594,7 +1647,7 @@ sed 's/^/| /' conftest.$ac_ext >&5
ac_retval=1
fi
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
as_fn_set_status $ac_retval
} # ac_fn_c_try_cpp
@@ -1607,10 +1660,10 @@ fi
ac_fn_c_check_header_mongrel ()
{
as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
- if eval "test \"\${$3+set}\"" = set; then :
+ if eval \${$3+:} false; then :
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
$as_echo_n "checking for $2... " >&6; }
-if eval "test \"\${$3+set}\"" = set; then :
+if eval \${$3+:} false; then :
$as_echo_n "(cached) " >&6
fi
eval ac_res=\$$3
@@ -1677,7 +1730,7 @@ $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
esac
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
$as_echo_n "checking for $2... " >&6; }
-if eval "test \"\${$3+set}\"" = set; then :
+if eval \${$3+:} false; then :
$as_echo_n "(cached) " >&6
else
eval "$3=\$ac_header_compiler"
@@ -1686,7 +1739,7 @@ eval ac_res=\$$3
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
$as_echo "$ac_res" >&6; }
fi
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
} # ac_fn_c_check_header_mongrel
@@ -1727,7 +1780,7 @@ sed 's/^/| /' conftest.$ac_ext >&5
ac_retval=$ac_status
fi
rm -rf conftest.dSYM conftest_ipa8_conftest.oo
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
as_fn_set_status $ac_retval
} # ac_fn_c_try_run
@@ -1741,7 +1794,7 @@ ac_fn_c_check_header_compile ()
as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
$as_echo_n "checking for $2... " >&6; }
-if eval "test \"\${$3+set}\"" = set; then :
+if eval \${$3+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -1759,7 +1812,7 @@ fi
eval ac_res=\$$3
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
$as_echo "$ac_res" >&6; }
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
} # ac_fn_c_check_header_compile
@@ -1790,7 +1843,7 @@ $as_echo "$ac_try_echo"; } >&5
test ! -s conftest.err
} && test -s conftest$ac_exeext && {
test "$cross_compiling" = yes ||
- $as_test_x conftest$ac_exeext
+ test -x conftest$ac_exeext
}; then :
ac_retval=0
else
@@ -1804,7 +1857,7 @@ fi
# interfere with the next link command; also delete a directory that is
# left behind by Apple's compiler. We do this before executing the actions.
rm -rf conftest.dSYM conftest_ipa8_conftest.oo
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
as_fn_set_status $ac_retval
} # ac_fn_c_try_link
@@ -1818,7 +1871,7 @@ ac_fn_c_check_type ()
as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
$as_echo_n "checking for $2... " >&6; }
-if eval "test \"\${$3+set}\"" = set; then :
+if eval \${$3+:} false; then :
$as_echo_n "(cached) " >&6
else
eval "$3=no"
@@ -1859,7 +1912,7 @@ fi
eval ac_res=\$$3
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
$as_echo "$ac_res" >&6; }
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
} # ac_fn_c_check_type
@@ -1872,7 +1925,7 @@ ac_fn_c_find_uintX_t ()
as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for uint$2_t" >&5
$as_echo_n "checking for uint$2_t... " >&6; }
-if eval "test \"\${$3+set}\"" = set; then :
+if eval \${$3+:} false; then :
$as_echo_n "(cached) " >&6
else
eval "$3=no"
@@ -1887,7 +1940,8 @@ int
main ()
{
static int test_array [1 - 2 * !((($ac_type) -1 >> ($2 / 2 - 1)) >> ($2 / 2 - 1) == 3)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -1912,7 +1966,7 @@ fi
eval ac_res=\$$3
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
$as_echo "$ac_res" >&6; }
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
} # ac_fn_c_find_uintX_t
@@ -1925,7 +1979,7 @@ ac_fn_c_find_intX_t ()
as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for int$2_t" >&5
$as_echo_n "checking for int$2_t... " >&6; }
-if eval "test \"\${$3+set}\"" = set; then :
+if eval \${$3+:} false; then :
$as_echo_n "(cached) " >&6
else
eval "$3=no"
@@ -1941,7 +1995,8 @@ int
main ()
{
static int test_array [1 - 2 * !(0 < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1))];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -1957,7 +2012,8 @@ main ()
{
static int test_array [1 - 2 * !(($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1)
< ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 2))];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -1986,7 +2042,7 @@ fi
eval ac_res=\$$3
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
$as_echo "$ac_res" >&6; }
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
} # ac_fn_c_find_intX_t
@@ -2007,7 +2063,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) >= 0)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2023,7 +2080,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) <= $ac_mid)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2049,7 +2107,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) < 0)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2065,7 +2124,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) >= $ac_mid)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2099,7 +2159,8 @@ int
main ()
{
static int test_array [1 - 2 * !(($2) <= $ac_mid)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -2163,7 +2224,7 @@ rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
rm -f conftest.val
fi
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
as_fn_set_status $ac_retval
} # ac_fn_c_compute_int
@@ -2176,7 +2237,7 @@ ac_fn_c_check_func ()
as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
$as_echo_n "checking for $2... " >&6; }
-if eval "test \"\${$3+set}\"" = set; then :
+if eval \${$3+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -2231,7 +2292,7 @@ fi
eval ac_res=\$$3
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
$as_echo "$ac_res" >&6; }
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
} # ac_fn_c_check_func
@@ -2244,7 +2305,7 @@ ac_fn_c_check_member ()
as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2.$3" >&5
$as_echo_n "checking for $2.$3... " >&6; }
-if eval "test \"\${$4+set}\"" = set; then :
+if eval \${$4+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -2288,7 +2349,7 @@ fi
eval ac_res=\$$4
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
$as_echo "$ac_res" >&6; }
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
} # ac_fn_c_check_member
@@ -2303,7 +2364,7 @@ ac_fn_c_check_decl ()
as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'`
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5
$as_echo_n "checking whether $as_decl_name is declared... " >&6; }
-if eval "test \"\${$3+set}\"" = set; then :
+if eval \${$3+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -2334,7 +2395,7 @@ fi
eval ac_res=\$$3
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
$as_echo "$ac_res" >&6; }
- eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+ eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
} # ac_fn_c_check_decl
cat >config.log <<_ACEOF
@@ -2342,7 +2403,7 @@ This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
It was created by python $as_me 2.7, which was
-generated by GNU Autoconf 2.67. Invocation command line was
+generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -2600,7 +2661,7 @@ $as_echo "$as_me: loading site script $ac_site_file" >&6;}
|| { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error $? "failed to load site script $ac_site_file
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
fi
done
@@ -2693,6 +2754,134 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
ac_config_headers="$ac_config_headers pyconfig.h"
+ac_aux_dir=
+for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do
+ if test -f "$ac_dir/install-sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install-sh -c"
+ break
+ elif test -f "$ac_dir/install.sh"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/install.sh -c"
+ break
+ elif test -f "$ac_dir/shtool"; then
+ ac_aux_dir=$ac_dir
+ ac_install_sh="$ac_aux_dir/shtool install -c"
+ break
+ fi
+done
+if test -z "$ac_aux_dir"; then
+ as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5
+fi
+
+# These three variables are undocumented and unsupported,
+# and are intended to be withdrawn in a future Autoconf release.
+# They can cause serious problems if a builder's source tree is in a directory
+# whose full name contains unusual characters.
+ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var.
+ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var.
+ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
+
+
+# Make sure we can run config.sub.
+$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
+ as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5
+$as_echo_n "checking build system type... " >&6; }
+if ${ac_cv_build+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ ac_build_alias=$build_alias
+test "x$ac_build_alias" = x &&
+ ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"`
+test "x$ac_build_alias" = x &&
+ as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5
+ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` ||
+ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5
+$as_echo "$ac_cv_build" >&6; }
+case $ac_cv_build in
+*-*-*) ;;
+*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;;
+esac
+build=$ac_cv_build
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_build
+shift
+build_cpu=$1
+build_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+build_os=$*
+IFS=$ac_save_IFS
+case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5
+$as_echo_n "checking host system type... " >&6; }
+if ${ac_cv_host+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test "x$host_alias" = x; then
+ ac_cv_host=$ac_cv_build
+else
+ ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` ||
+ as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5
+$as_echo "$ac_cv_host" >&6; }
+case $ac_cv_host in
+*-*-*) ;;
+*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;;
+esac
+host=$ac_cv_host
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_host
+shift
+host_cpu=$1
+host_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+host_os=$*
+IFS=$ac_save_IFS
+case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
+
+
+
+
+
+if test "$cross_compiling" = yes; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for python interpreter for cross build" >&5
+$as_echo_n "checking for python interpreter for cross build... " >&6; }
+ if test -z "$PYTHON_FOR_BUILD"; then
+ for interp in python$PACKAGE_VERSION python2 python; do
+ which $interp >/dev/null 2>&1 || continue
+ if $interp -c 'import sys;sys.exit(not (sys.version_info[:2] >= (2,7) and sys.version_info[0] < 3))'; then
+ break
+ fi
+ interp=
+ done
+ if test x$interp = x; then
+ as_fn_error $? "python$PACKAGE_VERSION interpreter not found" "$LINENO" 5
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $interp" >&5
+$as_echo "$interp" >&6; }
+ PYTHON_FOR_BUILD='_PYTHON_PROJECT_BASE=$(abs_builddir) _PYTHON_HOST_PLATFORM=$(_PYTHON_HOST_PLATFORM) PYTHONPATH=$(shell test -f pybuilddir.txt && echo $(abs_builddir)/`cat pybuilddir.txt`:)$(srcdir)/Lib:$(srcdir)/Lib/plat-$(MACHDEP) '$interp
+ fi
+elif test "$cross_compiling" = maybe; then
+ as_fn_error $? "Cross compiling required --host=HOST-TUPLE and --build=ARCH" "$LINENO" 5
+else
+ PYTHON_FOR_BUILD='./$(BUILDPYTHON) -E'
+fi
+
+
if test "$prefix" != "/"; then
prefix=`echo "$prefix" | sed -e 's/\/$//g'`
@@ -2800,6 +2989,7 @@ fi
+ARCH_RUN_32BIT=""
UNIVERSAL_ARCHS="32-bit"
@@ -2981,6 +3171,25 @@ fi
$as_echo_n "checking MACHDEP... " >&6; }
if test -z "$MACHDEP"
then
+ # avoid using uname for cross builds
+ if test "$cross_compiling" = yes; then
+ # ac_sys_system and ac_sys_release are only used for setting
+ # `define_xopen_source' in the case statement below. For the
+ # current supported cross builds, this macro is not adjusted.
+ case "$host" in
+ *-*-linux*)
+ ac_sys_system=Linux
+ ;;
+ *-*-cygwin*)
+ ac_sys_system=Cygwin
+ ;;
+ *)
+ # for now, limit cross builds to known configurations
+ MACHDEP="unknown"
+ as_fn_error $? "cross build not supported for $host" "$LINENO" 5
+ esac
+ ac_sys_release=
+ else
ac_sys_system=`uname -s`
if test "$ac_sys_system" = "AIX" \
-o "$ac_sys_system" = "UnixWare" -o "$ac_sys_system" = "OpenUNIX"; then
@@ -2988,20 +3197,44 @@ then
else
ac_sys_release=`uname -r`
fi
- ac_md_system=`echo $ac_sys_system |
- tr -d '/ ' | tr '[A-Z]' '[a-z]'`
- ac_md_release=`echo $ac_sys_release |
- tr -d '/ ' | sed 's/^[A-Z]\.//' | sed 's/\..*//'`
- MACHDEP="$ac_md_system$ac_md_release"
+ fi
+ ac_md_system=`echo $ac_sys_system |
+ tr -d '/ ' | tr '[A-Z]' '[a-z]'`
+ ac_md_release=`echo $ac_sys_release |
+ tr -d '/ ' | sed 's/^[A-Z]\.//' | sed 's/\..*//'`
+ MACHDEP="$ac_md_system$ac_md_release"
- case $MACHDEP in
+ case $MACHDEP in
linux*) MACHDEP="linux2";;
cygwin*) MACHDEP="cygwin";;
darwin*) MACHDEP="darwin";;
atheos*) MACHDEP="atheos";;
irix646) MACHDEP="irix6";;
'') MACHDEP="unknown";;
+ esac
+fi
+
+
+if test "$cross_compiling" = yes; then
+ case "$host" in
+ *-*-linux*)
+ case "$host_cpu" in
+ arm*)
+ _host_cpu=arm
+ ;;
+ *)
+ _host_cpu=$host_cpu
+ esac
+ ;;
+ *-*-cygwin*)
+ _host_cpu=
+ ;;
+ *)
+ # for now, limit cross builds to known configurations
+ MACHDEP="unknown"
+ as_fn_error $? "cross build not supported for $host" "$LINENO" 5
esac
+ _PYTHON_HOST_PLATFORM="$MACHDEP${_host_cpu:+-$_host_cpu}"
fi
# Some systems cannot stand _XOPEN_SOURCE being defined at all; they
@@ -3158,12 +3391,6 @@ $as_echo "$EXTRAPLATDIR" >&6; }
CONFIGURE_MACOSX_DEPLOYMENT_TARGET=
EXPORT_MACOSX_DEPLOYMENT_TARGET='#'
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking machine type as reported by uname -m" >&5
-$as_echo_n "checking machine type as reported by uname -m... " >&6; }
-ac_sys_machine=`uname -m`
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_sys_machine" >&5
-$as_echo "$ac_sys_machine" >&6; }
-
# checks for alternative programs
# compiler flags are generated in two sets, BASECFLAGS and OPT. OPT is just
@@ -3228,6 +3455,39 @@ then
(it is also a good idea to do 'make clean' before compiling)" "$LINENO" 5
fi
+if test "$MACHDEP" = "irix6" && test "$CC" != "gcc"; then
+ # Normally, MIPSpro CC treats #error directives as warnings, which means
+ # a successful exit code is returned (0). This is a problem because IRIX
+ # has a bunch of system headers with this guard at the top:
+ #
+ # #ifndef __c99
+ # #error This header file is to be used only for c99 mode compilations
+ # #else
+ #
+ # When autoconf tests for such a header, like stdint.h, this happens:
+ #
+ # configure:4619: cc -c conftest.c >&5
+ # cc-1035 cc: WARNING File = /usr/include/stdint.h, Line = 5
+ # #error directive: This header file is to be used only for c99 mode
+ # compilations
+ #
+ # #error This header file is to be used only for c99 mode compilations
+ # ^
+ #
+ # configure:4619: $? = 0
+ # configure:4619: result: yes
+ #
+ # Therefore, we use `-diag_error 1035` to have the compiler treat the
+ # warning as an error, which causes cc to return a non-zero result,
+ # which autoconf can interpret correctly.
+ CFLAGS="$CFLAGS -diag_error 1035"
+ # Whilst we're here, we might as well make sure CXX defaults to something
+ # sensible if we're not using gcc.
+ if test -z "$CXX"; then
+ CXX="CC"
+ fi
+fi
+
# If the user set CFLAGS, use this instead of the automatically
# determined setting
preset_cflags="$CFLAGS"
@@ -3241,7 +3501,7 @@ if test -n "$ac_tool_prefix"; then
set dummy ${ac_tool_prefix}gcc; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
+if ${ac_cv_prog_CC+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$CC"; then
@@ -3253,7 +3513,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_CC="${ac_tool_prefix}gcc"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3281,7 +3541,7 @@ if test -z "$ac_cv_prog_CC"; then
set dummy gcc; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_CC+set}" = set; then :
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_CC"; then
@@ -3293,7 +3553,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_CC="gcc"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3334,7 +3594,7 @@ if test -z "$CC"; then
set dummy ${ac_tool_prefix}cc; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
+if ${ac_cv_prog_CC+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$CC"; then
@@ -3346,7 +3606,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_CC="${ac_tool_prefix}cc"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3374,7 +3634,7 @@ if test -z "$CC"; then
set dummy cc; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
+if ${ac_cv_prog_CC+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$CC"; then
@@ -3387,7 +3647,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
ac_prog_rejected=yes
continue
@@ -3433,7 +3693,7 @@ if test -z "$CC"; then
set dummy $ac_tool_prefix$ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
+if ${ac_cv_prog_CC+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$CC"; then
@@ -3445,7 +3705,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3477,7 +3737,7 @@ do
set dummy $ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_CC+set}" = set; then :
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_CC"; then
@@ -3489,7 +3749,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_CC="$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -3532,7 +3792,7 @@ fi
test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error $? "no acceptable C compiler found in \$PATH
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
# Provide some information about the compiler.
$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
@@ -3647,7 +3907,7 @@ sed 's/^/| /' conftest.$ac_ext >&5
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "C compiler cannot create executables
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
@@ -3690,7 +3950,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error $? "cannot compute suffix of executables: cannot compile and link
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
fi
rm -f conftest conftest$ac_cv_exeext
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
@@ -3749,7 +4009,7 @@ $as_echo "$ac_try_echo"; } >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error $? "cannot run C compiled programs.
If you meant to cross compile, use \`--host'.
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
fi
fi
fi
@@ -3760,7 +4020,7 @@ rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
ac_clean_files=$ac_clean_files_save
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
$as_echo_n "checking for suffix of object files... " >&6; }
-if test "${ac_cv_objext+set}" = set; then :
+if ${ac_cv_objext+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -3801,7 +4061,7 @@ sed 's/^/| /' conftest.$ac_ext >&5
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error $? "cannot compute suffix of object files: cannot compile
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
fi
rm -f conftest.$ac_cv_objext conftest.$ac_ext
fi
@@ -3811,7 +4071,7 @@ OBJEXT=$ac_cv_objext
ac_objext=$OBJEXT
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
-if test "${ac_cv_c_compiler_gnu+set}" = set; then :
+if ${ac_cv_c_compiler_gnu+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -3848,7 +4108,7 @@ ac_test_CFLAGS=${CFLAGS+set}
ac_save_CFLAGS=$CFLAGS
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
$as_echo_n "checking whether $CC accepts -g... " >&6; }
-if test "${ac_cv_prog_cc_g+set}" = set; then :
+if ${ac_cv_prog_cc_g+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_save_c_werror_flag=$ac_c_werror_flag
@@ -3926,7 +4186,7 @@ else
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
-if test "${ac_cv_prog_cc_c89+set}" = set; then :
+if ${ac_cv_prog_cc_c89+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_cv_prog_cc_c89=no
@@ -3935,8 +4195,7 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
#include <stdarg.h>
#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
+struct stat;
/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
struct buf { int x; };
FILE * (*rcsopen) (struct buf *, struct stat *, int);
@@ -4061,11 +4320,12 @@ preset_cxx="$CXX"
if test -z "$CXX"
then
case "$CC" in
- gcc) # Extract the first word of "g++", so it can be a program name with args.
-set dummy g++; ac_word=$2
+ gcc) if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}g++", so it can be a program name with args.
+set dummy ${ac_tool_prefix}g++; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_path_CXX+set}" = set; then :
+if ${ac_cv_path_CXX+:} false; then :
$as_echo_n "(cached) " >&6
else
case $CXX in
@@ -4079,7 +4339,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_CXX="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -4088,7 +4348,6 @@ done
done
IFS=$as_save_IFS
- test -z "$ac_cv_path_CXX" && ac_cv_path_CXX="g++"
;;
esac
fi
@@ -4101,12 +4360,70 @@ else
$as_echo "no" >&6; }
fi
+
+fi
+if test -z "$ac_cv_path_CXX"; then
+ ac_pt_CXX=$CXX
+ # Extract the first word of "g++", so it can be a program name with args.
+set dummy g++; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_ac_pt_CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $ac_pt_CXX in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_ac_pt_CXX="$ac_pt_CXX" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in notfound
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_ac_pt_CXX="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+fi
+ac_pt_CXX=$ac_cv_path_ac_pt_CXX
+if test -n "$ac_pt_CXX"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_CXX" >&5
+$as_echo "$ac_pt_CXX" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_pt_CXX" = x; then
+ CXX="g++"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CXX=$ac_pt_CXX
+ fi
+else
+ CXX="$ac_cv_path_CXX"
+fi
;;
- cc) # Extract the first word of "c++", so it can be a program name with args.
-set dummy c++; ac_word=$2
+ cc) if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}c++", so it can be a program name with args.
+set dummy ${ac_tool_prefix}c++; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_path_CXX+set}" = set; then :
+if ${ac_cv_path_CXX+:} false; then :
$as_echo_n "(cached) " >&6
else
case $CXX in
@@ -4120,7 +4437,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_CXX="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -4129,7 +4446,6 @@ done
done
IFS=$as_save_IFS
- test -z "$ac_cv_path_CXX" && ac_cv_path_CXX="c++"
;;
esac
fi
@@ -4142,6 +4458,63 @@ else
$as_echo "no" >&6; }
fi
+
+fi
+if test -z "$ac_cv_path_CXX"; then
+ ac_pt_CXX=$CXX
+ # Extract the first word of "c++", so it can be a program name with args.
+set dummy c++; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_ac_pt_CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $ac_pt_CXX in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_ac_pt_CXX="$ac_pt_CXX" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in notfound
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_ac_pt_CXX="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+fi
+ac_pt_CXX=$ac_cv_path_ac_pt_CXX
+if test -n "$ac_pt_CXX"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_CXX" >&5
+$as_echo "$ac_pt_CXX" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+ if test "x$ac_pt_CXX" = x; then
+ CXX="c++"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CXX=$ac_pt_CXX
+ fi
+else
+ CXX="$ac_cv_path_CXX"
+fi
;;
esac
if test "$CXX" = "notfound"
@@ -4151,13 +4524,14 @@ fi
fi
if test -z "$CXX"
then
- for ac_prog in $CCC c++ g++ gcc CC cxx cc++ cl
-do
- # Extract the first word of "$ac_prog", so it can be a program name with args.
-set dummy $ac_prog; ac_word=$2
+ if test -n "$ac_tool_prefix"; then
+ for ac_prog in $CCC c++ g++ gcc CC cxx cc++ cl
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CXX+set}" = set; then :
+if ${ac_cv_prog_CXX+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$CXX"; then
@@ -4169,8 +4543,8 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
- ac_cv_prog_CXX="$ac_prog"
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
@@ -4190,9 +4564,65 @@ $as_echo "no" >&6; }
fi
- test -n "$CXX" && break
+ test -n "$CXX" && break
+ done
+fi
+if test -z "$CXX"; then
+ ac_ct_CXX=$CXX
+ for ac_prog in $CCC c++ g++ gcc CC cxx cc++ cl
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CXX+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_CXX"; then
+ ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_CXX="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
done
-test -n "$CXX" || CXX="notfound"
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
+if test -n "$ac_ct_CXX"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5
+$as_echo "$ac_ct_CXX" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_CXX" && break
+done
+
+ if test "x$ac_ct_CXX" = x; then
+ CXX="notfound"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ CXX=$ac_ct_CXX
+ fi
+fi
if test "$CXX" = "notfound"
then
@@ -4213,6 +4643,9 @@ $as_echo "$as_me: WARNING:
" >&2;}
fi
+MULTIARCH=$($CC --print-multiarch 2>/dev/null)
+
+
# checks for UNIX variants that set C preprocessor variables
@@ -4228,7 +4661,7 @@ if test -n "$CPP" && test -d "$CPP"; then
CPP=
fi
if test -z "$CPP"; then
- if test "${ac_cv_prog_CPP+set}" = set; then :
+ if ${ac_cv_prog_CPP+:} false; then :
$as_echo_n "(cached) " >&6
else
# Double quotes because CPP needs to be expanded
@@ -4344,7 +4777,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error $? "C preprocessor \"$CPP\" fails sanity check
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
fi
ac_ext=c
@@ -4356,7 +4789,7 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
-if test "${ac_cv_path_GREP+set}" = set; then :
+if ${ac_cv_path_GREP+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -z "$GREP"; then
@@ -4370,7 +4803,7 @@ do
for ac_prog in grep ggrep; do
for ac_exec_ext in '' $ac_executable_extensions; do
ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
- { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue
+ as_fn_executable_p "$ac_path_GREP" || continue
# Check for GNU ac_path_GREP and select it if it is found.
# Check for GNU $ac_path_GREP
case `"$ac_path_GREP" --version 2>&1` in
@@ -4419,7 +4852,7 @@ $as_echo "$ac_cv_path_GREP" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
$as_echo_n "checking for egrep... " >&6; }
-if test "${ac_cv_path_EGREP+set}" = set; then :
+if ${ac_cv_path_EGREP+:} false; then :
$as_echo_n "(cached) " >&6
else
if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
@@ -4436,7 +4869,7 @@ do
for ac_prog in egrep; do
for ac_exec_ext in '' $ac_executable_extensions; do
ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
- { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue
+ as_fn_executable_p "$ac_path_EGREP" || continue
# Check for GNU ac_path_EGREP and select it if it is found.
# Check for GNU $ac_path_EGREP
case `"$ac_path_EGREP" --version 2>&1` in
@@ -4486,7 +4919,7 @@ $as_echo "$ac_cv_path_EGREP" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
$as_echo_n "checking for ANSI C header files... " >&6; }
-if test "${ac_cv_header_stdc+set}" = set; then :
+if ${ac_cv_header_stdc+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -4615,7 +5048,7 @@ done
ac_fn_c_check_header_mongrel "$LINENO" "minix/config.h" "ac_cv_header_minix_config_h" "$ac_includes_default"
-if test "x$ac_cv_header_minix_config_h" = x""yes; then :
+if test "x$ac_cv_header_minix_config_h" = xyes; then :
MINIX=yes
else
MINIX=
@@ -4637,14 +5070,14 @@ $as_echo "#define _MINIX 1" >>confdefs.h
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether it is safe to define __EXTENSIONS__" >&5
$as_echo_n "checking whether it is safe to define __EXTENSIONS__... " >&6; }
-if test "${ac_cv_safe_to_define___extensions__+set}" = set; then :
+if ${ac_cv_safe_to_define___extensions__+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
-# define __EXTENSIONS__ 1
- $ac_includes_default
+# define __EXTENSIONS__ 1
+ $ac_includes_default
int
main ()
{
@@ -4852,37 +5285,35 @@ $as_echo "$enable_shared" >&6; }
$as_echo_n "checking for --enable-profiling... " >&6; }
# Check whether --enable-profiling was given.
if test "${enable_profiling+set}" = set; then :
- enableval=$enable_profiling; ac_save_cc="$CC"
- CC="$CC -pg"
- if test "$cross_compiling" = yes; then :
- ac_enable_profiling="no"
-else
+ enableval=$enable_profiling;
+fi
+
+if test "x$enable_profiling" = xyes; then
+ ac_save_cc="$CC"
+ CC="$CC -pg"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
int main() { return 0; }
_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
- ac_enable_profiling="yes"
+if ac_fn_c_try_link "$LINENO"; then :
+
else
- ac_enable_profiling="no"
+ enable_profiling=no
fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
- conftest.$ac_objext conftest.beam conftest.$ac_ext
+rm -f core conftest.err conftest.$ac_objext \
+ conftest$ac_exeext conftest.$ac_ext
+ CC="$ac_save_cc"
+else
+ enable_profiling=no
fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_profiling" >&5
+$as_echo "$enable_profiling" >&6; }
- CC="$ac_save_cc"
+if test "x$enable_profiling" = xyes; then
+ BASECFLAGS="-pg $BASECFLAGS"
+ LDFLAGS="-pg $LDFLAGS"
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_enable_profiling" >&5
-$as_echo "$ac_enable_profiling" >&6; }
-
-case "$ac_enable_profiling" in
- "yes")
- BASECFLAGS="-pg $BASECFLAGS"
- LDFLAGS="-pg $LDFLAGS"
- ;;
-esac
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking LDLIBRARY" >&5
$as_echo_n "checking LDLIBRARY... " >&6; }
@@ -4895,7 +5326,7 @@ $as_echo_n "checking LDLIBRARY... " >&6; }
if test "$enable_framework"
then
LDLIBRARY='$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
- RUNSHARED=DYLD_FRAMEWORK_PATH="`pwd`:$DYLD_FRAMEWORK_PATH"
+ RUNSHARED=DYLD_FRAMEWORK_PATH=`pwd`${DYLD_FRAMEWORK_PATH:+:${DYLD_FRAMEWORK_PATH}}
BLDLIBRARY=''
else
BLDLIBRARY='$(LDLIBRARY)'
@@ -4917,13 +5348,13 @@ $as_echo "#define Py_ENABLE_SHARED 1" >>confdefs.h
SunOS*)
LDLIBRARY='libpython$(VERSION).so'
BLDLIBRARY='-Wl,-R,$(LIBDIR) -L. -lpython$(VERSION)'
- RUNSHARED=LD_LIBRARY_PATH=`pwd`:${LD_LIBRARY_PATH}
+ RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
INSTSONAME="$LDLIBRARY".$SOVERSION
;;
Linux*|GNU*|NetBSD*|FreeBSD*|DragonFly*|OpenBSD*)
LDLIBRARY='libpython$(VERSION).so'
BLDLIBRARY='-L. -lpython$(VERSION)'
- RUNSHARED=LD_LIBRARY_PATH=`pwd`:${LD_LIBRARY_PATH}
+ RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
case $ac_sys_system in
FreeBSD*)
SOVERSION=`echo $SOVERSION|cut -d "." -f 1`
@@ -4941,12 +5372,12 @@ $as_echo "#define Py_ENABLE_SHARED 1" >>confdefs.h
;;
esac
BLDLIBRARY='-Wl,+b,$(LIBDIR) -L. -lpython$(VERSION)'
- RUNSHARED=SHLIB_PATH=`pwd`:${SHLIB_PATH}
+ RUNSHARED=SHLIB_PATH=`pwd`${SHLIB_PATH:+:${SHLIB_PATH}}
;;
OSF*)
LDLIBRARY='libpython$(VERSION).so'
BLDLIBRARY='-rpath $(LIBDIR) -L. -lpython$(VERSION)'
- RUNSHARED=LD_LIBRARY_PATH=`pwd`:${LD_LIBRARY_PATH}
+ RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
;;
atheos*)
LDLIBRARY='libpython$(VERSION).so'
@@ -4956,11 +5387,11 @@ $as_echo "#define Py_ENABLE_SHARED 1" >>confdefs.h
Darwin*)
LDLIBRARY='libpython$(VERSION).dylib'
BLDLIBRARY='-L. -lpython$(VERSION)'
- RUNSHARED='DYLD_LIBRARY_PATH=`pwd`:${DYLD_LIBRARY_PATH}'
+ RUNSHARED=DYLD_LIBRARY_PATH=`pwd`${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}}
;;
AIX*)
LDLIBRARY='libpython$(VERSION).so'
- RUNSHARED=LIBPATH=`pwd`:${LIBPATH}
+ RUNSHARED=LIBPATH=`pwd`${LIBPATH:+:${LIBPATH}}
;;
esac
@@ -4973,6 +5404,10 @@ else # shared is disabled
esac
fi
+if test "$cross_compiling" = yes; then
+ RUNSHARED=
+fi
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $LDLIBRARY" >&5
$as_echo "$LDLIBRARY" >&6; }
@@ -4981,7 +5416,7 @@ if test -n "$ac_tool_prefix"; then
set dummy ${ac_tool_prefix}ranlib; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_RANLIB+set}" = set; then :
+if ${ac_cv_prog_RANLIB+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$RANLIB"; then
@@ -4993,7 +5428,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -5021,7 +5456,7 @@ if test -z "$ac_cv_prog_RANLIB"; then
set dummy ranlib; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then :
+if ${ac_cv_prog_ac_ct_RANLIB+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$ac_ct_RANLIB"; then
@@ -5033,7 +5468,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_ac_ct_RANLIB="ranlib"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -5069,13 +5504,14 @@ else
fi
-for ac_prog in ar aal
-do
- # Extract the first word of "$ac_prog", so it can be a program name with args.
-set dummy $ac_prog; ac_word=$2
+if test -n "$ac_tool_prefix"; then
+ for ac_prog in ar aal
+ do
+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_AR+set}" = set; then :
+if ${ac_cv_prog_AR+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$AR"; then
@@ -5087,8 +5523,8 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
- ac_cv_prog_AR="$ac_prog"
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_AR="$ac_tool_prefix$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
@@ -5108,9 +5544,65 @@ $as_echo "no" >&6; }
fi
- test -n "$AR" && break
+ test -n "$AR" && break
+ done
+fi
+if test -z "$AR"; then
+ ac_ct_AR=$AR
+ for ac_prog in ar aal
+do
+ # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_AR+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ if test -n "$ac_ct_AR"; then
+ ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_prog_ac_ct_AR="$ac_prog"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
done
-test -n "$AR" || AR="ar"
+ done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_AR=$ac_cv_prog_ac_ct_AR
+if test -n "$ac_ct_AR"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5
+$as_echo "$ac_ct_AR" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ test -n "$ac_ct_AR" && break
+done
+
+ if test "x$ac_ct_AR" = x; then
+ AR="ar"
+ else
+ case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+ AR=$ac_ct_AR
+ fi
+fi
# tweak ARFLAGS only if the user didn't set it on the command line
@@ -5125,7 +5617,7 @@ fi
set dummy svnversion; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_SVNVERSION+set}" = set; then :
+if ${ac_cv_prog_SVNVERSION+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$SVNVERSION"; then
@@ -5137,7 +5629,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_SVNVERSION="found"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -5167,13 +5659,27 @@ else
fi
+if test "$abs_srcdir" != "$abs_builddir"; then
+ # If we're building out-of-tree make sure Include (in the current dir)
+ # gets picked up before its $srcdir counterpart in order for Python-ast.h
+ # and graminit.h to get picked up from the correct directory.
+ # (A side effect of this is that these resources will automatically be
+ # regenerated when building out-of-tree, regardless of whether or not
+ # the $srcdir counterpart is up-to-date. This is an acceptable trade
+ # off.)
+ BASECPPFLAGS="-IInclude"
+else
+ BASECPPFLAGS=""
+fi
+
+
# Extract the first word of "hg", so it can be a program name with args.
set dummy hg; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_HAS_HG+set}" = set; then :
+if ${ac_cv_prog_HAS_HG+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$HAS_HG"; then
@@ -5185,7 +5691,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_HAS_HG="found"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -5226,35 +5732,6 @@ bsdos*|hp*|HP*)
INSTALL="${srcdir}/install-sh -c"
fi
esac
-ac_aux_dir=
-for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do
- if test -f "$ac_dir/install-sh"; then
- ac_aux_dir=$ac_dir
- ac_install_sh="$ac_aux_dir/install-sh -c"
- break
- elif test -f "$ac_dir/install.sh"; then
- ac_aux_dir=$ac_dir
- ac_install_sh="$ac_aux_dir/install.sh -c"
- break
- elif test -f "$ac_dir/shtool"; then
- ac_aux_dir=$ac_dir
- ac_install_sh="$ac_aux_dir/shtool install -c"
- break
- fi
-done
-if test -z "$ac_aux_dir"; then
- as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5
-fi
-
-# These three variables are undocumented and unsupported,
-# and are intended to be withdrawn in a future Autoconf release.
-# They can cause serious problems if a builder's source tree is in a directory
-# whose full name contains unusual characters.
-ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var.
-ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var.
-ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
-
-
# Find a good install program. We prefer a C program (faster),
# so one script is as good as another. But avoid the broken or
# incompatible versions:
@@ -5272,7 +5749,7 @@ ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5
$as_echo_n "checking for a BSD-compatible install... " >&6; }
if test -z "$INSTALL"; then
-if test "${ac_cv_path_install+set}" = set; then :
+if ${ac_cv_path_install+:} false; then :
$as_echo_n "(cached) " >&6
else
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
@@ -5292,7 +5769,7 @@ case $as_dir/ in #((
# by default.
for ac_prog in ginstall scoinst install; do
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then
if test $ac_prog = install &&
grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
# AIX install. It has an incompatible calling convention.
@@ -5348,6 +5825,48 @@ test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5
+$as_echo_n "checking for a thread-safe mkdir -p... " >&6; }
+if test -z "$MKDIR_P"; then
+ if ${ac_cv_path_mkdir+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_prog in mkdir gmkdir; do
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue
+ case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #(
+ 'mkdir (GNU coreutils) '* | \
+ 'mkdir (coreutils) '* | \
+ 'mkdir (fileutils) '4.1*)
+ ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext
+ break 3;;
+ esac
+ done
+ done
+ done
+IFS=$as_save_IFS
+
+fi
+
+ test -d ./--version && rmdir ./--version
+ if test "${ac_cv_path_mkdir+set}" = set; then
+ MKDIR_P="$ac_cv_path_mkdir -p"
+ else
+ # As a last resort, use the slow shell script. Don't cache a
+ # value for MKDIR_P within a source directory, because that will
+ # break other packages using the cache if that directory is
+ # removed, or if the value is a relative name.
+ MKDIR_P="$ac_install_sh -d"
+ fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5
+$as_echo "$MKDIR_P" >&6; }
+
# Not every filesystem supports hard links
@@ -5460,7 +5979,7 @@ yes)
$as_echo_n "checking whether $CC accepts -fno-strict-aliasing... " >&6; }
ac_save_cc="$CC"
CC="$CC -fno-strict-aliasing"
- if test "${ac_cv_no_strict_aliasing_ok+set}" = set; then :
+ if ${ac_cv_no_strict_aliasing_ok+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -5493,7 +6012,7 @@ $as_echo "$ac_cv_no_strict_aliasing_ok" >&6; }
# if using gcc on alpha, use -mieee to get (near) full IEEE 754
# support. Without this, treatment of subnormals doesn't follow
# the standard.
- case $ac_sys_machine in
+ case $host in
alpha*)
BASECFLAGS="$BASECFLAGS -mieee"
;;
@@ -5526,8 +6045,14 @@ $as_echo "$CC" >&6; }
# Calculate the right deployment target for this build.
#
- cur_target=`sw_vers -productVersion | sed 's/\(10\.[0-9]*\).*/\1/'`
- if test ${cur_target} '>' 10.2; then
+ cur_target_major=`sw_vers -productVersion | \
+ sed 's/\([0-9]*\)\.\([0-9]*\).*/\1/'`
+ cur_target_minor=`sw_vers -productVersion | \
+ sed 's/\([0-9]*\)\.\([0-9]*\).*/\2/'`
+ cur_target="${cur_target_major}.${cur_target_minor}"
+ if test ${cur_target_major} -eq 10 && \
+ test ${cur_target_minor} -ge 3
+ then
cur_target=10.3
if test ${enable_universalsdk}; then
if test "${UNIVERSAL_ARCHS}" = "all"; then
@@ -5650,7 +6175,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -OPT:Olimit=0" >&5
$as_echo_n "checking whether $CC accepts -OPT:Olimit=0... " >&6; }
-if test "${ac_cv_opt_olimit_ok+set}" = set; then :
+if ${ac_cv_opt_olimit_ok+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_save_cc="$CC"
@@ -5685,6 +6210,11 @@ if test $ac_cv_opt_olimit_ok = yes; then
# environment?
Darwin*)
;;
+ # XXX thankfully this useless troublemaker of a flag has been
+ # eradicated in the 3.x line. For now, make sure it isn't picked
+ # up by any of our other platforms that use CC.
+ AIX*|SunOS*|HP-UX*|IRIX*)
+ ;;
*)
BASECFLAGS="$BASECFLAGS -OPT:Olimit=0"
;;
@@ -5692,7 +6222,7 @@ if test $ac_cv_opt_olimit_ok = yes; then
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -Olimit 1500" >&5
$as_echo_n "checking whether $CC accepts -Olimit 1500... " >&6; }
- if test "${ac_cv_olimit_ok+set}" = set; then :
+ if ${ac_cv_olimit_ok+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_save_cc="$CC"
@@ -5721,7 +6251,14 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_olimit_ok" >&5
$as_echo "$ac_cv_olimit_ok" >&6; }
if test $ac_cv_olimit_ok = yes; then
- BASECFLAGS="$BASECFLAGS -Olimit 1500"
+ case $ac_sys_system in
+ # Issue #16534: On HP-UX ac_cv_olimit_ok=yes is a false positive.
+ HP-UX*)
+ ;;
+ *)
+ BASECFLAGS="$BASECFLAGS -Olimit 1500"
+ ;;
+ esac
fi
fi
@@ -5731,7 +6268,7 @@ then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether gcc supports ParseTuple __format__" >&5
$as_echo_n "checking whether gcc supports ParseTuple __format__... " >&6; }
save_CFLAGS=$CFLAGS
- CFLAGS="$CFLAGS -Werror"
+ CFLAGS="$CFLAGS -Werror -Wformat"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
@@ -5770,7 +6307,7 @@ fi
# options before we can check whether -Kpthread improves anything.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads are available without options" >&5
$as_echo_n "checking whether pthreads are available without options... " >&6; }
-if test "${ac_cv_pthread_is_default+set}" = set; then :
+if ${ac_cv_pthread_is_default+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "$cross_compiling" = yes; then :
@@ -5779,6 +6316,7 @@ else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
+#include <stdio.h>
#include <pthread.h>
void* routine(void* p){return NULL;}
@@ -5823,7 +6361,7 @@ else
# function available.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -Kpthread" >&5
$as_echo_n "checking whether $CC accepts -Kpthread... " >&6; }
-if test "${ac_cv_kpthread+set}" = set; then :
+if ${ac_cv_kpthread+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_save_cc="$CC"
@@ -5834,6 +6372,7 @@ else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
+#include <stdio.h>
#include <pthread.h>
void* routine(void* p){return NULL;}
@@ -5872,7 +6411,7 @@ then
# function available.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -Kthread" >&5
$as_echo_n "checking whether $CC accepts -Kthread... " >&6; }
-if test "${ac_cv_kthread+set}" = set; then :
+if ${ac_cv_kthread+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_save_cc="$CC"
@@ -5883,6 +6422,7 @@ else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
+#include <stdio.h>
#include <pthread.h>
void* routine(void* p){return NULL;}
@@ -5921,7 +6461,7 @@ then
# function available.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -pthread" >&5
$as_echo_n "checking whether $CC accepts -pthread... " >&6; }
-if test "${ac_cv_thread+set}" = set; then :
+if ${ac_cv_pthread+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_save_cc="$CC"
@@ -5932,6 +6472,7 @@ else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
+#include <stdio.h>
#include <pthread.h>
void* routine(void* p){return NULL;}
@@ -6006,7 +6547,7 @@ CXX="$ac_save_cxx"
# checks for header files
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
$as_echo_n "checking for ANSI C header files... " >&6; }
-if test "${ac_cv_header_stdc+set}" = set; then :
+if ${ac_cv_header_stdc+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -6116,9 +6657,9 @@ $as_echo "#define STDC_HEADERS 1" >>confdefs.h
fi
-for ac_header in asm/types.h conio.h curses.h direct.h dlfcn.h errno.h \
+for ac_header in asm/types.h conio.h direct.h dlfcn.h errno.h \
fcntl.h grp.h \
-ieeefp.h io.h langinfo.h libintl.h ncurses.h poll.h process.h pthread.h \
+ieeefp.h io.h langinfo.h libintl.h poll.h process.h pthread.h \
shadow.h signal.h stdint.h stropts.h termios.h thread.h \
unistd.h utime.h \
sys/audioio.h sys/bsdtty.h sys/epoll.h sys/event.h sys/file.h sys/loadavg.h \
@@ -6127,7 +6668,7 @@ sys/param.h sys/poll.h sys/select.h sys/socket.h sys/statvfs.h sys/stat.h \
sys/termio.h sys/time.h \
sys/times.h sys/types.h sys/un.h sys/utsname.h sys/wait.h pty.h libutil.h \
sys/resource.h netpacket/packet.h sysexits.h bluetooth.h \
-bluetooth/bluetooth.h linux/tipc.h spawn.h util.h
+bluetooth/bluetooth.h linux/tipc.h spawn.h util.h alloca.h
do :
as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
@@ -6145,7 +6686,7 @@ for ac_hdr in dirent.h sys/ndir.h sys/dir.h ndir.h; do
as_ac_Header=`$as_echo "ac_cv_header_dirent_$ac_hdr" | $as_tr_sh`
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_hdr that defines DIR" >&5
$as_echo_n "checking for $ac_hdr that defines DIR... " >&6; }
-if eval "test \"\${$as_ac_Header+set}\"" = set; then :
+if eval \${$as_ac_Header+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -6185,7 +6726,7 @@ done
if test $ac_header_dirent = dirent.h; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5
$as_echo_n "checking for library containing opendir... " >&6; }
-if test "${ac_cv_search_opendir+set}" = set; then :
+if ${ac_cv_search_opendir+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_func_search_save_LIBS=$LIBS
@@ -6219,11 +6760,11 @@ for ac_lib in '' dir; do
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext
- if test "${ac_cv_search_opendir+set}" = set; then :
+ if ${ac_cv_search_opendir+:} false; then :
break
fi
done
-if test "${ac_cv_search_opendir+set}" = set; then :
+if ${ac_cv_search_opendir+:} false; then :
else
ac_cv_search_opendir=no
@@ -6242,7 +6783,7 @@ fi
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing opendir" >&5
$as_echo_n "checking for library containing opendir... " >&6; }
-if test "${ac_cv_search_opendir+set}" = set; then :
+if ${ac_cv_search_opendir+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_func_search_save_LIBS=$LIBS
@@ -6276,11 +6817,11 @@ for ac_lib in '' x; do
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext
- if test "${ac_cv_search_opendir+set}" = set; then :
+ if ${ac_cv_search_opendir+:} false; then :
break
fi
done
-if test "${ac_cv_search_opendir+set}" = set; then :
+if ${ac_cv_search_opendir+:} false; then :
else
ac_cv_search_opendir=no
@@ -6300,7 +6841,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether sys/types.h defines makedev" >&5
$as_echo_n "checking whether sys/types.h defines makedev... " >&6; }
-if test "${ac_cv_header_sys_types_h_makedev+set}" = set; then :
+if ${ac_cv_header_sys_types_h_makedev+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -6328,7 +6869,7 @@ $as_echo "$ac_cv_header_sys_types_h_makedev" >&6; }
if test $ac_cv_header_sys_types_h_makedev = no; then
ac_fn_c_check_header_mongrel "$LINENO" "sys/mkdev.h" "ac_cv_header_sys_mkdev_h" "$ac_includes_default"
-if test "x$ac_cv_header_sys_mkdev_h" = x""yes; then :
+if test "x$ac_cv_header_sys_mkdev_h" = xyes; then :
$as_echo "#define MAJOR_IN_MKDEV 1" >>confdefs.h
@@ -6338,7 +6879,7 @@ fi
if test $ac_cv_header_sys_mkdev_h = no; then
ac_fn_c_check_header_mongrel "$LINENO" "sys/sysmacros.h" "ac_cv_header_sys_sysmacros_h" "$ac_includes_default"
-if test "x$ac_cv_header_sys_sysmacros_h" = x""yes; then :
+if test "x$ac_cv_header_sys_sysmacros_h" = xyes; then :
$as_echo "#define MAJOR_IN_SYSMACROS 1" >>confdefs.h
@@ -6349,25 +6890,6 @@ fi
fi
-# On Solaris, term.h requires curses.h
-for ac_header in term.h
-do :
- ac_fn_c_check_header_compile "$LINENO" "term.h" "ac_cv_header_term_h" "
-#ifdef HAVE_CURSES_H
-#include <curses.h>
-#endif
-
-"
-if test "x$ac_cv_header_term_h" = x""yes; then :
- cat >>confdefs.h <<_ACEOF
-#define HAVE_TERM_H 1
-_ACEOF
-
-fi
-
-done
-
-
# On Linux, netlink.h requires asm/types.h
for ac_header in linux/netlink.h
do :
@@ -6380,7 +6902,7 @@ do :
#endif
"
-if test "x$ac_cv_header_linux_netlink_h" = x""yes; then :
+if test "x$ac_cv_header_linux_netlink_h" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_LINUX_NETLINK_H 1
_ACEOF
@@ -6543,7 +7065,7 @@ EOF
# Type availability checks
ac_fn_c_check_type "$LINENO" "mode_t" "ac_cv_type_mode_t" "$ac_includes_default"
-if test "x$ac_cv_type_mode_t" = x""yes; then :
+if test "x$ac_cv_type_mode_t" = xyes; then :
else
@@ -6554,7 +7076,7 @@ _ACEOF
fi
ac_fn_c_check_type "$LINENO" "off_t" "ac_cv_type_off_t" "$ac_includes_default"
-if test "x$ac_cv_type_off_t" = x""yes; then :
+if test "x$ac_cv_type_off_t" = xyes; then :
else
@@ -6565,7 +7087,7 @@ _ACEOF
fi
ac_fn_c_check_type "$LINENO" "pid_t" "ac_cv_type_pid_t" "$ac_includes_default"
-if test "x$ac_cv_type_pid_t" = x""yes; then :
+if test "x$ac_cv_type_pid_t" = xyes; then :
else
@@ -6581,7 +7103,7 @@ cat >>confdefs.h <<_ACEOF
_ACEOF
ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default"
-if test "x$ac_cv_type_size_t" = x""yes; then :
+if test "x$ac_cv_type_size_t" = xyes; then :
else
@@ -6593,7 +7115,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for uid_t in sys/types.h" >&5
$as_echo_n "checking for uid_t in sys/types.h... " >&6; }
-if test "${ac_cv_type_uid_t+set}" = set; then :
+if ${ac_cv_type_uid_t+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -6621,6 +7143,21 @@ $as_echo "#define gid_t int" >>confdefs.h
fi
+
+# There are two separate checks for each of the exact-width integer types we
+# need. First we check whether the type is available using the usual
+# AC_CHECK_TYPE macro with the default includes (which includes <inttypes.h>
+# and <stdint.h> where available). We then also use the special type checks of
+# the form AC_TYPE_UINT32_T, which in the case that uint32_t is not available
+# directly, #define's uint32_t to be a suitable type.
+
+ac_fn_c_check_type "$LINENO" "uint32_t" "ac_cv_type_uint32_t" "$ac_includes_default"
+if test "x$ac_cv_type_uint32_t" = xyes; then :
+
+$as_echo "#define HAVE_UINT32_T 1" >>confdefs.h
+
+fi
+
ac_fn_c_find_uintX_t "$LINENO" "32" "ac_cv_c_uint32_t"
case $ac_cv_c_uint32_t in #(
no|yes) ;; #(
@@ -6635,6 +7172,14 @@ _ACEOF
;;
esac
+
+ac_fn_c_check_type "$LINENO" "uint64_t" "ac_cv_type_uint64_t" "$ac_includes_default"
+if test "x$ac_cv_type_uint64_t" = xyes; then :
+
+$as_echo "#define HAVE_UINT64_T 1" >>confdefs.h
+
+fi
+
ac_fn_c_find_uintX_t "$LINENO" "64" "ac_cv_c_uint64_t"
case $ac_cv_c_uint64_t in #(
no|yes) ;; #(
@@ -6649,6 +7194,14 @@ _ACEOF
;;
esac
+
+ac_fn_c_check_type "$LINENO" "int32_t" "ac_cv_type_int32_t" "$ac_includes_default"
+if test "x$ac_cv_type_int32_t" = xyes; then :
+
+$as_echo "#define HAVE_INT32_T 1" >>confdefs.h
+
+fi
+
ac_fn_c_find_intX_t "$LINENO" "32" "ac_cv_c_int32_t"
case $ac_cv_c_int32_t in #(
no|yes) ;; #(
@@ -6660,6 +7213,14 @@ _ACEOF
;;
esac
+
+ac_fn_c_check_type "$LINENO" "int64_t" "ac_cv_type_int64_t" "$ac_includes_default"
+if test "x$ac_cv_type_int64_t" = xyes; then :
+
+$as_echo "#define HAVE_INT64_T 1" >>confdefs.h
+
+fi
+
ac_fn_c_find_intX_t "$LINENO" "64" "ac_cv_c_int64_t"
case $ac_cv_c_int64_t in #(
no|yes) ;; #(
@@ -6671,8 +7232,9 @@ _ACEOF
;;
esac
+
ac_fn_c_check_type "$LINENO" "ssize_t" "ac_cv_type_ssize_t" "$ac_includes_default"
-if test "x$ac_cv_type_ssize_t" = x""yes; then :
+if test "x$ac_cv_type_ssize_t" = xyes; then :
$as_echo "#define HAVE_SSIZE_T 1" >>confdefs.h
@@ -6687,7 +7249,7 @@ fi
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of int" >&5
$as_echo_n "checking size of int... " >&6; }
-if test "${ac_cv_sizeof_int+set}" = set; then :
+if ${ac_cv_sizeof_int+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (int))" "ac_cv_sizeof_int" "$ac_includes_default"; then :
@@ -6697,7 +7259,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (int)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_int=0
fi
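# Two mechanical changes recur throughout this regenerated script and are
# worth reading once: cache guards change from `test "${var+set}" = set` to
# `${var+:} false` (both only ask whether the variable is set, even to an
# empty value; the new form expands to the `:` builtin when set and to
# `false` when unset, without running `test` at all), and result tests change
# from `= x""yes` to `= xyes` (the empty-string concatenation was redundant).
# A minimal sketch of the caching idiom, with a hypothetical variable name:
ac_cv_sizeof_example=0                    # pretend a value was cached earlier
if ${ac_cv_sizeof_example+:} false; then :
  echo "using cached value"
else
  echo "computing value"                  # only reached when the var is unset
fi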
@@ -6720,7 +7282,7 @@ _ACEOF
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long" >&5
$as_echo_n "checking size of long... " >&6; }
-if test "${ac_cv_sizeof_long+set}" = set; then :
+if ${ac_cv_sizeof_long+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long))" "ac_cv_sizeof_long" "$ac_includes_default"; then :
@@ -6730,7 +7292,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (long)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_long=0
fi
@@ -6753,7 +7315,7 @@ _ACEOF
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of void *" >&5
$as_echo_n "checking size of void *... " >&6; }
-if test "${ac_cv_sizeof_void_p+set}" = set; then :
+if ${ac_cv_sizeof_void_p+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (void *))" "ac_cv_sizeof_void_p" "$ac_includes_default"; then :
@@ -6763,7 +7325,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (void *)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_void_p=0
fi
@@ -6786,7 +7348,7 @@ _ACEOF
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of short" >&5
$as_echo_n "checking size of short... " >&6; }
-if test "${ac_cv_sizeof_short+set}" = set; then :
+if ${ac_cv_sizeof_short+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (short))" "ac_cv_sizeof_short" "$ac_includes_default"; then :
@@ -6796,7 +7358,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (short)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_short=0
fi
@@ -6819,7 +7381,7 @@ _ACEOF
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of float" >&5
$as_echo_n "checking size of float... " >&6; }
-if test "${ac_cv_sizeof_float+set}" = set; then :
+if ${ac_cv_sizeof_float+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (float))" "ac_cv_sizeof_float" "$ac_includes_default"; then :
@@ -6829,7 +7391,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (float)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_float=0
fi
@@ -6852,7 +7414,7 @@ _ACEOF
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of double" >&5
$as_echo_n "checking size of double... " >&6; }
-if test "${ac_cv_sizeof_double+set}" = set; then :
+if ${ac_cv_sizeof_double+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (double))" "ac_cv_sizeof_double" "$ac_includes_default"; then :
@@ -6862,7 +7424,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (double)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_double=0
fi
@@ -6885,7 +7447,7 @@ _ACEOF
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of fpos_t" >&5
$as_echo_n "checking size of fpos_t... " >&6; }
-if test "${ac_cv_sizeof_fpos_t+set}" = set; then :
+if ${ac_cv_sizeof_fpos_t+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (fpos_t))" "ac_cv_sizeof_fpos_t" "$ac_includes_default"; then :
@@ -6895,7 +7457,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (fpos_t)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_fpos_t=0
fi
@@ -6918,7 +7480,7 @@ _ACEOF
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of size_t" >&5
$as_echo_n "checking size of size_t... " >&6; }
-if test "${ac_cv_sizeof_size_t+set}" = set; then :
+if ${ac_cv_sizeof_size_t+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (size_t))" "ac_cv_sizeof_size_t" "$ac_includes_default"; then :
@@ -6928,7 +7490,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (size_t)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_size_t=0
fi
@@ -6951,7 +7513,7 @@ _ACEOF
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of pid_t" >&5
$as_echo_n "checking size of pid_t... " >&6; }
-if test "${ac_cv_sizeof_pid_t+set}" = set; then :
+if ${ac_cv_sizeof_pid_t+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (pid_t))" "ac_cv_sizeof_pid_t" "$ac_includes_default"; then :
@@ -6961,7 +7523,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (pid_t)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_pid_t=0
fi
@@ -7011,7 +7573,7 @@ if test "$have_long_long" = yes ; then
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long long" >&5
$as_echo_n "checking size of long long... " >&6; }
-if test "${ac_cv_sizeof_long_long+set}" = set; then :
+if ${ac_cv_sizeof_long_long+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long long))" "ac_cv_sizeof_long_long" "$ac_includes_default"; then :
@@ -7021,7 +7583,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (long long)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_long_long=0
fi
@@ -7072,7 +7634,7 @@ if test "$have_long_double" = yes ; then
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long double" >&5
$as_echo_n "checking size of long double... " >&6; }
-if test "${ac_cv_sizeof_long_double+set}" = set; then :
+if ${ac_cv_sizeof_long_double+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long double))" "ac_cv_sizeof_long_double" "$ac_includes_default"; then :
@@ -7082,7 +7644,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (long double)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_long_double=0
fi
@@ -7133,7 +7695,7 @@ if test "$have_c99_bool" = yes ; then
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of _Bool" >&5
$as_echo_n "checking size of _Bool... " >&6; }
-if test "${ac_cv_sizeof__Bool+set}" = set; then :
+if ${ac_cv_sizeof__Bool+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (_Bool))" "ac_cv_sizeof__Bool" "$ac_includes_default"; then :
@@ -7143,7 +7705,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (_Bool)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof__Bool=0
fi
@@ -7169,7 +7731,7 @@ ac_fn_c_check_type "$LINENO" "uintptr_t" "ac_cv_type_uintptr_t" "#ifdef HAVE_STD
#include <inttypes.h>
#endif
"
-if test "x$ac_cv_type_uintptr_t" = x""yes; then :
+if test "x$ac_cv_type_uintptr_t" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_UINTPTR_T 1
@@ -7181,7 +7743,7 @@ _ACEOF
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of uintptr_t" >&5
$as_echo_n "checking size of uintptr_t... " >&6; }
-if test "${ac_cv_sizeof_uintptr_t+set}" = set; then :
+if ${ac_cv_sizeof_uintptr_t+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (uintptr_t))" "ac_cv_sizeof_uintptr_t" "$ac_includes_default"; then :
@@ -7191,7 +7753,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (uintptr_t)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_uintptr_t=0
fi
@@ -7217,7 +7779,7 @@ fi
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of off_t" >&5
$as_echo_n "checking size of off_t... " >&6; }
-if test "${ac_cv_sizeof_off_t+set}" = set; then :
+if ${ac_cv_sizeof_off_t+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (off_t))" "ac_cv_sizeof_off_t" "
@@ -7232,7 +7794,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (off_t)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_off_t=0
fi
@@ -7276,7 +7838,7 @@ fi
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of time_t" >&5
$as_echo_n "checking size of time_t... " >&6; }
-if test "${ac_cv_sizeof_time_t+set}" = set; then :
+if ${ac_cv_sizeof_time_t+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (time_t))" "ac_cv_sizeof_time_t" "
@@ -7294,7 +7856,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (time_t)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_time_t=0
fi
@@ -7350,7 +7912,7 @@ if test "$have_pthread_t" = yes ; then
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of pthread_t" >&5
$as_echo_n "checking size of pthread_t... " >&6; }
-if test "${ac_cv_sizeof_pthread_t+set}" = set; then :
+if ${ac_cv_sizeof_pthread_t+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (pthread_t))" "ac_cv_sizeof_pthread_t" "
@@ -7365,7 +7927,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (pthread_t)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_pthread_t=0
fi
@@ -7430,7 +7992,6 @@ case $ac_sys_system/$ac_sys_release in
esac
-ARCH_RUN_32BIT=""
case $ac_sys_system/$ac_sys_release in
Darwin/[01567]\..*)
@@ -7656,15 +8217,14 @@ then
# Use -undefined dynamic_lookup whenever possible (10.3 and later).
# This allows an extension to be used in any Python
- if test ${MACOSX_DEPLOYMENT_TARGET} '>' 10.2
+ dep_target_major=`echo ${MACOSX_DEPLOYMENT_TARGET} | \
+ sed 's/\([0-9]*\)\.\([0-9]*\).*/\1/'`
+ dep_target_minor=`echo ${MACOSX_DEPLOYMENT_TARGET} | \
+ sed 's/\([0-9]*\)\.\([0-9]*\).*/\2/'`
+ if test ${dep_target_major} -eq 10 && \
+ test ${dep_target_minor} -le 2
then
- if test "${enable_universalsdk}"; then
- LDFLAGS="${UNIVERSAL_ARCH_FLAGS} -isysroot ${UNIVERSALSDK} ${LDFLAGS}"
- fi
- LDSHARED='$(CC) -bundle -undefined dynamic_lookup'
- LDCXXSHARED='$(CXX) -bundle -undefined dynamic_lookup'
- BLDSHARED="$LDSHARED"
- else
+ # building for OS X 10.0 through 10.2
LDSHARED='$(CC) -bundle'
LDCXXSHARED='$(CXX) -bundle'
if test "$enable_framework" ; then
@@ -7678,6 +8238,14 @@ then
LDSHARED="$LDSHARED "'-bundle_loader $(BINDIR)/python$(VERSION)$(EXE)'
LDCXXSHARED="$LDCXXSHARED "'-bundle_loader $(BINDIR)/python$(VERSION)$(EXE)'
fi
+ else
+ # building for OS X 10.3 and later
+ if test "${enable_universalsdk}"; then
+ LDFLAGS="${UNIVERSAL_ARCH_FLAGS} -isysroot ${UNIVERSALSDK} ${LDFLAGS}"
+ fi
+ LDSHARED='$(CC) -bundle -undefined dynamic_lookup'
+ LDCXXSHARED='$(CXX) -bundle -undefined dynamic_lookup'
+ BLDSHARED="$LDSHARED"
fi
;;
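# A quick sketch of why the sed-based major/minor split above matters: the
# old string comparison `test ${MACOSX_DEPLOYMENT_TARGET} '>' 10.2` sorts
# "10.10" before "10.2" lexicographically, so two-digit minor releases would
# be treated as pre-10.3, while the numeric comparison gets them right.
MACOSX_DEPLOYMENT_TARGET=10.10            # hypothetical value for illustration
dep_target_major=`echo ${MACOSX_DEPLOYMENT_TARGET} | sed 's/\([0-9]*\)\.\([0-9]*\).*/\1/'`
dep_target_minor=`echo ${MACOSX_DEPLOYMENT_TARGET} | sed 's/\([0-9]*\)\.\([0-9]*\).*/\2/'`
if test ${dep_target_major} -eq 10 && test ${dep_target_minor} -le 2
then
  echo "building for OS X 10.0-10.2: plain -bundle"
else
  echo "building for OS X 10.3+: -bundle -undefined dynamic_lookup"
fi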
Linux*|GNU*|QNX*)
@@ -7877,7 +8445,7 @@ $as_echo "$SHLIBS" >&6; }
# checks for libraries
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5
$as_echo_n "checking for dlopen in -ldl... " >&6; }
-if test "${ac_cv_lib_dl_dlopen+set}" = set; then :
+if ${ac_cv_lib_dl_dlopen+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -7911,7 +8479,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5
$as_echo "$ac_cv_lib_dl_dlopen" >&6; }
-if test "x$ac_cv_lib_dl_dlopen" = x""yes; then :
+if test "x$ac_cv_lib_dl_dlopen" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_LIBDL 1
_ACEOF
@@ -7922,7 +8490,7 @@ fi
# Dynamic linking for SunOS/Solaris and SYSV
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5
$as_echo_n "checking for shl_load in -ldld... " >&6; }
-if test "${ac_cv_lib_dld_shl_load+set}" = set; then :
+if ${ac_cv_lib_dld_shl_load+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -7956,7 +8524,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5
$as_echo "$ac_cv_lib_dld_shl_load" >&6; }
-if test "x$ac_cv_lib_dld_shl_load" = x""yes; then :
+if test "x$ac_cv_lib_dld_shl_load" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_LIBDLD 1
_ACEOF
@@ -7970,7 +8538,7 @@ fi
if test "$with_threads" = "yes" -o -z "$with_threads"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing sem_init" >&5
$as_echo_n "checking for library containing sem_init... " >&6; }
-if test "${ac_cv_search_sem_init+set}" = set; then :
+if ${ac_cv_search_sem_init+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_func_search_save_LIBS=$LIBS
@@ -8004,11 +8572,11 @@ for ac_lib in '' pthread rt posix4; do
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext
- if test "${ac_cv_search_sem_init+set}" = set; then :
+ if ${ac_cv_search_sem_init+:} false; then :
break
fi
done
-if test "${ac_cv_search_sem_init+set}" = set; then :
+if ${ac_cv_search_sem_init+:} false; then :
else
ac_cv_search_sem_init=no
@@ -8031,7 +8599,7 @@ fi
# check if we need libintl for locale functions
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for textdomain in -lintl" >&5
$as_echo_n "checking for textdomain in -lintl... " >&6; }
-if test "${ac_cv_lib_intl_textdomain+set}" = set; then :
+if ${ac_cv_lib_intl_textdomain+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -8065,7 +8633,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_intl_textdomain" >&5
$as_echo "$ac_cv_lib_intl_textdomain" >&6; }
-if test "x$ac_cv_lib_intl_textdomain" = x""yes; then :
+if test "x$ac_cv_lib_intl_textdomain" = xyes; then :
$as_echo "#define WITH_LIBINTL 1" >>confdefs.h
@@ -8112,7 +8680,7 @@ esac
# BeOS' sockets are stashed in libnet.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for t_open in -lnsl" >&5
$as_echo_n "checking for t_open in -lnsl... " >&6; }
-if test "${ac_cv_lib_nsl_t_open+set}" = set; then :
+if ${ac_cv_lib_nsl_t_open+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -8146,13 +8714,13 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_nsl_t_open" >&5
$as_echo "$ac_cv_lib_nsl_t_open" >&6; }
-if test "x$ac_cv_lib_nsl_t_open" = x""yes; then :
+if test "x$ac_cv_lib_nsl_t_open" = xyes; then :
LIBS="-lnsl $LIBS"
fi
# SVR4
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for socket in -lsocket" >&5
$as_echo_n "checking for socket in -lsocket... " >&6; }
-if test "${ac_cv_lib_socket_socket+set}" = set; then :
+if ${ac_cv_lib_socket_socket+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -8186,7 +8754,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_socket_socket" >&5
$as_echo "$ac_cv_lib_socket_socket" >&6; }
-if test "x$ac_cv_lib_socket_socket" = x""yes; then :
+if test "x$ac_cv_lib_socket_socket" = xyes; then :
LIBS="-lsocket $LIBS"
fi
# SVR4 sockets
@@ -8195,7 +8763,7 @@ case "$ac_sys_system" in
BeOS*)
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for socket in -lnet" >&5
$as_echo_n "checking for socket in -lnet... " >&6; }
-if test "${ac_cv_lib_net_socket+set}" = set; then :
+if ${ac_cv_lib_net_socket+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -8229,7 +8797,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_net_socket" >&5
$as_echo "$ac_cv_lib_net_socket" >&6; }
-if test "x$ac_cv_lib_net_socket" = x""yes; then :
+if test "x$ac_cv_lib_net_socket" = xyes; then :
LIBS="-lnet $LIBS"
fi
# BeOS
@@ -8257,7 +8825,7 @@ if test -n "$ac_tool_prefix"; then
set dummy ${ac_tool_prefix}pkg-config; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_path_PKG_CONFIG+set}" = set; then :
+if ${ac_cv_path_PKG_CONFIG+:} false; then :
$as_echo_n "(cached) " >&6
else
case $PKG_CONFIG in
@@ -8271,7 +8839,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -8300,7 +8868,7 @@ if test -z "$ac_cv_path_PKG_CONFIG"; then
set dummy pkg-config; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_path_ac_pt_PKG_CONFIG+set}" = set; then :
+if ${ac_cv_path_ac_pt_PKG_CONFIG+:} false; then :
$as_echo_n "(cached) " >&6
else
case $ac_pt_PKG_CONFIG in
@@ -8314,7 +8882,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -8388,6 +8956,46 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_system_ffi" >&5
$as_echo "$with_system_ffi" >&6; }
+# Check for --with-tcltk-includes=path and --with-tcltk-libs=path
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for --with-tcltk-includes" >&5
+$as_echo_n "checking for --with-tcltk-includes... " >&6; }
+
+# Check whether --with-tcltk-includes was given.
+if test "${with_tcltk_includes+set}" = set; then :
+ withval=$with_tcltk_includes;
+else
+ with_tcltk_includes="default"
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_tcltk_includes" >&5
+$as_echo "$with_tcltk_includes" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for --with-tcltk-libs" >&5
+$as_echo_n "checking for --with-tcltk-libs... " >&6; }
+
+# Check whether --with-tcltk-libs was given.
+if test "${with_tcltk_libs+set}" = set; then :
+ withval=$with_tcltk_libs;
+else
+ with_tcltk_libs="default"
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_tcltk_libs" >&5
+$as_echo "$with_tcltk_libs" >&6; }
+if test "x$with_tcltk_includes" = xdefault || test "x$with_tcltk_libs" = xdefault
+then
+ if test "x$with_tcltk_includes" != "x$with_tcltk_libs"
+ then
+ as_fn_error $? "use both --with-tcltk-includes='...' and --with-tcltk-libs='...' or neither" "$LINENO" 5
+ fi
+ TCLTK_INCLUDES=""
+ TCLTK_LIBS=""
+else
+ TCLTK_INCLUDES="$with_tcltk_includes"
+ TCLTK_LIBS="$with_tcltk_libs"
+fi
+
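# A short usage sketch for the paired options added above (paths and library
# names are hypothetical): both must be given together, or both omitted.
#
#   ./configure \
#       --with-tcltk-includes='-I/opt/tcltk/include' \
#       --with-tcltk-libs='-L/opt/tcltk/lib -ltcl8.6 -ltk8.6'
#
# Passing only one of them trips the as_fn_error branch above; passing
# neither leaves TCLTK_INCLUDES/TCLTK_LIBS empty so the build's normal Tcl/Tk
# detection is used instead.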
# Check for --with-dbmliborder
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for --with-dbmliborder" >&5
$as_echo_n "checking for --with-dbmliborder... " >&6; }
@@ -8568,7 +9176,7 @@ $as_echo "$unistd_defines_pthreads" >&6; }
$as_echo "#define _REENTRANT 1" >>confdefs.h
ac_fn_c_check_header_mongrel "$LINENO" "cthreads.h" "ac_cv_header_cthreads_h" "$ac_includes_default"
-if test "x$ac_cv_header_cthreads_h" = x""yes; then :
+if test "x$ac_cv_header_cthreads_h" = xyes; then :
$as_echo "#define WITH_THREAD 1" >>confdefs.h
$as_echo "#define C_THREADS 1" >>confdefs.h
@@ -8581,7 +9189,7 @@ $as_echo "#define HURD_C_THREADS 1" >>confdefs.h
else
ac_fn_c_check_header_mongrel "$LINENO" "mach/cthreads.h" "ac_cv_header_mach_cthreads_h" "$ac_includes_default"
-if test "x$ac_cv_header_mach_cthreads_h" = x""yes; then :
+if test "x$ac_cv_header_mach_cthreads_h" = xyes; then :
$as_echo "#define WITH_THREAD 1" >>confdefs.h
$as_echo "#define C_THREADS 1" >>confdefs.h
@@ -8619,6 +9227,8 @@ $as_echo "no" >&6; }
$as_echo_n "checking for pthread_create in -lpthread... " >&6; }
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
+
+#include <stdio.h>
#include <pthread.h>
void * start_routine (void *arg) { exit (0); }
@@ -8643,7 +9253,7 @@ else
LIBS=$_libs
ac_fn_c_check_func "$LINENO" "pthread_detach" "ac_cv_func_pthread_detach"
-if test "x$ac_cv_func_pthread_detach" = x""yes; then :
+if test "x$ac_cv_func_pthread_detach" = xyes; then :
$as_echo "#define WITH_THREAD 1" >>confdefs.h
posix_threads=yes
@@ -8651,7 +9261,7 @@ if test "x$ac_cv_func_pthread_detach" = x""yes; then :
else
ac_fn_c_check_header_mongrel "$LINENO" "atheos/threads.h" "ac_cv_header_atheos_threads_h" "$ac_includes_default"
-if test "x$ac_cv_header_atheos_threads_h" = x""yes; then :
+if test "x$ac_cv_header_atheos_threads_h" = xyes; then :
$as_echo "#define WITH_THREAD 1" >>confdefs.h
@@ -8661,7 +9271,7 @@ $as_echo "#define ATHEOS_THREADS 1" >>confdefs.h
else
ac_fn_c_check_header_mongrel "$LINENO" "kernel/OS.h" "ac_cv_header_kernel_OS_h" "$ac_includes_default"
-if test "x$ac_cv_header_kernel_OS_h" = x""yes; then :
+if test "x$ac_cv_header_kernel_OS_h" = xyes; then :
$as_echo "#define WITH_THREAD 1" >>confdefs.h
@@ -8672,7 +9282,7 @@ else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthreads" >&5
$as_echo_n "checking for pthread_create in -lpthreads... " >&6; }
-if test "${ac_cv_lib_pthreads_pthread_create+set}" = set; then :
+if ${ac_cv_lib_pthreads_pthread_create+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -8706,7 +9316,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthreads_pthread_create" >&5
$as_echo "$ac_cv_lib_pthreads_pthread_create" >&6; }
-if test "x$ac_cv_lib_pthreads_pthread_create" = x""yes; then :
+if test "x$ac_cv_lib_pthreads_pthread_create" = xyes; then :
$as_echo "#define WITH_THREAD 1" >>confdefs.h
posix_threads=yes
@@ -8716,7 +9326,7 @@ else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lc_r" >&5
$as_echo_n "checking for pthread_create in -lc_r... " >&6; }
-if test "${ac_cv_lib_c_r_pthread_create+set}" = set; then :
+if ${ac_cv_lib_c_r_pthread_create+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -8750,7 +9360,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_c_r_pthread_create" >&5
$as_echo "$ac_cv_lib_c_r_pthread_create" >&6; }
-if test "x$ac_cv_lib_c_r_pthread_create" = x""yes; then :
+if test "x$ac_cv_lib_c_r_pthread_create" = xyes; then :
$as_echo "#define WITH_THREAD 1" >>confdefs.h
posix_threads=yes
@@ -8760,7 +9370,7 @@ else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __pthread_create_system in -lpthread" >&5
$as_echo_n "checking for __pthread_create_system in -lpthread... " >&6; }
-if test "${ac_cv_lib_pthread___pthread_create_system+set}" = set; then :
+if ${ac_cv_lib_pthread___pthread_create_system+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -8794,7 +9404,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthread___pthread_create_system" >&5
$as_echo "$ac_cv_lib_pthread___pthread_create_system" >&6; }
-if test "x$ac_cv_lib_pthread___pthread_create_system" = x""yes; then :
+if test "x$ac_cv_lib_pthread___pthread_create_system" = xyes; then :
$as_echo "#define WITH_THREAD 1" >>confdefs.h
posix_threads=yes
@@ -8804,7 +9414,7 @@ else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lcma" >&5
$as_echo_n "checking for pthread_create in -lcma... " >&6; }
-if test "${ac_cv_lib_cma_pthread_create+set}" = set; then :
+if ${ac_cv_lib_cma_pthread_create+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -8838,7 +9448,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_cma_pthread_create" >&5
$as_echo "$ac_cv_lib_cma_pthread_create" >&6; }
-if test "x$ac_cv_lib_cma_pthread_create" = x""yes; then :
+if test "x$ac_cv_lib_cma_pthread_create" = xyes; then :
$as_echo "#define WITH_THREAD 1" >>confdefs.h
posix_threads=yes
@@ -8878,7 +9488,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for usconfig in -lmpc" >&5
$as_echo_n "checking for usconfig in -lmpc... " >&6; }
-if test "${ac_cv_lib_mpc_usconfig+set}" = set; then :
+if ${ac_cv_lib_mpc_usconfig+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -8912,7 +9522,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_mpc_usconfig" >&5
$as_echo "$ac_cv_lib_mpc_usconfig" >&6; }
-if test "x$ac_cv_lib_mpc_usconfig" = x""yes; then :
+if test "x$ac_cv_lib_mpc_usconfig" = xyes; then :
$as_echo "#define WITH_THREAD 1" >>confdefs.h
LIBS="$LIBS -lmpc"
@@ -8924,7 +9534,7 @@ fi
if test "$posix_threads" != "yes"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for thr_create in -lthread" >&5
$as_echo_n "checking for thr_create in -lthread... " >&6; }
-if test "${ac_cv_lib_thread_thr_create+set}" = set; then :
+if ${ac_cv_lib_thread_thr_create+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -8958,7 +9568,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_thread_thr_create" >&5
$as_echo "$ac_cv_lib_thread_thr_create" >&6; }
-if test "x$ac_cv_lib_thread_thr_create" = x""yes; then :
+if test "x$ac_cv_lib_thread_thr_create" = xyes; then :
$as_echo "#define WITH_THREAD 1" >>confdefs.h
LIBS="$LIBS -lthread"
@@ -9003,7 +9613,7 @@ $as_echo "#define HAVE_BROKEN_POSIX_SEMAPHORES 1" >>confdefs.h
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if PTHREAD_SCOPE_SYSTEM is supported" >&5
$as_echo_n "checking if PTHREAD_SCOPE_SYSTEM is supported... " >&6; }
- if test "${ac_cv_pthread_system_supported+set}" = set; then :
+ if ${ac_cv_pthread_system_supported+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "$cross_compiling" = yes; then :
@@ -9011,7 +9621,9 @@ else
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
-#include <pthread.h>
+
+ #include <stdio.h>
+ #include <pthread.h>
void *foo(void *parm) {
return NULL;
}
@@ -9046,7 +9658,7 @@ $as_echo "#define PTHREAD_SYSTEM_SCHED_SUPPORTED 1" >>confdefs.h
for ac_func in pthread_sigmask
do :
ac_fn_c_check_func "$LINENO" "pthread_sigmask" "ac_cv_func_pthread_sigmask"
-if test "x$ac_cv_func_pthread_sigmask" = x""yes; then :
+if test "x$ac_cv_func_pthread_sigmask" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_PTHREAD_SIGMASK 1
_ACEOF
@@ -9060,6 +9672,17 @@ $as_echo "#define HAVE_BROKEN_PTHREAD_SIGMASK 1" >>confdefs.h
fi
done
+ for ac_func in pthread_atfork
+do :
+ ac_fn_c_check_func "$LINENO" "pthread_atfork" "ac_cv_func_pthread_atfork"
+if test "x$ac_cv_func_pthread_atfork" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_PTHREAD_ATFORK 1
+_ACEOF
+
+fi
+done
+
fi
@@ -9084,28 +9707,20 @@ $as_echo "yes" >&6; }
esac
else
- if test "$cross_compiling" = yes; then :
-
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
- ipv6=no
-
-else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
/* AF_INET6 available check */
#include <sys/types.h>
#include <sys/socket.h>
-main()
+int
+main ()
{
- if (socket(AF_INET6, SOCK_STREAM, 0) < 0)
- exit(1);
- else
- exit(0);
+int domain = AF_INET6;
+ ;
+ return 0;
}
-
_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
+if ac_fn_c_try_compile "$LINENO"; then :
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
@@ -9118,10 +9733,7 @@ $as_echo "no" >&6; }
ipv6=no
fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
- conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
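# A condensed sketch of the revised IPv6 probe above: instead of building and
# running a program that calls socket(AF_INET6, ...), which forced ipv6=no
# when cross-compiling, the check now only needs the program to compile,
# which proves the AF_INET6 constant is declared.  Hypothetical file name:
cat > conftest_ipv6.c <<'EOF'
#include <sys/types.h>
#include <sys/socket.h>
int main(void) { int domain = AF_INET6; (void)domain; return 0; }
EOF
if ${CC:-cc} -c conftest_ipv6.c -o /dev/null 2>/dev/null
then ipv6=yes
else ipv6=no
fi
rm -f conftest_ipv6.c
echo "ipv6=$ipv6"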
if test "$ipv6" = "yes"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if RFC2553 API is available" >&5
@@ -9436,7 +10048,7 @@ fi
$as_echo "$with_valgrind" >&6; }
if test "$with_valgrind" != no; then
ac_fn_c_check_header_mongrel "$LINENO" "valgrind/valgrind.h" "ac_cv_header_valgrind_valgrind_h" "$ac_includes_default"
-if test "x$ac_cv_header_valgrind_valgrind_h" = x""yes; then :
+if test "x$ac_cv_header_valgrind_valgrind_h" = xyes; then :
$as_echo "#define WITH_VALGRIND 1" >>confdefs.h
@@ -9480,7 +10092,7 @@ DLINCLDIR=.
for ac_func in dlopen
do :
ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen"
-if test "x$ac_cv_func_dlopen" = x""yes; then :
+if test "x$ac_cv_func_dlopen" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_DLOPEN 1
_ACEOF
@@ -9546,7 +10158,7 @@ for ac_func in alarm setitimer getitimer bind_textdomain_codeset chown \
clock confstr ctermid execv fchmod fchown fork fpathconf ftime ftruncate \
gai_strerror getgroups getlogin getloadavg getpeername getpgid getpid \
getpriority getresuid getresgid getpwent getspnam getspent getsid getwd \
- initgroups kill killpg lchmod lchown lstat mkfifo mknod mktime \
+ initgroups kill killpg lchmod lchown lstat mkfifo mknod mktime mmap \
mremap nice pathconf pause plock poll pthread_init \
putenv readlink realpath \
select sem_open sem_timedwait sem_getvalue sem_unlink setegid seteuid \
@@ -9810,7 +10422,7 @@ rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for flock declaration" >&5
$as_echo_n "checking for flock declaration... " >&6; }
-if test "${ac_cv_flock_decl+set}" = set; then :
+if ${ac_cv_flock_decl+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -9840,7 +10452,7 @@ if test "x${ac_cv_flock_decl}" = xyes; then
for ac_func in flock
do :
ac_fn_c_check_func "$LINENO" "flock" "ac_cv_func_flock"
-if test "x$ac_cv_func_flock" = x""yes; then :
+if test "x$ac_cv_func_flock" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_FLOCK 1
_ACEOF
@@ -9848,7 +10460,7 @@ _ACEOF
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for flock in -lbsd" >&5
$as_echo_n "checking for flock in -lbsd... " >&6; }
-if test "${ac_cv_lib_bsd_flock+set}" = set; then :
+if ${ac_cv_lib_bsd_flock+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -9882,7 +10494,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_bsd_flock" >&5
$as_echo "$ac_cv_lib_bsd_flock" >&6; }
-if test "x$ac_cv_lib_bsd_flock" = x""yes; then :
+if test "x$ac_cv_lib_bsd_flock" = xyes; then :
$as_echo "#define HAVE_FLOCK 1" >>confdefs.h
@@ -9959,7 +10571,7 @@ do
set dummy $ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_TRUE+set}" = set; then :
+if ${ac_cv_prog_TRUE+:} false; then :
$as_echo_n "(cached) " >&6
else
if test -n "$TRUE"; then
@@ -9971,7 +10583,7 @@ do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
- if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_prog_TRUE="$ac_prog"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
@@ -9999,7 +10611,7 @@ test -n "$TRUE" || TRUE="/bin/true"
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for inet_aton in -lc" >&5
$as_echo_n "checking for inet_aton in -lc... " >&6; }
-if test "${ac_cv_lib_c_inet_aton+set}" = set; then :
+if ${ac_cv_lib_c_inet_aton+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -10033,12 +10645,12 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_c_inet_aton" >&5
$as_echo "$ac_cv_lib_c_inet_aton" >&6; }
-if test "x$ac_cv_lib_c_inet_aton" = x""yes; then :
+if test "x$ac_cv_lib_c_inet_aton" = xyes; then :
$ac_cv_prog_TRUE
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for inet_aton in -lresolv" >&5
$as_echo_n "checking for inet_aton in -lresolv... " >&6; }
-if test "${ac_cv_lib_resolv_inet_aton+set}" = set; then :
+if ${ac_cv_lib_resolv_inet_aton+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -10072,7 +10684,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_resolv_inet_aton" >&5
$as_echo "$ac_cv_lib_resolv_inet_aton" >&6; }
-if test "x$ac_cv_lib_resolv_inet_aton" = x""yes; then :
+if test "x$ac_cv_lib_resolv_inet_aton" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_LIBRESOLV 1
_ACEOF
@@ -10089,7 +10701,7 @@ fi
# exit Python
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for chflags" >&5
$as_echo_n "checking for chflags... " >&6; }
-if test "${ac_cv_have_chflags+set}" = set; then :
+if ${ac_cv_have_chflags+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "$cross_compiling" = yes; then :
@@ -10123,7 +10735,7 @@ fi
$as_echo "$ac_cv_have_chflags" >&6; }
if test "$ac_cv_have_chflags" = cross ; then
ac_fn_c_check_func "$LINENO" "chflags" "ac_cv_func_chflags"
-if test "x$ac_cv_func_chflags" = x""yes; then :
+if test "x$ac_cv_func_chflags" = xyes; then :
ac_cv_have_chflags="yes"
else
ac_cv_have_chflags="no"
@@ -10138,7 +10750,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for lchflags" >&5
$as_echo_n "checking for lchflags... " >&6; }
-if test "${ac_cv_have_lchflags+set}" = set; then :
+if ${ac_cv_have_lchflags+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "$cross_compiling" = yes; then :
@@ -10172,7 +10784,7 @@ fi
$as_echo "$ac_cv_have_lchflags" >&6; }
if test "$ac_cv_have_lchflags" = cross ; then
ac_fn_c_check_func "$LINENO" "lchflags" "ac_cv_func_lchflags"
-if test "x$ac_cv_func_lchflags" = x""yes; then :
+if test "x$ac_cv_func_lchflags" = xyes; then :
ac_cv_have_lchflags="yes"
else
ac_cv_have_lchflags="no"
@@ -10196,7 +10808,7 @@ esac
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for inflateCopy in -lz" >&5
$as_echo_n "checking for inflateCopy in -lz... " >&6; }
-if test "${ac_cv_lib_z_inflateCopy+set}" = set; then :
+if ${ac_cv_lib_z_inflateCopy+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -10230,7 +10842,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_z_inflateCopy" >&5
$as_echo "$ac_cv_lib_z_inflateCopy" >&6; }
-if test "x$ac_cv_lib_z_inflateCopy" = x""yes; then :
+if test "x$ac_cv_lib_z_inflateCopy" = xyes; then :
$as_echo "#define HAVE_ZLIB_COPY 1" >>confdefs.h
@@ -10373,7 +10985,7 @@ rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
for ac_func in openpty
do :
ac_fn_c_check_func "$LINENO" "openpty" "ac_cv_func_openpty"
-if test "x$ac_cv_func_openpty" = x""yes; then :
+if test "x$ac_cv_func_openpty" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_OPENPTY 1
_ACEOF
@@ -10381,7 +10993,7 @@ _ACEOF
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for openpty in -lutil" >&5
$as_echo_n "checking for openpty in -lutil... " >&6; }
-if test "${ac_cv_lib_util_openpty+set}" = set; then :
+if ${ac_cv_lib_util_openpty+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -10415,13 +11027,13 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_util_openpty" >&5
$as_echo "$ac_cv_lib_util_openpty" >&6; }
-if test "x$ac_cv_lib_util_openpty" = x""yes; then :
+if test "x$ac_cv_lib_util_openpty" = xyes; then :
$as_echo "#define HAVE_OPENPTY 1" >>confdefs.h
LIBS="$LIBS -lutil"
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for openpty in -lbsd" >&5
$as_echo_n "checking for openpty in -lbsd... " >&6; }
-if test "${ac_cv_lib_bsd_openpty+set}" = set; then :
+if ${ac_cv_lib_bsd_openpty+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -10455,7 +11067,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_bsd_openpty" >&5
$as_echo "$ac_cv_lib_bsd_openpty" >&6; }
-if test "x$ac_cv_lib_bsd_openpty" = x""yes; then :
+if test "x$ac_cv_lib_bsd_openpty" = xyes; then :
$as_echo "#define HAVE_OPENPTY 1" >>confdefs.h
LIBS="$LIBS -lbsd"
fi
@@ -10470,7 +11082,7 @@ done
for ac_func in forkpty
do :
ac_fn_c_check_func "$LINENO" "forkpty" "ac_cv_func_forkpty"
-if test "x$ac_cv_func_forkpty" = x""yes; then :
+if test "x$ac_cv_func_forkpty" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_FORKPTY 1
_ACEOF
@@ -10478,7 +11090,7 @@ _ACEOF
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for forkpty in -lutil" >&5
$as_echo_n "checking for forkpty in -lutil... " >&6; }
-if test "${ac_cv_lib_util_forkpty+set}" = set; then :
+if ${ac_cv_lib_util_forkpty+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -10512,13 +11124,13 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_util_forkpty" >&5
$as_echo "$ac_cv_lib_util_forkpty" >&6; }
-if test "x$ac_cv_lib_util_forkpty" = x""yes; then :
+if test "x$ac_cv_lib_util_forkpty" = xyes; then :
$as_echo "#define HAVE_FORKPTY 1" >>confdefs.h
LIBS="$LIBS -lutil"
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for forkpty in -lbsd" >&5
$as_echo_n "checking for forkpty in -lbsd... " >&6; }
-if test "${ac_cv_lib_bsd_forkpty+set}" = set; then :
+if ${ac_cv_lib_bsd_forkpty+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -10552,7 +11164,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_bsd_forkpty" >&5
$as_echo "$ac_cv_lib_bsd_forkpty" >&6; }
-if test "x$ac_cv_lib_bsd_forkpty" = x""yes; then :
+if test "x$ac_cv_lib_bsd_forkpty" = xyes; then :
$as_echo "#define HAVE_FORKPTY 1" >>confdefs.h
LIBS="$LIBS -lbsd"
fi
@@ -10569,7 +11181,7 @@ done
for ac_func in memmove
do :
ac_fn_c_check_func "$LINENO" "memmove" "ac_cv_func_memmove"
-if test "x$ac_cv_func_memmove" = x""yes; then :
+if test "x$ac_cv_func_memmove" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_MEMMOVE 1
_ACEOF
@@ -10593,7 +11205,7 @@ done
ac_fn_c_check_func "$LINENO" "dup2" "ac_cv_func_dup2"
-if test "x$ac_cv_func_dup2" = x""yes; then :
+if test "x$ac_cv_func_dup2" = xyes; then :
$as_echo "#define HAVE_DUP2 1" >>confdefs.h
else
@@ -10606,7 +11218,7 @@ esac
fi
ac_fn_c_check_func "$LINENO" "getcwd" "ac_cv_func_getcwd"
-if test "x$ac_cv_func_getcwd" = x""yes; then :
+if test "x$ac_cv_func_getcwd" = xyes; then :
$as_echo "#define HAVE_GETCWD 1" >>confdefs.h
else
@@ -10619,7 +11231,7 @@ esac
fi
ac_fn_c_check_func "$LINENO" "strdup" "ac_cv_func_strdup"
-if test "x$ac_cv_func_strdup" = x""yes; then :
+if test "x$ac_cv_func_strdup" = xyes; then :
$as_echo "#define HAVE_STRDUP 1" >>confdefs.h
else
@@ -10635,7 +11247,7 @@ fi
for ac_func in getpgrp
do :
ac_fn_c_check_func "$LINENO" "getpgrp" "ac_cv_func_getpgrp"
-if test "x$ac_cv_func_getpgrp" = x""yes; then :
+if test "x$ac_cv_func_getpgrp" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_GETPGRP 1
_ACEOF
@@ -10663,7 +11275,7 @@ done
for ac_func in setpgrp
do :
ac_fn_c_check_func "$LINENO" "setpgrp" "ac_cv_func_setpgrp"
-if test "x$ac_cv_func_setpgrp" = x""yes; then :
+if test "x$ac_cv_func_setpgrp" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_SETPGRP 1
_ACEOF
@@ -10691,7 +11303,7 @@ done
for ac_func in gettimeofday
do :
ac_fn_c_check_func "$LINENO" "gettimeofday" "ac_cv_func_gettimeofday"
-if test "x$ac_cv_func_gettimeofday" = x""yes; then :
+if test "x$ac_cv_func_gettimeofday" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_GETTIMEOFDAY 1
_ACEOF
@@ -10793,15 +11405,21 @@ if test $have_getaddrinfo = yes
then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking getaddrinfo bug" >&5
$as_echo_n "checking getaddrinfo bug... " >&6; }
- if test "${ac_cv_buggy_getaddrinfo+set}" = set; then :
+ if ${ac_cv_buggy_getaddrinfo+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "$cross_compiling" = yes; then :
+
+if test "${enable_ipv6+set}" = set; then
+ ac_cv_buggy_getaddrinfo="no -- configured with --(en|dis)able-ipv6"
+else
ac_cv_buggy_getaddrinfo=yes
+fi
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
+#include <stdio.h>
#include <sys/types.h>
#include <netdb.h>
#include <string.h>
@@ -10922,7 +11540,7 @@ fi
for ac_func in getnameinfo
do :
ac_fn_c_check_func "$LINENO" "getnameinfo" "ac_cv_func_getnameinfo"
-if test "x$ac_cv_func_getnameinfo" = x""yes; then :
+if test "x$ac_cv_func_getnameinfo" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_GETNAMEINFO 1
_ACEOF
@@ -10934,7 +11552,7 @@ done
# checks for structures
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether time.h and sys/time.h may both be included" >&5
$as_echo_n "checking whether time.h and sys/time.h may both be included... " >&6; }
-if test "${ac_cv_header_time+set}" = set; then :
+if ${ac_cv_header_time+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -10969,7 +11587,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether struct tm is in sys/time.h or time.h" >&5
$as_echo_n "checking whether struct tm is in sys/time.h or time.h... " >&6; }
-if test "${ac_cv_struct_tm+set}" = set; then :
+if ${ac_cv_struct_tm+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -11006,7 +11624,7 @@ ac_fn_c_check_member "$LINENO" "struct tm" "tm_zone" "ac_cv_member_struct_tm_tm_
#include <$ac_cv_struct_tm>
"
-if test "x$ac_cv_member_struct_tm_tm_zone" = x""yes; then :
+if test "x$ac_cv_member_struct_tm_tm_zone" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_STRUCT_TM_TM_ZONE 1
@@ -11022,7 +11640,7 @@ $as_echo "#define HAVE_TM_ZONE 1" >>confdefs.h
else
ac_fn_c_check_decl "$LINENO" "tzname" "ac_cv_have_decl_tzname" "#include <time.h>
"
-if test "x$ac_cv_have_decl_tzname" = x""yes; then :
+if test "x$ac_cv_have_decl_tzname" = xyes; then :
ac_have_decl=1
else
ac_have_decl=0
@@ -11034,7 +11652,7 @@ _ACEOF
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for tzname" >&5
$as_echo_n "checking for tzname... " >&6; }
-if test "${ac_cv_var_tzname+set}" = set; then :
+if ${ac_cv_var_tzname+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -11070,7 +11688,7 @@ $as_echo "#define HAVE_TZNAME 1" >>confdefs.h
fi
ac_fn_c_check_member "$LINENO" "struct stat" "st_rdev" "ac_cv_member_struct_stat_st_rdev" "$ac_includes_default"
-if test "x$ac_cv_member_struct_stat_st_rdev" = x""yes; then :
+if test "x$ac_cv_member_struct_stat_st_rdev" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_STRUCT_STAT_ST_RDEV 1
@@ -11080,7 +11698,7 @@ _ACEOF
fi
ac_fn_c_check_member "$LINENO" "struct stat" "st_blksize" "ac_cv_member_struct_stat_st_blksize" "$ac_includes_default"
-if test "x$ac_cv_member_struct_stat_st_blksize" = x""yes; then :
+if test "x$ac_cv_member_struct_stat_st_blksize" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_STRUCT_STAT_ST_BLKSIZE 1
@@ -11090,7 +11708,7 @@ _ACEOF
fi
ac_fn_c_check_member "$LINENO" "struct stat" "st_flags" "ac_cv_member_struct_stat_st_flags" "$ac_includes_default"
-if test "x$ac_cv_member_struct_stat_st_flags" = x""yes; then :
+if test "x$ac_cv_member_struct_stat_st_flags" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_STRUCT_STAT_ST_FLAGS 1
@@ -11100,7 +11718,7 @@ _ACEOF
fi
ac_fn_c_check_member "$LINENO" "struct stat" "st_gen" "ac_cv_member_struct_stat_st_gen" "$ac_includes_default"
-if test "x$ac_cv_member_struct_stat_st_gen" = x""yes; then :
+if test "x$ac_cv_member_struct_stat_st_gen" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_STRUCT_STAT_ST_GEN 1
@@ -11110,7 +11728,7 @@ _ACEOF
fi
ac_fn_c_check_member "$LINENO" "struct stat" "st_birthtime" "ac_cv_member_struct_stat_st_birthtime" "$ac_includes_default"
-if test "x$ac_cv_member_struct_stat_st_birthtime" = x""yes; then :
+if test "x$ac_cv_member_struct_stat_st_birthtime" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_STRUCT_STAT_ST_BIRTHTIME 1
@@ -11120,7 +11738,7 @@ _ACEOF
fi
ac_fn_c_check_member "$LINENO" "struct stat" "st_blocks" "ac_cv_member_struct_stat_st_blocks" "$ac_includes_default"
-if test "x$ac_cv_member_struct_stat_st_blocks" = x""yes; then :
+if test "x$ac_cv_member_struct_stat_st_blocks" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_STRUCT_STAT_ST_BLOCKS 1
@@ -11142,7 +11760,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for time.h that defines altzone" >&5
$as_echo_n "checking for time.h that defines altzone... " >&6; }
-if test "${ac_cv_header_time_altzone+set}" = set; then :
+if ${ac_cv_header_time_altzone+:} false; then :
$as_echo_n "(cached) " >&6
else
@@ -11206,7 +11824,7 @@ $as_echo "$was_it_defined" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for addrinfo" >&5
$as_echo_n "checking for addrinfo... " >&6; }
-if test "${ac_cv_struct_addrinfo+set}" = set; then :
+if ${ac_cv_struct_addrinfo+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -11238,7 +11856,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sockaddr_storage" >&5
$as_echo_n "checking for sockaddr_storage... " >&6; }
-if test "${ac_cv_struct_sockaddr_storage+set}" = set; then :
+if ${ac_cv_struct_sockaddr_storage+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -11274,7 +11892,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether char is unsigned" >&5
$as_echo_n "checking whether char is unsigned... " >&6; }
-if test "${ac_cv_c_char_unsigned+set}" = set; then :
+if ${ac_cv_c_char_unsigned+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -11284,7 +11902,8 @@ int
main ()
{
static int test_array [1 - 2 * !(((char) -1) < 0)];
-test_array [0] = 0
+test_array [0] = 0;
+return test_array [0];
;
return 0;
@@ -11306,7 +11925,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5
$as_echo_n "checking for an ANSI C-conforming const... " >&6; }
-if test "${ac_cv_c_const+set}" = set; then :
+if ${ac_cv_c_const+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -11315,11 +11934,11 @@ else
int
main ()
{
-/* FIXME: Include the comments suggested by Paul. */
+
#ifndef __cplusplus
- /* Ultrix mips cc rejects this. */
+ /* Ultrix mips cc rejects this sort of thing. */
typedef int charset[2];
- const charset cs;
+ const charset cs = { 0, 0 };
/* SunOS 4.1.1 cc rejects this. */
char const *const *pcpcc;
char **ppc;
@@ -11336,8 +11955,9 @@ main ()
++pcpcc;
ppc = (char**) pcpcc;
pcpcc = (char const *const *) ppc;
- { /* SCO 3.2v4 cc rejects this. */
- char *t;
+ { /* SCO 3.2v4 cc rejects this sort of thing. */
+ char tx;
+ char *t = &tx;
char const *s = 0 ? (char *) 0 : (char const *) 0;
*t++ = 0;
@@ -11353,10 +11973,10 @@ main ()
iptr p = 0;
++p;
}
- { /* AIX XL C 1.02.0.0 rejects this saying
+ { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying
"k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */
- struct s { int j; const int *ap[3]; };
- struct s *b; b->j = 5;
+ struct s { int j; const int *ap[3]; } bx;
+ struct s *b = &bx; b->j = 5;
}
{ /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */
const int foo = 10;
@@ -11594,7 +12214,7 @@ $as_echo "$va_list_is_array" >&6; }
ac_fn_c_check_func "$LINENO" "gethostbyname_r" "ac_cv_func_gethostbyname_r"
-if test "x$ac_cv_func_gethostbyname_r" = x""yes; then :
+if test "x$ac_cv_func_gethostbyname_r" = xyes; then :
$as_echo "#define HAVE_GETHOSTBYNAME_R 1" >>confdefs.h
@@ -11725,7 +12345,7 @@ else
for ac_func in gethostbyname
do :
ac_fn_c_check_func "$LINENO" "gethostbyname" "ac_cv_func_gethostbyname"
-if test "x$ac_cv_func_gethostbyname" = x""yes; then :
+if test "x$ac_cv_func_gethostbyname" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_GETHOSTBYNAME 1
_ACEOF
@@ -11747,12 +12367,12 @@ fi
# Linux requires this for correct f.p. operations
ac_fn_c_check_func "$LINENO" "__fpu_control" "ac_cv_func___fpu_control"
-if test "x$ac_cv_func___fpu_control" = x""yes; then :
+if test "x$ac_cv_func___fpu_control" = xyes; then :
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __fpu_control in -lieee" >&5
$as_echo_n "checking for __fpu_control in -lieee... " >&6; }
-if test "${ac_cv_lib_ieee___fpu_control+set}" = set; then :
+if ${ac_cv_lib_ieee___fpu_control+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -11786,7 +12406,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ieee___fpu_control" >&5
$as_echo "$ac_cv_lib_ieee___fpu_control" >&6; }
-if test "x$ac_cv_lib_ieee___fpu_control" = x""yes; then :
+if test "x$ac_cv_lib_ieee___fpu_control" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_LIBIEEE 1
_ACEOF
@@ -11881,7 +12501,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C doubles are little-endian IEEE 754 binary64" >&5
$as_echo_n "checking whether C doubles are little-endian IEEE 754 binary64... " >&6; }
-if test "${ac_cv_little_endian_double+set}" = set; then :
+if ${ac_cv_little_endian_double+:} false; then :
$as_echo_n "(cached) " >&6
else
@@ -11923,7 +12543,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C doubles are big-endian IEEE 754 binary64" >&5
$as_echo_n "checking whether C doubles are big-endian IEEE 754 binary64... " >&6; }
-if test "${ac_cv_big_endian_double+set}" = set; then :
+if ${ac_cv_big_endian_double+:} false; then :
$as_echo_n "(cached) " >&6
else
@@ -11969,7 +12589,7 @@ fi
# conversions work.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C doubles are ARM mixed-endian IEEE 754 binary64" >&5
$as_echo_n "checking whether C doubles are ARM mixed-endian IEEE 754 binary64... " >&6; }
-if test "${ac_cv_mixed_endian_double+set}" = set; then :
+if ${ac_cv_mixed_endian_double+:} false; then :
$as_echo_n "(cached) " >&6
else
@@ -12117,7 +12737,7 @@ LIBS="$LIBS $LIBM"
# -0. on some architectures.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether tanh preserves the sign of zero" >&5
$as_echo_n "checking whether tanh preserves the sign of zero... " >&6; }
-if test "${ac_cv_tanh_preserves_zero_sign+set}" = set; then :
+if ${ac_cv_tanh_preserves_zero_sign+:} false; then :
$as_echo_n "(cached) " >&6
else
@@ -12185,7 +12805,7 @@ done
ac_fn_c_check_decl "$LINENO" "isinf" "ac_cv_have_decl_isinf" "#include <math.h>
"
-if test "x$ac_cv_have_decl_isinf" = x""yes; then :
+if test "x$ac_cv_have_decl_isinf" = xyes; then :
ac_have_decl=1
else
ac_have_decl=0
@@ -12196,7 +12816,7 @@ cat >>confdefs.h <<_ACEOF
_ACEOF
ac_fn_c_check_decl "$LINENO" "isnan" "ac_cv_have_decl_isnan" "#include <math.h>
"
-if test "x$ac_cv_have_decl_isnan" = x""yes; then :
+if test "x$ac_cv_have_decl_isnan" = xyes; then :
ac_have_decl=1
else
ac_have_decl=0
@@ -12207,7 +12827,7 @@ cat >>confdefs.h <<_ACEOF
_ACEOF
ac_fn_c_check_decl "$LINENO" "isfinite" "ac_cv_have_decl_isfinite" "#include <math.h>
"
-if test "x$ac_cv_have_decl_isfinite" = x""yes; then :
+if test "x$ac_cv_have_decl_isfinite" = xyes; then :
ac_have_decl=1
else
ac_have_decl=0
@@ -12227,7 +12847,7 @@ LIBS=$LIBS_SAVE
# sem_open results in a 'Signal 12' error.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether POSIX semaphores are enabled" >&5
$as_echo_n "checking whether POSIX semaphores are enabled... " >&6; }
-if test "${ac_cv_posix_semaphores_enabled+set}" = set; then :
+if ${ac_cv_posix_semaphores_enabled+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "$cross_compiling" = yes; then :
@@ -12278,7 +12898,7 @@ fi
# Multiprocessing check for broken sem_getvalue
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for broken sem_getvalue" >&5
$as_echo_n "checking for broken sem_getvalue... " >&6; }
-if test "${ac_cv_broken_sem_getvalue+set}" = set; then :
+if ${ac_cv_broken_sem_getvalue+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "$cross_compiling" = yes; then :
@@ -12343,7 +12963,7 @@ no)
15|30)
;;
*)
- as_fn_error $? "bad value $enable_big_digits for --enable-big-digits; value should be 15 or 30" "$LINENO" 5 ;;
+ as_fn_error $? "bad value $enable_big_digits for --enable-big-digits; value should be 15 or 30" "$LINENO" 5 ;;
esac
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_big_digits" >&5
$as_echo "$enable_big_digits" >&6; }
@@ -12361,7 +12981,7 @@ fi
# check for wchar.h
ac_fn_c_check_header_mongrel "$LINENO" "wchar.h" "ac_cv_header_wchar_h" "$ac_includes_default"
-if test "x$ac_cv_header_wchar_h" = x""yes; then :
+if test "x$ac_cv_header_wchar_h" = xyes; then :
$as_echo "#define HAVE_WCHAR_H 1" >>confdefs.h
@@ -12384,7 +13004,7 @@ then
# This bug is HP SR number 8606223364.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of wchar_t" >&5
$as_echo_n "checking size of wchar_t... " >&6; }
-if test "${ac_cv_sizeof_wchar_t+set}" = set; then :
+if ${ac_cv_sizeof_wchar_t+:} false; then :
$as_echo_n "(cached) " >&6
else
if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (wchar_t))" "ac_cv_sizeof_wchar_t" "#include <wchar.h>
@@ -12395,7 +13015,7 @@ else
{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
as_fn_error 77 "cannot compute sizeof (wchar_t)
-See \`config.log' for more details" "$LINENO" 5 ; }
+See \`config.log' for more details" "$LINENO" 5; }
else
ac_cv_sizeof_wchar_t=0
fi
@@ -12450,7 +13070,7 @@ then
# check whether wchar_t is signed or not
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether wchar_t is signed" >&5
$as_echo_n "checking whether wchar_t is signed... " >&6; }
- if test "${ac_cv_wchar_t_signed+set}" = set; then :
+ if ${ac_cv_wchar_t_signed+:} false; then :
$as_echo_n "(cached) " >&6
else
@@ -12514,7 +13134,8 @@ ucs4) unicode_size="4"
$as_echo "#define Py_UNICODE_SIZE 4" >>confdefs.h
;;
-*) as_fn_error $? "invalid value for --enable-unicode. Use either ucs2 or ucs4 (lowercase)." "$LINENO" 5 ;;
+no) ;; # To allow --disable-unicode
+*) as_fn_error $? "invalid value for --enable-unicode. Use either ucs2 or ucs4 (lowercase)." "$LINENO" 5 ;;
esac
@@ -12561,7 +13182,7 @@ fi
# check for endianness
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5
$as_echo_n "checking whether byte ordering is bigendian... " >&6; }
-if test "${ac_cv_c_bigendian+set}" = set; then :
+if ${ac_cv_c_bigendian+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_cv_c_bigendian=unknown
@@ -12780,7 +13401,7 @@ $as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h
;; #(
*)
as_fn_error $? "unknown endianness
- presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;;
+ presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;;
esac
@@ -12788,7 +13409,7 @@ $as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h
# or fills with zeros (like the Cray J90, according to Tim Peters).
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether right shift extends the sign bit" >&5
$as_echo_n "checking whether right shift extends the sign bit... " >&6; }
-if test "${ac_cv_rshift_extends_sign+set}" = set; then :
+if ${ac_cv_rshift_extends_sign+:} false; then :
$as_echo_n "(cached) " >&6
else
@@ -12827,7 +13448,7 @@ fi
# check for getc_unlocked and related locking functions
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for getc_unlocked() and friends" >&5
$as_echo_n "checking for getc_unlocked() and friends... " >&6; }
-if test "${ac_cv_have_getc_unlocked+set}" = set; then :
+if ${ac_cv_have_getc_unlocked+:} false; then :
$as_echo_n "(cached) " >&6
else
@@ -12925,7 +13546,7 @@ fi
# check for readline 2.1
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for rl_callback_handler_install in -lreadline" >&5
$as_echo_n "checking for rl_callback_handler_install in -lreadline... " >&6; }
-if test "${ac_cv_lib_readline_rl_callback_handler_install+set}" = set; then :
+if ${ac_cv_lib_readline_rl_callback_handler_install+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -12959,7 +13580,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_readline_rl_callback_handler_install" >&5
$as_echo "$ac_cv_lib_readline_rl_callback_handler_install" >&6; }
-if test "x$ac_cv_lib_readline_rl_callback_handler_install" = x""yes; then :
+if test "x$ac_cv_lib_readline_rl_callback_handler_install" = xyes; then :
$as_echo "#define HAVE_RL_CALLBACK 1" >>confdefs.h
@@ -13011,7 +13632,7 @@ fi
# check for readline 4.0
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for rl_pre_input_hook in -lreadline" >&5
$as_echo_n "checking for rl_pre_input_hook in -lreadline... " >&6; }
-if test "${ac_cv_lib_readline_rl_pre_input_hook+set}" = set; then :
+if ${ac_cv_lib_readline_rl_pre_input_hook+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -13045,7 +13666,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_readline_rl_pre_input_hook" >&5
$as_echo "$ac_cv_lib_readline_rl_pre_input_hook" >&6; }
-if test "x$ac_cv_lib_readline_rl_pre_input_hook" = x""yes; then :
+if test "x$ac_cv_lib_readline_rl_pre_input_hook" = xyes; then :
$as_echo "#define HAVE_RL_PRE_INPUT_HOOK 1" >>confdefs.h
@@ -13055,7 +13676,7 @@ fi
# also in 4.0
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for rl_completion_display_matches_hook in -lreadline" >&5
$as_echo_n "checking for rl_completion_display_matches_hook in -lreadline... " >&6; }
-if test "${ac_cv_lib_readline_rl_completion_display_matches_hook+set}" = set; then :
+if ${ac_cv_lib_readline_rl_completion_display_matches_hook+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -13089,7 +13710,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_readline_rl_completion_display_matches_hook" >&5
$as_echo "$ac_cv_lib_readline_rl_completion_display_matches_hook" >&6; }
-if test "x$ac_cv_lib_readline_rl_completion_display_matches_hook" = x""yes; then :
+if test "x$ac_cv_lib_readline_rl_completion_display_matches_hook" = xyes; then :
$as_echo "#define HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK 1" >>confdefs.h
@@ -13099,7 +13720,7 @@ fi
# check for readline 4.2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for rl_completion_matches in -lreadline" >&5
$as_echo_n "checking for rl_completion_matches in -lreadline... " >&6; }
-if test "${ac_cv_lib_readline_rl_completion_matches+set}" = set; then :
+if ${ac_cv_lib_readline_rl_completion_matches+:} false; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
@@ -13133,7 +13754,7 @@ LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_readline_rl_completion_matches" >&5
$as_echo "$ac_cv_lib_readline_rl_completion_matches" >&6; }
-if test "x$ac_cv_lib_readline_rl_completion_matches" = x""yes; then :
+if test "x$ac_cv_lib_readline_rl_completion_matches" = xyes; then :
$as_echo "#define HAVE_RL_COMPLETION_MATCHES 1" >>confdefs.h
@@ -13174,7 +13795,7 @@ LIBS=$LIBS_no_readline
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for broken nice()" >&5
$as_echo_n "checking for broken nice()... " >&6; }
-if test "${ac_cv_broken_nice+set}" = set; then :
+if ${ac_cv_broken_nice+:} false; then :
$as_echo_n "(cached) " >&6
else
@@ -13215,7 +13836,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for broken poll()" >&5
$as_echo_n "checking for broken poll()... " >&6; }
-if test "${ac_cv_broken_poll+set}" = set; then :
+if ${ac_cv_broken_poll+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "$cross_compiling" = yes; then :
@@ -13270,7 +13891,7 @@ ac_fn_c_check_member "$LINENO" "struct tm" "tm_zone" "ac_cv_member_struct_tm_tm_
#include <$ac_cv_struct_tm>
"
-if test "x$ac_cv_member_struct_tm_tm_zone" = x""yes; then :
+if test "x$ac_cv_member_struct_tm_tm_zone" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_STRUCT_TM_TM_ZONE 1
@@ -13286,7 +13907,7 @@ $as_echo "#define HAVE_TM_ZONE 1" >>confdefs.h
else
ac_fn_c_check_decl "$LINENO" "tzname" "ac_cv_have_decl_tzname" "#include <time.h>
"
-if test "x$ac_cv_have_decl_tzname" = x""yes; then :
+if test "x$ac_cv_have_decl_tzname" = xyes; then :
ac_have_decl=1
else
ac_have_decl=0
@@ -13298,7 +13919,7 @@ _ACEOF
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for tzname" >&5
$as_echo_n "checking for tzname... " >&6; }
-if test "${ac_cv_var_tzname+set}" = set; then :
+if ${ac_cv_var_tzname+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -13337,7 +13958,7 @@ fi
# check tzset(3) exists and works like we expect it to
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for working tzset()" >&5
$as_echo_n "checking for working tzset()... " >&6; }
-if test "${ac_cv_working_tzset+set}" = set; then :
+if ${ac_cv_working_tzset+:} false; then :
$as_echo_n "(cached) " >&6
else
@@ -13434,7 +14055,7 @@ fi
# Look for subsecond timestamps in struct stat
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for tv_nsec in struct stat" >&5
$as_echo_n "checking for tv_nsec in struct stat... " >&6; }
-if test "${ac_cv_stat_tv_nsec+set}" = set; then :
+if ${ac_cv_stat_tv_nsec+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -13471,7 +14092,7 @@ fi
# Look for BSD style subsecond timestamps in struct stat
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for tv_nsec2 in struct stat" >&5
$as_echo_n "checking for tv_nsec2 in struct stat... " >&6; }
-if test "${ac_cv_stat_tv_nsec2+set}" = set; then :
+if ${ac_cv_stat_tv_nsec2+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -13505,10 +14126,47 @@ $as_echo "#define HAVE_STAT_TV_NSEC2 1" >>confdefs.h
fi
+# first curses configure check
+ac_save_cppflags="$CPPFLAGS"
+CPPFLAGS="$CPPFLAGS -I/usr/include/ncursesw"
+
+for ac_header in curses.h ncurses.h
+do :
+ as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+ cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+# On Solaris, term.h requires curses.h
+for ac_header in term.h
+do :
+ ac_fn_c_check_header_compile "$LINENO" "term.h" "ac_cv_header_term_h" "
+#ifdef HAVE_CURSES_H
+#include <curses.h>
+#endif
+
+"
+if test "x$ac_cv_header_term_h" = xyes; then :
+ cat >>confdefs.h <<_ACEOF
+#define HAVE_TERM_H 1
+_ACEOF
+
+fi
+
+done
+
+
# On HP/UX 11.0, mvwdelch is a block with a return statement
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether mvwdelch is an expression" >&5
$as_echo_n "checking whether mvwdelch is an expression... " >&6; }
-if test "${ac_cv_mvwdelch_is_expression+set}" = set; then :
+if ${ac_cv_mvwdelch_is_expression+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -13545,7 +14203,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether WINDOW has _flags" >&5
$as_echo_n "checking whether WINDOW has _flags... " >&6; }
-if test "${ac_cv_window_has_flags+set}" = set; then :
+if ${ac_cv_window_has_flags+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
@@ -13658,46 +14316,116 @@ $as_echo "no" >&6; }
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+# last curses configure check
+CPPFLAGS=$ac_save_cppflags
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for device files" >&5
+$as_echo "$as_me: checking for device files" >&6;}
+
+if test "x$cross_compiling" = xyes; then
+ if test "${ac_cv_file__dev_ptmx+set}" != set; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for /dev/ptmx" >&5
+$as_echo_n "checking for /dev/ptmx... " >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: not set" >&5
+$as_echo "not set" >&6; }
+ as_fn_error $? "set ac_cv_file__dev_ptmx to yes/no in your CONFIG_SITE file when cross compiling" "$LINENO" 5
+ fi
+ if test "${ac_cv_file__dev_ptc+set}" != set; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for /dev/ptc" >&5
+$as_echo_n "checking for /dev/ptc... " >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: not set" >&5
+$as_echo "not set" >&6; }
+ as_fn_error $? "set ac_cv_file__dev_ptc to yes/no in your CONFIG_SITE file when cross compiling" "$LINENO" 5
+ fi
+fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for /dev/ptmx" >&5
$as_echo_n "checking for /dev/ptmx... " >&6; }
+if ${ac_cv_file__dev_ptmx+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ test "$cross_compiling" = yes &&
+ as_fn_error $? "cannot check for file existence when cross compiling" "$LINENO" 5
+if test -r "/dev/ptmx"; then
+ ac_cv_file__dev_ptmx=yes
+else
+ ac_cv_file__dev_ptmx=no
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_file__dev_ptmx" >&5
+$as_echo "$ac_cv_file__dev_ptmx" >&6; }
+if test "x$ac_cv_file__dev_ptmx" = xyes; then :
-if test -r /dev/ptmx
-then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
+fi
+
+if test "x$ac_cv_file__dev_ptmx" = xyes; then
$as_echo "#define HAVE_DEV_PTMX 1" >>confdefs.h
-else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
fi
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for /dev/ptc" >&5
$as_echo_n "checking for /dev/ptc... " >&6; }
+if ${ac_cv_file__dev_ptc+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ test "$cross_compiling" = yes &&
+ as_fn_error $? "cannot check for file existence when cross compiling" "$LINENO" 5
+if test -r "/dev/ptc"; then
+ ac_cv_file__dev_ptc=yes
+else
+ ac_cv_file__dev_ptc=no
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_file__dev_ptc" >&5
+$as_echo "$ac_cv_file__dev_ptc" >&6; }
+if test "x$ac_cv_file__dev_ptc" = xyes; then :
-if test -r /dev/ptc
-then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
+fi
+
+if test "x$ac_cv_file__dev_ptc" = xyes; then
$as_echo "#define HAVE_DEV_PTC 1" >>confdefs.h
-else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
fi
if test "$have_long_long" = yes
then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for %lld and %llu printf() format support" >&5
$as_echo_n "checking for %lld and %llu printf() format support... " >&6; }
- if test "${ac_cv_have_long_long_format+set}" = set; then :
+ if ${ac_cv_have_long_long_format+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "$cross_compiling" = yes; then :
- ac_cv_have_long_long_format=no
+ ac_cv_have_long_long_format="cross -- assuming no"
+ if test x$GCC = xyes; then
+ save_CFLAGS=$CFLAGS
+ CFLAGS="$CFLAGS -Werror -Wformat"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+ #include <stdio.h>
+ #include <stddef.h>
+
+int
+main ()
+{
+
+ char *buffer;
+ sprintf(buffer, "%lld", (long long)123);
+ sprintf(buffer, "%lld", (long long)-123);
+ sprintf(buffer, "%llu", (unsigned long long)123);
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+ ac_cv_have_long_long_format=yes
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ CFLAGS=$save_CFLAGS
+ fi
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
@@ -13764,7 +14492,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for %zd printf() format support" >&5
$as_echo_n "checking for %zd printf() format support... " >&6; }
-if test "${ac_cv_have_size_t_format+set}" = set; then :
+if ${ac_cv_have_size_t_format+:} false; then :
$as_echo_n "(cached) " >&6
else
if test "$cross_compiling" = yes; then :
@@ -13837,7 +14565,7 @@ ac_fn_c_check_type "$LINENO" "socklen_t" "ac_cv_type_socklen_t" "
#endif
"
-if test "x$ac_cv_type_socklen_t" = x""yes; then :
+if test "x$ac_cv_type_socklen_t" = xyes; then :
else
@@ -13942,10 +14670,21 @@ $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
:end' >>confcache
if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
if test -w "$cache_file"; then
- test "x$cache_file" != "x/dev/null" &&
+ if test "x$cache_file" != "x/dev/null"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
$as_echo "$as_me: updating cache $cache_file" >&6;}
- cat confcache >$cache_file
+ if test ! -f "$cache_file" || test -h "$cache_file"; then
+ cat confcache >"$cache_file"
+ else
+ case $cache_file in #(
+ */* | ?:*)
+ mv -f confcache "$cache_file"$$ &&
+ mv -f "$cache_file"$$ "$cache_file" ;; #(
+ *)
+ mv -f confcache "$cache_file" ;;
+ esac
+ fi
+ fi
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
@@ -13978,7 +14717,7 @@ LTLIBOBJS=$ac_ltlibobjs
-: ${CONFIG_STATUS=./config.status}
+: "${CONFIG_STATUS=./config.status}"
ac_write_fail=0
ac_clean_files_save=$ac_clean_files
ac_clean_files="$ac_clean_files $CONFIG_STATUS"
@@ -14079,6 +14818,7 @@ fi
IFS=" "" $as_nl"
# Find who we are. Look in the path if we contain no directory separator.
+as_myself=
case $0 in #((
*[\\/]* ) as_myself=$0 ;;
*) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
@@ -14274,16 +15014,16 @@ if (echo >conf$$.file) 2>/dev/null; then
# ... but there are two gotchas:
# 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
# 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
- # In both cases, we have to default to `cp -p'.
+ # In both cases, we have to default to `cp -pR'.
ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
elif ln conf$$.file conf$$ 2>/dev/null; then
as_ln_s=ln
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
else
- as_ln_s='cp -p'
+ as_ln_s='cp -pR'
fi
rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
rmdir conf$$.dir 2>/dev/null
@@ -14343,28 +15083,16 @@ else
as_mkdir_p=false
fi
-if test -x / >/dev/null 2>&1; then
- as_test_x='test -x'
-else
- if ls -dL / >/dev/null 2>&1; then
- as_ls_L_option=L
- else
- as_ls_L_option=
- fi
- as_test_x='
- eval sh -c '\''
- if test -d "$1"; then
- test -d "$1/.";
- else
- case $1 in #(
- -*)set "./$1";;
- esac;
- case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
- ???[sx]*):;;*)false;;esac;fi
- '\'' sh
- '
-fi
-as_executable_p=$as_test_x
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+ test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
# Sed expression to map a string onto a valid CPP name.
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
@@ -14386,7 +15114,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# values after options handling.
ac_log="
This file was extended by python $as_me 2.7, which was
-generated by GNU Autoconf 2.67. Invocation command line was
+generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
CONFIG_HEADERS = $CONFIG_HEADERS
@@ -14448,16 +15176,17 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
python config.status 2.7
-configured by $0, generated by GNU Autoconf 2.67,
+configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
-Copyright (C) 2010 Free Software Foundation, Inc.
+Copyright (C) 2012 Free Software Foundation, Inc.
This config.status script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it."
ac_pwd='$ac_pwd'
srcdir='$srcdir'
INSTALL='$INSTALL'
+MKDIR_P='$MKDIR_P'
test -n "\$AWK" || AWK=awk
_ACEOF
@@ -14540,7 +15269,7 @@ fi
_ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
if \$ac_cs_recheck; then
- set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+ set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
shift
\$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
CONFIG_SHELL='$SHELL'
@@ -14580,7 +15309,7 @@ do
"Misc/python.pc") CONFIG_FILES="$CONFIG_FILES Misc/python.pc" ;;
"Modules/ld_so_aix") CONFIG_FILES="$CONFIG_FILES Modules/ld_so_aix" ;;
- *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5 ;;
+ *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
esac
done
@@ -14602,9 +15331,10 @@ fi
# after its creation but before its name has been assigned to `$tmp'.
$debug ||
{
- tmp=
+ tmp= ac_tmp=
trap 'exit_status=$?
- { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
+ : "${ac_tmp:=$tmp}"
+ { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status
' 0
trap 'as_fn_exit 1' 1 2 13 15
}
@@ -14612,12 +15342,13 @@ $debug ||
{
tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
- test -n "$tmp" && test -d "$tmp"
+ test -d "$tmp"
} ||
{
tmp=./conf$$-$RANDOM
(umask 077 && mkdir "$tmp")
} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
+ac_tmp=$tmp
# Set up the scripts for CONFIG_FILES section.
# No need to generate them if there are no CONFIG_FILES.
@@ -14639,7 +15370,7 @@ else
ac_cs_awk_cr=$ac_cr
fi
-echo 'BEGIN {' >"$tmp/subs1.awk" &&
+echo 'BEGIN {' >"$ac_tmp/subs1.awk" &&
_ACEOF
@@ -14667,7 +15398,7 @@ done
rm -f conf$$subs.sh
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-cat >>"\$tmp/subs1.awk" <<\\_ACAWK &&
+cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&
_ACEOF
sed -n '
h
@@ -14715,7 +15446,7 @@ t delim
rm -f conf$$subs.awk
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
_ACAWK
-cat >>"\$tmp/subs1.awk" <<_ACAWK &&
+cat >>"\$ac_tmp/subs1.awk" <<_ACAWK &&
for (key in S) S_is_set[key] = 1
FS = ""
@@ -14747,7 +15478,7 @@ if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
else
cat
-fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \
+fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \
|| as_fn_error $? "could not setup config files machinery" "$LINENO" 5
_ACEOF
@@ -14781,7 +15512,7 @@ fi # test -n "$CONFIG_FILES"
# No need to generate them if there are no CONFIG_HEADERS.
# This happens for instance with `./config.status Makefile'.
if test -n "$CONFIG_HEADERS"; then
-cat >"$tmp/defines.awk" <<\_ACAWK ||
+cat >"$ac_tmp/defines.awk" <<\_ACAWK ||
BEGIN {
_ACEOF
@@ -14793,8 +15524,8 @@ _ACEOF
# handling of long lines.
ac_delim='%!_!# '
for ac_last_try in false false :; do
- ac_t=`sed -n "/$ac_delim/p" confdefs.h`
- if test -z "$ac_t"; then
+ ac_tt=`sed -n "/$ac_delim/p" confdefs.h`
+ if test -z "$ac_tt"; then
break
elif $ac_last_try; then
as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5
@@ -14895,7 +15626,7 @@ do
esac
case $ac_mode$ac_tag in
:[FHL]*:*);;
- :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5 ;;
+ :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;;
:[FH]-) ac_tag=-:-;;
:[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
esac
@@ -14914,7 +15645,7 @@ do
for ac_f
do
case $ac_f in
- -) ac_f="$tmp/stdin";;
+ -) ac_f="$ac_tmp/stdin";;
*) # Look for the file first in the build tree, then in the source tree
# (if the path is not absolute). The absolute path cannot be DOS-style,
# because $ac_f cannot contain `:'.
@@ -14923,7 +15654,7 @@ do
[\\/$]*) false;;
*) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
esac ||
- as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5 ;;
+ as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;;
esac
case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
as_fn_append ac_file_inputs " '$ac_f'"
@@ -14949,8 +15680,8 @@ $as_echo "$as_me: creating $ac_file" >&6;}
esac
case $ac_tag in
- *:-:* | *:-) cat >"$tmp/stdin" \
- || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
+ *:-:* | *:-) cat >"$ac_tmp/stdin" \
+ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
esac
;;
esac
@@ -15024,6 +15755,11 @@ ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
[\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
*) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
esac
+ ac_MKDIR_P=$MKDIR_P
+ case $MKDIR_P in
+ [\\/$]* | ?:[\\/]* ) ;;
+ */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;;
+ esac
_ACEOF
cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
@@ -15078,23 +15814,25 @@ s&@builddir@&$ac_builddir&;t t
s&@abs_builddir@&$ac_abs_builddir&;t t
s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
s&@INSTALL@&$ac_INSTALL&;t t
+s&@MKDIR_P@&$ac_MKDIR_P&;t t
$ac_datarootdir_hack
"
-eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \
- || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \
+ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5
test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
- { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
- { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
+ { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } &&
+ { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \
+ "$ac_tmp/out"`; test -z "$ac_out"; } &&
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
which seems to be undefined. Please make sure it is defined" >&5
$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
which seems to be undefined. Please make sure it is defined" >&2;}
- rm -f "$tmp/stdin"
+ rm -f "$ac_tmp/stdin"
case $ac_file in
- -) cat "$tmp/out" && rm -f "$tmp/out";;
- *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";;
+ -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";;
+ *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";;
esac \
|| as_fn_error $? "could not create $ac_file" "$LINENO" 5
;;
@@ -15105,20 +15843,20 @@ which seems to be undefined. Please make sure it is defined" >&2;}
if test x"$ac_file" != x-; then
{
$as_echo "/* $configure_input */" \
- && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs"
- } >"$tmp/config.h" \
+ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs"
+ } >"$ac_tmp/config.h" \
|| as_fn_error $? "could not create $ac_file" "$LINENO" 5
- if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then
+ if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
$as_echo "$as_me: $ac_file is unchanged" >&6;}
else
rm -f "$ac_file"
- mv "$tmp/config.h" "$ac_file" \
+ mv "$ac_tmp/config.h" "$ac_file" \
|| as_fn_error $? "could not create $ac_file" "$LINENO" 5
fi
else
$as_echo "/* $configure_input */" \
- && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \
+ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \
|| as_fn_error $? "could not create -" "$LINENO" 5
fi
;;
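
Note: throughout the regenerated configure above, Autoconf 2.69 rewrites every cache lookup from the old form "if test "${ac_cv_broken_nice+set}" = set; then :" to the shorter "if ${ac_cv_broken_nice+:} false; then :". When the cache variable is set, even to an empty value, the expansion yields the ":" builtin, so the command succeeds and the "(cached)" branch runs; when it is unset the expansion is empty and the bare "false" falls through to the else branch. A minimal stand-alone sketch of the idiom, where my_cv_example is a hypothetical cache variable and not one from this configure:

my_cv_example=''                 # set, even though the value is empty
if ${my_cv_example+:} false; then :
  echo cached                    # ${var+:} expanded to ':', so ': false' succeeded
else
  echo not-cached                # would run only if my_cv_example were unset
fi
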
diff --git a/configure.in b/configure.ac
index 3f5b3b9..54f8c0f 100644
--- a/configure.in
+++ b/configure.ac
@@ -12,6 +12,33 @@ AC_INIT(python, PYTHON_VERSION, http://bugs.python.org/)
AC_CONFIG_SRCDIR([Include/object.h])
AC_CONFIG_HEADER(pyconfig.h)
+AC_CANONICAL_HOST
+AC_SUBST(build)
+AC_SUBST(host)
+
+if test "$cross_compiling" = yes; then
+ AC_MSG_CHECKING([for python interpreter for cross build])
+ if test -z "$PYTHON_FOR_BUILD"; then
+ for interp in python$PACKAGE_VERSION python2 python; do
+ which $interp >/dev/null 2>&1 || continue
+ if $interp -c 'import sys;sys.exit(not (sys.version_info@<:@:2@:>@ >= (2,7) and sys.version_info@<:@0@:>@ < 3))'; then
+ break
+ fi
+ interp=
+ done
+ if test x$interp = x; then
+ AC_MSG_ERROR([python$PACKAGE_VERSION interpreter not found])
+ fi
+ AC_MSG_RESULT($interp)
+ PYTHON_FOR_BUILD='_PYTHON_PROJECT_BASE=$(abs_builddir) _PYTHON_HOST_PLATFORM=$(_PYTHON_HOST_PLATFORM) PYTHONPATH=$(shell test -f pybuilddir.txt && echo $(abs_builddir)/`cat pybuilddir.txt`:)$(srcdir)/Lib:$(srcdir)/Lib/plat-$(MACHDEP) '$interp
+ fi
+elif test "$cross_compiling" = maybe; then
+ AC_MSG_ERROR([Cross compiling required --host=HOST-TUPLE and --build=ARCH])
+else
+ PYTHON_FOR_BUILD='./$(BUILDPYTHON) -E'
+fi
+AC_SUBST(PYTHON_FOR_BUILD)
+
dnl Ensure that if prefix is specified, it does not end in a slash. If
dnl it does, we get path names containing '//' which is both ugly and
dnl can cause trouble.
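
Note: the hunk above is where cross builds get a native interpreter to drive the build. When cross compiling, configure searches PATH for python$PACKAGE_VERSION, python2, then python, keeps the first whose version is at least 2.7 but below 3, and wires it into PYTHON_FOR_BUILD; the @<:@ and @:>@ sequences are Autoconf quadrigraphs for the [ and ] that m4 would otherwise swallow. A rough plain-shell rendering of that search loop, with 2.7 standing in for $PACKAGE_VERSION (this is a sketch, not a literal excerpt of the generated configure):

for interp in python2.7 python2 python; do
  which $interp >/dev/null 2>&1 || continue
  if $interp -c 'import sys;sys.exit(not (sys.version_info[:2] >= (2,7) and sys.version_info[0] < 3))'; then
    break
  fi
  interp=
done
test -n "$interp" || echo "error: no suitable build-time python found" >&2
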
@@ -118,6 +145,7 @@ fi
AC_SUBST(UNIVERSALSDK)
AC_SUBST(ARCH_RUN_32BIT)
+ARCH_RUN_32BIT=""
UNIVERSAL_ARCHS="32-bit"
AC_SUBST(LIPO_32BIT_FLAGS)
@@ -279,6 +307,25 @@ AC_SUBST(MACHDEP)
AC_MSG_CHECKING(MACHDEP)
if test -z "$MACHDEP"
then
+ # avoid using uname for cross builds
+ if test "$cross_compiling" = yes; then
+ # ac_sys_system and ac_sys_release are only used for setting
+ # `define_xopen_source' in the case statement below. For the
+ # current supported cross builds, this macro is not adjusted.
+ case "$host" in
+ *-*-linux*)
+ ac_sys_system=Linux
+ ;;
+ *-*-cygwin*)
+ ac_sys_system=Cygwin
+ ;;
+ *)
+ # for now, limit cross builds to known configurations
+ MACHDEP="unknown"
+ AC_MSG_ERROR([cross build not supported for $host])
+ esac
+ ac_sys_release=
+ else
ac_sys_system=`uname -s`
if test "$ac_sys_system" = "AIX" \
-o "$ac_sys_system" = "UnixWare" -o "$ac_sys_system" = "OpenUNIX"; then
@@ -286,20 +333,44 @@ then
else
ac_sys_release=`uname -r`
fi
- ac_md_system=`echo $ac_sys_system |
- tr -d '[/ ]' | tr '[[A-Z]]' '[[a-z]]'`
- ac_md_release=`echo $ac_sys_release |
- tr -d '[/ ]' | sed 's/^[[A-Z]]\.//' | sed 's/\..*//'`
- MACHDEP="$ac_md_system$ac_md_release"
+ fi
+ ac_md_system=`echo $ac_sys_system |
+ tr -d '[/ ]' | tr '[[A-Z]]' '[[a-z]]'`
+ ac_md_release=`echo $ac_sys_release |
+ tr -d '[/ ]' | sed 's/^[[A-Z]]\.//' | sed 's/\..*//'`
+ MACHDEP="$ac_md_system$ac_md_release"
- case $MACHDEP in
+ case $MACHDEP in
linux*) MACHDEP="linux2";;
cygwin*) MACHDEP="cygwin";;
darwin*) MACHDEP="darwin";;
atheos*) MACHDEP="atheos";;
irix646) MACHDEP="irix6";;
'') MACHDEP="unknown";;
+ esac
+fi
+
+AC_SUBST(_PYTHON_HOST_PLATFORM)
+if test "$cross_compiling" = yes; then
+ case "$host" in
+ *-*-linux*)
+ case "$host_cpu" in
+ arm*)
+ _host_cpu=arm
+ ;;
+ *)
+ _host_cpu=$host_cpu
+ esac
+ ;;
+ *-*-cygwin*)
+ _host_cpu=
+ ;;
+ *)
+ # for now, limit cross builds to known configurations
+ MACHDEP="unknown"
+ AC_MSG_ERROR([cross build not supported for $host])
esac
+ _PYTHON_HOST_PLATFORM="$MACHDEP${_host_cpu:+-$_host_cpu}"
fi
# Some systems cannot stand _XOPEN_SOURCE being defined at all; they
@@ -446,10 +517,6 @@ AC_SUBST(EXPORT_MACOSX_DEPLOYMENT_TARGET)
CONFIGURE_MACOSX_DEPLOYMENT_TARGET=
EXPORT_MACOSX_DEPLOYMENT_TARGET='#'
-AC_MSG_CHECKING(machine type as reported by uname -m)
-ac_sys_machine=`uname -m`
-AC_MSG_RESULT($ac_sys_machine)
-
# checks for alternative programs
# compiler flags are generated in two sets, BASECFLAGS and OPT. OPT is just
@@ -507,6 +574,39 @@ then
(it is also a good idea to do 'make clean' before compiling)])
fi
+if test "$MACHDEP" = "irix6" && test "$CC" != "gcc"; then
+ # Normally, MIPSpro CC treats #error directives as warnings, which means
+ # a successful exit code is returned (0). This is a problem because IRIX
+ # has a bunch of system headers with this guard at the top:
+ #
+ # #ifndef __c99
+ # #error This header file is to be used only for c99 mode compilations
+ # #else
+ #
+ # When autoconf tests for such a header, like stdint.h, this happens:
+ #
+ # configure:4619: cc -c conftest.c >&5
+ # cc-1035 cc: WARNING File = /usr/include/stdint.h, Line = 5
+ # #error directive: This header file is to be used only for c99 mode
+ # compilations
+ #
+ # #error This header file is to be used only for c99 mode compilations
+ # ^
+ #
+ # configure:4619: $? = 0
+ # configure:4619: result: yes
+ #
+ # Therefore, we use `-diag_error 1035` to have the compiler treat the
+ # warning as an error, which causes cc to return a non-zero result,
+ # which autoconf can interpret correctly.
+ CFLAGS="$CFLAGS -diag_error 1035"
+ # Whilst we're here, we might as well make sure CXX defaults to something
+ # sensible if we're not using gcc.
+ if test -z "$CXX"; then
+ CXX="CC"
+ fi
+fi
+
# If the user set CFLAGS, use this instead of the automatically
# determined setting
preset_cflags="$CFLAGS"
@@ -545,8 +645,8 @@ preset_cxx="$CXX"
if test -z "$CXX"
then
case "$CC" in
- gcc) AC_PATH_PROG(CXX, [g++], [g++], [notfound]) ;;
- cc) AC_PATH_PROG(CXX, [c++], [c++], [notfound]) ;;
+ gcc) AC_PATH_TOOL(CXX, [g++], [g++], [notfound]) ;;
+ cc) AC_PATH_TOOL(CXX, [c++], [c++], [notfound]) ;;
esac
if test "$CXX" = "notfound"
then
@@ -555,7 +655,7 @@ then
fi
if test -z "$CXX"
then
- AC_CHECK_PROGS(CXX, $CCC c++ g++ gcc CC cxx cc++ cl, notfound)
+ AC_CHECK_TOOLS(CXX, $CCC c++ g++ gcc CC cxx cc++ cl, notfound)
if test "$CXX" = "notfound"
then
CXX=""
@@ -570,6 +670,9 @@ then
])
fi
+MULTIARCH=$($CC --print-multiarch 2>/dev/null)
+AC_SUBST(MULTIARCH)
+
# checks for UNIX variants that set C preprocessor variables
AC_USE_SYSTEM_EXTENSIONS
@@ -728,22 +831,23 @@ AC_MSG_RESULT($enable_shared)
AC_MSG_CHECKING(for --enable-profiling)
AC_ARG_ENABLE(profiling,
- AS_HELP_STRING([--enable-profiling], [enable C-level code profiling]),
-[ac_save_cc="$CC"
- CC="$CC -pg"
- AC_RUN_IFELSE([AC_LANG_SOURCE([[int main() { return 0; }]])],
- [ac_enable_profiling="yes"],
- [ac_enable_profiling="no"],
- [ac_enable_profiling="no"])
- CC="$ac_save_cc"])
-AC_MSG_RESULT($ac_enable_profiling)
-
-case "$ac_enable_profiling" in
- "yes")
- BASECFLAGS="-pg $BASECFLAGS"
- LDFLAGS="-pg $LDFLAGS"
- ;;
-esac
+ AS_HELP_STRING([--enable-profiling], [enable C-level code profiling]))
+if test "x$enable_profiling" = xyes; then
+ ac_save_cc="$CC"
+ CC="$CC -pg"
+ AC_LINK_IFELSE([AC_LANG_SOURCE([[int main() { return 0; }]])],
+ [],
+ [enable_profiling=no])
+ CC="$ac_save_cc"
+else
+ enable_profiling=no
+fi
+AC_MSG_RESULT($enable_profiling)
+
+if test "x$enable_profiling" = xyes; then
+ BASECFLAGS="-pg $BASECFLAGS"
+ LDFLAGS="-pg $LDFLAGS"
+fi
AC_MSG_CHECKING(LDLIBRARY)
@@ -756,7 +860,7 @@ AC_MSG_CHECKING(LDLIBRARY)
if test "$enable_framework"
then
LDLIBRARY='$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)'
- RUNSHARED=DYLD_FRAMEWORK_PATH="`pwd`:$DYLD_FRAMEWORK_PATH"
+ RUNSHARED=DYLD_FRAMEWORK_PATH=`pwd`${DYLD_FRAMEWORK_PATH:+:${DYLD_FRAMEWORK_PATH}}
BLDLIBRARY=''
else
BLDLIBRARY='$(LDLIBRARY)'
@@ -776,13 +880,13 @@ if test $enable_shared = "yes"; then
SunOS*)
LDLIBRARY='libpython$(VERSION).so'
BLDLIBRARY='-Wl,-R,$(LIBDIR) -L. -lpython$(VERSION)'
- RUNSHARED=LD_LIBRARY_PATH=`pwd`:${LD_LIBRARY_PATH}
+ RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
INSTSONAME="$LDLIBRARY".$SOVERSION
;;
Linux*|GNU*|NetBSD*|FreeBSD*|DragonFly*|OpenBSD*)
LDLIBRARY='libpython$(VERSION).so'
BLDLIBRARY='-L. -lpython$(VERSION)'
- RUNSHARED=LD_LIBRARY_PATH=`pwd`:${LD_LIBRARY_PATH}
+ RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
case $ac_sys_system in
FreeBSD*)
SOVERSION=`echo $SOVERSION|cut -d "." -f 1`
@@ -800,12 +904,12 @@ if test $enable_shared = "yes"; then
;;
esac
BLDLIBRARY='-Wl,+b,$(LIBDIR) -L. -lpython$(VERSION)'
- RUNSHARED=SHLIB_PATH=`pwd`:${SHLIB_PATH}
+ RUNSHARED=SHLIB_PATH=`pwd`${SHLIB_PATH:+:${SHLIB_PATH}}
;;
OSF*)
LDLIBRARY='libpython$(VERSION).so'
BLDLIBRARY='-rpath $(LIBDIR) -L. -lpython$(VERSION)'
- RUNSHARED=LD_LIBRARY_PATH=`pwd`:${LD_LIBRARY_PATH}
+ RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
;;
atheos*)
LDLIBRARY='libpython$(VERSION).so'
@@ -815,11 +919,11 @@ if test $enable_shared = "yes"; then
Darwin*)
LDLIBRARY='libpython$(VERSION).dylib'
BLDLIBRARY='-L. -lpython$(VERSION)'
- RUNSHARED='DYLD_LIBRARY_PATH=`pwd`:${DYLD_LIBRARY_PATH}'
+ RUNSHARED=DYLD_LIBRARY_PATH=`pwd`${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}}
;;
AIX*)
LDLIBRARY='libpython$(VERSION).so'
- RUNSHARED=LIBPATH=`pwd`:${LIBPATH}
+ RUNSHARED=LIBPATH=`pwd`${LIBPATH:+:${LIBPATH}}
;;
esac
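
Note: the RUNSHARED settings rewritten in the hunks above all switch from unconditionally appending ":$VAR" to the ${VAR:+:${VAR}} form, which adds the colon and the existing value only when the variable is already non-empty. An unset LD_LIBRARY_PATH (or DYLD_LIBRARY_PATH, SHLIB_PATH, LIBPATH) therefore no longer leaves a trailing colon, i.e. an empty path element that loaders tend to treat as the current directory. A small sketch using the hypothetical variables BASE and EXTRA:

BASE=/work/build
EXTRA=
echo "$BASE:$EXTRA"                  # old style: prints /work/build: (stray empty element)
echo "$BASE${EXTRA:+:$EXTRA}"        # new style: prints /work/build
EXTRA=/opt/lib
echo "$BASE${EXTRA:+:$EXTRA}"        # prints /work/build:/opt/lib
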
@@ -832,11 +936,15 @@ else # shared is disabled
esac
fi
+if test "$cross_compiling" = yes; then
+ RUNSHARED=
+fi
+
AC_MSG_RESULT($LDLIBRARY)
AC_PROG_RANLIB
AC_SUBST(AR)
-AC_CHECK_PROGS(AR, ar aal, ar)
+AC_CHECK_TOOLS(AR, ar aal, ar)
# tweak ARFLAGS only if the user didn't set it on the command line
AC_SUBST(ARFLAGS)
@@ -854,6 +962,20 @@ else
SVNVERSION="echo Unversioned directory"
fi
+AC_SUBST(BASECPPFLAGS)
+if test "$abs_srcdir" != "$abs_builddir"; then
+ # If we're building out-of-tree make sure Include (in the current dir)
+ # gets picked up before its $srcdir counterpart in order for Python-ast.h
+ # and graminit.h to get picked up from the correct directory.
+ # (A side effect of this is that these resources will automatically be
+ # regenerated when building out-of-tree, regardless of whether or not
+ # the $srcdir counterpart is up-to-date. This is an acceptable trade
+ # off.)
+ BASECPPFLAGS="-IInclude"
+else
+ BASECPPFLAGS=""
+fi
+
AC_SUBST(HGVERSION)
AC_SUBST(HGTAG)
AC_SUBST(HGBRANCH)
@@ -878,6 +1000,7 @@ bsdos*|hp*|HP*)
fi
esac
AC_PROG_INSTALL
+AC_PROG_MKDIR_P
# Not every filesystem supports hard links
AC_SUBST(LN)
@@ -995,7 +1118,7 @@ yes)
# if using gcc on alpha, use -mieee to get (near) full IEEE 754
# support. Without this, treatment of subnormals doesn't follow
# the standard.
- case $ac_sys_machine in
+ case $host in
alpha*)
BASECFLAGS="$BASECFLAGS -mieee"
;;
@@ -1026,8 +1149,14 @@ yes)
# Calculate the right deployment target for this build.
#
- cur_target=`sw_vers -productVersion | sed 's/\(10\.[[0-9]]*\).*/\1/'`
- if test ${cur_target} '>' 10.2; then
+ cur_target_major=`sw_vers -productVersion | \
+ sed 's/\([[0-9]]*\)\.\([[0-9]]*\).*/\1/'`
+ cur_target_minor=`sw_vers -productVersion | \
+ sed 's/\([[0-9]]*\)\.\([[0-9]]*\).*/\2/'`
+ cur_target="${cur_target_major}.${cur_target_minor}"
+ if test ${cur_target_major} -eq 10 && \
+ test ${cur_target_minor} -ge 3
+ then
cur_target=10.3
if test ${enable_universalsdk}; then
if test "${UNIVERSAL_ARCHS}" = "all"; then
@@ -1166,6 +1295,11 @@ if test $ac_cv_opt_olimit_ok = yes; then
# environment?
Darwin*)
;;
+ # XXX thankfully this useless troublemaker of a flag has been
+ # eradicated in the 3.x line. For now, make sure it isn't picked
+ # up by any of our other platforms that use CC.
+ AIX*|SunOS*|HP-UX*|IRIX*)
+ ;;
*)
BASECFLAGS="$BASECFLAGS -OPT:Olimit=0"
;;
@@ -1183,7 +1317,14 @@ else
CC="$ac_save_cc"])
AC_MSG_RESULT($ac_cv_olimit_ok)
if test $ac_cv_olimit_ok = yes; then
- BASECFLAGS="$BASECFLAGS -Olimit 1500"
+ case $ac_sys_system in
+ # Issue #16534: On HP-UX ac_cv_olimit_ok=yes is a false positive.
+ HP-UX*)
+ ;;
+ *)
+ BASECFLAGS="$BASECFLAGS -Olimit 1500"
+ ;;
+ esac
fi
fi
@@ -1192,7 +1333,7 @@ if test "$GCC" = "yes"
then
AC_MSG_CHECKING(whether gcc supports ParseTuple __format__)
save_CFLAGS=$CFLAGS
- CFLAGS="$CFLAGS -Werror"
+ CFLAGS="$CFLAGS -Werror -Wformat"
AC_COMPILE_IFELSE([
AC_LANG_PROGRAM([[void f(char*,...)__attribute((format(PyArg_ParseTuple, 1, 2)));]], [[]])
],[
@@ -1213,6 +1354,7 @@ fi
AC_MSG_CHECKING(whether pthreads are available without options)
AC_CACHE_VAL(ac_cv_pthread_is_default,
[AC_RUN_IFELSE([AC_LANG_SOURCE([[
+#include <stdio.h>
#include <pthread.h>
void* routine(void* p){return NULL;}
@@ -1247,6 +1389,7 @@ AC_CACHE_VAL(ac_cv_kpthread,
[ac_save_cc="$CC"
CC="$CC -Kpthread"
AC_RUN_IFELSE([AC_LANG_SOURCE([[
+#include <stdio.h>
#include <pthread.h>
void* routine(void* p){return NULL;}
@@ -1275,6 +1418,7 @@ AC_CACHE_VAL(ac_cv_kthread,
[ac_save_cc="$CC"
CC="$CC -Kthread"
AC_RUN_IFELSE([AC_LANG_SOURCE([[
+#include <stdio.h>
#include <pthread.h>
void* routine(void* p){return NULL;}
@@ -1299,10 +1443,11 @@ then
# so we need to run a program to see whether it really made the
# function available.
AC_MSG_CHECKING(whether $CC accepts -pthread)
-AC_CACHE_VAL(ac_cv_thread,
+AC_CACHE_VAL(ac_cv_pthread,
[ac_save_cc="$CC"
CC="$CC -pthread"
AC_RUN_IFELSE([AC_LANG_SOURCE([[
+#include <stdio.h>
#include <pthread.h>
void* routine(void* p){return NULL;}
@@ -1369,9 +1514,9 @@ dnl AC_MSG_RESULT($cpp_type)
# checks for header files
AC_HEADER_STDC
-AC_CHECK_HEADERS(asm/types.h conio.h curses.h direct.h dlfcn.h errno.h \
+AC_CHECK_HEADERS(asm/types.h conio.h direct.h dlfcn.h errno.h \
fcntl.h grp.h \
-ieeefp.h io.h langinfo.h libintl.h ncurses.h poll.h process.h pthread.h \
+ieeefp.h io.h langinfo.h libintl.h poll.h process.h pthread.h \
shadow.h signal.h stdint.h stropts.h termios.h thread.h \
unistd.h utime.h \
sys/audioio.h sys/bsdtty.h sys/epoll.h sys/event.h sys/file.h sys/loadavg.h \
@@ -1380,17 +1525,10 @@ sys/param.h sys/poll.h sys/select.h sys/socket.h sys/statvfs.h sys/stat.h \
sys/termio.h sys/time.h \
sys/times.h sys/types.h sys/un.h sys/utsname.h sys/wait.h pty.h libutil.h \
sys/resource.h netpacket/packet.h sysexits.h bluetooth.h \
-bluetooth/bluetooth.h linux/tipc.h spawn.h util.h)
+bluetooth/bluetooth.h linux/tipc.h spawn.h util.h alloca.h)
AC_HEADER_DIRENT
AC_HEADER_MAJOR
-# On Solaris, term.h requires curses.h
-AC_CHECK_HEADERS(term.h,,,[
-#ifdef HAVE_CURSES_H
-#include <curses.h>
-#endif
-])
-
# On Linux, netlink.h requires asm/types.h
AC_CHECK_HEADERS(linux/netlink.h,,,[
#ifdef HAVE_ASM_TYPES_H
@@ -1486,10 +1624,30 @@ AC_TYPE_PID_T
AC_DEFINE_UNQUOTED([RETSIGTYPE],[void],[assume C89 semantics that RETSIGTYPE is always void])
AC_TYPE_SIZE_T
AC_TYPE_UID_T
+
+# There are two separate checks for each of the exact-width integer types we
+# need. First we check whether the type is available using the usual
+# AC_CHECK_TYPE macro with the default includes (which includes <inttypes.h>
+# and <stdint.h> where available). We then also use the special type checks of
+# the form AC_TYPE_UINT32_T, which in the case that uint32_t is not available
+# directly, #define's uint32_t to be a suitable type.
+
+AC_CHECK_TYPE(uint32_t,
+ AC_DEFINE(HAVE_UINT32_T, 1, [Define if your compiler provides uint32_t.]),,)
AC_TYPE_UINT32_T
+
+AC_CHECK_TYPE(uint64_t,
+ AC_DEFINE(HAVE_UINT64_T, 1, [Define if your compiler provides uint64_t.]),,)
AC_TYPE_UINT64_T
+
+AC_CHECK_TYPE(int32_t,
+ AC_DEFINE(HAVE_INT32_T, 1, [Define if your compiler provides int32_t.]),,)
AC_TYPE_INT32_T
+
+AC_CHECK_TYPE(int64_t,
+ AC_DEFINE(HAVE_INT64_T, 1, [Define if your compiler provides int64_t.]),,)
AC_TYPE_INT64_T
+
AC_CHECK_TYPE(ssize_t,
AC_DEFINE(HAVE_SSIZE_T, 1, [Define if your compiler provides ssize_t]),,)
@@ -1643,7 +1801,6 @@ case $ac_sys_system/$ac_sys_release in
esac
-ARCH_RUN_32BIT=""
AC_SUBST(LIBTOOL_CRUFT)
case $ac_sys_system/$ac_sys_release in
Darwin/@<:@01567@:>@\..*)
@@ -1842,15 +1999,14 @@ then
# Use -undefined dynamic_lookup whenever possible (10.3 and later).
# This allows an extension to be used in any Python
- if test ${MACOSX_DEPLOYMENT_TARGET} '>' 10.2
+ dep_target_major=`echo ${MACOSX_DEPLOYMENT_TARGET} | \
+ sed 's/\([[0-9]]*\)\.\([[0-9]]*\).*/\1/'`
+ dep_target_minor=`echo ${MACOSX_DEPLOYMENT_TARGET} | \
+ sed 's/\([[0-9]]*\)\.\([[0-9]]*\).*/\2/'`
+ if test ${dep_target_major} -eq 10 && \
+ test ${dep_target_minor} -le 2
then
- if test "${enable_universalsdk}"; then
- LDFLAGS="${UNIVERSAL_ARCH_FLAGS} -isysroot ${UNIVERSALSDK} ${LDFLAGS}"
- fi
- LDSHARED='$(CC) -bundle -undefined dynamic_lookup'
- LDCXXSHARED='$(CXX) -bundle -undefined dynamic_lookup'
- BLDSHARED="$LDSHARED"
- else
+ # building for OS X 10.0 through 10.2
LDSHARED='$(CC) -bundle'
LDCXXSHARED='$(CXX) -bundle'
if test "$enable_framework" ; then
@@ -1864,6 +2020,14 @@ then
LDSHARED="$LDSHARED "'-bundle_loader $(BINDIR)/python$(VERSION)$(EXE)'
LDCXXSHARED="$LDCXXSHARED "'-bundle_loader $(BINDIR)/python$(VERSION)$(EXE)'
fi
+ else
+ # building for OS X 10.3 and later
+ if test "${enable_universalsdk}"; then
+ LDFLAGS="${UNIVERSAL_ARCH_FLAGS} -isysroot ${UNIVERSALSDK} ${LDFLAGS}"
+ fi
+ LDSHARED='$(CC) -bundle -undefined dynamic_lookup'
+ LDCXXSHARED='$(CXX) -bundle -undefined dynamic_lookup'
+ BLDSHARED="$LDSHARED"
fi
;;
Linux*|GNU*|QNX*)
@@ -2131,6 +2295,34 @@ AC_SUBST(LIBFFI_INCLUDEDIR)
AC_MSG_RESULT($with_system_ffi)
+# Check for --with-tcltk-includes=path and --with-tcltk-libs=path
+AC_SUBST(TCLTK_INCLUDES)
+AC_SUBST(TCLTK_LIBS)
+AC_MSG_CHECKING(for --with-tcltk-includes)
+AC_ARG_WITH(tcltk-includes,
+ AS_HELP_STRING([--with-tcltk-includes='-I...'], [override search for Tcl and Tk include files]),
+ [],
+ [with_tcltk_includes="default"])
+AC_MSG_RESULT($with_tcltk_includes)
+AC_MSG_CHECKING(for --with-tcltk-libs)
+AC_ARG_WITH(tcltk-libs,
+ AS_HELP_STRING([--with-tcltk-libs='-L...'], [override search for Tcl and Tk libs]),
+ [],
+ [with_tcltk_libs="default"])
+AC_MSG_RESULT($with_tcltk_libs)
+if test "x$with_tcltk_includes" = xdefault || test "x$with_tcltk_libs" = xdefault
+then
+ if test "x$with_tcltk_includes" != "x$with_tcltk_libs"
+ then
+ AC_MSG_ERROR([use both --with-tcltk-includes='...' and --with-tcltk-libs='...' or neither])
+ fi
+ TCLTK_INCLUDES=""
+ TCLTK_LIBS=""
+else
+ TCLTK_INCLUDES="$with_tcltk_includes"
+ TCLTK_LIBS="$with_tcltk_libs"
+fi
+
# Check for --with-dbmliborder
AC_MSG_CHECKING(for --with-dbmliborder)
AC_ARG_WITH(dbmliborder,
@@ -2297,7 +2489,9 @@ yes
_libs=$LIBS
LIBS="$LIBS -lpthread"
AC_MSG_CHECKING([for pthread_create in -lpthread])
- AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <pthread.h>
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[
+#include <stdio.h>
+#include <pthread.h>
void * start_routine (void *arg) { exit (0); }]], [[
pthread_create (NULL, NULL, start_routine, NULL)]])],[
@@ -2380,7 +2574,9 @@ if test "$posix_threads" = "yes"; then
AC_MSG_CHECKING(if PTHREAD_SCOPE_SYSTEM is supported)
AC_CACHE_VAL(ac_cv_pthread_system_supported,
- [AC_RUN_IFELSE([AC_LANG_SOURCE([[#include <pthread.h>
+ [AC_RUN_IFELSE([AC_LANG_SOURCE([[
+ #include <stdio.h>
+ #include <pthread.h>
void *foo(void *parm) {
return NULL;
}
@@ -2407,6 +2603,7 @@ if test "$posix_threads" = "yes"; then
[Define if pthread_sigmask() does not work on your system.])
;;
esac])
+ AC_CHECK_FUNCS(pthread_atfork)
fi
@@ -2429,25 +2626,15 @@ AC_ARG_ENABLE(ipv6,
[
dnl the check does not work on cross compilation case...
- AC_RUN_IFELSE([AC_LANG_SOURCE([[ /* AF_INET6 available check */
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ /* AF_INET6 available check */
#include <sys/types.h>
-#include <sys/socket.h>
-main()
-{
- if (socket(AF_INET6, SOCK_STREAM, 0) < 0)
- exit(1);
- else
- exit(0);
-}
-]])],[
+#include <sys/socket.h>]],
+[[int domain = AF_INET6;]])],[
AC_MSG_RESULT(yes)
ipv6=yes
],[
AC_MSG_RESULT(no)
ipv6=no
-],[
- AC_MSG_RESULT(no)
- ipv6=no
])
if test "$ipv6" = "yes"; then
@@ -2724,7 +2911,7 @@ AC_CHECK_FUNCS(alarm setitimer getitimer bind_textdomain_codeset chown \
clock confstr ctermid execv fchmod fchown fork fpathconf ftime ftruncate \
gai_strerror getgroups getlogin getloadavg getpeername getpgid getpid \
getpriority getresuid getresgid getpwent getspnam getspent getsid getwd \
- initgroups kill killpg lchmod lchown lstat mkfifo mknod mktime \
+ initgroups kill killpg lchmod lchown lstat mkfifo mknod mktime mmap \
mremap nice pathconf pause plock poll pthread_init \
putenv readlink realpath \
select sem_open sem_timedwait sem_getvalue sem_unlink setegid seteuid \
@@ -3046,6 +3233,7 @@ then
AC_MSG_CHECKING(getaddrinfo bug)
AC_CACHE_VAL(ac_cv_buggy_getaddrinfo,
AC_RUN_IFELSE([AC_LANG_SOURCE([[[
+#include <stdio.h>
#include <sys/types.h>
#include <netdb.h>
#include <string.h>
@@ -3134,7 +3322,12 @@ int main()
]]])],
[ac_cv_buggy_getaddrinfo=no],
[ac_cv_buggy_getaddrinfo=yes],
-[ac_cv_buggy_getaddrinfo=yes]))
+[
+if test "${enable_ipv6+set}" = set; then
+ ac_cv_buggy_getaddrinfo="no -- configured with --(en|dis)able-ipv6"
+else
+ ac_cv_buggy_getaddrinfo=yes
+fi]))
fi
AC_MSG_RESULT($ac_cv_buggy_getaddrinfo)
@@ -3776,6 +3969,7 @@ ucs2) unicode_size="2"
ucs4) unicode_size="4"
AC_DEFINE(Py_UNICODE_SIZE,4)
;;
+no) ;; # To allow --disable-unicode
*) AC_MSG_ERROR([invalid value for --enable-unicode. Use either ucs2 or ucs4 (lowercase).]) ;;
esac
@@ -4108,6 +4302,19 @@ then
[Define if you have struct stat.st_mtimensec])
fi
+# first curses configure check
+ac_save_cppflags="$CPPFLAGS"
+CPPFLAGS="$CPPFLAGS -I/usr/include/ncursesw"
+
+AC_CHECK_HEADERS(curses.h ncurses.h)
+
+# On Solaris, term.h requires curses.h
+AC_CHECK_HEADERS(term.h,,,[
+#ifdef HAVE_CURSES_H
+#include <curses.h>
+#endif
+])
+
# On HP/UX 11.0, mvwdelch is a block with a return statement
AC_MSG_CHECKING(whether mvwdelch is an expression)
AC_CACHE_VAL(ac_cv_mvwdelch_is_expression,
@@ -4162,27 +4369,34 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <curses.h>]], [[void *x=resizeterm
AC_MSG_RESULT(yes)],
[AC_MSG_RESULT(no)]
)
+# last curses configure check
+CPPFLAGS=$ac_save_cppflags
-AC_MSG_CHECKING(for /dev/ptmx)
+AC_MSG_NOTICE([checking for device files])
-if test -r /dev/ptmx
-then
- AC_MSG_RESULT(yes)
- AC_DEFINE(HAVE_DEV_PTMX, 1,
- [Define if we have /dev/ptmx.])
-else
- AC_MSG_RESULT(no)
+dnl NOTE: Inform user how to proceed with files when cross compiling.
+if test "x$cross_compiling" = xyes; then
+ if test "${ac_cv_file__dev_ptmx+set}" != set; then
+ AC_MSG_CHECKING([for /dev/ptmx])
+ AC_MSG_RESULT([not set])
+ AC_MSG_ERROR([set ac_cv_file__dev_ptmx to yes/no in your CONFIG_SITE file when cross compiling])
+ fi
+ if test "${ac_cv_file__dev_ptc+set}" != set; then
+ AC_MSG_CHECKING([for /dev/ptc])
+ AC_MSG_RESULT([not set])
+ AC_MSG_ERROR([set ac_cv_file__dev_ptc to yes/no in your CONFIG_SITE file when cross compiling])
+ fi
fi
-AC_MSG_CHECKING(for /dev/ptc)
-
-if test -r /dev/ptc
-then
- AC_MSG_RESULT(yes)
+AC_CHECK_FILE(/dev/ptmx, [], [])
+if test "x$ac_cv_file__dev_ptmx" = xyes; then
+ AC_DEFINE(HAVE_DEV_PTMX, 1,
+ [Define to 1 if you have the /dev/ptmx device file.])
+fi
+AC_CHECK_FILE(/dev/ptc, [], [])
+if test "x$ac_cv_file__dev_ptc" = xyes; then
AC_DEFINE(HAVE_DEV_PTC, 1,
- [Define if we have /dev/ptc.])
-else
- AC_MSG_RESULT(no)
+ [Define to 1 if you have the /dev/ptc device file.])
fi
if test "$have_long_long" = yes
@@ -4222,7 +4436,23 @@ then
]]])],
[ac_cv_have_long_long_format=yes],
[ac_cv_have_long_long_format=no],
- [ac_cv_have_long_long_format=no])
+ [ac_cv_have_long_long_format="cross -- assuming no"
+ if test x$GCC = xyes; then
+ save_CFLAGS=$CFLAGS
+ CFLAGS="$CFLAGS -Werror -Wformat"
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+ #include <stdio.h>
+ #include <stddef.h>
+ ]], [[
+ char *buffer;
+ sprintf(buffer, "%lld", (long long)123);
+ sprintf(buffer, "%lld", (long long)-123);
+ sprintf(buffer, "%llu", (unsigned long long)123);
+ ]])],
+ ac_cv_have_long_long_format=yes
+ )
+ CFLAGS=$save_CFLAGS
+ fi])
)
AC_MSG_RESULT($ac_cv_have_long_long_format)
fi
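
Note: the final hunk above changes what happens to the %lld/%llu printf check when it cannot be run because of cross compiling: the cached answer becomes "cross -- assuming no", and if the compiler is gcc the test is retried as a compile-only probe under -Werror -Wformat, so a rejected format string fails the compile while a clean compile upgrades the answer to yes without executing anything on the build machine. A stand-alone sketch of such a probe, assuming CC points at a gcc-compatible compiler; the file names are illustrative, not the conftest names configure generates itself:

cat > lld_probe.c <<'EOF'
#include <stdio.h>
int main(void)
{
    char buffer[64];
    sprintf(buffer, "%lld", (long long)123);
    sprintf(buffer, "%lld", (long long)-123);
    sprintf(buffer, "%llu", (unsigned long long)123);
    return 0;
}
EOF
if ${CC:-gcc} -Werror -Wformat -c lld_probe.c -o lld_probe.o 2>/dev/null; then
  echo "compiler accepts %lld / %llu"
else
  echo "compiler rejects %lld / %llu"
fi
rm -f lld_probe.c lld_probe.o
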
diff --git a/pyconfig.h.in b/pyconfig.h.in
index b9da3d9..d6d479a 100644
--- a/pyconfig.h.in
+++ b/pyconfig.h.in
@@ -1,4 +1,4 @@
-/* pyconfig.h.in. Generated from configure.in by autoheader. */
+/* pyconfig.h.in. Generated from configure.ac by autoheader. */
#ifndef Py_PYCONFIG_H
@@ -55,6 +55,9 @@
/* Define to 1 if you have the `alarm' function. */
#undef HAVE_ALARM
+/* Define to 1 if you have the <alloca.h> header file. */
+#undef HAVE_ALLOCA_H
+
/* Define this if your time.h defines altzone. */
#undef HAVE_ALTZONE
@@ -161,10 +164,10 @@
/* Define to 1 if you have the device macros. */
#undef HAVE_DEVICE_MACROS
-/* Define if we have /dev/ptc. */
+/* Define to 1 if you have the /dev/ptc device file. */
#undef HAVE_DEV_PTC
-/* Define if we have /dev/ptmx. */
+/* Define to 1 if you have the /dev/ptmx device file. */
#undef HAVE_DEV_PTMX
/* Define to 1 if you have the <direct.h> header file. */
@@ -370,6 +373,12 @@
/* Define to 1 if you have the `initgroups' function. */
#undef HAVE_INITGROUPS
+/* Define if your compiler provides int32_t. */
+#undef HAVE_INT32_T
+
+/* Define if your compiler provides int64_t. */
+#undef HAVE_INT64_T
+
/* Define to 1 if you have the <inttypes.h> header file. */
#undef HAVE_INTTYPES_H
@@ -466,6 +475,9 @@
/* Define to 1 if you have the `mktime' function. */
#undef HAVE_MKTIME
+/* Define to 1 if you have the `mmap' function. */
+#undef HAVE_MMAP
+
/* Define to 1 if you have the `mremap' function. */
#undef HAVE_MREMAP
@@ -511,6 +523,9 @@
/* Define if you have GNU PTH threads. */
#undef HAVE_PTH
+/* Define to 1 if you have the `pthread_atfork' function. */
+#undef HAVE_PTHREAD_ATFORK
+
/* Defined for Solaris 2.6 bug in pthread header. */
#undef HAVE_PTHREAD_DESTRUCTOR
@@ -847,6 +862,12 @@
/* Define this if you have tcl and TCL_UTF_MAX==6 */
#undef HAVE_UCS4_TCL
+/* Define if your compiler provides uint32_t. */
+#undef HAVE_UINT32_T
+
+/* Define if your compiler provides uint64_t. */
+#undef HAVE_UINT64_T
+
/* Define to 1 if the system has the type `uintptr_t'. */
#undef HAVE_UINTPTR_T
diff --git a/setup.py b/setup.py
index 6b47451..a46bf35 100644
--- a/setup.py
+++ b/setup.py
@@ -17,8 +17,20 @@ from distutils.command.install import install
from distutils.command.install_lib import install_lib
from distutils.spawn import find_executable
+cross_compiling = "_PYTHON_HOST_PLATFORM" in os.environ
+
+def get_platform():
+ # cross build
+ if "_PYTHON_HOST_PLATFORM" in os.environ:
+ return os.environ["_PYTHON_HOST_PLATFORM"]
+ # Get value of sys.platform
+ if sys.platform.startswith('osf1'):
+ return 'osf1'
+ return sys.platform
+host_platform = get_platform()
+
# Were we compiled --with-pydebug or with #define Py_DEBUG?
-COMPILED_WITH_PYDEBUG = hasattr(sys, 'gettotalrefcount')
+COMPILED_WITH_PYDEBUG = ('--with-pydebug' in sysconfig.get_config_var("CONFIG_ARGS"))
# This global variable is used to hold the list of modules to be disabled.
disabled_module_list = []
@@ -47,7 +59,9 @@ def is_macosx_sdk_path(path):
"""
Returns True if 'path' can be located in an OSX SDK
"""
- return (path.startswith('/usr/') and not path.startswith('/usr/local')) or path.startswith('/System/')
+ return ( (path.startswith('/usr/') and not path.startswith('/usr/local'))
+ or path.startswith('/System/')
+ or path.startswith('/Library/') )
def find_file(filename, std_dirs, paths):
"""Searches for the directory where a given file is located,
@@ -60,7 +74,7 @@ def find_file(filename, std_dirs, paths):
'paths' is a list of additional locations to check; if the file is
found in one of them, the resulting list will contain the directory.
"""
- if sys.platform == 'darwin':
+ if host_platform == 'darwin':
# Honor the MacOSX SDK setting when one was specified.
# An SDK is a directory with the same structure as a real
# system, but with only header files and libraries.
@@ -70,7 +84,7 @@ def find_file(filename, std_dirs, paths):
for dir in std_dirs:
f = os.path.join(dir, filename)
- if sys.platform == 'darwin' and is_macosx_sdk_path(dir):
+ if host_platform == 'darwin' and is_macosx_sdk_path(dir):
f = os.path.join(sysroot, dir[1:], filename)
if os.path.exists(f): return []
@@ -79,7 +93,7 @@ def find_file(filename, std_dirs, paths):
for dir in paths:
f = os.path.join(dir, filename)
- if sys.platform == 'darwin' and is_macosx_sdk_path(dir):
+ if host_platform == 'darwin' and is_macosx_sdk_path(dir):
f = os.path.join(sysroot, dir[1:], filename)
if os.path.exists(f):
@@ -93,7 +107,7 @@ def find_library_file(compiler, libname, std_dirs, paths):
if result is None:
return None
- if sys.platform == 'darwin':
+ if host_platform == 'darwin':
sysroot = macosx_sdk_root()
# Check whether the found file is in one of the standard directories
@@ -102,7 +116,7 @@ def find_library_file(compiler, libname, std_dirs, paths):
# Ensure path doesn't end with path separator
p = p.rstrip(os.sep)
- if sys.platform == 'darwin' and is_macosx_sdk_path(p):
+ if host_platform == 'darwin' and is_macosx_sdk_path(p):
if os.path.join(sysroot, p[1:]) == dirname:
return [ ]
@@ -115,7 +129,7 @@ def find_library_file(compiler, libname, std_dirs, paths):
# Ensure path doesn't end with path separator
p = p.rstrip(os.sep)
- if sys.platform == 'darwin' and is_macosx_sdk_path(p):
+ if host_platform == 'darwin' and is_macosx_sdk_path(p):
if os.path.join(sysroot, p[1:]) == dirname:
return [ p ]
@@ -172,8 +186,8 @@ class PyBuildExt(build_ext):
# Platform-dependent module source and include directories
incdirlist = []
- platform = self.get_platform()
- if platform == 'darwin' and ("--disable-toolbox-glue" not in
+
+ if host_platform == 'darwin' and ("--disable-toolbox-glue" not in
sysconfig.get_config_var("CONFIG_ARGS")):
# Mac OS X also includes some mac-specific modules
macmoddir = os.path.join(srcdir, 'Mac/Modules')
@@ -186,7 +200,7 @@ class PyBuildExt(build_ext):
# Python header files
headers = [sysconfig.get_config_h_filename()]
- headers += glob(os.path.join(sysconfig.get_path('platinclude'), "*.h"))
+ headers += glob(os.path.join(sysconfig.get_path('include'), "*.h"))
for ext in self.extensions[:]:
ext.sources = [ find_module_file(filename, moddirlist)
for filename in ext.sources ]
@@ -286,7 +300,7 @@ class PyBuildExt(build_ext):
ext.name)
return
- if self.get_platform() == 'darwin' and (
+ if host_platform == 'darwin' and (
sys.maxint > 2**32 and '-arch' in ext.extra_link_args):
# Don't bother doing an import check when an extension was
# build with an explicit '-arch' flag on OSX. That's currently
@@ -300,13 +314,18 @@ class PyBuildExt(build_ext):
# Workaround for Cygwin: Cygwin currently has fork issues when many
# modules have been imported
- if self.get_platform() == 'cygwin':
+ if host_platform == 'cygwin':
self.announce('WARNING: skipping import check for Cygwin-based "%s"'
% ext.name)
return
ext_filename = os.path.join(
self.build_lib,
self.get_ext_filename(self.get_ext_fullname(ext.name)))
+
+ # Don't try to load extensions for cross builds
+ if cross_compiling:
+ return
+
try:
imp.load_dynamic(ext.name, ext_filename)
except ImportError, why:
@@ -338,24 +357,41 @@ class PyBuildExt(build_ext):
level=3)
self.failed.append(ext.name)
- def get_platform(self):
- # Get value of sys.platform
- for platform in ['cygwin', 'beos', 'darwin', 'atheos', 'osf1']:
- if sys.platform.startswith(platform):
- return platform
- return sys.platform
-
def add_multiarch_paths(self):
# Debian/Ubuntu multiarch support.
# https://wiki.ubuntu.com/MultiarchSpec
+ cc = sysconfig.get_config_var('CC')
+ tmpfile = os.path.join(self.build_temp, 'multiarch')
+ if not os.path.exists(self.build_temp):
+ os.makedirs(self.build_temp)
+ ret = os.system(
+ '%s -print-multiarch > %s 2> /dev/null' % (cc, tmpfile))
+ multiarch_path_component = ''
+ try:
+ if ret >> 8 == 0:
+ with open(tmpfile) as fp:
+ multiarch_path_component = fp.readline().strip()
+ finally:
+ os.unlink(tmpfile)
+
+ if multiarch_path_component != '':
+ add_dir_to_list(self.compiler.library_dirs,
+ '/usr/lib/' + multiarch_path_component)
+ add_dir_to_list(self.compiler.include_dirs,
+ '/usr/include/' + multiarch_path_component)
+ return
+
if not find_executable('dpkg-architecture'):
return
+ opt = ''
+ if cross_compiling:
+ opt = '-t' + sysconfig.get_config_var('HOST_GNU_TYPE')
tmpfile = os.path.join(self.build_temp, 'multiarch')
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
ret = os.system(
- 'dpkg-architecture -qDEB_HOST_MULTIARCH > %s 2> /dev/null' %
- tmpfile)
+ 'dpkg-architecture %s -qDEB_HOST_MULTIARCH > %s 2> /dev/null' %
+ (opt, tmpfile))
try:
if ret >> 8 == 0:
with open(tmpfile) as fp:
@@ -367,10 +403,45 @@ class PyBuildExt(build_ext):
finally:
os.unlink(tmpfile)
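
The branch added above asks the compiler itself for the Debian-style multiarch triplet before falling back to dpkg-architecture. A minimal standalone sketch of that first probe (using subprocess instead of the os.system/tempfile dance; compiler name and sample output are illustrative, not taken from this build):

import subprocess

def guess_multiarch(cc='gcc'):
    # Ask a GCC/Clang-compatible driver for its multiarch triplet; compilers
    # that do not understand -print-multiarch simply yield an empty string.
    try:
        proc = subprocess.Popen([cc, '-print-multiarch'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, _ = proc.communicate()
    except OSError:
        return ''
    return out.strip() if proc.returncode == 0 else ''

# On a Debian/Ubuntu x86-64 host this typically yields "x86_64-linux-gnu",
# which the patch then expands to /usr/lib/<triplet> and /usr/include/<triplet>.
print guess_multiarch()
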
+ def add_gcc_paths(self):
+ gcc = sysconfig.get_config_var('CC')
+ tmpfile = os.path.join(self.build_temp, 'gccpaths')
+ if not os.path.exists(self.build_temp):
+ os.makedirs(self.build_temp)
+ ret = os.system('%s -E -v - </dev/null 2>%s 1>/dev/null' % (gcc, tmpfile))
+ is_gcc = False
+ in_incdirs = False
+ inc_dirs = []
+ lib_dirs = []
+ try:
+ if ret >> 8 == 0:
+ with open(tmpfile) as fp:
+ for line in fp.readlines():
+ if line.startswith("gcc version"):
+ is_gcc = True
+ elif line.startswith("#include <...>"):
+ in_incdirs = True
+ elif line.startswith("End of search list"):
+ in_incdirs = False
+ elif is_gcc and line.startswith("LIBRARY_PATH"):
+ for d in line.strip().split("=")[1].split(":"):
+ d = os.path.normpath(d)
+ if '/gcc/' not in d:
+ add_dir_to_list(self.compiler.library_dirs,
+ d)
+ elif is_gcc and in_incdirs and '/gcc/' not in line:
+ add_dir_to_list(self.compiler.include_dirs,
+ line.strip())
+ finally:
+ os.unlink(tmpfile)
+
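
For orientation, add_gcc_paths() keys on a handful of marker lines in the stderr of "cc -E -v -". A condensed version of the same scan, run on a made-up capture (the toolchain paths are hypothetical, not from this build):

# Illustrative stderr the loop above would scan:
sample = """\
gcc version 4.7.2 (Debian 4.7.2-5)
#include <...> search starts here:
 /usr/arm-linux-gnueabihf/include
End of search list.
LIBRARY_PATH=/usr/arm-linux-gnueabihf/lib/:/usr/lib/gcc/arm-linux-gnueabihf/4.7/
"""
inc_found, lib_found, is_gcc, in_incdirs = [], [], False, False
for line in sample.splitlines():
    if line.startswith("gcc version"):
        is_gcc = True
    elif line.startswith("#include <...>"):
        in_incdirs = True
    elif line.startswith("End of search list"):
        in_incdirs = False
    elif is_gcc and line.startswith("LIBRARY_PATH"):
        # keep library dirs outside the compiler's private /gcc/ tree
        lib_found += [d for d in line.split("=", 1)[1].split(":")
                      if '/gcc/' not in d]
    elif is_gcc and in_incdirs and '/gcc/' not in line:
        inc_found.append(line.strip())
print inc_found   # -> ['/usr/arm-linux-gnueabihf/include']
print lib_found   # -> ['/usr/arm-linux-gnueabihf/lib/']
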
def detect_modules(self):
# Ensure that /usr/local is always used
- add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
- add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
+ if not cross_compiling:
+ add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')
+ add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')
+ if cross_compiling:
+ self.add_gcc_paths()
self.add_multiarch_paths()
# Add paths specified in the environment variables LDFLAGS and
@@ -426,32 +497,42 @@ class PyBuildExt(build_ext):
# lib_dirs and inc_dirs are used to search for files;
# if a file is found in one of those directories, it can
# be assumed that no additional -I,-L directives are needed.
- lib_dirs = self.compiler.library_dirs + [
- '/lib64', '/usr/lib64',
- '/lib', '/usr/lib',
- ]
- inc_dirs = self.compiler.include_dirs + ['/usr/include']
+ inc_dirs = self.compiler.include_dirs[:]
+ lib_dirs = self.compiler.library_dirs[:]
+ if not cross_compiling:
+ for d in (
+ '/usr/include',
+ ):
+ add_dir_to_list(inc_dirs, d)
+ for d in (
+ '/lib64', '/usr/lib64',
+ '/lib', '/usr/lib',
+ ):
+ add_dir_to_list(lib_dirs, d)
exts = []
missing = []
config_h = sysconfig.get_config_h_filename()
config_h_vars = sysconfig.parse_config_h(open(config_h))
- platform = self.get_platform()
srcdir = sysconfig.get_config_var('srcdir')
# Check for AtheOS which has libraries in non-standard locations
- if platform == 'atheos':
+ if host_platform == 'atheos':
lib_dirs += ['/system/libs', '/atheos/autolnk/lib']
lib_dirs += os.getenv('LIBRARY_PATH', '').split(os.pathsep)
inc_dirs += ['/system/include', '/atheos/autolnk/include']
inc_dirs += os.getenv('C_INCLUDE_PATH', '').split(os.pathsep)
# OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb)
- if platform in ['osf1', 'unixware7', 'openunix8']:
+ if host_platform in ['osf1', 'unixware7', 'openunix8']:
lib_dirs += ['/usr/ccs/lib']
- if platform == 'darwin':
+ # HP-UX11iv3 keeps files in lib/hpux folders.
+ if host_platform == 'hp-ux11':
+ lib_dirs += ['/usr/lib/hpux64', '/usr/lib/hpux32']
+
+ if host_platform == 'darwin':
# This should work on any unixy platform ;-)
# If the user has bothered specifying additional -I and -L flags
# in OPT and LDFLAGS we might as well use them here.
@@ -470,7 +551,7 @@ class PyBuildExt(build_ext):
# Check for MacOS X, which doesn't need libm.a at all
math_libs = ['m']
- if platform in ['darwin', 'beos']:
+ if host_platform in ['darwin', 'beos']:
math_libs = []
# XXX Omitted modules: gl, pure, dl, SGI-specific modules
@@ -542,7 +623,7 @@ class PyBuildExt(build_ext):
locale_libs = ['intl']
else:
locale_libs = []
- if platform == 'darwin':
+ if host_platform == 'darwin':
locale_extra_link_args = ['-framework', 'CoreFoundation']
else:
locale_extra_link_args = []
@@ -584,7 +665,7 @@ class PyBuildExt(build_ext):
exts.append( Extension('cPickle', ['cPickle.c']) )
# Memory-mapped files (also works on Win32).
- if platform not in ['atheos']:
+ if host_platform not in ['atheos']:
exts.append( Extension('mmap', ['mmapmodule.c']) )
else:
missing.append('mmap')
@@ -649,10 +730,12 @@ class PyBuildExt(build_ext):
elif self.compiler.find_library_file(lib_dirs, 'curses'):
curses_library = 'curses'
- if platform == 'darwin':
+ if host_platform == 'darwin':
os_release = int(os.uname()[2].split('.')[0])
dep_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
- if dep_target and dep_target.split('.') < ['10', '5']:
+ if (dep_target and
+ (tuple(int(n) for n in dep_target.split('.')[0:2])
+ < (10, 5) ) ):
os_release = 8
if os_release < 9:
# MacOSX 10.4 has a broken readline. Don't try to build
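
The rewritten test compares numeric components rather than strings; the difference only shows up once the minor version reaches two digits (value below is a hypothetical MACOSX_DEPLOYMENT_TARGET):

# With string components, "10" sorts before "5", so OS X 10.10 would
# wrongly be treated as older than 10.5.
dep_target = '10.10'
print dep_target.split('.') < ['10', '5']                           # True (wrong)
print tuple(int(n) for n in dep_target.split('.')[0:2]) < (10, 5)   # False (right)
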
@@ -661,7 +744,7 @@ class PyBuildExt(build_ext):
if find_file('readline/rlconf.h', inc_dirs, []) is None:
do_readline = False
if do_readline:
- if platform == 'darwin' and os_release < 9:
+ if host_platform == 'darwin' and os_release < 9:
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
@@ -699,8 +782,9 @@ class PyBuildExt(build_ext):
exts.append( Extension('_csv', ['_csv.c']) )
# socket(2)
- exts.append( Extension('_socket', ['socketmodule.c'],
- depends = ['socketmodule.h']) )
+ exts.append( Extension('_socket', ['socketmodule.c', 'timemodule.c'],
+ depends=['socketmodule.h'],
+ libraries=math_libs) )
# Detect SSL support for the socket module (via _ssl)
search_for_ssl_incs_in = [
'/usr/local/ssl/include',
@@ -739,7 +823,7 @@ class PyBuildExt(build_ext):
inc_dirs + search_for_ssl_incs_in)
if opensslv_h:
name = os.path.join(opensslv_h[0], 'openssl/opensslv.h')
- if sys.platform == 'darwin' and is_macosx_sdk_path(name):
+ if host_platform == 'darwin' and is_macosx_sdk_path(name):
name = os.path.join(macosx_sdk_root(), name[1:])
try:
incfile = open(name, 'r')
@@ -799,8 +883,8 @@ class PyBuildExt(build_ext):
# a release. Most open source OSes come with one or more
# versions of BerkeleyDB already installed.
- max_db_ver = (4, 8)
- min_db_ver = (4, 1)
+ max_db_ver = (5, 3)
+ min_db_ver = (4, 3)
db_setup_debug = False # verbose debug prints from this script?
def allow_db_ver(db_ver):
@@ -821,8 +905,12 @@ class PyBuildExt(build_ext):
return True
def gen_db_minor_ver_nums(major):
- if major == 4:
+ if major == 5:
for x in range(max_db_ver[1]+1):
+ if allow_db_ver((5, x)):
+ yield x
+ elif major == 4:
+ for x in range(9):
if allow_db_ver((4, x)):
yield x
elif major == 3:
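
The widened bounds change what the generator can yield; a sketch of the raw ranges only (allow_db_ver may still veto individual releases beyond this simple range check):

max_db_ver = (5, 3)
min_db_ver = (4, 3)
print [x for x in range(max_db_ver[1] + 1) if min_db_ver <= (5, x) <= max_db_ver]
# -> [0, 1, 2, 3]
print [x for x in range(9) if min_db_ver <= (4, x) <= max_db_ver]
# -> [3, 4, 5, 6, 7, 8]
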
@@ -863,6 +951,9 @@ class PyBuildExt(build_ext):
db_inc_paths.append('/pkg/db-3.%d/include' % x)
db_inc_paths.append('/opt/db-3.%d/include' % x)
+ if cross_compiling:
+ db_inc_paths = []
+
# Add some common subdirectories for Sleepycat DB to the list,
# based on the standard include directories. This way DB3/4 gets
# picked up when it is installed in a non-standard prefix and
@@ -883,7 +974,7 @@ class PyBuildExt(build_ext):
db_ver_inc_map = {}
- if sys.platform == 'darwin':
+ if host_platform == 'darwin':
sysroot = macosx_sdk_root()
class db_found(Exception): pass
@@ -893,7 +984,7 @@ class PyBuildExt(build_ext):
for d in inc_dirs + db_inc_paths:
f = os.path.join(d, "db.h")
- if sys.platform == 'darwin' and is_macosx_sdk_path(d):
+ if host_platform == 'darwin' and is_macosx_sdk_path(d):
f = os.path.join(sysroot, d[1:], "db.h")
if db_setup_debug: print "db: looking for db.h in", f
@@ -943,7 +1034,7 @@ class PyBuildExt(build_ext):
db_incdir.replace("include", 'lib'),
]
- if sys.platform != 'darwin':
+ if host_platform != 'darwin':
db_dirs_to_check = filter(os.path.isdir, db_dirs_to_check)
else:
@@ -1011,6 +1102,8 @@ class PyBuildExt(build_ext):
'/usr/local/include/sqlite',
'/usr/local/include/sqlite3',
]
+ if cross_compiling:
+ sqlite_inc_paths = []
MIN_SQLITE_VERSION_NUMBER = (3, 0, 8)
MIN_SQLITE_VERSION = ".".join([str(x)
for x in MIN_SQLITE_VERSION_NUMBER])
@@ -1018,20 +1111,20 @@ class PyBuildExt(build_ext):
# Scan the default include directories before the SQLite specific
# ones. This allows one to override the copy of sqlite on OSX,
# where /usr/include contains an old version of sqlite.
- if sys.platform == 'darwin':
+ if host_platform == 'darwin':
sysroot = macosx_sdk_root()
- for d in inc_dirs + sqlite_inc_paths:
- f = os.path.join(d, "sqlite3.h")
-
- if sys.platform == 'darwin' and is_macosx_sdk_path(d):
- f = os.path.join(sysroot, d[1:], "sqlite3.h")
+ for d_ in inc_dirs + sqlite_inc_paths:
+ d = d_
+ if host_platform == 'darwin' and is_macosx_sdk_path(d):
+ d = os.path.join(sysroot, d[1:])
+ f = os.path.join(d, "sqlite3.h")
if os.path.exists(f):
if sqlite_setup_debug: print "sqlite: found %s"%f
incf = open(f).read()
m = re.search(
- r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"(.*)"', incf)
+ r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"([\d\.]*)"', incf)
if m:
sqlite_version = m.group(1)
sqlite_version_tuple = tuple([int(x)
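
The tightened capture group only ever hands digits and dots to the int() conversion below; a quick check against an illustrative header line (real sqlite3.h contents vary by vendor and version):

import re
line = '#define SQLITE_VERSION        "3.7.13"'
m = re.search(r'\s*.*#\s*.*define\s.*SQLITE_VERSION\W*"([\d\.]*)"', line)
print m.group(1)                                       # -> 3.7.13
print tuple([int(x) for x in m.group(1).split(".")])   # -> (3, 7, 13)
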
@@ -1073,7 +1166,7 @@ class PyBuildExt(build_ext):
'_sqlite/util.c', ]
sqlite_defines = []
- if sys.platform != "win32":
+ if host_platform != "win32":
sqlite_defines.append(('MODULE_NAME', '"sqlite3"'))
else:
sqlite_defines.append(('MODULE_NAME', '\\"sqlite3\\"'))
@@ -1081,7 +1174,7 @@ class PyBuildExt(build_ext):
# Comment this out if you want the sqlite3 module to be able to load extensions.
sqlite_defines.append(("SQLITE_OMIT_LOAD_EXTENSION", "1"))
- if sys.platform == 'darwin':
+ if host_platform == 'darwin':
# In every directory on the search path search for a dynamic
# library and then a static library, instead of first looking
# for dynamic libraries on the entire path.
@@ -1096,7 +1189,6 @@ class PyBuildExt(build_ext):
include_dirs=["Modules/_sqlite",
sqlite_incdir],
library_dirs=sqlite_libdir,
- runtime_library_dirs=sqlite_libdir,
extra_link_args=sqlite_extra_link_args,
libraries=["sqlite3",]))
else:
@@ -1115,7 +1207,7 @@ class PyBuildExt(build_ext):
# when attempting to compile and it will fail.
f = "/usr/include/db.h"
- if sys.platform == 'darwin':
+ if host_platform == 'darwin':
if is_macosx_sdk_path(f):
sysroot = macosx_sdk_root()
f = os.path.join(sysroot, f[1:])
@@ -1128,7 +1220,7 @@ class PyBuildExt(build_ext):
### XXX this should be fixed to not be platform-dependent
### but I don't have direct access to an osf1 platform and
### seemed to be muffing the search somehow
- libraries = platform == "osf1" and ['db'] or None
+ libraries = host_platform == "osf1" and ['db'] or None
if libraries is not None:
exts.append(Extension('bsddb185', ['bsddbmodule.c'],
libraries=libraries))
@@ -1141,7 +1233,7 @@ class PyBuildExt(build_ext):
dbm_order = ['gdbm']
# The standard Unix dbm module:
- if platform not in ['cygwin']:
+ if host_platform not in ['cygwin']:
config_args = [arg.strip("'")
for arg in sysconfig.get_config_var("CONFIG_ARGS").split()]
dbm_args = [arg for arg in config_args
@@ -1154,10 +1246,14 @@ class PyBuildExt(build_ext):
for cand in dbm_order:
if cand == "ndbm":
if find_file("ndbm.h", inc_dirs, []) is not None:
- # Some systems have -lndbm, others don't
+ # Some systems have -lndbm, others have -lgdbm_compat,
+ # others don't have either
if self.compiler.find_library_file(lib_dirs,
'ndbm'):
ndbm_libs = ['ndbm']
+ elif self.compiler.find_library_file(lib_dirs,
+ 'gdbm_compat'):
+ ndbm_libs = ['gdbm_compat']
else:
ndbm_libs = []
print "building dbm using ndbm"
@@ -1219,17 +1315,17 @@ class PyBuildExt(build_ext):
missing.append('gdbm')
# Unix-only modules
- if platform not in ['win32']:
+ if host_platform not in ['win32']:
# Steen Lumholt's termios module
exts.append( Extension('termios', ['termios.c']) )
# Jeremy Hylton's rlimit interface
- if platform not in ['atheos']:
+ if host_platform not in ['atheos']:
exts.append( Extension('resource', ['resource.c']) )
else:
missing.append('resource')
# Sun yellow pages. Some systems have the functions in libc.
- if (platform not in ['cygwin', 'atheos', 'qnx6'] and
+ if (host_platform not in ['cygwin', 'atheos', 'qnx6'] and
find_file('rpcsvc/yp_prot.h', inc_dirs, []) is not None):
if (self.compiler.find_library_file(lib_dirs, 'nsl')):
libs = ['nsl']
@@ -1245,15 +1341,19 @@ class PyBuildExt(build_ext):
# Curses support, requiring the System V version of curses, often
# provided by the ncurses library.
panel_library = 'panel'
+ curses_incs = None
if curses_library.startswith('ncurses'):
if curses_library == 'ncursesw':
# Bug 1464056: If _curses.so links with ncursesw,
# _curses_panel.so must link with panelw.
panel_library = 'panelw'
curses_libs = [curses_library]
+ curses_incs = find_file('curses.h', inc_dirs,
+ [os.path.join(d, 'ncursesw') for d in inc_dirs])
exts.append( Extension('_curses', ['_cursesmodule.c'],
+ include_dirs = curses_incs,
libraries = curses_libs) )
- elif curses_library == 'curses' and platform != 'darwin':
+ elif curses_library == 'curses' and host_platform != 'darwin':
# OSX has an old Berkeley curses, not good enough for
# the _curses module.
if (self.compiler.find_library_file(lib_dirs, 'terminfo')):
@@ -1272,6 +1372,7 @@ class PyBuildExt(build_ext):
if (module_enabled(exts, '_curses') and
self.compiler.find_library_file(lib_dirs, panel_library)):
exts.append( Extension('_curses_panel', ['_curses_panel.c'],
+ include_dirs = curses_incs,
libraries = [panel_library] + curses_libs) )
else:
missing.append('_curses_panel')
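
The new curses_incs lookup simply widens the header search to the ncursesw subdirectories of the existing include paths; the extra candidate directories it generates look like this (inc_dirs contents are illustrative):

import os
inc_dirs = ['/usr/include', '/usr/local/include']   # illustrative
print [os.path.join(d, 'ncursesw') for d in inc_dirs]
# -> ['/usr/include/ncursesw', '/usr/local/include/ncursesw']
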
@@ -1294,6 +1395,8 @@ class PyBuildExt(build_ext):
zlib_h = zlib_inc[0] + '/zlib.h'
version = '"0.0.0"'
version_req = '"1.1.3"'
+ if host_platform == 'darwin' and is_macosx_sdk_path(zlib_h):
+ zlib_h = os.path.join(macosx_sdk_root(), zlib_h[1:])
fp = open(zlib_h)
while 1:
line = fp.readline()
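
The new darwin branch re-roots the zlib.h path under the active SDK before opening it; the remapping itself is just a path join (paths illustrative):

import os
sdkroot = '/Developer/SDKs/MacOSX10.6.sdk'   # illustrative macosx_sdk_root() value
zlib_h = '/usr/include/zlib.h'
print os.path.join(sdkroot, zlib_h[1:])
# -> /Developer/SDKs/MacOSX10.6.sdk/usr/include/zlib.h
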
@@ -1304,7 +1407,7 @@ class PyBuildExt(build_ext):
break
if version >= version_req:
if (self.compiler.find_library_file(lib_dirs, 'z')):
- if sys.platform == "darwin":
+ if host_platform == "darwin":
zlib_extra_link_args = ('-Wl,-search_paths_first',)
else:
zlib_extra_link_args = ()
@@ -1336,7 +1439,7 @@ class PyBuildExt(build_ext):
# Gustavo Niemeyer's bz2 module.
if (self.compiler.find_library_file(lib_dirs, 'bz2')):
- if sys.platform == "darwin":
+ if host_platform == "darwin":
bz2_extra_link_args = ('-Wl,-search_paths_first',)
else:
bz2_extra_link_args = ()
@@ -1362,6 +1465,7 @@ class PyBuildExt(build_ext):
define_macros = []
expat_lib = ['expat']
expat_sources = []
+ expat_depends = []
else:
expat_inc = [os.path.join(os.getcwd(), srcdir, 'Modules', 'expat')]
define_macros = [
@@ -1371,12 +1475,25 @@ class PyBuildExt(build_ext):
expat_sources = ['expat/xmlparse.c',
'expat/xmlrole.c',
'expat/xmltok.c']
+ expat_depends = ['expat/ascii.h',
+ 'expat/asciitab.h',
+ 'expat/expat.h',
+ 'expat/expat_config.h',
+ 'expat/expat_external.h',
+ 'expat/internal.h',
+ 'expat/latin1tab.h',
+ 'expat/utf8tab.h',
+ 'expat/xmlrole.h',
+ 'expat/xmltok.h',
+ 'expat/xmltok_impl.h'
+ ]
exts.append(Extension('pyexpat',
define_macros = define_macros,
include_dirs = expat_inc,
libraries = expat_lib,
- sources = ['pyexpat.c'] + expat_sources
+ sources = ['pyexpat.c'] + expat_sources,
+ depends = expat_depends,
))
# Fredrik Lundh's cElementTree module. Note that this also
@@ -1389,6 +1506,8 @@ class PyBuildExt(build_ext):
include_dirs = expat_inc,
libraries = expat_lib,
sources = ['_elementtree.c'],
+ depends = ['pyexpat.c'] + expat_sources +
+ expat_depends,
))
else:
missing.append('_elementtree')
@@ -1409,7 +1528,7 @@ class PyBuildExt(build_ext):
if sys.maxint == 0x7fffffff:
# This requires sizeof(int) == sizeof(long) == sizeof(char*)
dl_inc = find_file('dlfcn.h', [], inc_dirs)
- if (dl_inc is not None) and (platform not in ['atheos']):
+ if (dl_inc is not None) and (host_platform not in ['atheos']):
exts.append( Extension('dl', ['dlmodule.c']) )
else:
missing.append('dl')
@@ -1420,29 +1539,29 @@ class PyBuildExt(build_ext):
self.detect_ctypes(inc_dirs, lib_dirs)
# Richard Oudkerk's multiprocessing module
- if platform == 'win32': # Windows
+ if host_platform == 'win32': # Windows
macros = dict()
libraries = ['ws2_32']
- elif platform == 'darwin': # Mac OSX
+ elif host_platform == 'darwin': # Mac OSX
macros = dict()
libraries = []
- elif platform == 'cygwin': # Cygwin
+ elif host_platform == 'cygwin': # Cygwin
macros = dict()
libraries = []
- elif platform in ('freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'freebsd8'):
+ elif host_platform in ('freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'freebsd8'):
# FreeBSD's P1003.1b semaphore support is very experimental
# and has many known problems. (as of June 2008)
macros = dict()
libraries = []
- elif platform.startswith('openbsd'):
+ elif host_platform.startswith('openbsd'):
macros = dict()
libraries = []
- elif platform.startswith('netbsd'):
+ elif host_platform.startswith('netbsd'):
macros = dict()
libraries = []
@@ -1450,7 +1569,7 @@ class PyBuildExt(build_ext):
macros = dict()
libraries = ['rt']
- if platform == 'win32':
+ if host_platform == 'win32':
multiprocessing_srcs = [ '_multiprocessing/multiprocessing.c',
'_multiprocessing/semaphore.c',
'_multiprocessing/pipe_connection.c',
@@ -1477,26 +1596,26 @@ class PyBuildExt(build_ext):
# Platform-specific libraries
- if platform == 'linux2':
+ if host_platform == 'linux2':
# Linux-specific modules
exts.append( Extension('linuxaudiodev', ['linuxaudiodev.c']) )
else:
missing.append('linuxaudiodev')
- if (platform in ('linux2', 'freebsd4', 'freebsd5', 'freebsd6',
+ if (host_platform in ('linux2', 'freebsd4', 'freebsd5', 'freebsd6',
'freebsd7', 'freebsd8')
- or platform.startswith("gnukfreebsd")):
+ or host_platform.startswith("gnukfreebsd")):
exts.append( Extension('ossaudiodev', ['ossaudiodev.c']) )
else:
missing.append('ossaudiodev')
- if platform == 'sunos5':
+ if host_platform == 'sunos5':
# SunOS specific modules
exts.append( Extension('sunaudiodev', ['sunaudiodev.c']) )
else:
missing.append('sunaudiodev')
- if platform == 'darwin':
+ if host_platform == 'darwin':
# _scproxy
exts.append(Extension("_scproxy", [os.path.join(srcdir, "Mac/Modules/_scproxy.c")],
extra_link_args= [
@@ -1505,7 +1624,7 @@ class PyBuildExt(build_ext):
]))
- if platform == 'darwin' and ("--disable-toolbox-glue" not in
+ if host_platform == 'darwin' and ("--disable-toolbox-glue" not in
sysconfig.get_config_var("CONFIG_ARGS")):
if int(os.uname()[2].split('.')[0]) >= 8:
@@ -1610,8 +1729,47 @@ class PyBuildExt(build_ext):
if '_tkinter' not in [e.name for e in self.extensions]:
missing.append('_tkinter')
+## # Uncomment these lines if you want to play with xxmodule.c
+## ext = Extension('xx', ['xxmodule.c'])
+## self.extensions.append(ext)
+
return missing
+ def detect_tkinter_explicitly(self):
+ # Build _tkinter using explicit locations for Tcl/Tk.
+ #
+ # This is enabled when both arguments are given to ./configure:
+ #
+ # --with-tcltk-includes="-I/path/to/tclincludes \
+ # -I/path/to/tkincludes"
+ # --with-tcltk-libs="-L/path/to/tcllibs -ltclm.n \
+ # -L/path/to/tklibs -ltkm.n"
+ #
+ # These values can also be specified or overridden via make:
+ # make TCLTK_INCLUDES="..." TCLTK_LIBS="..."
+ #
+ # This can be useful for building and testing tkinter with multiple
+ # versions of Tcl/Tk. Note that a build of Tk depends on a particular
+ # build of Tcl so you need to specify both arguments and use care when
+ # overriding.
+
+ # The _TCLTK variables are created in the Makefile sharedmods target.
+ tcltk_includes = os.environ.get('_TCLTK_INCLUDES')
+ tcltk_libs = os.environ.get('_TCLTK_LIBS')
+ if not (tcltk_includes and tcltk_libs):
+ # Resume default configuration search.
+ return 0
+
+ extra_compile_args = tcltk_includes.split()
+ extra_link_args = tcltk_libs.split()
+ ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
+ define_macros=[('WITH_APPINIT', 1)],
+ extra_compile_args = extra_compile_args,
+ extra_link_args = extra_link_args,
+ )
+ self.extensions.append(ext)
+ return 1
+
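
When the Makefile exports the two _TCLTK variables, they are simply word-split into compile and link flags for _tkinter; with hypothetical values:

tcltk_includes = '-I/opt/tcl8.6/include -I/opt/tk8.6/include'          # hypothetical
tcltk_libs = '-L/opt/tcl8.6/lib -ltcl8.6 -L/opt/tk8.6/lib -ltk8.6'     # hypothetical
print tcltk_includes.split()   # -> extra_compile_args for _tkinter
print tcltk_libs.split()       # -> extra_link_args for _tkinter
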
def detect_tkinter_darwin(self, inc_dirs, lib_dirs):
# The _tkinter module, using frameworks. Since frameworks are quite
# different the UNIX search logic is not sharable.
@@ -1694,15 +1852,20 @@ class PyBuildExt(build_ext):
self.extensions.append(ext)
return 1
-
def detect_tkinter(self, inc_dirs, lib_dirs):
# The _tkinter module.
+ # Check whether --with-tcltk-includes and --with-tcltk-libs were
+ # configured or passed into the make target. If so, use these values
+ # to build tkinter and bypass the searches for Tcl and Tk in standard
+ # locations.
+ if self.detect_tkinter_explicitly():
+ return
+
# Rather than complicate the code below, detecting and building
# AquaTk is a separate method. Only one Tkinter will be built on
# Darwin - either AquaTk, if it is found, or X11 based Tk.
- platform = self.get_platform()
- if (platform == 'darwin' and
+ if (host_platform == 'darwin' and
self.detect_tkinter_darwin(inc_dirs, lib_dirs)):
return
@@ -1725,7 +1888,7 @@ class PyBuildExt(build_ext):
# Check for the include files on Debian and {Free,Open}BSD, where
# they're put in /usr/include/{tcl,tk}X.Y
dotversion = version
- if '.' not in dotversion and "bsd" in sys.platform.lower():
+ if '.' not in dotversion and "bsd" in host_platform.lower():
# OpenBSD and FreeBSD use Tcl/Tk library names like libtcl83.a,
# but the include subdirs are named like .../include/tcl8.3.
dotversion = dotversion[:-1] + '.' + dotversion[-1]
@@ -1751,7 +1914,7 @@ class PyBuildExt(build_ext):
include_dirs.append(dir)
# Check for various platform-specific directories
- if platform == 'sunos5':
+ if host_platform == 'sunos5':
include_dirs.append('/usr/openwin/include')
added_lib_dirs.append('/usr/openwin/lib')
elif os.path.exists('/usr/X11R6/include'):
@@ -1767,7 +1930,7 @@ class PyBuildExt(build_ext):
added_lib_dirs.append('/usr/X11/lib')
# If Cygwin, then verify that X is installed before proceeding
- if platform == 'cygwin':
+ if host_platform == 'cygwin':
x11_inc = find_file('X11/Xlib.h', [], include_dirs)
if x11_inc is None:
return
@@ -1786,11 +1949,11 @@ class PyBuildExt(build_ext):
libs.append('tk'+ version)
libs.append('tcl'+ version)
- if platform in ['aix3', 'aix4']:
+ if host_platform in ['aix3', 'aix4']:
libs.append('ld')
# Finally, link with the X11 libraries (not appropriate on cygwin)
- if platform != "cygwin":
+ if host_platform != "cygwin":
libs.append('X11')
ext = Extension('_tkinter', ['_tkinter.c', 'tkappinit.c'],
@@ -1801,10 +1964,6 @@ class PyBuildExt(build_ext):
)
self.extensions.append(ext)
-## # Uncomment these lines if you want to play with xxmodule.c
-## ext = Extension('xx', ['xxmodule.c'])
-## self.extensions.append(ext)
-
# XXX handle these, but how to detect?
# *** Uncomment and edit for PIL (TkImaging) extension only:
# -DWITH_PIL -I../Extensions/Imaging/libImaging tkImaging.c \
@@ -1842,7 +2001,7 @@ class PyBuildExt(build_ext):
def configure_ctypes(self, ext):
if not self.use_system_libffi:
- if sys.platform == 'darwin':
+ if host_platform == 'darwin':
return self.configure_ctypes_darwin(ext)
srcdir = sysconfig.get_config_var('srcdir')
@@ -1860,7 +2019,10 @@ class PyBuildExt(build_ext):
ffi_configfile):
from distutils.dir_util import mkpath
mkpath(ffi_builddir)
- config_args = []
+ config_args = [arg for arg in sysconfig.get_config_var("CONFIG_ARGS").split()
+ if (('--host=' in arg) or ('--build=' in arg))]
+ if not self.verbose:
+ config_args.append("-q")
# Pass empty CFLAGS because we'll just append the resulting
# CFLAGS to Python's; -g or -O2 is to be avoided.
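
The new config_args seeding forwards any --host/--build that the top-level configure recorded, so the in-tree libffi is configured for the same target; for example (CONFIG_ARGS value is illustrative):

config_args_var = "--host=arm-linux-gnueabihf --build=x86_64-linux-gnu --prefix=/usr"
print [arg for arg in config_args_var.split()
       if ('--host=' in arg) or ('--build=' in arg)]
# -> ['--host=arm-linux-gnueabihf', '--build=x86_64-linux-gnu']
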
@@ -1902,7 +2064,7 @@ class PyBuildExt(build_ext):
'_ctypes/cfield.c']
depends = ['_ctypes/ctypes.h']
- if sys.platform == 'darwin':
+ if host_platform == 'darwin':
sources.append('_ctypes/malloc_closure.c')
sources.append('_ctypes/darwin/dlfcn_simple.c')
extra_compile_args.append('-DMACOSX')
@@ -1910,7 +2072,7 @@ class PyBuildExt(build_ext):
# XXX Is this still needed?
## extra_link_args.extend(['-read_only_relocs', 'warning'])
- elif sys.platform == 'sunos5':
+ elif host_platform == 'sunos5':
# XXX This shouldn't be necessary; it appears that some
# of the assembler code is non-PIC (i.e. it has relocations
# when it shouldn't. The proper fix would be to rewrite
@@ -1921,7 +2083,7 @@ class PyBuildExt(build_ext):
# finding some -z option for the Sun compiler.
extra_link_args.append('-mimpure-text')
- elif sys.platform.startswith('hp-ux'):
+ elif host_platform.startswith('hp-ux'):
extra_link_args.append('-fPIC')
ext = Extension('_ctypes',
@@ -1938,7 +2100,7 @@ class PyBuildExt(build_ext):
if not '--with-system-ffi' in sysconfig.get_config_var("CONFIG_ARGS"):
return
- if sys.platform == 'darwin':
+ if host_platform == 'darwin':
# OS X 10.5 comes with libffi.dylib; the include files are
# in /usr/include/ffi
inc_dirs.append('/usr/include/ffi')